net/ipv4/tcp.c
1/*
2 * INET An implementation of the TCP/IP protocol suite for the LINUX
3 * operating system. INET is implemented using the BSD Socket
4 * interface as the means of communication with the user level.
5 *
6 * Implementation of the Transmission Control Protocol(TCP).
7 *
8 * Version: $Id: tcp.c,v 1.216 2002/02/01 22:01:04 davem Exp $
9 *
10 * Authors: Ross Biro
11 * Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
12 * Mark Evans, <evansmp@uhura.aston.ac.uk>
13 * Corey Minyard <wf-rch!minyard@relay.EU.net>
14 * Florian La Roche, <flla@stud.uni-sb.de>
15 * Charles Hedrick, <hedrick@klinzhai.rutgers.edu>
16 * Linus Torvalds, <torvalds@cs.helsinki.fi>
17 * Alan Cox, <gw4pts@gw4pts.ampr.org>
18 * Matthew Dillon, <dillon@apollo.west.oic.com>
19 * Arnt Gulbrandsen, <agulbra@nvg.unit.no>
20 * Jorge Cwik, <jorge@laser.satlink.net>
21 *
22 * Fixes:
23 * Alan Cox : Numerous verify_area() calls
24 * Alan Cox : Set the ACK bit on a reset
25 * Alan Cox : Stopped it crashing if it closed while
26 * sk->inuse=1 and was trying to connect
27 * (tcp_err()).
28 * Alan Cox : All icmp error handling was broken
29 * pointers passed were wrong and the
30 * socket was looked up backwards. Nobody
31 * tested any icmp error code obviously.
32 * Alan Cox : tcp_err() now handled properly. It
33 * wakes people on errors. poll
34 * behaves and the icmp error race
35 * has gone by moving it into sock.c
36 * Alan Cox : tcp_send_reset() fixed to work for
37 * everything not just packets for
38 * unknown sockets.
39 * Alan Cox : tcp option processing.
40 * Alan Cox : Reset tweaked (still not 100%) [Had
41 * syn rule wrong]
42 * Herp Rosmanith : More reset fixes
43 * Alan Cox : No longer acks invalid rst frames.
44 * Acking any kind of RST is right out.
45 * Alan Cox : Sets an ignore me flag on an rst
46 * receive otherwise odd bits of prattle
47 * escape still
48 * Alan Cox : Fixed another acking RST frame bug.
49 * Should stop LAN workplace lockups.
50 * Alan Cox : Some tidyups using the new skb list
51 * facilities
52 * Alan Cox : sk->keepopen now seems to work
53 * Alan Cox : Pulls options out correctly on accepts
54 * Alan Cox : Fixed assorted sk->rqueue->next errors
55 * Alan Cox : PSH doesn't end a TCP read. Switched a
56 * bit to skb ops.
57 * Alan Cox : Tidied tcp_data to avoid a potential
58 * nasty.
59 * Alan Cox : Added some better commenting, as the
60 * tcp is hard to follow
61 * Alan Cox : Removed incorrect check for 20 * psh
62 * Michael O'Reilly : ack < copied bug fix.
63 * Johannes Stille : Misc tcp fixes (not all in yet).
64 * Alan Cox : FIN with no memory -> CRASH
65 * Alan Cox : Added socket option proto entries.
66 * Also added awareness of them to accept.
67 * Alan Cox : Added TCP options (SOL_TCP)
68 * Alan Cox : Switched wakeup calls to callbacks,
69 * so the kernel can layer network
70 * sockets.
71 * Alan Cox : Use ip_tos/ip_ttl settings.
72 * Alan Cox : Handle FIN (more) properly (we hope).
73 * Alan Cox : RST frames sent on unsynchronised
74 * state ack error.
75 * Alan Cox : Put in missing check for SYN bit.
76 * Alan Cox : Added tcp_select_window() aka NET2E
77 * window non shrink trick.
78 * Alan Cox : Added a couple of small NET2E timer
79 * fixes
80 * Charles Hedrick : TCP fixes
81 * Toomas Tamm : TCP window fixes
82 * Alan Cox : Small URG fix to rlogin ^C ack fight
83 * Charles Hedrick : Rewrote most of it to actually work
84 * Linus : Rewrote tcp_read() and URG handling
85 * completely
86 * Gerhard Koerting: Fixed some missing timer handling
87 * Matthew Dillon : Reworked TCP machine states as per RFC
88 * Gerhard Koerting: PC/TCP workarounds
89 * Adam Caldwell : Assorted timer/timing errors
90 * Matthew Dillon : Fixed another RST bug
91 * Alan Cox : Move to kernel side addressing changes.
92 * Alan Cox : Beginning work on TCP fastpathing
93 * (not yet usable)
94 * Arnt Gulbrandsen: Turbocharged tcp_check() routine.
95 * Alan Cox : TCP fast path debugging
96 * Alan Cox : Window clamping
97 * Michael Riepe : Bug in tcp_check()
98 * Matt Dillon : More TCP improvements and RST bug fixes
99 * Matt Dillon : Yet more small nasties removed from the
100 * TCP code (Be very nice to this man if
101 * tcp finally works 100%) 8)
102 * Alan Cox : BSD accept semantics.
103 * Alan Cox : Reset on closedown bug.
104 * Peter De Schrijver : ENOTCONN check missing in tcp_sendto().
105 * Michael Pall : Handle poll() after URG properly in
106 * all cases.
107 * Michael Pall : Undo the last fix in tcp_read_urg()
108 * (multi URG PUSH broke rlogin).
109 * Michael Pall : Fix the multi URG PUSH problem in
110 * tcp_readable(), poll() after URG
111 * works now.
112 * Michael Pall : recv(...,MSG_OOB) never blocks in the
113 * BSD api.
114 * Alan Cox : Changed the semantics of sk->socket to
115 * fix a race and a signal problem with
116 * accept() and async I/O.
117 * Alan Cox : Relaxed the rules on tcp_sendto().
118 * Yury Shevchuk : Really fixed accept() blocking problem.
119 * Craig I. Hagan : Allow for BSD compatible TIME_WAIT for
120 * clients/servers which listen in on
121 * fixed ports.
122 * Alan Cox : Cleaned the above up and shrank it to
123 * a sensible code size.
124 * Alan Cox : Self connect lockup fix.
125 * Alan Cox : No connect to multicast.
126 * Ross Biro : Close unaccepted children on master
127 * socket close.
128 * Alan Cox : Reset tracing code.
129 * Alan Cox : Spurious resets on shutdown.
130 * Alan Cox : Giant 15 minute/60 second timer error
131 * Alan Cox : Small whoops in polling before an
132 * accept.
133 * Alan Cox : Kept the state trace facility since
134 * it's handy for debugging.
135 * Alan Cox : More reset handler fixes.
136 * Alan Cox : Started rewriting the code based on
137 * the RFC's for other useful protocol
138 * references see: Comer, KA9Q NOS, and
139 * for a reference on the difference
140 * between specifications and how BSD
141 * works see the 4.4lite source.
142 * A.N.Kuznetsov : Don't time wait on completion of tidy
143 * close.
144 * Linus Torvalds : Fin/Shutdown & copied_seq changes.
145 * Linus Torvalds : Fixed BSD port reuse to work first syn
146 * Alan Cox : Reimplemented timers as per the RFC
147 * and using multiple timers for sanity.
148 * Alan Cox : Small bug fixes, and a lot of new
149 * comments.
150 * Alan Cox : Fixed dual reader crash by locking
151 * the buffers (much like datagram.c)
152 * Alan Cox : Fixed stuck sockets in probe. A probe
153 * now gets fed up of retrying without
154 * (even a no space) answer.
155 * Alan Cox : Extracted closing code better
156 * Alan Cox : Fixed the closing state machine to
157 * resemble the RFC.
158 * Alan Cox : More 'per spec' fixes.
159 * Jorge Cwik : Even faster checksumming.
160 * Alan Cox : tcp_data() doesn't ack illegal PSH
161 * only frames. At least one pc tcp stack
162 * generates them.
163 * Alan Cox : Cache last socket.
164 * Alan Cox : Per route irtt.
165 * Matt Day : poll()->select() match BSD precisely on error
166 * Alan Cox : New buffers
167 * Marc Tamsky : Various sk->prot->retransmits and
168 * sk->retransmits misupdating fixed.
169 * Fixed tcp_write_timeout: stuck close,
170 * and TCP syn retries gets used now.
171 * Mark Yarvis : In tcp_read_wakeup(), don't send an
172 * ack if state is TCP_CLOSED.
173 * Alan Cox : Look up device on a retransmit - routes may
174 * change. Doesn't yet cope with MSS shrink right
175 * but it's a start!
176 * Marc Tamsky : Closing in closing fixes.
177 * Mike Shaver : RFC1122 verifications.
178 * Alan Cox : rcv_saddr errors.
179 * Alan Cox : Block double connect().
180 * Alan Cox : Small hooks for enSKIP.
181 * Alexey Kuznetsov: Path MTU discovery.
182 * Alan Cox : Support soft errors.
183 * Alan Cox : Fix MTU discovery pathological case
184 * when the remote claims no mtu!
185 * Marc Tamsky : TCP_CLOSE fix.
186 * Colin (G3TNE) : Send a reset on syn ack replies in
187 * window but wrong (fixes NT lpd problems)
188 * Pedro Roque : Better TCP window handling, delayed ack.
189 * Joerg Reuter : No modification of locked buffers in
190 * tcp_do_retransmit()
191 * Eric Schenk : Changed receiver side silly window
192 * avoidance algorithm to BSD style
193 * algorithm. This doubles throughput
194 * against machines running Solaris,
195 * and seems to result in general
196 * improvement.
197 * Stefan Magdalinski : adjusted tcp_readable() to fix FIONREAD
198 * Willy Konynenberg : Transparent proxying support.
199 * Mike McLagan : Routing by source
200 * Keith Owens : Do proper merging with partial SKB's in
201 * tcp_do_sendmsg to avoid burstiness.
202 * Eric Schenk : Fix fast close down bug with
203 * shutdown() followed by close().
204 * Andi Kleen : Make poll agree with SIGIO
205 * Salvatore Sanfilippo : Support SO_LINGER with linger == 1 and
206 * lingertime == 0 (RFC 793 ABORT Call)
207 * Hirokazu Takahashi : Use copy_from_user() instead of
208 * csum_and_copy_from_user() if possible.
209 *
210 * This program is free software; you can redistribute it and/or
211 * modify it under the terms of the GNU General Public License
212 * as published by the Free Software Foundation; either version
213 * 2 of the License, or(at your option) any later version.
214 *
215 * Description of States:
216 *
217 * TCP_SYN_SENT sent a connection request, waiting for ack
218 *
219 * TCP_SYN_RECV received a connection request, sent ack,
220 * waiting for final ack in three-way handshake.
221 *
222 * TCP_ESTABLISHED connection established
223 *
224 * TCP_FIN_WAIT1 our side has shutdown, waiting to complete
225 * transmission of remaining buffered data
226 *
227 * TCP_FIN_WAIT2 all buffered data sent, waiting for remote
228 * to shutdown
229 *
230 * TCP_CLOSING both sides have shutdown but we still have
231 * data we have to finish sending
232 *
233 * TCP_TIME_WAIT timeout to catch resent junk before entering
234 * closed, can only be entered from FIN_WAIT2
235 * or CLOSING. Required because the other end
236 * may not have gotten our last ACK causing it
237 * to retransmit the data packet (which we ignore)
238 *
239 * TCP_CLOSE_WAIT remote side has shutdown and is waiting for
240 * us to finish writing our data and to shutdown
241 * (we have to close() to move on to LAST_ACK)
242 *
243 * TCP_LAST_ACK our side has shutdown after remote has
244 * shutdown. There may still be data in our
245 * buffer that we have to finish sending
246 *
247 * TCP_CLOSE socket is finished
248 */
249
250#include <linux/config.h>
251#include <linux/module.h>
252#include <linux/types.h>
253#include <linux/fcntl.h>
254#include <linux/poll.h>
255#include <linux/init.h>
256#include <linux/smp_lock.h>
257#include <linux/fs.h>
258#include <linux/random.h>
259#include <linux/bootmem.h>
260#include <linux/cache.h>
261
262#include <net/icmp.h>
263#include <net/tcp.h>
264#include <net/xfrm.h>
265#include <net/ip.h>
266#include <net/netdma.h>
267
268#include <asm/uaccess.h>
269#include <asm/ioctls.h>
270
271int sysctl_tcp_fin_timeout = TCP_FIN_TIMEOUT;
272
273DEFINE_SNMP_STAT(struct tcp_mib, tcp_statistics) __read_mostly;
274
275atomic_t tcp_orphan_count = ATOMIC_INIT(0);
276
277EXPORT_SYMBOL_GPL(tcp_orphan_count);
278
279int sysctl_tcp_mem[3] __read_mostly;
280int sysctl_tcp_wmem[3] __read_mostly;
281int sysctl_tcp_rmem[3] __read_mostly;
282
283EXPORT_SYMBOL(sysctl_tcp_mem);
284EXPORT_SYMBOL(sysctl_tcp_rmem);
285EXPORT_SYMBOL(sysctl_tcp_wmem);
286
287atomic_t tcp_memory_allocated; /* Current allocated memory. */
288atomic_t tcp_sockets_allocated; /* Current number of TCP sockets. */
289
290EXPORT_SYMBOL(tcp_memory_allocated);
291EXPORT_SYMBOL(tcp_sockets_allocated);
292
293/*
294 * Pressure flag: try to collapse.
295 * Technical note: it is used by multiple contexts non atomically.
296 * All the sk_stream_mem_schedule() is of this nature: accounting
297 * is strict, actions are advisory and have some latency.
298 */
299int tcp_memory_pressure;
300
301EXPORT_SYMBOL(tcp_memory_pressure);
302
303void tcp_enter_memory_pressure(void)
304{
305 if (!tcp_memory_pressure) {
306 NET_INC_STATS(LINUX_MIB_TCPMEMORYPRESSURES);
307 tcp_memory_pressure = 1;
308 }
309}
310
311EXPORT_SYMBOL(tcp_enter_memory_pressure);
312
313/*
314 * Wait for a TCP event.
315 *
316 * Note that we don't need to lock the socket, as the upper poll layers
317 * take care of normal races (between the test and the event) and we don't
318 * go look at any of the socket buffers directly.
319 */
320unsigned int tcp_poll(struct file *file, struct socket *sock, poll_table *wait)
321{
322 unsigned int mask;
323 struct sock *sk = sock->sk;
324 struct tcp_sock *tp = tcp_sk(sk);
325
326 poll_wait(file, sk->sk_sleep, wait);
327 if (sk->sk_state == TCP_LISTEN)
328 return inet_csk_listen_poll(sk);
329
330 /* Socket is not locked. We are protected from async events
331 by poll logic and correct handling of state changes
332 made by other threads is impossible in any case.
333 */
334
335 mask = 0;
336 if (sk->sk_err)
337 mask = POLLERR;
338
339 /*
340 * POLLHUP is certainly not done right. But poll() doesn't
341 * have a notion of HUP in just one direction, and for a
342 * socket the read side is more interesting.
343 *
344 * Some poll() documentation says that POLLHUP is incompatible
345 * with the POLLOUT/POLLWR flags, so somebody should check this
346 * all. But careful, it tends to be safer to return too many
347 * bits than too few, and you can easily break real applications
348 * if you don't tell them that something has hung up!
349 *
350 * Check-me.
351 *
352 * Check number 1. POLLHUP is _UNMASKABLE_ event (see UNIX98 and
353 * our fs/select.c). It means that after we received EOF,
354 * poll always returns immediately, making poll() on write() impossible
355 * in state CLOSE_WAIT. One solution is evident --- to set POLLHUP
356 * if and only if shutdown has been made in both directions.
357 * Actually, it is interesting to look at how Solaris and DUX
358 * solve this dilemma. I would prefer, if POLLHUP were maskable,
359 * then we could set it on SND_SHUTDOWN. BTW examples given
360 * in Stevens' books assume exactly this behaviour, which explains
361 * why POLLHUP is incompatible with POLLOUT. --ANK
362 *
363 * NOTE. Check for TCP_CLOSE is added. The goal is to prevent
364 * blocking on fresh not-connected or disconnected socket. --ANK
365 */
366 if (sk->sk_shutdown == SHUTDOWN_MASK || sk->sk_state == TCP_CLOSE)
367 mask |= POLLHUP;
368 if (sk->sk_shutdown & RCV_SHUTDOWN)
369 mask |= POLLIN | POLLRDNORM | POLLRDHUP;
370
371 /* Connected? */
372 if ((1 << sk->sk_state) & ~(TCPF_SYN_SENT | TCPF_SYN_RECV)) {
373 /* Potential race condition. If read of tp below will
374 * escape above sk->sk_state, we can be illegally awakened
375 * in SYN_* states. */
376 if ((tp->rcv_nxt != tp->copied_seq) &&
377 (tp->urg_seq != tp->copied_seq ||
378 tp->rcv_nxt != tp->copied_seq + 1 ||
379 sock_flag(sk, SOCK_URGINLINE) || !tp->urg_data))
380 mask |= POLLIN | POLLRDNORM;
381
382 if (!(sk->sk_shutdown & SEND_SHUTDOWN)) {
383 if (sk_stream_wspace(sk) >= sk_stream_min_wspace(sk)) {
384 mask |= POLLOUT | POLLWRNORM;
385 } else { /* send SIGIO later */
386 set_bit(SOCK_ASYNC_NOSPACE,
387 &sk->sk_socket->flags);
388 set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
389
390 /* Race breaker. If space is freed after
391 * wspace test but before the flags are set,
392 * IO signal will be lost.
393 */
394 if (sk_stream_wspace(sk) >= sk_stream_min_wspace(sk))
395 mask |= POLLOUT | POLLWRNORM;
396 }
397 }
398
399 if (tp->urg_data & TCP_URG_VALID)
400 mask |= POLLPRI;
401 }
402 return mask;
403}
404
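For reference, a minimal user-space sketch (editorial addition, not part of this file) of how the event mask computed above is typically consumed; the connected socket descriptor is assumed to exist elsewhere, and POLLRDHUP needs _GNU_SOURCE.

#define _GNU_SOURCE
#include <poll.h>
#include <stdio.h>

static void wait_for_tcp_events(int fd)
{
	struct pollfd pfd = { .fd = fd, .events = POLLIN | POLLOUT | POLLRDHUP };

	if (poll(&pfd, 1, 5000) <= 0)
		return;				/* timeout or error */
	if (pfd.revents & (POLLERR | POLLHUP))
		printf("error, or both directions shut down\n");
	if (pfd.revents & POLLRDHUP)
		printf("peer closed its sending side\n");
	if (pfd.revents & POLLIN)
		printf("data or FIN ready to read\n");
	if (pfd.revents & POLLOUT)
		printf("write space available\n");
}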
405int tcp_ioctl(struct sock *sk, int cmd, unsigned long arg)
406{
407 struct tcp_sock *tp = tcp_sk(sk);
408 int answ;
409
410 switch (cmd) {
411 case SIOCINQ:
412 if (sk->sk_state == TCP_LISTEN)
413 return -EINVAL;
414
415 lock_sock(sk);
416 if ((1 << sk->sk_state) & (TCPF_SYN_SENT | TCPF_SYN_RECV))
417 answ = 0;
418 else if (sock_flag(sk, SOCK_URGINLINE) ||
419 !tp->urg_data ||
420 before(tp->urg_seq, tp->copied_seq) ||
421 !before(tp->urg_seq, tp->rcv_nxt)) {
422 answ = tp->rcv_nxt - tp->copied_seq;
423
424 /* Subtract 1, if FIN is in queue. */
425 if (answ && !skb_queue_empty(&sk->sk_receive_queue))
426 answ -=
427 ((struct sk_buff *)sk->sk_receive_queue.prev)->h.th->fin;
428 } else
429 answ = tp->urg_seq - tp->copied_seq;
430 release_sock(sk);
431 break;
432 case SIOCATMARK:
433 answ = tp->urg_data && tp->urg_seq == tp->copied_seq;
434 break;
435 case SIOCOUTQ:
436 if (sk->sk_state == TCP_LISTEN)
437 return -EINVAL;
438
439 if ((1 << sk->sk_state) & (TCPF_SYN_SENT | TCPF_SYN_RECV))
440 answ = 0;
441 else
442 answ = tp->write_seq - tp->snd_una;
443 break;
444 default:
445 return -ENOIOCTLCMD;
446 };
447
448 return put_user(answ, (int __user *)arg);
449}
450
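A short user-space sketch (editorial addition) of the SIOCINQ/SIOCOUTQ paths handled above; error handling is minimal and the descriptor is assumed to be a connected TCP socket.

#include <stdio.h>
#include <sys/ioctl.h>
#include <linux/sockios.h>

static void report_tcp_queues(int fd)
{
	int inq = 0, outq = 0;

	if (ioctl(fd, SIOCINQ, &inq) == 0)	/* readable bytes, FIN excluded */
		printf("receive queue: %d bytes\n", inq);
	if (ioctl(fd, SIOCOUTQ, &outq) == 0)	/* write_seq - snd_una */
		printf("unacknowledged send queue: %d bytes\n", outq);
}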
451static inline void tcp_mark_push(struct tcp_sock *tp, struct sk_buff *skb)
452{
453 TCP_SKB_CB(skb)->flags |= TCPCB_FLAG_PSH;
454 tp->pushed_seq = tp->write_seq;
455}
456
457static inline int forced_push(struct tcp_sock *tp)
458{
459 return after(tp->write_seq, tp->pushed_seq + (tp->max_window >> 1));
460}
461
462static inline void skb_entail(struct sock *sk, struct tcp_sock *tp,
463 struct sk_buff *skb)
464{
465 skb->csum = 0;
466 TCP_SKB_CB(skb)->seq = tp->write_seq;
467 TCP_SKB_CB(skb)->end_seq = tp->write_seq;
468 TCP_SKB_CB(skb)->flags = TCPCB_FLAG_ACK;
469 TCP_SKB_CB(skb)->sacked = 0;
470 skb_header_release(skb);
471 __skb_queue_tail(&sk->sk_write_queue, skb);
472 sk_charge_skb(sk, skb);
473 if (!sk->sk_send_head)
474 sk->sk_send_head = skb;
475 if (tp->nonagle & TCP_NAGLE_PUSH)
476 tp->nonagle &= ~TCP_NAGLE_PUSH;
477}
478
479static inline void tcp_mark_urg(struct tcp_sock *tp, int flags,
480 struct sk_buff *skb)
481{
482 if (flags & MSG_OOB) {
483 tp->urg_mode = 1;
484 tp->snd_up = tp->write_seq;
485 TCP_SKB_CB(skb)->sacked |= TCPCB_URG;
486 }
487}
488
489static inline void tcp_push(struct sock *sk, struct tcp_sock *tp, int flags,
490 int mss_now, int nonagle)
491{
492 if (sk->sk_send_head) {
493 struct sk_buff *skb = sk->sk_write_queue.prev;
494 if (!(flags & MSG_MORE) || forced_push(tp))
495 tcp_mark_push(tp, skb);
496 tcp_mark_urg(tp, flags, skb);
497 __tcp_push_pending_frames(sk, tp, mss_now,
498 (flags & MSG_MORE) ? TCP_NAGLE_CORK : nonagle);
499 }
500}
501
502static ssize_t do_tcp_sendpages(struct sock *sk, struct page **pages, int poffset,
503 size_t psize, int flags)
504{
505 struct tcp_sock *tp = tcp_sk(sk);
506 int mss_now, size_goal;
507 int err;
508 ssize_t copied;
509 long timeo = sock_sndtimeo(sk, flags & MSG_DONTWAIT);
510
511 /* Wait for a connection to finish. */
512 if ((1 << sk->sk_state) & ~(TCPF_ESTABLISHED | TCPF_CLOSE_WAIT))
513 if ((err = sk_stream_wait_connect(sk, &timeo)) != 0)
514 goto out_err;
515
516 clear_bit(SOCK_ASYNC_NOSPACE, &sk->sk_socket->flags);
517
518 mss_now = tcp_current_mss(sk, !(flags&MSG_OOB));
519 size_goal = tp->xmit_size_goal;
520 copied = 0;
521
522 err = -EPIPE;
523 if (sk->sk_err || (sk->sk_shutdown & SEND_SHUTDOWN))
524 goto do_error;
525
526 while (psize > 0) {
527 struct sk_buff *skb = sk->sk_write_queue.prev;
528 struct page *page = pages[poffset / PAGE_SIZE];
529 int copy, i, can_coalesce;
530 int offset = poffset % PAGE_SIZE;
531 int size = min_t(size_t, psize, PAGE_SIZE - offset);
532
533 if (!sk->sk_send_head || (copy = size_goal - skb->len) <= 0) {
534new_segment:
535 if (!sk_stream_memory_free(sk))
536 goto wait_for_sndbuf;
537
538 skb = sk_stream_alloc_pskb(sk, 0, 0,
539 sk->sk_allocation);
540 if (!skb)
541 goto wait_for_memory;
542
543 skb_entail(sk, tp, skb);
544 copy = size_goal;
545 }
546
547 if (copy > size)
548 copy = size;
549
550 i = skb_shinfo(skb)->nr_frags;
551 can_coalesce = skb_can_coalesce(skb, i, page, offset);
552 if (!can_coalesce && i >= MAX_SKB_FRAGS) {
553 tcp_mark_push(tp, skb);
554 goto new_segment;
555 }
556 if (!sk_stream_wmem_schedule(sk, copy))
557 goto wait_for_memory;
558
559 if (can_coalesce) {
560 skb_shinfo(skb)->frags[i - 1].size += copy;
561 } else {
562 get_page(page);
563 skb_fill_page_desc(skb, i, page, offset, copy);
564 }
565
566 skb->len += copy;
567 skb->data_len += copy;
568 skb->truesize += copy;
569 sk->sk_wmem_queued += copy;
570 sk->sk_forward_alloc -= copy;
571 skb->ip_summed = CHECKSUM_HW;
572 tp->write_seq += copy;
573 TCP_SKB_CB(skb)->end_seq += copy;
574 skb_shinfo(skb)->gso_segs = 0;
575
576 if (!copied)
577 TCP_SKB_CB(skb)->flags &= ~TCPCB_FLAG_PSH;
578
579 copied += copy;
580 poffset += copy;
581 if (!(psize -= copy))
582 goto out;
583
584 if (skb->len < mss_now || (flags & MSG_OOB))
585 continue;
586
587 if (forced_push(tp)) {
588 tcp_mark_push(tp, skb);
589 __tcp_push_pending_frames(sk, tp, mss_now, TCP_NAGLE_PUSH);
590 } else if (skb == sk->sk_send_head)
591 tcp_push_one(sk, mss_now);
592 continue;
593
594wait_for_sndbuf:
595 set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
596wait_for_memory:
597 if (copied)
598 tcp_push(sk, tp, flags & ~MSG_MORE, mss_now, TCP_NAGLE_PUSH);
599
600 if ((err = sk_stream_wait_memory(sk, &timeo)) != 0)
601 goto do_error;
602
603 mss_now = tcp_current_mss(sk, !(flags&MSG_OOB));
604 size_goal = tp->xmit_size_goal;
605 }
606
607out:
608 if (copied)
609 tcp_push(sk, tp, flags, mss_now, tp->nonagle);
610 return copied;
611
612do_error:
613 if (copied)
614 goto out;
615out_err:
616 return sk_stream_error(sk, flags, err);
617}
618
619ssize_t tcp_sendpage(struct socket *sock, struct page *page, int offset,
620 size_t size, int flags)
621{
622 ssize_t res;
623 struct sock *sk = sock->sk;
624
625 if (!(sk->sk_route_caps & NETIF_F_SG) ||
626     !(sk->sk_route_caps & NETIF_F_ALL_CSUM))
627 return sock_no_sendpage(sock, page, offset, size, flags);
628
629 lock_sock(sk);
630 TCP_CHECK_TIMER(sk);
631 res = do_tcp_sendpages(sk, &page, offset, size, flags);
632 TCP_CHECK_TIMER(sk);
633 release_sock(sk);
634 return res;
635}
636
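tcp_sendpage() is the kernel side of zero-copy transmits such as sendfile() on a TCP socket (falling back to sock_no_sendpage() when the route lacks SG or checksum offload). A hedged user-space sketch of that path follows; the descriptors are assumed to be set up elsewhere.

#include <sys/types.h>
#include <sys/stat.h>
#include <sys/sendfile.h>

/* Hypothetical helper: stream a whole file over an established TCP socket. */
static int send_file_over_tcp(int sock_fd, int file_fd)
{
	struct stat st;
	off_t off = 0;

	if (fstat(file_fd, &st) < 0)
		return -1;
	while (off < st.st_size) {
		ssize_t n = sendfile(sock_fd, file_fd, &off, st.st_size - off);

		if (n <= 0)
			return -1;	/* caller decides how to handle EINTR/EAGAIN */
	}
	return 0;
}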
637#define TCP_PAGE(sk) (sk->sk_sndmsg_page)
638#define TCP_OFF(sk) (sk->sk_sndmsg_off)
639
640static inline int select_size(struct sock *sk, struct tcp_sock *tp)
641{
642 int tmp = tp->mss_cache;
643
644 if (sk->sk_route_caps & NETIF_F_SG) {
645 if (sk->sk_route_caps & NETIF_F_TSO)
646 tmp = 0;
647 else {
648 int pgbreak = SKB_MAX_HEAD(MAX_TCP_HEADER);
649
650 if (tmp >= pgbreak &&
651 tmp <= pgbreak + (MAX_SKB_FRAGS - 1) * PAGE_SIZE)
652 tmp = pgbreak;
653 }
654 }
655
656 return tmp;
657}
658
659int tcp_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
660 size_t size)
661{
662 struct iovec *iov;
663 struct tcp_sock *tp = tcp_sk(sk);
664 struct sk_buff *skb;
665 int iovlen, flags;
666 int mss_now, size_goal;
667 int err, copied;
668 long timeo;
669
670 lock_sock(sk);
671 TCP_CHECK_TIMER(sk);
672
673 flags = msg->msg_flags;
674 timeo = sock_sndtimeo(sk, flags & MSG_DONTWAIT);
675
676 /* Wait for a connection to finish. */
677 if ((1 << sk->sk_state) & ~(TCPF_ESTABLISHED | TCPF_CLOSE_WAIT))
678 if ((err = sk_stream_wait_connect(sk, &timeo)) != 0)
679 goto out_err;
680
681 /* This should be in poll */
682 clear_bit(SOCK_ASYNC_NOSPACE, &sk->sk_socket->flags);
683
684 mss_now = tcp_current_mss(sk, !(flags&MSG_OOB));
685 size_goal = tp->xmit_size_goal;
686
687 /* Ok commence sending. */
688 iovlen = msg->msg_iovlen;
689 iov = msg->msg_iov;
690 copied = 0;
691
692 err = -EPIPE;
693 if (sk->sk_err || (sk->sk_shutdown & SEND_SHUTDOWN))
694 goto do_error;
695
696 while (--iovlen >= 0) {
697 int seglen = iov->iov_len;
698 unsigned char __user *from = iov->iov_base;
699
700 iov++;
701
702 while (seglen > 0) {
703 int copy;
704
705 skb = sk->sk_write_queue.prev;
706
707 if (!sk->sk_send_head ||
708 (copy = size_goal - skb->len) <= 0) {
709
710new_segment:
711 /* Allocate new segment. If the interface is SG,
712 * allocate skb fitting to single page.
713 */
714 if (!sk_stream_memory_free(sk))
715 goto wait_for_sndbuf;
716
717 skb = sk_stream_alloc_pskb(sk, select_size(sk, tp),
718 0, sk->sk_allocation);
719 if (!skb)
720 goto wait_for_memory;
721
722 /*
723 * Check whether we can use HW checksum.
724 */
725 if (sk->sk_route_caps & NETIF_F_ALL_CSUM)
726 skb->ip_summed = CHECKSUM_HW;
727
728 skb_entail(sk, tp, skb);
729 copy = size_goal;
730 }
731
732 /* Try to append data to the end of skb. */
733 if (copy > seglen)
734 copy = seglen;
735
736 /* Where to copy to? */
737 if (skb_tailroom(skb) > 0) {
738 /* We have some space in skb head. Superb! */
739 if (copy > skb_tailroom(skb))
740 copy = skb_tailroom(skb);
741 if ((err = skb_add_data(skb, from, copy)) != 0)
742 goto do_fault;
743 } else {
744 int merge = 0;
745 int i = skb_shinfo(skb)->nr_frags;
746 struct page *page = TCP_PAGE(sk);
747 int off = TCP_OFF(sk);
748
749 if (skb_can_coalesce(skb, i, page, off) &&
750 off != PAGE_SIZE) {
751 /* We can extend the last page
752 * fragment. */
753 merge = 1;
754 } else if (i == MAX_SKB_FRAGS ||
755 (!i &&
756 !(sk->sk_route_caps & NETIF_F_SG))) {
757 /* Need to add new fragment and cannot
758 * do this because interface is non-SG,
759 * or because all the page slots are
760 * busy. */
761 tcp_mark_push(tp, skb);
762 goto new_segment;
763 } else if (page) {
764 if (off == PAGE_SIZE) {
765 put_page(page);
766 TCP_PAGE(sk) = page = NULL;
767 off = 0;
768 }
769 } else
770 off = 0;
771
772 if (copy > PAGE_SIZE - off)
773 copy = PAGE_SIZE - off;
774
775 if (!sk_stream_wmem_schedule(sk, copy))
776 goto wait_for_memory;
777
778 if (!page) {
779 /* Allocate new cache page. */
780 if (!(page = sk_stream_alloc_page(sk)))
781 goto wait_for_memory;
782 }
783
784 /* Time to copy data. We are close to
785 * the end! */
786 err = skb_copy_to_page(sk, from, skb, page,
787 off, copy);
788 if (err) {
789 /* If this page was new, give it to the
790 * socket so it does not get leaked.
791 */
792 if (!TCP_PAGE(sk)) {
793 TCP_PAGE(sk) = page;
794 TCP_OFF(sk) = 0;
795 }
796 goto do_error;
797 }
798
799 /* Update the skb. */
800 if (merge) {
801 skb_shinfo(skb)->frags[i - 1].size +=
802 copy;
803 } else {
804 skb_fill_page_desc(skb, i, page, off, copy);
805 if (TCP_PAGE(sk)) {
806 get_page(page);
807 } else if (off + copy < PAGE_SIZE) {
808 get_page(page);
809 TCP_PAGE(sk) = page;
810 }
811 }
812
813 TCP_OFF(sk) = off + copy;
814 }
815
816 if (!copied)
817 TCP_SKB_CB(skb)->flags &= ~TCPCB_FLAG_PSH;
818
819 tp->write_seq += copy;
820 TCP_SKB_CB(skb)->end_seq += copy;
821 skb_shinfo(skb)->gso_segs = 0;
822
823 from += copy;
824 copied += copy;
825 if ((seglen -= copy) == 0 && iovlen == 0)
826 goto out;
827
828 if (skb->len < mss_now || (flags & MSG_OOB))
829 continue;
830
831 if (forced_push(tp)) {
832 tcp_mark_push(tp, skb);
833 __tcp_push_pending_frames(sk, tp, mss_now, TCP_NAGLE_PUSH);
834 } else if (skb == sk->sk_send_head)
835 tcp_push_one(sk, mss_now);
836 continue;
837
838wait_for_sndbuf:
839 set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
840wait_for_memory:
841 if (copied)
842 tcp_push(sk, tp, flags & ~MSG_MORE, mss_now, TCP_NAGLE_PUSH);
843
844 if ((err = sk_stream_wait_memory(sk, &timeo)) != 0)
845 goto do_error;
846
847 mss_now = tcp_current_mss(sk, !(flags&MSG_OOB));
848 size_goal = tp->xmit_size_goal;
849 }
850 }
851
852out:
853 if (copied)
854 tcp_push(sk, tp, flags, mss_now, tp->nonagle);
855 TCP_CHECK_TIMER(sk);
856 release_sock(sk);
857 return copied;
858
859do_fault:
860 if (!skb->len) {
861 if (sk->sk_send_head == skb)
862 sk->sk_send_head = NULL;
863 __skb_unlink(skb, &sk->sk_write_queue);
864 sk_stream_free_skb(sk, skb);
865 }
866
867do_error:
868 if (copied)
869 goto out;
870out_err:
871 err = sk_stream_error(sk, flags, err);
872 TCP_CHECK_TIMER(sk);
873 release_sock(sk);
874 return err;
875}
876
877/*
878 * Handle reading urgent data. BSD has very simple semantics for
879 * this, no blocking and very strange errors 8)
880 */
881
882static int tcp_recv_urg(struct sock *sk, long timeo,
883 struct msghdr *msg, int len, int flags,
884 int *addr_len)
885{
886 struct tcp_sock *tp = tcp_sk(sk);
887
888 /* No URG data to read. */
889 if (sock_flag(sk, SOCK_URGINLINE) || !tp->urg_data ||
890 tp->urg_data == TCP_URG_READ)
891 return -EINVAL; /* Yes this is right ! */
892
893 if (sk->sk_state == TCP_CLOSE && !sock_flag(sk, SOCK_DONE))
894 return -ENOTCONN;
895
896 if (tp->urg_data & TCP_URG_VALID) {
897 int err = 0;
898 char c = tp->urg_data;
899
900 if (!(flags & MSG_PEEK))
901 tp->urg_data = TCP_URG_READ;
902
903 /* Read urgent data. */
904 msg->msg_flags |= MSG_OOB;
905
906 if (len > 0) {
907 if (!(flags & MSG_TRUNC))
908 err = memcpy_toiovec(msg->msg_iov, &c, 1);
909 len = 1;
910 } else
911 msg->msg_flags |= MSG_TRUNC;
912
913 return err ? -EFAULT : len;
914 }
915
916 if (sk->sk_state == TCP_CLOSE || (sk->sk_shutdown & RCV_SHUTDOWN))
917 return 0;
918
919 /* Fixed the recv(..., MSG_OOB) behaviour. BSD docs and
920 * the available implementations agree in this case:
921 * this call should never block, independent of the
922 * blocking state of the socket.
923 * Mike <pall@rz.uni-karlsruhe.de>
924 */
925 return -EAGAIN;
926}
927
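The user-space counterpart of the function above, as a small sketch (editorial addition): with SO_OOBINLINE off, the urgent byte is fetched with MSG_OOB and, per the BSD semantics described in the comment, the call never blocks.

#include <sys/socket.h>

/* Hypothetical helper: returns 0 and stores the OOB byte, -1 otherwise. */
static int read_urgent_byte(int fd, char *byte)
{
	return recv(fd, byte, 1, MSG_OOB) == 1 ? 0 : -1;
}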
928/* Clean up the receive buffer for full frames taken by the user,
929 * then send an ACK if necessary. COPIED is the number of bytes
930 * tcp_recvmsg has given to the user so far, it speeds up the
931 * calculation of whether or not we must ACK for the sake of
932 * a window update.
933 */
934void tcp_cleanup_rbuf(struct sock *sk, int copied)
935{
936 struct tcp_sock *tp = tcp_sk(sk);
937 int time_to_ack = 0;
938
939#if TCP_DEBUG
940 struct sk_buff *skb = skb_peek(&sk->sk_receive_queue);
941
942 BUG_TRAP(!skb || before(tp->copied_seq, TCP_SKB_CB(skb)->end_seq));
943#endif
944
945 if (inet_csk_ack_scheduled(sk)) {
946 const struct inet_connection_sock *icsk = inet_csk(sk);
947 /* Delayed ACKs frequently hit locked sockets during bulk
948 * receive. */
949 if (icsk->icsk_ack.blocked ||
950 /* Once-per-two-segments ACK was not sent by tcp_input.c */
951 tp->rcv_nxt - tp->rcv_wup > icsk->icsk_ack.rcv_mss ||
952 /*
953 * If this read emptied read buffer, we send ACK, if
954 * connection is not bidirectional, user drained
955 * receive buffer and there was a small segment
956 * in queue.
957 */
958 (copied > 0 && (icsk->icsk_ack.pending & ICSK_ACK_PUSHED) &&
959 !icsk->icsk_ack.pingpong && !atomic_read(&sk->sk_rmem_alloc)))
960 time_to_ack = 1;
961 }
962
963 /* We send an ACK if we can now advertise a non-zero window
964 * which has been raised "significantly".
965 *
966 * Even if window raised up to infinity, do not send window open ACK
967 * in states, where we will not receive more. It is useless.
968 */
969 if (copied > 0 && !time_to_ack && !(sk->sk_shutdown & RCV_SHUTDOWN)) {
970 __u32 rcv_window_now = tcp_receive_window(tp);
971
972 /* Optimize, __tcp_select_window() is not cheap. */
973 if (2*rcv_window_now <= tp->window_clamp) {
974 __u32 new_window = __tcp_select_window(sk);
975
976 /* Send ACK now, if this read freed lots of space
977 * in our buffer. Certainly, new_window is new window.
978 * We can advertise it now, if it is not less than current one.
979 * "Lots" means "at least twice" here.
980 */
981 if (new_window && new_window >= 2 * rcv_window_now)
982 time_to_ack = 1;
983 }
984 }
985 if (time_to_ack)
986 tcp_send_ack(sk);
987}
988
989static void tcp_prequeue_process(struct sock *sk)
990{
991 struct sk_buff *skb;
992 struct tcp_sock *tp = tcp_sk(sk);
993
994 NET_INC_STATS_USER(LINUX_MIB_TCPPREQUEUED);
995
996 /* RX process wants to run with disabled BHs, though it is not
997 * necessary */
998 local_bh_disable();
999 while ((skb = __skb_dequeue(&tp->ucopy.prequeue)) != NULL)
1000 sk->sk_backlog_rcv(sk, skb);
1001 local_bh_enable();
1002
1003 /* Clear memory counter. */
1004 tp->ucopy.memory = 0;
1005}
1006
1007static inline struct sk_buff *tcp_recv_skb(struct sock *sk, u32 seq, u32 *off)
1008{
1009 struct sk_buff *skb;
1010 u32 offset;
1011
1012 skb_queue_walk(&sk->sk_receive_queue, skb) {
1013 offset = seq - TCP_SKB_CB(skb)->seq;
1014 if (skb->h.th->syn)
1015 offset--;
1016 if (offset < skb->len || skb->h.th->fin) {
1017 *off = offset;
1018 return skb;
1019 }
1020 }
1021 return NULL;
1022}
1023
1024/*
1025 * This routine provides an alternative to tcp_recvmsg() for routines
1026 * that would like to handle copying from skbuffs directly in 'sendfile'
1027 * fashion.
1028 * Note:
1029 * - It is assumed that the socket was locked by the caller.
1030 * - The routine does not block.
1031 * - At present, there is no support for reading OOB data
1032 * or for 'peeking' the socket using this routine
1033 * (although both would be easy to implement).
1034 */
1035int tcp_read_sock(struct sock *sk, read_descriptor_t *desc,
1036 sk_read_actor_t recv_actor)
1037{
1038 struct sk_buff *skb;
1039 struct tcp_sock *tp = tcp_sk(sk);
1040 u32 seq = tp->copied_seq;
1041 u32 offset;
1042 int copied = 0;
1043
1044 if (sk->sk_state == TCP_LISTEN)
1045 return -ENOTCONN;
1046 while ((skb = tcp_recv_skb(sk, seq, &offset)) != NULL) {
1047 if (offset < skb->len) {
1048 size_t used, len;
1049
1050 len = skb->len - offset;
1051 /* Stop reading if we hit a patch of urgent data */
1052 if (tp->urg_data) {
1053 u32 urg_offset = tp->urg_seq - seq;
1054 if (urg_offset < len)
1055 len = urg_offset;
1056 if (!len)
1057 break;
1058 }
1059 used = recv_actor(desc, skb, offset, len);
1060 if (used <= len) {
1061 seq += used;
1062 copied += used;
1063 offset += used;
1064 }
1065 if (offset != skb->len)
1066 break;
1067 }
1068 if (skb->h.th->fin) {
1069 sk_eat_skb(sk, skb, 0);
1070 ++seq;
1071 break;
1072 }
1073 sk_eat_skb(sk, skb, 0);
1074 if (!desc->count)
1075 break;
1076 }
1077 tp->copied_seq = seq;
1078
1079 tcp_rcv_space_adjust(sk);
1080
1081 /* Clean up data we have read: This will do ACK frames. */
1082 if (copied)
1083 tcp_cleanup_rbuf(sk, copied);
1084 return copied;
1085}
1086
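A minimal in-kernel sketch (hypothetical, not from this file) of a recv_actor suitable for tcp_read_sock(); it assumes the caller holds the socket lock and has pointed desc->arg.data at a kernel buffer sized by desc->count.

/* Hypothetical actor: copy the skb data linearly into a kernel buffer. */
static int example_recv_actor(read_descriptor_t *desc, struct sk_buff *skb,
			      unsigned int offset, size_t len)
{
	char *buf = desc->arg.data;
	size_t want = min_t(size_t, len, desc->count);

	if (skb_copy_bits(skb, offset, buf, want))
		return 0;		/* stop reading on copy failure */
	desc->arg.data = buf + want;
	desc->count -= want;
	return want;
}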
1087/*
1088 * This routine copies from a sock struct into the user buffer.
1089 *
1090 * Technical note: in 2.3 we work on _locked_ socket, so that
1091 * tricks with *seq access order and skb->users are not required.
1092 * Probably, code can be easily improved even more.
1093 */
1094
1095int tcp_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
1096 size_t len, int nonblock, int flags, int *addr_len)
1097{
1098 struct tcp_sock *tp = tcp_sk(sk);
1099 int copied = 0;
1100 u32 peek_seq;
1101 u32 *seq;
1102 unsigned long used;
1103 int err;
1104 int target; /* Read at least this many bytes */
1105 long timeo;
1106 struct task_struct *user_recv = NULL;
1107 int copied_early = 0;
1108
1109 lock_sock(sk);
1110
1111 TCP_CHECK_TIMER(sk);
1112
1113 err = -ENOTCONN;
1114 if (sk->sk_state == TCP_LISTEN)
1115 goto out;
1116
1117 timeo = sock_rcvtimeo(sk, nonblock);
1118
1119 /* Urgent data needs to be handled specially. */
1120 if (flags & MSG_OOB)
1121 goto recv_urg;
1122
1123 seq = &tp->copied_seq;
1124 if (flags & MSG_PEEK) {
1125 peek_seq = tp->copied_seq;
1126 seq = &peek_seq;
1127 }
1128
1129 target = sock_rcvlowat(sk, flags & MSG_WAITALL, len);
1130
1131#ifdef CONFIG_NET_DMA
1132 tp->ucopy.dma_chan = NULL;
1133 preempt_disable();
1134 if ((len > sysctl_tcp_dma_copybreak) && !(flags & MSG_PEEK) &&
1135 !sysctl_tcp_low_latency && __get_cpu_var(softnet_data.net_dma)) {
1136 preempt_enable_no_resched();
1137 tp->ucopy.pinned_list = dma_pin_iovec_pages(msg->msg_iov, len);
1138 } else
1139 preempt_enable_no_resched();
1140#endif
1141
1142 do {
1143 struct sk_buff *skb;
1144 u32 offset;
1145
1146 /* Are we at urgent data? Stop if we have read anything or have SIGURG pending. */
1147 if (tp->urg_data && tp->urg_seq == *seq) {
1148 if (copied)
1149 break;
1150 if (signal_pending(current)) {
1151 copied = timeo ? sock_intr_errno(timeo) : -EAGAIN;
1152 break;
1153 }
1154 }
1155
1156 /* Next get a buffer. */
1157
1158 skb = skb_peek(&sk->sk_receive_queue);
1159 do {
1160 if (!skb)
1161 break;
1162
1163 /* Now that we have two receive queues this
1164 * shouldn't happen.
1165 */
1166 if (before(*seq, TCP_SKB_CB(skb)->seq)) {
1167 printk(KERN_INFO "recvmsg bug: copied %X "
1168 "seq %X\n", *seq, TCP_SKB_CB(skb)->seq);
1169 break;
1170 }
1171 offset = *seq - TCP_SKB_CB(skb)->seq;
1172 if (skb->h.th->syn)
1173 offset--;
1174 if (offset < skb->len)
1175 goto found_ok_skb;
1176 if (skb->h.th->fin)
1177 goto found_fin_ok;
1178 BUG_TRAP(flags & MSG_PEEK);
1179 skb = skb->next;
1180 } while (skb != (struct sk_buff *)&sk->sk_receive_queue);
1181
1182 /* Well, if we have backlog, try to process it now. */
1183
1184 if (copied >= target && !sk->sk_backlog.tail)
1185 break;
1186
1187 if (copied) {
1188 if (sk->sk_err ||
1189 sk->sk_state == TCP_CLOSE ||
1190 (sk->sk_shutdown & RCV_SHUTDOWN) ||
1191 !timeo ||
1192 signal_pending(current) ||
1193 (flags & MSG_PEEK))
1194 break;
1195 } else {
1196 if (sock_flag(sk, SOCK_DONE))
1197 break;
1198
1199 if (sk->sk_err) {
1200 copied = sock_error(sk);
1201 break;
1202 }
1203
1204 if (sk->sk_shutdown & RCV_SHUTDOWN)
1205 break;
1206
1207 if (sk->sk_state == TCP_CLOSE) {
1208 if (!sock_flag(sk, SOCK_DONE)) {
1209 /* This occurs when user tries to read
1210 * from never connected socket.
1211 */
1212 copied = -ENOTCONN;
1213 break;
1214 }
1215 break;
1216 }
1217
1218 if (!timeo) {
1219 copied = -EAGAIN;
1220 break;
1221 }
1222
1223 if (signal_pending(current)) {
1224 copied = sock_intr_errno(timeo);
1225 break;
1226 }
1227 }
1228
1229 tcp_cleanup_rbuf(sk, copied);
1230
1231 if (!sysctl_tcp_low_latency && tp->ucopy.task == user_recv) {
1232 /* Install new reader */
1233 if (!user_recv && !(flags & (MSG_TRUNC | MSG_PEEK))) {
1234 user_recv = current;
1235 tp->ucopy.task = user_recv;
1236 tp->ucopy.iov = msg->msg_iov;
1237 }
1238
1239 tp->ucopy.len = len;
1240
1241 BUG_TRAP(tp->copied_seq == tp->rcv_nxt ||
1242 (flags & (MSG_PEEK | MSG_TRUNC)));
1243
1244 /* Ugly... If prequeue is not empty, we have to
1245 * process it before releasing socket, otherwise
1246 * order will be broken at second iteration.
1247 * More elegant solution is required!!!
1248 *
1249 * Look: we have the following (pseudo)queues:
1250 *
1251 * 1. packets in flight
1252 * 2. backlog
1253 * 3. prequeue
1254 * 4. receive_queue
1255 *
1256 * Each queue can be processed only if the next ones
1257 * are empty. At this point we have empty receive_queue.
1258 * But prequeue _can_ be not empty after 2nd iteration,
1259 * when we jumped to start of loop because backlog
1260 * processing added something to receive_queue.
1261 * We cannot release_sock(), because backlog contains
1262 * packets arrived _after_ prequeued ones.
1263 *
1264 * Shortly, algorithm is clear --- to process all
1265 * the queues in order. We could make it more directly,
1266 * requeueing packets from backlog to prequeue, if it
1267 * is not empty. It is more elegant, but eats cycles,
1268 * unfortunately.
1269 */
1270 if (!skb_queue_empty(&tp->ucopy.prequeue))
1271 goto do_prequeue;
1272
1273 /* __ Set realtime policy in scheduler __ */
1274 }
1275
1276 if (copied >= target) {
1277 /* Do not sleep, just process backlog. */
1278 release_sock(sk);
1279 lock_sock(sk);
1280 } else
1281 sk_wait_data(sk, &timeo);
1282
1283#ifdef CONFIG_NET_DMA
1284 tp->ucopy.wakeup = 0;
1285#endif
1286
1287 if (user_recv) {
1288 int chunk;
1289
1290 /* __ Restore normal policy in scheduler __ */
1291
1292 if ((chunk = len - tp->ucopy.len) != 0) {
1293 NET_ADD_STATS_USER(LINUX_MIB_TCPDIRECTCOPYFROMBACKLOG, chunk);
1294 len -= chunk;
1295 copied += chunk;
1296 }
1297
1298 if (tp->rcv_nxt == tp->copied_seq &&
1299 !skb_queue_empty(&tp->ucopy.prequeue)) {
1300do_prequeue:
1301 tcp_prequeue_process(sk);
1302
1303 if ((chunk = len - tp->ucopy.len) != 0) {
1304 NET_ADD_STATS_USER(LINUX_MIB_TCPDIRECTCOPYFROMPREQUEUE, chunk);
1305 len -= chunk;
1306 copied += chunk;
1307 }
1308 }
1309 }
1310 if ((flags & MSG_PEEK) && peek_seq != tp->copied_seq) {
1311 if (net_ratelimit())
1312 printk(KERN_DEBUG "TCP(%s:%d): Application bug, race in MSG_PEEK.\n",
1313 current->comm, current->pid);
1314 peek_seq = tp->copied_seq;
1315 }
1316 continue;
1317
1318 found_ok_skb:
1319 /* Ok so how much can we use? */
1320 used = skb->len - offset;
1321 if (len < used)
1322 used = len;
1323
1324 /* Do we have urgent data here? */
1325 if (tp->urg_data) {
1326 u32 urg_offset = tp->urg_seq - *seq;
1327 if (urg_offset < used) {
1328 if (!urg_offset) {
1329 if (!sock_flag(sk, SOCK_URGINLINE)) {
1330 ++*seq;
1331 offset++;
1332 used--;
1333 if (!used)
1334 goto skip_copy;
1335 }
1336 } else
1337 used = urg_offset;
1338 }
1339 }
1340
1341 if (!(flags & MSG_TRUNC)) {
1342#ifdef CONFIG_NET_DMA
1343 if (!tp->ucopy.dma_chan && tp->ucopy.pinned_list)
1344 tp->ucopy.dma_chan = get_softnet_dma();
1345
1346 if (tp->ucopy.dma_chan) {
1347 tp->ucopy.dma_cookie = dma_skb_copy_datagram_iovec(
1348 tp->ucopy.dma_chan, skb, offset,
1349 msg->msg_iov, used,
1350 tp->ucopy.pinned_list);
1351
1352 if (tp->ucopy.dma_cookie < 0) {
1353
1354 printk(KERN_ALERT "dma_cookie < 0\n");
1355
1356 /* Exception. Bailout! */
1357 if (!copied)
1358 copied = -EFAULT;
1359 break;
1360 }
1361 if ((offset + used) == skb->len)
1362 copied_early = 1;
1363
1364 } else
1365#endif
1366 {
1367 err = skb_copy_datagram_iovec(skb, offset,
1368 msg->msg_iov, used);
1369 if (err) {
1370 /* Exception. Bailout! */
1371 if (!copied)
1372 copied = -EFAULT;
1373 break;
1374 }
1375 }
1376 }
1377
1378 *seq += used;
1379 copied += used;
1380 len -= used;
1381
1382 tcp_rcv_space_adjust(sk);
1383
1384skip_copy:
1385 if (tp->urg_data && after(tp->copied_seq, tp->urg_seq)) {
1386 tp->urg_data = 0;
1387 tcp_fast_path_check(sk, tp);
1388 }
1389 if (used + offset < skb->len)
1390 continue;
1391
1392 if (skb->h.th->fin)
1393 goto found_fin_ok;
1394 if (!(flags & MSG_PEEK)) {
1395 sk_eat_skb(sk, skb, copied_early);
1396 copied_early = 0;
1397 }
1398 continue;
1399
1400 found_fin_ok:
1401 /* Process the FIN. */
1402 ++*seq;
1403 if (!(flags & MSG_PEEK)) {
1404 sk_eat_skb(sk, skb, copied_early);
1405 copied_early = 0;
1406 }
1407 break;
1408 } while (len > 0);
1409
1410 if (user_recv) {
1411 if (!skb_queue_empty(&tp->ucopy.prequeue)) {
1412 int chunk;
1413
1414 tp->ucopy.len = copied > 0 ? len : 0;
1415
1416 tcp_prequeue_process(sk);
1417
1418 if (copied > 0 && (chunk = len - tp->ucopy.len) != 0) {
1419 NET_ADD_STATS_USER(LINUX_MIB_TCPDIRECTCOPYFROMPREQUEUE, chunk);
1420 len -= chunk;
1421 copied += chunk;
1422 }
1423 }
1424
1425 tp->ucopy.task = NULL;
1426 tp->ucopy.len = 0;
1427 }
1428
1429#ifdef CONFIG_NET_DMA
1430 if (tp->ucopy.dma_chan) {
1431 struct sk_buff *skb;
1432 dma_cookie_t done, used;
1433
1434 dma_async_memcpy_issue_pending(tp->ucopy.dma_chan);
1435
1436 while (dma_async_memcpy_complete(tp->ucopy.dma_chan,
1437 tp->ucopy.dma_cookie, &done,
1438 &used) == DMA_IN_PROGRESS) {
1439 /* do partial cleanup of sk_async_wait_queue */
1440 while ((skb = skb_peek(&sk->sk_async_wait_queue)) &&
1441 (dma_async_is_complete(skb->dma_cookie, done,
1442 used) == DMA_SUCCESS)) {
1443 __skb_dequeue(&sk->sk_async_wait_queue);
1444 kfree_skb(skb);
1445 }
1446 }
1447
1448 /* Safe to free early-copied skbs now */
1449 __skb_queue_purge(&sk->sk_async_wait_queue);
1450 dma_chan_put(tp->ucopy.dma_chan);
1451 tp->ucopy.dma_chan = NULL;
1452 }
1453 if (tp->ucopy.pinned_list) {
1454 dma_unpin_iovec_pages(tp->ucopy.pinned_list);
1455 tp->ucopy.pinned_list = NULL;
1456 }
1457#endif
1458
1459 /* According to UNIX98, msg_name/msg_namelen are ignored
1460 * on connected socket. I was just happy when found this 8) --ANK
1461 */
1462
1463 /* Clean up data we have read: This will do ACK frames. */
1464 tcp_cleanup_rbuf(sk, copied);
1465
1466 TCP_CHECK_TIMER(sk);
1467 release_sock(sk);
1468 return copied;
1469
1470out:
1471 TCP_CHECK_TIMER(sk);
1472 release_sock(sk);
1473 return err;
1474
1475recv_urg:
1476 err = tcp_recv_urg(sk, timeo, msg, len, flags, addr_len);
1477 goto out;
1478}
1479
1480/*
1481 * State processing on a close. This implements the state shift for
1482 * sending our FIN frame. Note that we only send a FIN for some
1483 * states. A shutdown() may have already sent the FIN, or we may be
1484 * closed.
1485 */
1486
1487 static const unsigned char new_state[16] = {
1488 /* current state: new state: action: */
1489 /* (Invalid) */ TCP_CLOSE,
1490 /* TCP_ESTABLISHED */ TCP_FIN_WAIT1 | TCP_ACTION_FIN,
1491 /* TCP_SYN_SENT */ TCP_CLOSE,
1492 /* TCP_SYN_RECV */ TCP_FIN_WAIT1 | TCP_ACTION_FIN,
1493 /* TCP_FIN_WAIT1 */ TCP_FIN_WAIT1,
1494 /* TCP_FIN_WAIT2 */ TCP_FIN_WAIT2,
1495 /* TCP_TIME_WAIT */ TCP_CLOSE,
1496 /* TCP_CLOSE */ TCP_CLOSE,
1497 /* TCP_CLOSE_WAIT */ TCP_LAST_ACK | TCP_ACTION_FIN,
1498 /* TCP_LAST_ACK */ TCP_LAST_ACK,
1499 /* TCP_LISTEN */ TCP_CLOSE,
1500 /* TCP_CLOSING */ TCP_CLOSING,
1501};
1502
1503static int tcp_close_state(struct sock *sk)
1504{
1505 int next = (int)new_state[sk->sk_state];
1506 int ns = next & TCP_STATE_MASK;
1507
1508 tcp_set_state(sk, ns);
1509
1510 return next & TCP_ACTION_FIN;
1511}
1512
1513/*
1514 * Shutdown the sending side of a connection. Much like close except
1515 * that we don't receive shut down or set_sock_flag(sk, SOCK_DEAD).
1516 */
1517
1518void tcp_shutdown(struct sock *sk, int how)
1519{
1520 /* We need to grab some memory, and put together a FIN,
1521 * and then put it into the queue to be sent.
1522 * Tim MacKenzie(tym@dibbler.cs.monash.edu.au) 4 Dec '92.
1523 */
1524 if (!(how & SEND_SHUTDOWN))
1525 return;
1526
1527 /* If we've already sent a FIN, or it's a closed state, skip this. */
1528 if ((1 << sk->sk_state) &
1529 (TCPF_ESTABLISHED | TCPF_SYN_SENT |
1530 TCPF_SYN_RECV | TCPF_CLOSE_WAIT)) {
1531 /* Clear out any half completed packets. FIN if needed. */
1532 if (tcp_close_state(sk))
1533 tcp_send_fin(sk);
1534 }
1535}
1536
1537void tcp_close(struct sock *sk, long timeout)
1538{
1539 struct sk_buff *skb;
1540 int data_was_unread = 0;
1541 int state;
1542
1543 lock_sock(sk);
1544 sk->sk_shutdown = SHUTDOWN_MASK;
1545
1546 if (sk->sk_state == TCP_LISTEN) {
1547 tcp_set_state(sk, TCP_CLOSE);
1548
1549 /* Special case. */
1550 inet_csk_listen_stop(sk);
1551
1552 goto adjudge_to_death;
1553 }
1554
1555 /* We need to flush the recv. buffs. We do this only on the
1556 * descriptor close, not protocol-sourced closes, because the
1557 * reader process may not have drained the data yet!
1558 */
1559 while ((skb = __skb_dequeue(&sk->sk_receive_queue)) != NULL) {
1560 u32 len = TCP_SKB_CB(skb)->end_seq - TCP_SKB_CB(skb)->seq -
1561 skb->h.th->fin;
1562 data_was_unread += len;
1563 __kfree_skb(skb);
1564 }
1565
1566 sk_stream_mem_reclaim(sk);
1567
1568 /* As outlined in draft-ietf-tcpimpl-prob-03.txt, section
1569 * 3.10, we send a RST here because data was lost. To
1570 * witness the awful effects of the old behavior of always
1571 * doing a FIN, run an older 2.1.x kernel or 2.0.x, start
1572 * a bulk GET in an FTP client, suspend the process, wait
1573 * for the client to advertise a zero window, then kill -9
1574 * the FTP client, wheee... Note: timeout is always zero
1575 * in such a case.
1576 */
1577 if (data_was_unread) {
1578 /* Unread data was tossed, zap the connection. */
1579 NET_INC_STATS_USER(LINUX_MIB_TCPABORTONCLOSE);
1580 tcp_set_state(sk, TCP_CLOSE);
1581 tcp_send_active_reset(sk, GFP_KERNEL);
1582 } else if (sock_flag(sk, SOCK_LINGER) && !sk->sk_lingertime) {
1583 /* Check zero linger _after_ checking for unread data. */
1584 sk->sk_prot->disconnect(sk, 0);
1585 NET_INC_STATS_USER(LINUX_MIB_TCPABORTONDATA);
1586 } else if (tcp_close_state(sk)) {
1587 /* We FIN if the application ate all the data before
1588 * zapping the connection.
1589 */
1590
1591 /* RED-PEN. Formally speaking, we have broken TCP state
1592 * machine. State transitions:
1593 *
1594 * TCP_ESTABLISHED -> TCP_FIN_WAIT1
1595 * TCP_SYN_RECV -> TCP_FIN_WAIT1 (forget it, it's impossible)
1596 * TCP_CLOSE_WAIT -> TCP_LAST_ACK
1597 *
1598 * are legal only when FIN has been sent (i.e. in window),
1599 * rather than queued out of window. Purists blame.
1600 *
1601 * F.e. "RFC state" is ESTABLISHED,
1602 * if Linux state is FIN-WAIT-1, but FIN is still not sent.
1603 *
1604 * The visible deviations are that sometimes
1605 * we enter time-wait state, when it is not required really
1606 * (harmless), do not send active resets, when they are
1607 * required by specs (TCP_ESTABLISHED, TCP_CLOSE_WAIT, when
1608 * they look as CLOSING or LAST_ACK for Linux)
1609 * Probably, I missed some more holelets.
1610 * --ANK
1611 */
1612 tcp_send_fin(sk);
1613 }
1614
1615 sk_stream_wait_close(sk, timeout);
1616
1617adjudge_to_death:
1618 state = sk->sk_state;
1619 sock_hold(sk);
1620 sock_orphan(sk);
1621 atomic_inc(sk->sk_prot->orphan_count);
1622
1623 /* It is the last release_sock in its life. It will remove backlog. */
1624 release_sock(sk);
1625
1626
1627 /* Now socket is owned by kernel and we acquire BH lock
1628 to finish close. No need to check for user refs.
1629 */
1630 local_bh_disable();
1631 bh_lock_sock(sk);
1632 BUG_TRAP(!sock_owned_by_user(sk));
1633
1634 /* Have we already been destroyed by a softirq or backlog? */
1635 if (state != TCP_CLOSE && sk->sk_state == TCP_CLOSE)
1636 goto out;
1637
1638 /* This is a (useful) BSD violation of the RFC. There is a
1639 * problem with TCP as specified in that the other end could
1640 * keep a socket open forever with no application left on this end.
1641 * We use a 3 minute timeout (about the same as BSD) then kill
1642 * our end. If they send after that then tough - BUT: long enough
1643 * that we won't make the old 4*rto = almost no time - whoops
1644 * reset mistake.
1645 *
1646 * Nope, it was not mistake. It is really desired behaviour
1647 * f.e. on http servers, when such sockets are useless, but
1648 * consume significant resources. Let's do it with special
1649 * linger2 option. --ANK
1650 */
1651
1652 if (sk->sk_state == TCP_FIN_WAIT2) {
1653 struct tcp_sock *tp = tcp_sk(sk);
1654 if (tp->linger2 < 0) {
1655 tcp_set_state(sk, TCP_CLOSE);
1656 tcp_send_active_reset(sk, GFP_ATOMIC);
1657 NET_INC_STATS_BH(LINUX_MIB_TCPABORTONLINGER);
1658 } else {
1659 const int tmo = tcp_fin_time(sk);
1660
1661 if (tmo > TCP_TIMEWAIT_LEN) {
1662 inet_csk_reset_keepalive_timer(sk, tcp_fin_time(sk));
1663 } else {
1664 tcp_time_wait(sk, TCP_FIN_WAIT2, tmo);
1665 goto out;
1666 }
1667 }
1668 }
1669 if (sk->sk_state != TCP_CLOSE) {
1670 sk_stream_mem_reclaim(sk);
1671 if (atomic_read(sk->sk_prot->orphan_count) > sysctl_tcp_max_orphans ||
1672 (sk->sk_wmem_queued > SOCK_MIN_SNDBUF &&
1673 atomic_read(&tcp_memory_allocated) > sysctl_tcp_mem[2])) {
1674 if (net_ratelimit())
1675 printk(KERN_INFO "TCP: too many of orphaned "
1676 "sockets\n");
1677 tcp_set_state(sk, TCP_CLOSE);
1678 tcp_send_active_reset(sk, GFP_ATOMIC);
1679 NET_INC_STATS_BH(LINUX_MIB_TCPABORTONMEMORY);
1680 }
1681 }
1682
1683 if (sk->sk_state == TCP_CLOSE)
1684 inet_csk_destroy_sock(sk);
1685 /* Otherwise, socket is reprieved until protocol close. */
1686
1687out:
1688 bh_unlock_sock(sk);
1689 local_bh_enable();
1690 sock_put(sk);
1691}
1692
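The zero-linger branch above (disconnect plus RST rather than an orderly FIN and TIME_WAIT) is reached from user space roughly as sketched below; this is an editorial example, not part of the file.

#include <unistd.h>
#include <sys/socket.h>

/* Hypothetical helper: abortive close - unread data is dropped and the
 * connection is reset instead of closing with FIN. */
static int tcp_abort_close(int fd)
{
	struct linger lg = { .l_onoff = 1, .l_linger = 0 };

	if (setsockopt(fd, SOL_SOCKET, SO_LINGER, &lg, sizeof(lg)) < 0)
		return -1;
	return close(fd);
}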
1693/* These states need RST on ABORT according to RFC793 */
1694
1695static inline int tcp_need_reset(int state)
1696{
1697 return (1 << state) &
1698 (TCPF_ESTABLISHED | TCPF_CLOSE_WAIT | TCPF_FIN_WAIT1 |
1699 TCPF_FIN_WAIT2 | TCPF_SYN_RECV);
1700}
1701
1702int tcp_disconnect(struct sock *sk, int flags)
1703{
1704 struct inet_sock *inet = inet_sk(sk);
1705 struct inet_connection_sock *icsk = inet_csk(sk);
1706 struct tcp_sock *tp = tcp_sk(sk);
1707 int err = 0;
1708 int old_state = sk->sk_state;
1709
1710 if (old_state != TCP_CLOSE)
1711 tcp_set_state(sk, TCP_CLOSE);
1712
1713 /* ABORT function of RFC793 */
1714 if (old_state == TCP_LISTEN) {
1715 inet_csk_listen_stop(sk);
1716 } else if (tcp_need_reset(old_state) ||
1717 (tp->snd_nxt != tp->write_seq &&
1718 (1 << old_state) & (TCPF_CLOSING | TCPF_LAST_ACK))) {
1719 /* The last check adjusts for discrepancy of Linux wrt. RFC
1720 * states
1721 */
1722 tcp_send_active_reset(sk, gfp_any());
1723 sk->sk_err = ECONNRESET;
1724 } else if (old_state == TCP_SYN_SENT)
1725 sk->sk_err = ECONNRESET;
1726
1727 tcp_clear_xmit_timers(sk);
1728 __skb_queue_purge(&sk->sk_receive_queue);
1729 sk_stream_writequeue_purge(sk);
1730 __skb_queue_purge(&tp->out_of_order_queue);
1731#ifdef CONFIG_NET_DMA
1732 __skb_queue_purge(&sk->sk_async_wait_queue);
1733#endif
1734
1735 inet->dport = 0;
1736
1737 if (!(sk->sk_userlocks & SOCK_BINDADDR_LOCK))
1738 inet_reset_saddr(sk);
1739
1740 sk->sk_shutdown = 0;
1741 sock_reset_flag(sk, SOCK_DONE);
1742 tp->srtt = 0;
1743 if ((tp->write_seq += tp->max_window + 2) == 0)
1744 tp->write_seq = 1;
1745 icsk->icsk_backoff = 0;
1746 tp->snd_cwnd = 2;
1747 icsk->icsk_probes_out = 0;
1748 tp->packets_out = 0;
1749 tp->snd_ssthresh = 0x7fffffff;
1750 tp->snd_cwnd_cnt = 0;
1751 tp->bytes_acked = 0;
1752 tcp_set_ca_state(sk, TCP_CA_Open);
1753 tcp_clear_retrans(tp);
1754 inet_csk_delack_init(sk);
1755 sk->sk_send_head = NULL;
1756 tp->rx_opt.saw_tstamp = 0;
1757 tcp_sack_reset(&tp->rx_opt);
1758 __sk_dst_reset(sk);
1759
1760 BUG_TRAP(!inet->num || icsk->icsk_bind_hash);
1761
1762 sk->sk_error_report(sk);
1763 return err;
1764}
1765
1766/*
1767 * Socket option code for TCP.
1768 */
1769static int do_tcp_setsockopt(struct sock *sk, int level,
1770 int optname, char __user *optval, int optlen)
1771{
1772 struct tcp_sock *tp = tcp_sk(sk);
1773 struct inet_connection_sock *icsk = inet_csk(sk);
1774 int val;
1775 int err = 0;
1776
1777 /* This is a string value all the others are int's */
1778 if (optname == TCP_CONGESTION) {
1779 char name[TCP_CA_NAME_MAX];
1780
1781 if (optlen < 1)
1782 return -EINVAL;
1783
1784 val = strncpy_from_user(name, optval,
1785 min(TCP_CA_NAME_MAX-1, optlen));
1786 if (val < 0)
1787 return -EFAULT;
1788 name[val] = 0;
1789
1790 lock_sock(sk);
1791 err = tcp_set_congestion_control(sk, name);
1792 release_sock(sk);
1793 return err;
1794 }
1795
1796 if (optlen < sizeof(int))
1797 return -EINVAL;
1798
1799 if (get_user(val, (int __user *)optval))
1800 return -EFAULT;
1801
1802 lock_sock(sk);
1803
1804 switch (optname) {
1805 case TCP_MAXSEG:
1806 /* Values greater than interface MTU won't take effect. However
1807 * at the point when this call is done we typically don't yet
1808 * know which interface is going to be used */
1809 if (val < 8 || val > MAX_TCP_WINDOW) {
1810 err = -EINVAL;
1811 break;
1812 }
1813 tp->rx_opt.user_mss = val;
1814 break;
1815
1816 case TCP_NODELAY:
1817 if (val) {
1818 /* TCP_NODELAY is weaker than TCP_CORK, so that
1819 * this option on corked socket is remembered, but
1820 * it is not activated until cork is cleared.
1821 *
1822 * However, when TCP_NODELAY is set we make
1823 * an explicit push, which overrides even TCP_CORK
1824 * for currently queued segments.
1825 */
1826 tp->nonagle |= TCP_NAGLE_OFF|TCP_NAGLE_PUSH;
1827 tcp_push_pending_frames(sk, tp);
1828 } else {
1829 tp->nonagle &= ~TCP_NAGLE_OFF;
1830 }
1831 break;

	case TCP_CORK:
		/* When set indicates to always queue non-full frames.
		 * Later the user clears this option and we transmit
		 * any pending partial frames in the queue. This is
		 * meant to be used alongside sendfile() to get properly
		 * filled frames when the user (for example) must write
		 * out headers with a write() call first and then use
		 * sendfile to send out the data parts.
		 *
		 * TCP_CORK can be set together with TCP_NODELAY and it is
		 * stronger than TCP_NODELAY.
		 */
		if (val) {
			tp->nonagle |= TCP_NAGLE_CORK;
		} else {
			tp->nonagle &= ~TCP_NAGLE_CORK;
			if (tp->nonagle&TCP_NAGLE_OFF)
				tp->nonagle |= TCP_NAGLE_PUSH;
			tcp_push_pending_frames(sk, tp);
		}
		break;
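	/*
	 * Editor's illustration (not in the original source): the
	 * header-then-sendfile() pattern the comment above describes, from
	 * the userspace side. `fd`, `filefd`, `hdr` and the lengths are
	 * assumed names for the example.
	 *
	 *	int on = 1, off = 0;
	 *
	 *	setsockopt(fd, IPPROTO_TCP, TCP_CORK, &on, sizeof(on));
	 *	write(fd, hdr, hdr_len);		// header queued, not sent
	 *	sendfile(fd, filefd, NULL, file_len);	// body coalesced behind it
	 *	setsockopt(fd, IPPROTO_TCP, TCP_CORK, &off, sizeof(off));
	 *						// uncork: any partial frame is flushed
	 */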

	case TCP_KEEPIDLE:
		if (val < 1 || val > MAX_TCP_KEEPIDLE)
			err = -EINVAL;
		else {
			tp->keepalive_time = val * HZ;
			if (sock_flag(sk, SOCK_KEEPOPEN) &&
			    !((1 << sk->sk_state) &
			      (TCPF_CLOSE | TCPF_LISTEN))) {
				__u32 elapsed = tcp_time_stamp - tp->rcv_tstamp;
				if (tp->keepalive_time > elapsed)
					elapsed = tp->keepalive_time - elapsed;
				else
					elapsed = 0;
				inet_csk_reset_keepalive_timer(sk, elapsed);
			}
		}
		break;
	case TCP_KEEPINTVL:
		if (val < 1 || val > MAX_TCP_KEEPINTVL)
			err = -EINVAL;
		else
			tp->keepalive_intvl = val * HZ;
		break;
	case TCP_KEEPCNT:
		if (val < 1 || val > MAX_TCP_KEEPCNT)
			err = -EINVAL;
		else
			tp->keepalive_probes = val;
		break;
	case TCP_SYNCNT:
		if (val < 1 || val > MAX_TCP_SYNCNT)
			err = -EINVAL;
		else
			icsk->icsk_syn_retries = val;
		break;

	case TCP_LINGER2:
		if (val < 0)
			tp->linger2 = -1;
		else if (val > sysctl_tcp_fin_timeout / HZ)
			tp->linger2 = 0;
		else
			tp->linger2 = val * HZ;
		break;

	case TCP_DEFER_ACCEPT:
		icsk->icsk_accept_queue.rskq_defer_accept = 0;
		if (val > 0) {
			/* Translate value in seconds to number of
			 * retransmits */
			while (icsk->icsk_accept_queue.rskq_defer_accept < 32 &&
			       val > ((TCP_TIMEOUT_INIT / HZ) <<
				       icsk->icsk_accept_queue.rskq_defer_accept))
				icsk->icsk_accept_queue.rskq_defer_accept++;
			icsk->icsk_accept_queue.rskq_defer_accept++;
		}
		break;
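	/*
	 * Editor's note (not in the original source): a worked example of the
	 * translation above, assuming TCP_TIMEOUT_INIT / HZ == 3 seconds.
	 * For val == 10 the loop increments rskq_defer_accept while
	 * 10 > 3 << rskq_defer_accept (3, 6), stopping at 2 because
	 * 10 > 12 is false, and the trailing increment leaves a final
	 * value of 3 retransmission periods.
	 */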

	case TCP_WINDOW_CLAMP:
		if (!val) {
			if (sk->sk_state != TCP_CLOSE) {
				err = -EINVAL;
				break;
			}
			tp->window_clamp = 0;
		} else
			tp->window_clamp = val < SOCK_MIN_RCVBUF / 2 ?
						SOCK_MIN_RCVBUF / 2 : val;
		break;

	case TCP_QUICKACK:
		if (!val) {
			icsk->icsk_ack.pingpong = 1;
		} else {
			icsk->icsk_ack.pingpong = 0;
			if ((1 << sk->sk_state) &
			    (TCPF_ESTABLISHED | TCPF_CLOSE_WAIT) &&
			    inet_csk_ack_scheduled(sk)) {
				icsk->icsk_ack.pending |= ICSK_ACK_PUSHED;
				tcp_cleanup_rbuf(sk, 1);
				if (!(val & 1))
					icsk->icsk_ack.pingpong = 1;
			}
		}
		break;

	default:
		err = -ENOPROTOOPT;
		break;
	};
	release_sock(sk);
	return err;
}

int tcp_setsockopt(struct sock *sk, int level, int optname, char __user *optval,
		   int optlen)
{
	struct inet_connection_sock *icsk = inet_csk(sk);

	if (level != SOL_TCP)
		return icsk->icsk_af_ops->setsockopt(sk, level, optname,
						     optval, optlen);
	return do_tcp_setsockopt(sk, level, optname, optval, optlen);
}

#ifdef CONFIG_COMPAT
int compat_tcp_setsockopt(struct sock *sk, int level, int optname,
			  char __user *optval, int optlen)
{
	if (level != SOL_TCP)
		return inet_csk_compat_setsockopt(sk, level, optname,
						  optval, optlen);
	return do_tcp_setsockopt(sk, level, optname, optval, optlen);
}

EXPORT_SYMBOL(compat_tcp_setsockopt);
#endif

/* Return information about state of tcp endpoint in API format. */
void tcp_get_info(struct sock *sk, struct tcp_info *info)
{
	struct tcp_sock *tp = tcp_sk(sk);
	const struct inet_connection_sock *icsk = inet_csk(sk);
	u32 now = tcp_time_stamp;

	memset(info, 0, sizeof(*info));

	info->tcpi_state = sk->sk_state;
	info->tcpi_ca_state = icsk->icsk_ca_state;
	info->tcpi_retransmits = icsk->icsk_retransmits;
	info->tcpi_probes = icsk->icsk_probes_out;
	info->tcpi_backoff = icsk->icsk_backoff;

	if (tp->rx_opt.tstamp_ok)
		info->tcpi_options |= TCPI_OPT_TIMESTAMPS;
	if (tp->rx_opt.sack_ok)
		info->tcpi_options |= TCPI_OPT_SACK;
	if (tp->rx_opt.wscale_ok) {
		info->tcpi_options |= TCPI_OPT_WSCALE;
		info->tcpi_snd_wscale = tp->rx_opt.snd_wscale;
		info->tcpi_rcv_wscale = tp->rx_opt.rcv_wscale;
	}

	if (tp->ecn_flags&TCP_ECN_OK)
		info->tcpi_options |= TCPI_OPT_ECN;

	info->tcpi_rto = jiffies_to_usecs(icsk->icsk_rto);
	info->tcpi_ato = jiffies_to_usecs(icsk->icsk_ack.ato);
	info->tcpi_snd_mss = tp->mss_cache;
	info->tcpi_rcv_mss = icsk->icsk_ack.rcv_mss;

	info->tcpi_unacked = tp->packets_out;
	info->tcpi_sacked = tp->sacked_out;
	info->tcpi_lost = tp->lost_out;
	info->tcpi_retrans = tp->retrans_out;
	info->tcpi_fackets = tp->fackets_out;

	info->tcpi_last_data_sent = jiffies_to_msecs(now - tp->lsndtime);
	info->tcpi_last_data_recv = jiffies_to_msecs(now - icsk->icsk_ack.lrcvtime);
	info->tcpi_last_ack_recv = jiffies_to_msecs(now - tp->rcv_tstamp);

	info->tcpi_pmtu = icsk->icsk_pmtu_cookie;
	info->tcpi_rcv_ssthresh = tp->rcv_ssthresh;
	info->tcpi_rtt = jiffies_to_usecs(tp->srtt)>>3;
	info->tcpi_rttvar = jiffies_to_usecs(tp->mdev)>>2;
	info->tcpi_snd_ssthresh = tp->snd_ssthresh;
	info->tcpi_snd_cwnd = tp->snd_cwnd;
	info->tcpi_advmss = tp->advmss;
	info->tcpi_reordering = tp->reordering;

	info->tcpi_rcv_rtt = jiffies_to_usecs(tp->rcv_rtt_est.rtt)>>3;
	info->tcpi_rcv_space = tp->rcvq_space.space;

	info->tcpi_total_retrans = tp->total_retrans;
}

EXPORT_SYMBOL_GPL(tcp_get_info);
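
/*
 * Editor's illustration (not in the original source): a minimal userspace
 * reader for the structure filled in above, fetched through the TCP_INFO
 * case of do_tcp_getsockopt() below. `fd` is an assumed connected socket.
 *
 *	#include <stdio.h>
 *	#include <sys/socket.h>
 *	#include <netinet/in.h>
 *	#include <netinet/tcp.h>
 *
 *	static void dump_tcp_info(int fd)
 *	{
 *		struct tcp_info ti;
 *		socklen_t len = sizeof(ti);
 *
 *		// The kernel copies min(len, sizeof(info)) bytes and writes
 *		// the copied length back through optlen.
 *		if (getsockopt(fd, IPPROTO_TCP, TCP_INFO, &ti, &len) == 0)
 *			printf("rtt %u us, cwnd %u\n",
 *			       ti.tcpi_rtt, ti.tcpi_snd_cwnd);
 *	}
 */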

static int do_tcp_getsockopt(struct sock *sk, int level,
			     int optname, char __user *optval, int __user *optlen)
{
	struct inet_connection_sock *icsk = inet_csk(sk);
	struct tcp_sock *tp = tcp_sk(sk);
	int val, len;

	if (get_user(len, optlen))
		return -EFAULT;

	len = min_t(unsigned int, len, sizeof(int));

	if (len < 0)
		return -EINVAL;

	switch (optname) {
	case TCP_MAXSEG:
		val = tp->mss_cache;
		if (!val && ((1 << sk->sk_state) & (TCPF_CLOSE | TCPF_LISTEN)))
			val = tp->rx_opt.user_mss;
		break;
	case TCP_NODELAY:
		val = !!(tp->nonagle&TCP_NAGLE_OFF);
		break;
	case TCP_CORK:
		val = !!(tp->nonagle&TCP_NAGLE_CORK);
		break;
	case TCP_KEEPIDLE:
		val = (tp->keepalive_time ? : sysctl_tcp_keepalive_time) / HZ;
		break;
	case TCP_KEEPINTVL:
		val = (tp->keepalive_intvl ? : sysctl_tcp_keepalive_intvl) / HZ;
		break;
	case TCP_KEEPCNT:
		val = tp->keepalive_probes ? : sysctl_tcp_keepalive_probes;
		break;
	case TCP_SYNCNT:
		val = icsk->icsk_syn_retries ? : sysctl_tcp_syn_retries;
		break;
	case TCP_LINGER2:
		val = tp->linger2;
		if (val >= 0)
			val = (val ? : sysctl_tcp_fin_timeout) / HZ;
		break;
	case TCP_DEFER_ACCEPT:
		val = !icsk->icsk_accept_queue.rskq_defer_accept ? 0 :
		      ((TCP_TIMEOUT_INIT / HZ) << (icsk->icsk_accept_queue.rskq_defer_accept - 1));
		break;
	case TCP_WINDOW_CLAMP:
		val = tp->window_clamp;
		break;
	case TCP_INFO: {
		struct tcp_info info;

		if (get_user(len, optlen))
			return -EFAULT;

		tcp_get_info(sk, &info);

		len = min_t(unsigned int, len, sizeof(info));
		if (put_user(len, optlen))
			return -EFAULT;
		if (copy_to_user(optval, &info, len))
			return -EFAULT;
		return 0;
	}
	case TCP_QUICKACK:
		val = !icsk->icsk_ack.pingpong;
		break;

	case TCP_CONGESTION:
		if (get_user(len, optlen))
			return -EFAULT;
		len = min_t(unsigned int, len, TCP_CA_NAME_MAX);
		if (put_user(len, optlen))
			return -EFAULT;
		if (copy_to_user(optval, icsk->icsk_ca_ops->name, len))
			return -EFAULT;
		return 0;
	default:
		return -ENOPROTOOPT;
	};

	if (put_user(len, optlen))
		return -EFAULT;
	if (copy_to_user(optval, &val, len))
		return -EFAULT;
	return 0;
}

int tcp_getsockopt(struct sock *sk, int level, int optname, char __user *optval,
		   int __user *optlen)
{
	struct inet_connection_sock *icsk = inet_csk(sk);

	if (level != SOL_TCP)
		return icsk->icsk_af_ops->getsockopt(sk, level, optname,
						     optval, optlen);
	return do_tcp_getsockopt(sk, level, optname, optval, optlen);
}

#ifdef CONFIG_COMPAT
int compat_tcp_getsockopt(struct sock *sk, int level, int optname,
			  char __user *optval, int __user *optlen)
{
	if (level != SOL_TCP)
		return inet_csk_compat_getsockopt(sk, level, optname,
						  optval, optlen);
	return do_tcp_getsockopt(sk, level, optname, optval, optlen);
}

EXPORT_SYMBOL(compat_tcp_getsockopt);
#endif

extern void __skb_cb_too_small_for_tcp(int, int);
extern struct tcp_congestion_ops tcp_reno;

static __initdata unsigned long thash_entries;
static int __init set_thash_entries(char *str)
{
	if (!str)
		return 0;
	thash_entries = simple_strtoul(str, &str, 0);
	return 1;
}
__setup("thash_entries=", set_thash_entries);
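
/*
 * Editor's note (not in the original source): thash_entries is a boot-time
 * parameter. For example, booting with "thash_entries=131072" on the kernel
 * command line requests that many entries for the established hash table
 * instead of the memory-based sizing done in tcp_init() below; the value
 * here is only an example.
 */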

void __init tcp_init(void)
{
	struct sk_buff *skb = NULL;
	unsigned long limit;
	int order, i, max_share;

	if (sizeof(struct tcp_skb_cb) > sizeof(skb->cb))
		__skb_cb_too_small_for_tcp(sizeof(struct tcp_skb_cb),
					   sizeof(skb->cb));

	tcp_hashinfo.bind_bucket_cachep =
		kmem_cache_create("tcp_bind_bucket",
				  sizeof(struct inet_bind_bucket), 0,
				  SLAB_HWCACHE_ALIGN, NULL, NULL);
	if (!tcp_hashinfo.bind_bucket_cachep)
		panic("tcp_init: Cannot alloc tcp_bind_bucket cache.");

	/* Size and allocate the main established and bind bucket
	 * hash tables.
	 *
	 * The methodology is similar to that of the buffer cache.
	 */
	tcp_hashinfo.ehash =
		alloc_large_system_hash("TCP established",
					sizeof(struct inet_ehash_bucket),
					thash_entries,
					(num_physpages >= 128 * 1024) ?
					13 : 15,
					HASH_HIGHMEM,
					&tcp_hashinfo.ehash_size,
					NULL,
					0);
	tcp_hashinfo.ehash_size = (1 << tcp_hashinfo.ehash_size) >> 1;
	for (i = 0; i < (tcp_hashinfo.ehash_size << 1); i++) {
		rwlock_init(&tcp_hashinfo.ehash[i].lock);
		INIT_HLIST_HEAD(&tcp_hashinfo.ehash[i].chain);
	}

	tcp_hashinfo.bhash =
		alloc_large_system_hash("TCP bind",
					sizeof(struct inet_bind_hashbucket),
					tcp_hashinfo.ehash_size,
					(num_physpages >= 128 * 1024) ?
					13 : 15,
					HASH_HIGHMEM,
					&tcp_hashinfo.bhash_size,
					NULL,
					64 * 1024);
	tcp_hashinfo.bhash_size = 1 << tcp_hashinfo.bhash_size;
	for (i = 0; i < tcp_hashinfo.bhash_size; i++) {
		spin_lock_init(&tcp_hashinfo.bhash[i].lock);
		INIT_HLIST_HEAD(&tcp_hashinfo.bhash[i].chain);
	}

	/* Try to be a bit smarter and adjust defaults depending
	 * on available memory.
	 */
	for (order = 0; ((1 << order) << PAGE_SHIFT) <
			(tcp_hashinfo.bhash_size * sizeof(struct inet_bind_hashbucket));
			order++)
		;
	if (order >= 4) {
		sysctl_local_port_range[0] = 32768;
		sysctl_local_port_range[1] = 61000;
		tcp_death_row.sysctl_max_tw_buckets = 180000;
		sysctl_tcp_max_orphans = 4096 << (order - 4);
		sysctl_max_syn_backlog = 1024;
	} else if (order < 3) {
		sysctl_local_port_range[0] = 1024 * (3 - order);
		tcp_death_row.sysctl_max_tw_buckets >>= (3 - order);
		sysctl_tcp_max_orphans >>= (3 - order);
		sysctl_max_syn_backlog = 128;
	}

	sysctl_tcp_mem[0] = 768 << order;
	sysctl_tcp_mem[1] = 1024 << order;
	sysctl_tcp_mem[2] = 1536 << order;

	limit = ((unsigned long)sysctl_tcp_mem[1]) << (PAGE_SHIFT - 7);
	max_share = min(4UL*1024*1024, limit);

	sysctl_tcp_wmem[0] = SK_STREAM_MEM_QUANTUM;
	sysctl_tcp_wmem[1] = 16*1024;
	sysctl_tcp_wmem[2] = max(64*1024, max_share);

	sysctl_tcp_rmem[0] = SK_STREAM_MEM_QUANTUM;
	sysctl_tcp_rmem[1] = 87380;
	sysctl_tcp_rmem[2] = max(87380, max_share);
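
	/*
	 * Editor's note (not in the original source): a worked example of the
	 * sizing above, assuming PAGE_SHIFT == 12 (4 KB pages) and order == 3.
	 * Then sysctl_tcp_mem[1] = 1024 << 3 = 8192 pages, limit = 8192 << 5 =
	 * 262144 bytes, max_share = min(4 MB, 256 KB) = 256 KB, and both
	 * sysctl_tcp_wmem[2] and sysctl_tcp_rmem[2] end up at 262144.
	 */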

	printk(KERN_INFO "TCP: Hash tables configured "
	       "(established %d bind %d)\n",
	       tcp_hashinfo.ehash_size << 1, tcp_hashinfo.bhash_size);

	tcp_register_congestion_control(&tcp_reno);
}

EXPORT_SYMBOL(tcp_close);
EXPORT_SYMBOL(tcp_disconnect);
EXPORT_SYMBOL(tcp_getsockopt);
EXPORT_SYMBOL(tcp_ioctl);
EXPORT_SYMBOL(tcp_poll);
EXPORT_SYMBOL(tcp_read_sock);
EXPORT_SYMBOL(tcp_recvmsg);
EXPORT_SYMBOL(tcp_sendmsg);
EXPORT_SYMBOL(tcp_sendpage);
EXPORT_SYMBOL(tcp_setsockopt);
EXPORT_SYMBOL(tcp_shutdown);
EXPORT_SYMBOL(tcp_statistics);