/*
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system.  INET is implemented using the  BSD Socket
 *		interface as the means of communication with the user level.
 *
 *		Implementation of the Transmission Control Protocol(TCP).
 *
 * Version:	$Id: tcp.c,v 1.216 2002/02/01 22:01:04 davem Exp $
 *
 * Authors:	Ross Biro
 *		Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
 *		Mark Evans, <evansmp@uhura.aston.ac.uk>
 *		Corey Minyard <wf-rch!minyard@relay.EU.net>
 *		Florian La Roche, <flla@stud.uni-sb.de>
 *		Charles Hedrick, <hedrick@klinzhai.rutgers.edu>
 *		Linus Torvalds, <torvalds@cs.helsinki.fi>
 *		Alan Cox, <gw4pts@gw4pts.ampr.org>
 *		Matthew Dillon, <dillon@apollo.west.oic.com>
 *		Arnt Gulbrandsen, <agulbra@nvg.unit.no>
 *		Jorge Cwik, <jorge@laser.satlink.net>
 *
 * Fixes:
 *		Alan Cox	:	Numerous verify_area() calls
 *		Alan Cox	:	Set the ACK bit on a reset
 *		Alan Cox	:	Stopped it crashing if it closed while
 *					sk->inuse=1 and was trying to connect
 *					(tcp_err()).
 *		Alan Cox	:	All icmp error handling was broken
 *					pointers passed were wrong and the
 *					socket was looked up backwards. Nobody
 *					tested any icmp error code obviously.
 *		Alan Cox	:	tcp_err() now handled properly. It
 *					wakes people on errors. poll
 *					behaves and the icmp error race
 *					has gone by moving it into sock.c
 *		Alan Cox	:	tcp_send_reset() fixed to work for
 *					everything not just packets for
 *					unknown sockets.
 *		Alan Cox	:	tcp option processing.
 *		Alan Cox	:	Reset tweaked (still not 100%) [Had
 *					syn rule wrong]
 *		Herp Rosmanith	:	More reset fixes
 *		Alan Cox	:	No longer acks invalid rst frames.
 *					Acking any kind of RST is right out.
 *		Alan Cox	:	Sets an ignore me flag on an rst
 *					receive otherwise odd bits of prattle
 *					escape still
 *		Alan Cox	:	Fixed another acking RST frame bug.
 *					Should stop LAN workplace lockups.
 *		Alan Cox	:	Some tidyups using the new skb list
 *					facilities
 *		Alan Cox	:	sk->keepopen now seems to work
 *		Alan Cox	:	Pulls options out correctly on accepts
 *		Alan Cox	:	Fixed assorted sk->rqueue->next errors
 *		Alan Cox	:	PSH doesn't end a TCP read. Switched a
 *					bit to skb ops.
 *		Alan Cox	:	Tidied tcp_data to avoid a potential
 *					nasty.
 *		Alan Cox	:	Added some better commenting, as the
 *					tcp is hard to follow
 *		Alan Cox	:	Removed incorrect check for 20 * psh
 *	Michael O'Reilly	:	ack < copied bug fix.
 *	Johannes Stille		:	Misc tcp fixes (not all in yet).
 *		Alan Cox	:	FIN with no memory -> CRASH
 *		Alan Cox	:	Added socket option proto entries.
 *					Also added awareness of them to accept.
 *		Alan Cox	:	Added TCP options (SOL_TCP)
 *		Alan Cox	:	Switched wakeup calls to callbacks,
 *					so the kernel can layer network
 *					sockets.
 *		Alan Cox	:	Use ip_tos/ip_ttl settings.
 *		Alan Cox	:	Handle FIN (more) properly (we hope).
 *		Alan Cox	:	RST frames sent on unsynchronised
 *					state ack error.
 *		Alan Cox	:	Put in missing check for SYN bit.
 *		Alan Cox	:	Added tcp_select_window() aka NET2E
 *					window non shrink trick.
 *		Alan Cox	:	Added a couple of small NET2E timer
 *					fixes
 *		Charles Hedrick :	TCP fixes
 *		Toomas Tamm	:	TCP window fixes
 *		Alan Cox	:	Small URG fix to rlogin ^C ack fight
 *		Charles Hedrick :	Rewrote most of it to actually work
 *		Linus		:	Rewrote tcp_read() and URG handling
 *					completely
 *		Gerhard Koerting:	Fixed some missing timer handling
 *		Matthew Dillon  :	Reworked TCP machine states as per RFC
 *		Gerhard Koerting:	PC/TCP workarounds
 *		Adam Caldwell	:	Assorted timer/timing errors
 *		Matthew Dillon	:	Fixed another RST bug
 *		Alan Cox	:	Move to kernel side addressing changes.
 *		Alan Cox	:	Beginning work on TCP fastpathing
 *					(not yet usable)
 *		Arnt Gulbrandsen:	Turbocharged tcp_check() routine.
 *		Alan Cox	:	TCP fast path debugging
 *		Alan Cox	:	Window clamping
 *		Michael Riepe	:	Bug in tcp_check()
 *		Matt Dillon	:	More TCP improvements and RST bug fixes
 *		Matt Dillon	:	Yet more small nasties removed from the
 *					TCP code (Be very nice to this man if
 *					tcp finally works 100%) 8)
 *		Alan Cox	:	BSD accept semantics.
 *		Alan Cox	:	Reset on closedown bug.
 *	Peter De Schrijver	:	ENOTCONN check missing in tcp_sendto().
 *		Michael Pall	:	Handle poll() after URG properly in
 *					all cases.
 *		Michael Pall	:	Undo the last fix in tcp_read_urg()
 *					(multi URG PUSH broke rlogin).
 *		Michael Pall	:	Fix the multi URG PUSH problem in
 *					tcp_readable(), poll() after URG
 *					works now.
 *		Michael Pall	:	recv(...,MSG_OOB) never blocks in the
 *					BSD api.
 *		Alan Cox	:	Changed the semantics of sk->socket to
 *					fix a race and a signal problem with
 *					accept() and async I/O.
 *		Alan Cox	:	Relaxed the rules on tcp_sendto().
 *		Yury Shevchuk	:	Really fixed accept() blocking problem.
 *		Craig I. Hagan  :	Allow for BSD compatible TIME_WAIT for
 *					clients/servers which listen in on
 *					fixed ports.
 *		Alan Cox	:	Cleaned the above up and shrank it to
 *					a sensible code size.
 *		Alan Cox	:	Self connect lockup fix.
 *		Alan Cox	:	No connect to multicast.
 *		Ross Biro	:	Close unaccepted children on master
 *					socket close.
 *		Alan Cox	:	Reset tracing code.
 *		Alan Cox	:	Spurious resets on shutdown.
 *		Alan Cox	:	Giant 15 minute/60 second timer error
 *		Alan Cox	:	Small whoops in polling before an
 *					accept.
 *		Alan Cox	:	Kept the state trace facility since
 *					it's handy for debugging.
 *		Alan Cox	:	More reset handler fixes.
 *		Alan Cox	:	Started rewriting the code based on
 *					the RFC's for other useful protocol
 *					references see: Comer, KA9Q NOS, and
 *					for a reference on the difference
 *					between specifications and how BSD
 *					works see the 4.4lite source.
 *		A.N.Kuznetsov	:	Don't time wait on completion of tidy
 *					close.
 *		Linus Torvalds	:	Fin/Shutdown & copied_seq changes.
 *		Linus Torvalds	:	Fixed BSD port reuse to work first syn
 *		Alan Cox	:	Reimplemented timers as per the RFC
 *					and using multiple timers for sanity.
 *		Alan Cox	:	Small bug fixes, and a lot of new
 *					comments.
 *		Alan Cox	:	Fixed dual reader crash by locking
 *					the buffers (much like datagram.c)
 *		Alan Cox	:	Fixed stuck sockets in probe. A probe
 *					now gets fed up of retrying without
 *					(even a no space) answer.
 *		Alan Cox	:	Extracted closing code better
 *		Alan Cox	:	Fixed the closing state machine to
 *					resemble the RFC.
 *		Alan Cox	:	More 'per spec' fixes.
 *		Jorge Cwik	:	Even faster checksumming.
 *		Alan Cox	:	tcp_data() doesn't ack illegal PSH
 *					only frames. At least one pc tcp stack
 *					generates them.
 *		Alan Cox	:	Cache last socket.
 *		Alan Cox	:	Per route irtt.
 *		Matt Day	:	poll()->select() match BSD precisely on error
 *		Alan Cox	:	New buffers
 *		Marc Tamsky	:	Various sk->prot->retransmits and
 *					sk->retransmits misupdating fixed.
 *					Fixed tcp_write_timeout: stuck close,
 *					and TCP syn retries gets used now.
 *		Mark Yarvis	:	In tcp_read_wakeup(), don't send an
 *					ack if state is TCP_CLOSED.
 *		Alan Cox	:	Look up device on a retransmit - routes may
 *					change. Doesn't yet cope with MSS shrink right
 *					but it's a start!
 *		Marc Tamsky	:	Closing in closing fixes.
 *		Mike Shaver	:	RFC1122 verifications.
 *		Alan Cox	:	rcv_saddr errors.
 *		Alan Cox	:	Block double connect().
 *		Alan Cox	:	Small hooks for enSKIP.
 *		Alexey Kuznetsov:	Path MTU discovery.
 *		Alan Cox	:	Support soft errors.
 *		Alan Cox	:	Fix MTU discovery pathological case
 *					when the remote claims no mtu!
 *		Marc Tamsky	:	TCP_CLOSE fix.
 *		Colin (G3TNE)	:	Send a reset on syn ack replies in
 *					window but wrong (fixes NT lpd problems)
 *		Pedro Roque	:	Better TCP window handling, delayed ack.
 *		Joerg Reuter	:	No modification of locked buffers in
 *					tcp_do_retransmit()
 *		Eric Schenk	:	Changed receiver side silly window
 *					avoidance algorithm to BSD style
 *					algorithm. This doubles throughput
 *					against machines running Solaris,
 *					and seems to result in general
 *					improvement.
 *	Stefan Magdalinski	:	adjusted tcp_readable() to fix FIONREAD
 *	Willy Konynenberg	:	Transparent proxying support.
 *	Mike McLagan		:	Routing by source
 *		Keith Owens	:	Do proper merging with partial SKB's in
 *					tcp_do_sendmsg to avoid burstiness.
 *		Eric Schenk	:	Fix fast close down bug with
 *					shutdown() followed by close().
 *		Andi Kleen	:	Make poll agree with SIGIO
 *	Salvatore Sanfilippo	:	Support SO_LINGER with linger == 1 and
 *					lingertime == 0 (RFC 793 ABORT Call)
 *	Hirokazu Takahashi	:	Use copy_from_user() instead of
 *					csum_and_copy_from_user() if possible.
 *
 *		This program is free software; you can redistribute it and/or
 *		modify it under the terms of the GNU General Public License
 *		as published by the Free Software Foundation; either version
 *		2 of the License, or (at your option) any later version.
 *
 * Description of States:
 *
 *	TCP_SYN_SENT		sent a connection request, waiting for ack
 *
 *	TCP_SYN_RECV		received a connection request, sent ack,
 *				waiting for final ack in three-way handshake.
 *
 *	TCP_ESTABLISHED		connection established
 *
 *	TCP_FIN_WAIT1		our side has shutdown, waiting to complete
 *				transmission of remaining buffered data
 *
 *	TCP_FIN_WAIT2		all buffered data sent, waiting for remote
 *				to shutdown
 *
 *	TCP_CLOSING		both sides have shutdown but we still have
 *				data we have to finish sending
 *
 *	TCP_TIME_WAIT		timeout to catch resent junk before entering
 *				closed, can only be entered from FIN_WAIT2
 *				or CLOSING.  Required because the other end
 *				may not have gotten our last ACK causing it
 *				to retransmit the data packet (which we ignore)
 *
 *	TCP_CLOSE_WAIT		remote side has shutdown and is waiting for
 *				us to finish writing our data and to shutdown
 *				(we have to close() to move on to LAST_ACK)
 *
 *	TCP_LAST_ACK		our side has shutdown after remote has
 *				shutdown.  There may still be data in our
 *				buffer that we have to finish sending
 *
 *	TCP_CLOSE		socket is finished
 */

#include <linux/config.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/fcntl.h>
#include <linux/poll.h>
#include <linux/init.h>
#include <linux/smp_lock.h>
#include <linux/fs.h>
#include <linux/random.h>
#include <linux/bootmem.h>

#include <net/icmp.h>
#include <net/tcp.h>
#include <net/xfrm.h>
#include <net/ip.h>


#include <asm/uaccess.h>
#include <asm/ioctls.h>

int sysctl_tcp_fin_timeout = TCP_FIN_TIMEOUT;

DEFINE_SNMP_STAT(struct tcp_mib, tcp_statistics) __read_mostly;

atomic_t tcp_orphan_count = ATOMIC_INIT(0);

EXPORT_SYMBOL_GPL(tcp_orphan_count);

int sysctl_tcp_mem[3];
int sysctl_tcp_wmem[3] = { 4 * 1024, 16 * 1024, 128 * 1024 };
int sysctl_tcp_rmem[3] = { 4 * 1024, 87380, 87380 * 2 };

EXPORT_SYMBOL(sysctl_tcp_mem);
EXPORT_SYMBOL(sysctl_tcp_rmem);
EXPORT_SYMBOL(sysctl_tcp_wmem);

atomic_t tcp_memory_allocated;	/* Current allocated memory. */
atomic_t tcp_sockets_allocated;	/* Current number of TCP sockets. */

EXPORT_SYMBOL(tcp_memory_allocated);
EXPORT_SYMBOL(tcp_sockets_allocated);

/*
 * Pressure flag: try to collapse.
 * Technical note: it is used by multiple contexts non-atomically.
 * All of the sk_stream_mem_schedule() accounting is of this nature:
 * accounting is strict, actions are advisory and have some latency.
 */
int tcp_memory_pressure;

EXPORT_SYMBOL(tcp_memory_pressure);

void tcp_enter_memory_pressure(void)
{
	if (!tcp_memory_pressure) {
		NET_INC_STATS(LINUX_MIB_TCPMEMORYPRESSURES);
		tcp_memory_pressure = 1;
	}
}

EXPORT_SYMBOL(tcp_enter_memory_pressure);

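/*
 * Illustrative sketch (not part of the original file): how the pressure
 * flag is typically raised.  The sk_stream allocation paths compare
 * tcp_memory_allocated against sysctl_tcp_mem[] and invoke this hook
 * (via sk->sk_prot->enter_memory_pressure) when the middle threshold is
 * crossed; roughly:
 *
 *	if (atomic_read(&tcp_memory_allocated) > sysctl_tcp_mem[1])
 *		tcp_enter_memory_pressure();
 *
 * The flag is then read, racily by design, to decide whether to collapse
 * receive queues or shrink buffers.  The snippet above is a simplification
 * of the sk_stream code, not a literal quote of it.
 */
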
/*
 *	Wait for a TCP event.
 *
 *	Note that we don't need to lock the socket, as the upper poll layers
 *	take care of normal races (between the test and the event) and we don't
 *	go look at any of the socket buffers directly.
 */
unsigned int tcp_poll(struct file *file, struct socket *sock, poll_table *wait)
{
	unsigned int mask;
	struct sock *sk = sock->sk;
	struct tcp_sock *tp = tcp_sk(sk);

	poll_wait(file, sk->sk_sleep, wait);
	if (sk->sk_state == TCP_LISTEN)
		return inet_csk_listen_poll(sk);

	/* Socket is not locked. We are protected from async events
	 * by poll logic and correct handling of state changes
	 * made by other threads is impossible in any case.
	 */

	mask = 0;
	if (sk->sk_err)
		mask = POLLERR;

	/*
	 * POLLHUP is certainly not done right. But poll() doesn't
	 * have a notion of HUP in just one direction, and for a
	 * socket the read side is more interesting.
	 *
	 * Some poll() documentation says that POLLHUP is incompatible
	 * with the POLLOUT/POLLWR flags, so somebody should check this
	 * all. But careful, it tends to be safer to return too many
	 * bits than too few, and you can easily break real applications
	 * if you don't tell them that something has hung up!
	 *
	 * Check-me.
	 *
	 * Check number 1. POLLHUP is _UNMASKABLE_ event (see UNIX98 and
	 * our fs/select.c). It means that after we received EOF,
	 * poll always returns immediately, making impossible poll() on write()
	 * in state CLOSE_WAIT. One solution is evident --- to set POLLHUP
	 * if and only if shutdown has been made in both directions.
	 * Actually, it is interesting to look how Solaris and DUX
	 * solve this dilemma. I would prefer, if POLLHUP were maskable,
	 * then we could set it on SND_SHUTDOWN. BTW examples given
	 * in Stevens' books assume exactly this behaviour, it explains
	 * why POLLHUP is incompatible with POLLOUT.	--ANK
	 *
	 * NOTE. Check for TCP_CLOSE is added. The goal is to prevent
	 * blocking on fresh not-connected or disconnected socket. --ANK
	 */
	if (sk->sk_shutdown == SHUTDOWN_MASK || sk->sk_state == TCP_CLOSE)
		mask |= POLLHUP;
	if (sk->sk_shutdown & RCV_SHUTDOWN)
		mask |= POLLIN | POLLRDNORM;

	/* Connected? */
	if ((1 << sk->sk_state) & ~(TCPF_SYN_SENT | TCPF_SYN_RECV)) {
		/* Potential race condition. If read of tp below will
		 * escape above sk->sk_state, we can be illegally awakened
		 * in SYN_* states. */
		if ((tp->rcv_nxt != tp->copied_seq) &&
		    (tp->urg_seq != tp->copied_seq ||
		     tp->rcv_nxt != tp->copied_seq + 1 ||
		     sock_flag(sk, SOCK_URGINLINE) || !tp->urg_data))
			mask |= POLLIN | POLLRDNORM;

		if (!(sk->sk_shutdown & SEND_SHUTDOWN)) {
			if (sk_stream_wspace(sk) >= sk_stream_min_wspace(sk)) {
				mask |= POLLOUT | POLLWRNORM;
			} else {  /* send SIGIO later */
				set_bit(SOCK_ASYNC_NOSPACE,
					&sk->sk_socket->flags);
				set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);

				/* Race breaker. If space is freed after
				 * wspace test but before the flags are set,
				 * IO signal will be lost.
				 */
				if (sk_stream_wspace(sk) >= sk_stream_min_wspace(sk))
					mask |= POLLOUT | POLLWRNORM;
			}
		}

		if (tp->urg_data & TCP_URG_VALID)
			mask |= POLLPRI;
	}
	return mask;
}
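
/*
 * Illustrative user-space sketch (not part of the original file): the mask
 * computed above is what a blocking poll() caller eventually observes, e.g.
 *
 *	struct pollfd pfd = { .fd = tcp_fd, .events = POLLIN | POLLOUT };
 *	poll(&pfd, 1, -1);
 *	if (pfd.revents & POLLHUP)
 *		;	// both directions shut down, or socket is CLOSE
 *
 * "tcp_fd" is a hypothetical connected TCP socket descriptor.
 */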

int tcp_ioctl(struct sock *sk, int cmd, unsigned long arg)
{
	struct tcp_sock *tp = tcp_sk(sk);
	int answ;

	switch (cmd) {
	case SIOCINQ:
		if (sk->sk_state == TCP_LISTEN)
			return -EINVAL;

		lock_sock(sk);
		if ((1 << sk->sk_state) & (TCPF_SYN_SENT | TCPF_SYN_RECV))
			answ = 0;
		else if (sock_flag(sk, SOCK_URGINLINE) ||
			 !tp->urg_data ||
			 before(tp->urg_seq, tp->copied_seq) ||
			 !before(tp->urg_seq, tp->rcv_nxt)) {
			answ = tp->rcv_nxt - tp->copied_seq;

			/* Subtract 1, if FIN is in queue. */
			if (answ && !skb_queue_empty(&sk->sk_receive_queue))
				answ -=
		       ((struct sk_buff *)sk->sk_receive_queue.prev)->h.th->fin;
		} else
			answ = tp->urg_seq - tp->copied_seq;
		release_sock(sk);
		break;
	case SIOCATMARK:
		answ = tp->urg_data && tp->urg_seq == tp->copied_seq;
		break;
	case SIOCOUTQ:
		if (sk->sk_state == TCP_LISTEN)
			return -EINVAL;

		if ((1 << sk->sk_state) & (TCPF_SYN_SENT | TCPF_SYN_RECV))
			answ = 0;
		else
			answ = tp->write_seq - tp->snd_una;
		break;
	default:
		return -ENOIOCTLCMD;
	}

	return put_user(answ, (int __user *)arg);
}
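
/*
 * Illustrative user-space sketch (not part of the original file) of the
 * ioctls handled above, assuming "fd" is a hypothetical connected socket:
 *
 *	int inq, outq;
 *	ioctl(fd, SIOCINQ, &inq);	// bytes readable: rcv_nxt - copied_seq
 *	ioctl(fd, SIOCOUTQ, &outq);	// bytes unacked:  write_seq - snd_una
 */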

static inline void tcp_mark_push(struct tcp_sock *tp, struct sk_buff *skb)
{
	TCP_SKB_CB(skb)->flags |= TCPCB_FLAG_PSH;
	tp->pushed_seq = tp->write_seq;
}

static inline int forced_push(struct tcp_sock *tp)
{
	return after(tp->write_seq, tp->pushed_seq + (tp->max_window >> 1));
}

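/*
 * Worked example (illustrative; the numbers are an assumption, not from
 * the original source): with max_window = 65535, forced_push() returns
 * true once write_seq has advanced more than 32767 bytes past pushed_seq,
 * i.e. once more than half of the largest window the peer ever advertised
 * has been queued since the last PSH was marked.
 */
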
static inline void skb_entail(struct sock *sk, struct tcp_sock *tp,
			      struct sk_buff *skb)
{
	skb->csum = 0;
	TCP_SKB_CB(skb)->seq = tp->write_seq;
	TCP_SKB_CB(skb)->end_seq = tp->write_seq;
	TCP_SKB_CB(skb)->flags = TCPCB_FLAG_ACK;
	TCP_SKB_CB(skb)->sacked = 0;
	skb_header_release(skb);
	__skb_queue_tail(&sk->sk_write_queue, skb);
	sk_charge_skb(sk, skb);
	if (!sk->sk_send_head)
		sk->sk_send_head = skb;
	if (tp->nonagle & TCP_NAGLE_PUSH)
		tp->nonagle &= ~TCP_NAGLE_PUSH;
}

static inline void tcp_mark_urg(struct tcp_sock *tp, int flags,
				struct sk_buff *skb)
{
	if (flags & MSG_OOB) {
		tp->urg_mode = 1;
		tp->snd_up = tp->write_seq;
		TCP_SKB_CB(skb)->sacked |= TCPCB_URG;
	}
}

static inline void tcp_push(struct sock *sk, struct tcp_sock *tp, int flags,
			    int mss_now, int nonagle)
{
	if (sk->sk_send_head) {
		struct sk_buff *skb = sk->sk_write_queue.prev;
		if (!(flags & MSG_MORE) || forced_push(tp))
			tcp_mark_push(tp, skb);
		tcp_mark_urg(tp, flags, skb);
		__tcp_push_pending_frames(sk, tp, mss_now,
					  (flags & MSG_MORE) ? TCP_NAGLE_CORK : nonagle);
	}
}

static ssize_t do_tcp_sendpages(struct sock *sk, struct page **pages, int poffset,
				size_t psize, int flags)
{
	struct tcp_sock *tp = tcp_sk(sk);
	int mss_now, size_goal;
	int err;
	ssize_t copied;
	long timeo = sock_sndtimeo(sk, flags & MSG_DONTWAIT);

	/* Wait for a connection to finish. */
	if ((1 << sk->sk_state) & ~(TCPF_ESTABLISHED | TCPF_CLOSE_WAIT))
		if ((err = sk_stream_wait_connect(sk, &timeo)) != 0)
			goto out_err;

	clear_bit(SOCK_ASYNC_NOSPACE, &sk->sk_socket->flags);

	mss_now = tcp_current_mss(sk, !(flags & MSG_OOB));
	size_goal = tp->xmit_size_goal;
	copied = 0;

	err = -EPIPE;
	if (sk->sk_err || (sk->sk_shutdown & SEND_SHUTDOWN))
		goto do_error;

	while (psize > 0) {
		struct sk_buff *skb = sk->sk_write_queue.prev;
		struct page *page = pages[poffset / PAGE_SIZE];
		int copy, i, can_coalesce;
		int offset = poffset % PAGE_SIZE;
		int size = min_t(size_t, psize, PAGE_SIZE - offset);

		if (!sk->sk_send_head || (copy = size_goal - skb->len) <= 0) {
new_segment:
			if (!sk_stream_memory_free(sk))
				goto wait_for_sndbuf;

			skb = sk_stream_alloc_pskb(sk, 0, 0,
						   sk->sk_allocation);
			if (!skb)
				goto wait_for_memory;

			skb_entail(sk, tp, skb);
			copy = size_goal;
		}

		if (copy > size)
			copy = size;

		i = skb_shinfo(skb)->nr_frags;
		can_coalesce = skb_can_coalesce(skb, i, page, offset);
		if (!can_coalesce && i >= MAX_SKB_FRAGS) {
			tcp_mark_push(tp, skb);
			goto new_segment;
		}
		if (!sk_stream_wmem_schedule(sk, copy))
			goto wait_for_memory;

		if (can_coalesce) {
			skb_shinfo(skb)->frags[i - 1].size += copy;
		} else {
			get_page(page);
			skb_fill_page_desc(skb, i, page, offset, copy);
		}

		skb->len += copy;
		skb->data_len += copy;
		skb->truesize += copy;
		sk->sk_wmem_queued += copy;
		sk->sk_forward_alloc -= copy;
		skb->ip_summed = CHECKSUM_HW;
		tp->write_seq += copy;
		TCP_SKB_CB(skb)->end_seq += copy;
		skb_shinfo(skb)->tso_segs = 0;

		if (!copied)
			TCP_SKB_CB(skb)->flags &= ~TCPCB_FLAG_PSH;

		copied += copy;
		poffset += copy;
		if (!(psize -= copy))
			goto out;

		if (skb->len < mss_now || (flags & MSG_OOB))
			continue;

		if (forced_push(tp)) {
			tcp_mark_push(tp, skb);
			__tcp_push_pending_frames(sk, tp, mss_now, TCP_NAGLE_PUSH);
		} else if (skb == sk->sk_send_head)
			tcp_push_one(sk, mss_now);
		continue;

wait_for_sndbuf:
		set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
wait_for_memory:
		if (copied)
			tcp_push(sk, tp, flags & ~MSG_MORE, mss_now, TCP_NAGLE_PUSH);

		if ((err = sk_stream_wait_memory(sk, &timeo)) != 0)
			goto do_error;

		mss_now = tcp_current_mss(sk, !(flags & MSG_OOB));
		size_goal = tp->xmit_size_goal;
	}

out:
	if (copied)
		tcp_push(sk, tp, flags, mss_now, tp->nonagle);
	return copied;

do_error:
	if (copied)
		goto out;
out_err:
	return sk_stream_error(sk, flags, err);
}

ssize_t tcp_sendpage(struct socket *sock, struct page *page, int offset,
		     size_t size, int flags)
{
	ssize_t res;
	struct sock *sk = sock->sk;

#define TCP_ZC_CSUM_FLAGS (NETIF_F_IP_CSUM | NETIF_F_NO_CSUM | NETIF_F_HW_CSUM)

	if (!(sk->sk_route_caps & NETIF_F_SG) ||
	    !(sk->sk_route_caps & TCP_ZC_CSUM_FLAGS))
		return sock_no_sendpage(sock, page, offset, size, flags);

#undef TCP_ZC_CSUM_FLAGS

	lock_sock(sk);
	TCP_CHECK_TIMER(sk);
	res = do_tcp_sendpages(sk, &page, offset, size, flags);
	TCP_CHECK_TIMER(sk);
	release_sock(sk);
	return res;
}
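
/*
 * Illustrative user-space sketch (not part of the original file): this entry
 * point is typically reached via sendfile(2) on a TCP socket, assuming
 * "sock_fd" and "file_fd" are hypothetical descriptors:
 *
 *	off_t off = 0;
 *	sendfile(sock_fd, file_fd, &off, count);
 *
 * Pages of the source file are attached to skbs by reference in
 * do_tcp_sendpages() instead of being copied, which is why the route must
 * support scatter-gather and hardware (or no) checksumming above.
 */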

#define TCP_PAGE(sk)	(sk->sk_sndmsg_page)
#define TCP_OFF(sk)	(sk->sk_sndmsg_off)

static inline int select_size(struct sock *sk, struct tcp_sock *tp)
{
	int tmp = tp->mss_cache;

	if (sk->sk_route_caps & NETIF_F_SG) {
		if (sk->sk_route_caps & NETIF_F_TSO)
			tmp = 0;
		else {
			int pgbreak = SKB_MAX_HEAD(MAX_TCP_HEADER);

			if (tmp >= pgbreak &&
			    tmp <= pgbreak + (MAX_SKB_FRAGS - 1) * PAGE_SIZE)
				tmp = pgbreak;
		}
	}

	return tmp;
}
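
/*
 * Illustrative arithmetic (the numbers are an assumption, not from the
 * original source): select_size() picks how much linear head room to
 * allocate per skb.  With TSO the head carries no payload (tmp = 0).  On a
 * scatter-gather device with 4096-byte pages, SKB_MAX_HEAD(MAX_TCP_HEADER)
 * is somewhat under one page; an MSS of 1460 falls below that break, so the
 * full MSS is allocated linearly, while a jumbo-frame MSS landing between
 * pgbreak and the fragment limit is clamped to pgbreak, the remainder going
 * into page fragments.
 */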

int tcp_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
		size_t size)
{
	struct iovec *iov;
	struct tcp_sock *tp = tcp_sk(sk);
	struct sk_buff *skb;
	int iovlen, flags;
	int mss_now, size_goal;
	int err, copied;
	long timeo;

	lock_sock(sk);
	TCP_CHECK_TIMER(sk);

	flags = msg->msg_flags;
	timeo = sock_sndtimeo(sk, flags & MSG_DONTWAIT);

	/* Wait for a connection to finish. */
	if ((1 << sk->sk_state) & ~(TCPF_ESTABLISHED | TCPF_CLOSE_WAIT))
		if ((err = sk_stream_wait_connect(sk, &timeo)) != 0)
			goto out_err;

	/* This should be in poll */
	clear_bit(SOCK_ASYNC_NOSPACE, &sk->sk_socket->flags);

	mss_now = tcp_current_mss(sk, !(flags & MSG_OOB));
	size_goal = tp->xmit_size_goal;

	/* Ok commence sending. */
	iovlen = msg->msg_iovlen;
	iov = msg->msg_iov;
	copied = 0;

	err = -EPIPE;
	if (sk->sk_err || (sk->sk_shutdown & SEND_SHUTDOWN))
		goto do_error;

	while (--iovlen >= 0) {
		int seglen = iov->iov_len;
		unsigned char __user *from = iov->iov_base;

		iov++;

		while (seglen > 0) {
			int copy;

			skb = sk->sk_write_queue.prev;

			if (!sk->sk_send_head ||
			    (copy = size_goal - skb->len) <= 0) {

new_segment:
				/* Allocate new segment. If the interface is SG,
				 * allocate skb fitting to single page.
				 */
				if (!sk_stream_memory_free(sk))
					goto wait_for_sndbuf;

				skb = sk_stream_alloc_pskb(sk, select_size(sk, tp),
							   0, sk->sk_allocation);
				if (!skb)
					goto wait_for_memory;

				/*
				 * Check whether we can use HW checksum.
				 */
				if (sk->sk_route_caps &
				    (NETIF_F_IP_CSUM | NETIF_F_NO_CSUM |
				     NETIF_F_HW_CSUM))
					skb->ip_summed = CHECKSUM_HW;

				skb_entail(sk, tp, skb);
				copy = size_goal;
			}

			/* Try to append data to the end of skb. */
			if (copy > seglen)
				copy = seglen;

			/* Where to copy to? */
			if (skb_tailroom(skb) > 0) {
				/* We have some space in skb head. Superb! */
				if (copy > skb_tailroom(skb))
					copy = skb_tailroom(skb);
				if ((err = skb_add_data(skb, from, copy)) != 0)
					goto do_fault;
			} else {
				int merge = 0;
				int i = skb_shinfo(skb)->nr_frags;
				struct page *page = TCP_PAGE(sk);
				int off = TCP_OFF(sk);

				if (skb_can_coalesce(skb, i, page, off) &&
				    off != PAGE_SIZE) {
					/* We can extend the last page
					 * fragment. */
					merge = 1;
				} else if (i == MAX_SKB_FRAGS ||
					   (!i &&
					    !(sk->sk_route_caps & NETIF_F_SG))) {
					/* Need to add new fragment and cannot
					 * do this because interface is non-SG,
					 * or because all the page slots are
					 * busy. */
					tcp_mark_push(tp, skb);
					goto new_segment;
				} else if (page) {
					if (off == PAGE_SIZE) {
						put_page(page);
						TCP_PAGE(sk) = page = NULL;
					}
				}

				if (!page) {
					/* Allocate new cache page. */
					if (!(page = sk_stream_alloc_page(sk)))
						goto wait_for_memory;
					off = 0;
				}

				if (copy > PAGE_SIZE - off)
					copy = PAGE_SIZE - off;

				/* Time to copy data. We are close to
				 * the end! */
				err = skb_copy_to_page(sk, from, skb, page,
						       off, copy);
				if (err) {
					/* If this page was new, give it to the
					 * socket so it does not get leaked.
					 */
					if (!TCP_PAGE(sk)) {
						TCP_PAGE(sk) = page;
						TCP_OFF(sk) = 0;
					}
					goto do_error;
				}

				/* Update the skb. */
				if (merge) {
					skb_shinfo(skb)->frags[i - 1].size +=
									copy;
				} else {
					skb_fill_page_desc(skb, i, page, off, copy);
					if (TCP_PAGE(sk)) {
						get_page(page);
					} else if (off + copy < PAGE_SIZE) {
						get_page(page);
						TCP_PAGE(sk) = page;
					}
				}

				TCP_OFF(sk) = off + copy;
			}

			if (!copied)
				TCP_SKB_CB(skb)->flags &= ~TCPCB_FLAG_PSH;

			tp->write_seq += copy;
			TCP_SKB_CB(skb)->end_seq += copy;
			skb_shinfo(skb)->tso_segs = 0;

			from += copy;
			copied += copy;
			if ((seglen -= copy) == 0 && iovlen == 0)
				goto out;

			if (skb->len < mss_now || (flags & MSG_OOB))
				continue;

			if (forced_push(tp)) {
				tcp_mark_push(tp, skb);
				__tcp_push_pending_frames(sk, tp, mss_now, TCP_NAGLE_PUSH);
			} else if (skb == sk->sk_send_head)
				tcp_push_one(sk, mss_now);
			continue;

wait_for_sndbuf:
			set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
wait_for_memory:
			if (copied)
				tcp_push(sk, tp, flags & ~MSG_MORE, mss_now, TCP_NAGLE_PUSH);

			if ((err = sk_stream_wait_memory(sk, &timeo)) != 0)
				goto do_error;

			mss_now = tcp_current_mss(sk, !(flags & MSG_OOB));
			size_goal = tp->xmit_size_goal;
		}
	}

out:
	if (copied)
		tcp_push(sk, tp, flags, mss_now, tp->nonagle);
	TCP_CHECK_TIMER(sk);
	release_sock(sk);
	return copied;

do_fault:
	if (!skb->len) {
		if (sk->sk_send_head == skb)
			sk->sk_send_head = NULL;
		__skb_unlink(skb, &sk->sk_write_queue);
		sk_stream_free_skb(sk, skb);
	}

do_error:
	if (copied)
		goto out;
out_err:
	err = sk_stream_error(sk, flags, err);
	TCP_CHECK_TIMER(sk);
	release_sock(sk);
	return err;
}
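
/*
 * Illustrative user-space sketch (not part of the original file): the loop
 * above services an ordinary send, e.g.
 *
 *	send(fd, buf, len, 0);		  // one iovec, may block for memory
 *	send(fd, buf, len, MSG_DONTWAIT); // returns -EAGAIN instead of
 *					  // sleeping in sk_stream_wait_memory()
 *
 * "fd" is a hypothetical connected TCP socket; each iovec element of a
 * sendmsg() call takes one pass of the outer while loop.
 */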

/*
 *	Handle reading urgent data. BSD has very simple semantics for
 *	this, no blocking and very strange errors 8)
 */

static int tcp_recv_urg(struct sock *sk, long timeo,
			struct msghdr *msg, int len, int flags,
			int *addr_len)
{
	struct tcp_sock *tp = tcp_sk(sk);

	/* No URG data to read. */
	if (sock_flag(sk, SOCK_URGINLINE) || !tp->urg_data ||
	    tp->urg_data == TCP_URG_READ)
		return -EINVAL;	/* Yes this is right ! */

	if (sk->sk_state == TCP_CLOSE && !sock_flag(sk, SOCK_DONE))
		return -ENOTCONN;

	if (tp->urg_data & TCP_URG_VALID) {
		int err = 0;
		char c = tp->urg_data;

		if (!(flags & MSG_PEEK))
			tp->urg_data = TCP_URG_READ;

		/* Read urgent data. */
		msg->msg_flags |= MSG_OOB;

		if (len > 0) {
			if (!(flags & MSG_TRUNC))
				err = memcpy_toiovec(msg->msg_iov, &c, 1);
			len = 1;
		} else
			msg->msg_flags |= MSG_TRUNC;

		return err ? -EFAULT : len;
	}

	if (sk->sk_state == TCP_CLOSE || (sk->sk_shutdown & RCV_SHUTDOWN))
		return 0;

	/* Fixed the recv(..., MSG_OOB) behaviour. BSD docs and
	 * the available implementations agree in this case:
	 * this call should never block, independent of the
	 * blocking state of the socket.
	 * Mike <pall@rz.uni-karlsruhe.de>
	 */
	return -EAGAIN;
}
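
/*
 * Illustrative user-space sketch (not part of the original file), assuming
 * "fd" is a hypothetical TCP socket without SO_OOBINLINE set:
 *
 *	char c;
 *	int n = recv(fd, &c, 1, MSG_OOB);
 *	// n == 1: the single urgent byte; EINVAL if it was already consumed
 *	// or is delivered inline; EAGAIN if no urgent byte is pending --
 *	// the call never blocks, per the comment above.
 */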

/* Clean up the receive buffer for full frames taken by the user,
 * then send an ACK if necessary. COPIED is the number of bytes
 * tcp_recvmsg has given to the user so far, it speeds up the
 * calculation of whether or not we must ACK for the sake of
 * a window update.
 */
static void cleanup_rbuf(struct sock *sk, int copied)
{
	struct tcp_sock *tp = tcp_sk(sk);
	int time_to_ack = 0;

#if TCP_DEBUG
	struct sk_buff *skb = skb_peek(&sk->sk_receive_queue);

	BUG_TRAP(!skb || before(tp->copied_seq, TCP_SKB_CB(skb)->end_seq));
#endif

	if (inet_csk_ack_scheduled(sk)) {
		const struct inet_connection_sock *icsk = inet_csk(sk);
		/* Delayed ACKs frequently hit locked sockets during bulk
		 * receive. */
		if (icsk->icsk_ack.blocked ||
		    /* Once-per-two-segments ACK was not sent by tcp_input.c */
		    tp->rcv_nxt - tp->rcv_wup > icsk->icsk_ack.rcv_mss ||
		    /*
		     * If this read emptied read buffer, we send ACK, if
		     * connection is not bidirectional, user drained
		     * receive buffer and there was a small segment
		     * in queue.
		     */
		    (copied > 0 && (icsk->icsk_ack.pending & ICSK_ACK_PUSHED) &&
		     !icsk->icsk_ack.pingpong && !atomic_read(&sk->sk_rmem_alloc)))
			time_to_ack = 1;
	}

	/* We send an ACK if we can now advertise a non-zero window
	 * which has been raised "significantly".
	 *
	 * Even if window raised up to infinity, do not send window open ACK
	 * in states, where we will not receive more. It is useless.
	 */
	if (copied > 0 && !time_to_ack && !(sk->sk_shutdown & RCV_SHUTDOWN)) {
		__u32 rcv_window_now = tcp_receive_window(tp);

		/* Optimize, __tcp_select_window() is not cheap. */
		if (2 * rcv_window_now <= tp->window_clamp) {
			__u32 new_window = __tcp_select_window(sk);

			/* Send ACK now, if this read freed lots of space
			 * in our buffer. new_window is the window we would
			 * advertise now; we can do so if it is not less than
			 * the current one. "Lots" means "at least twice" here.
			 */
			if (new_window && new_window >= 2 * rcv_window_now)
				time_to_ack = 1;
		}
	}
	if (time_to_ack)
		tcp_send_ack(sk);
}
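
/*
 * Worked example (illustrative; numbers are not from the original source):
 * suppose window_clamp is 64 KB and the currently advertised window has
 * shrunk to 8 KB (rcv_window_now).  Since 2 * 8 KB <= 64 KB,
 * __tcp_select_window() is consulted; if the user's read just freed enough
 * space that the new window would be at least 16 KB, a window-update ACK is
 * sent immediately rather than waiting for the delayed-ACK timer.
 */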

static void tcp_prequeue_process(struct sock *sk)
{
	struct sk_buff *skb;
	struct tcp_sock *tp = tcp_sk(sk);

	NET_INC_STATS_USER(LINUX_MIB_TCPPREQUEUED);

	/* RX process wants to run with disabled BHs, though it is not
	 * necessary */
	local_bh_disable();
	while ((skb = __skb_dequeue(&tp->ucopy.prequeue)) != NULL)
		sk->sk_backlog_rcv(sk, skb);
	local_bh_enable();

	/* Clear memory counter. */
	tp->ucopy.memory = 0;
}

static inline struct sk_buff *tcp_recv_skb(struct sock *sk, u32 seq, u32 *off)
{
	struct sk_buff *skb;
	u32 offset;

	skb_queue_walk(&sk->sk_receive_queue, skb) {
		offset = seq - TCP_SKB_CB(skb)->seq;
		if (skb->h.th->syn)
			offset--;
		if (offset < skb->len || skb->h.th->fin) {
			*off = offset;
			return skb;
		}
	}
	return NULL;
}

/*
 * This routine provides an alternative to tcp_recvmsg() for routines
 * that would like to handle copying from skbuffs directly in 'sendfile'
 * fashion.
 * Note:
 *	- It is assumed that the socket was locked by the caller.
 *	- The routine does not block.
 *	- At present, there is no support for reading OOB data
 *	  or for 'peeking' the socket using this routine
 *	  (although both would be easy to implement).
 */
int tcp_read_sock(struct sock *sk, read_descriptor_t *desc,
		  sk_read_actor_t recv_actor)
{
	struct sk_buff *skb;
	struct tcp_sock *tp = tcp_sk(sk);
	u32 seq = tp->copied_seq;
	u32 offset;
	int copied = 0;

	if (sk->sk_state == TCP_LISTEN)
		return -ENOTCONN;
	while ((skb = tcp_recv_skb(sk, seq, &offset)) != NULL) {
		if (offset < skb->len) {
			size_t used, len;

			len = skb->len - offset;
			/* Stop reading if we hit a patch of urgent data */
			if (tp->urg_data) {
				u32 urg_offset = tp->urg_seq - seq;
				if (urg_offset < len)
					len = urg_offset;
				if (!len)
					break;
			}
			used = recv_actor(desc, skb, offset, len);
			if (used <= len) {
				seq += used;
				copied += used;
				offset += used;
			}
			if (offset != skb->len)
				break;
		}
		if (skb->h.th->fin) {
			sk_eat_skb(sk, skb);
			++seq;
			break;
		}
		sk_eat_skb(sk, skb);
		if (!desc->count)
			break;
	}
	tp->copied_seq = seq;

	tcp_rcv_space_adjust(sk);

	/* Clean up data we have read: This will do ACK frames. */
	if (copied)
		cleanup_rbuf(sk, copied);
	return copied;
}
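
/*
 * Illustrative sketch (not part of the original file) of a recv_actor as
 * an in-kernel caller might supply one; "my_actor" is a hypothetical name:
 *
 *	static int my_actor(read_descriptor_t *desc, struct sk_buff *skb,
 *			    unsigned int offset, size_t len)
 *	{
 *		size_t want = min_t(size_t, len, desc->count);
 *		// ... consume 'want' bytes of skb starting at 'offset' ...
 *		desc->count -= want;
 *		return want;	// bytes consumed; tcp_read_sock advances seq
 *	}
 *
 * tcp_read_sock() keeps calling the actor until it consumes less than was
 * offered, desc->count reaches zero, or a FIN/urgent mark is hit.
 */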

/*
 *	This routine copies from a sock struct into the user buffer.
 *
 *	Technical note: in 2.3 we work on _locked_ socket, so that
 *	tricks with *seq access order and skb->users are not required.
 *	Probably, code can be easily improved even more.
 */

int tcp_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
		size_t len, int nonblock, int flags, int *addr_len)
{
	struct tcp_sock *tp = tcp_sk(sk);
	int copied = 0;
	u32 peek_seq;
	u32 *seq;
	unsigned long used;
	int err;
	int target;		/* Read at least this many bytes */
	long timeo;
	struct task_struct *user_recv = NULL;

	lock_sock(sk);

	TCP_CHECK_TIMER(sk);

	err = -ENOTCONN;
	if (sk->sk_state == TCP_LISTEN)
		goto out;

	timeo = sock_rcvtimeo(sk, nonblock);

	/* Urgent data needs to be handled specially. */
	if (flags & MSG_OOB)
		goto recv_urg;

	seq = &tp->copied_seq;
	if (flags & MSG_PEEK) {
		peek_seq = tp->copied_seq;
		seq = &peek_seq;
	}

	target = sock_rcvlowat(sk, flags & MSG_WAITALL, len);

	do {
		struct sk_buff *skb;
		u32 offset;

		/* Are we at urgent data? Stop if we have read anything or have SIGURG pending. */
		if (tp->urg_data && tp->urg_seq == *seq) {
			if (copied)
				break;
			if (signal_pending(current)) {
				copied = timeo ? sock_intr_errno(timeo) : -EAGAIN;
				break;
			}
		}

		/* Next get a buffer. */

		skb = skb_peek(&sk->sk_receive_queue);
		do {
			if (!skb)
				break;

			/* Now that we have two receive queues this
			 * shouldn't happen.
			 */
			if (before(*seq, TCP_SKB_CB(skb)->seq)) {
				printk(KERN_INFO "recvmsg bug: copied %X "
				       "seq %X\n", *seq, TCP_SKB_CB(skb)->seq);
				break;
			}
			offset = *seq - TCP_SKB_CB(skb)->seq;
			if (skb->h.th->syn)
				offset--;
			if (offset < skb->len)
				goto found_ok_skb;
			if (skb->h.th->fin)
				goto found_fin_ok;
			BUG_TRAP(flags & MSG_PEEK);
			skb = skb->next;
		} while (skb != (struct sk_buff *)&sk->sk_receive_queue);

		/* Well, if we have backlog, try to process it now. */

		if (copied >= target && !sk->sk_backlog.tail)
			break;

		if (copied) {
			if (sk->sk_err ||
			    sk->sk_state == TCP_CLOSE ||
			    (sk->sk_shutdown & RCV_SHUTDOWN) ||
			    !timeo ||
			    signal_pending(current) ||
			    (flags & MSG_PEEK))
				break;
		} else {
			if (sock_flag(sk, SOCK_DONE))
				break;

			if (sk->sk_err) {
				copied = sock_error(sk);
				break;
			}

			if (sk->sk_shutdown & RCV_SHUTDOWN)
				break;

			if (sk->sk_state == TCP_CLOSE) {
				if (!sock_flag(sk, SOCK_DONE)) {
					/* This occurs when user tries to read
					 * from never connected socket.
					 */
					copied = -ENOTCONN;
					break;
				}
				break;
			}

			if (!timeo) {
				copied = -EAGAIN;
				break;
			}

			if (signal_pending(current)) {
				copied = sock_intr_errno(timeo);
				break;
			}
		}

		cleanup_rbuf(sk, copied);

		if (!sysctl_tcp_low_latency && tp->ucopy.task == user_recv) {
			/* Install new reader */
			if (!user_recv && !(flags & (MSG_TRUNC | MSG_PEEK))) {
				user_recv = current;
				tp->ucopy.task = user_recv;
				tp->ucopy.iov = msg->msg_iov;
			}

			tp->ucopy.len = len;

			BUG_TRAP(tp->copied_seq == tp->rcv_nxt ||
				 (flags & (MSG_PEEK | MSG_TRUNC)));

			/* Ugly... If prequeue is not empty, we have to
			 * process it before releasing socket, otherwise
			 * order will be broken at second iteration.
			 * More elegant solution is required!!!
			 *
			 * Look: we have the following (pseudo)queues:
			 *
			 * 1. packets in flight
			 * 2. backlog
			 * 3. prequeue
			 * 4. receive_queue
			 *
			 * Each queue can be processed only if the next ones
			 * are empty. At this point we have empty receive_queue.
			 * But prequeue _can_ be not empty after 2nd iteration,
			 * when we jumped to start of loop because backlog
			 * processing added something to receive_queue.
			 * We cannot release_sock(), because backlog contains
			 * packets arrived _after_ prequeued ones.
			 *
			 * Shortly, algorithm is clear --- to process all
			 * the queues in order. We could make it more directly,
			 * requeueing packets from backlog to prequeue, if it
			 * is not empty. It is more elegant, but eats cycles,
			 * unfortunately.
			 */
			if (!skb_queue_empty(&tp->ucopy.prequeue))
				goto do_prequeue;

			/* __ Set realtime policy in scheduler __ */
		}

		if (copied >= target) {
			/* Do not sleep, just process backlog. */
			release_sock(sk);
			lock_sock(sk);
		} else
			sk_wait_data(sk, &timeo);

		if (user_recv) {
			int chunk;

			/* __ Restore normal policy in scheduler __ */

			if ((chunk = len - tp->ucopy.len) != 0) {
				NET_ADD_STATS_USER(LINUX_MIB_TCPDIRECTCOPYFROMBACKLOG, chunk);
				len -= chunk;
				copied += chunk;
			}

			if (tp->rcv_nxt == tp->copied_seq &&
			    !skb_queue_empty(&tp->ucopy.prequeue)) {
do_prequeue:
				tcp_prequeue_process(sk);

				if ((chunk = len - tp->ucopy.len) != 0) {
					NET_ADD_STATS_USER(LINUX_MIB_TCPDIRECTCOPYFROMPREQUEUE, chunk);
					len -= chunk;
					copied += chunk;
				}
			}
		}
		if ((flags & MSG_PEEK) && peek_seq != tp->copied_seq) {
			if (net_ratelimit())
				printk(KERN_DEBUG "TCP(%s:%d): Application bug, race in MSG_PEEK.\n",
				       current->comm, current->pid);
			peek_seq = tp->copied_seq;
		}
		continue;

	found_ok_skb:
		/* Ok so how much can we use? */
		used = skb->len - offset;
		if (len < used)
			used = len;

		/* Do we have urgent data here? */
		if (tp->urg_data) {
			u32 urg_offset = tp->urg_seq - *seq;
			if (urg_offset < used) {
				if (!urg_offset) {
					if (!sock_flag(sk, SOCK_URGINLINE)) {
						++*seq;
						offset++;
						used--;
						if (!used)
							goto skip_copy;
					}
				} else
					used = urg_offset;
			}
		}

		if (!(flags & MSG_TRUNC)) {
			err = skb_copy_datagram_iovec(skb, offset,
						      msg->msg_iov, used);
			if (err) {
				/* Exception. Bailout! */
				if (!copied)
					copied = -EFAULT;
				break;
			}
		}

		*seq += used;
		copied += used;
		len -= used;

		tcp_rcv_space_adjust(sk);

skip_copy:
		if (tp->urg_data && after(tp->copied_seq, tp->urg_seq)) {
			tp->urg_data = 0;
			tcp_fast_path_check(sk, tp);
		}
		if (used + offset < skb->len)
			continue;

		if (skb->h.th->fin)
			goto found_fin_ok;
		if (!(flags & MSG_PEEK))
			sk_eat_skb(sk, skb);
		continue;

	found_fin_ok:
		/* Process the FIN. */
		++*seq;
		if (!(flags & MSG_PEEK))
			sk_eat_skb(sk, skb);
		break;
	} while (len > 0);

	if (user_recv) {
		if (!skb_queue_empty(&tp->ucopy.prequeue)) {
			int chunk;

			tp->ucopy.len = copied > 0 ? len : 0;

			tcp_prequeue_process(sk);

			if (copied > 0 && (chunk = len - tp->ucopy.len) != 0) {
				NET_ADD_STATS_USER(LINUX_MIB_TCPDIRECTCOPYFROMPREQUEUE, chunk);
				len -= chunk;
				copied += chunk;
			}
		}

		tp->ucopy.task = NULL;
		tp->ucopy.len = 0;
	}

	/* According to UNIX98, msg_name/msg_namelen are ignored
	 * on connected socket. I was just happy when found this 8) --ANK
	 */

	/* Clean up data we have read: This will do ACK frames. */
	cleanup_rbuf(sk, copied);

	TCP_CHECK_TIMER(sk);
	release_sock(sk);
	return copied;

out:
	TCP_CHECK_TIMER(sk);
	release_sock(sk);
	return err;

recv_urg:
	err = tcp_recv_urg(sk, timeo, msg, len, flags, addr_len);
	goto out;
}
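
/*
 * Illustrative user-space sketch (not part of the original file): "target"
 * above is driven by SO_RCVLOWAT and MSG_WAITALL, e.g.
 *
 *	int lowat = 4096;
 *	setsockopt(fd, SOL_SOCKET, SO_RCVLOWAT, &lowat, sizeof(lowat));
 *	recv(fd, buf, 16384, 0);	   // may return once >= 4096 arrived
 *	recv(fd, buf, 16384, MSG_WAITALL); // sleeps for the full 16384
 *
 * "fd" is a hypothetical connected TCP socket; a signal, error, or EOF
 * still ends the wait early, as the loop above shows.
 */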

/*
 *	State processing on a close. This implements the state shift for
 *	sending our FIN frame. Note that we only send a FIN for some
 *	states. A shutdown() may have already sent the FIN, or we may be
 *	closed.
 */

static unsigned char new_state[16] = {
  /* current state:        new state:      action:	*/
  /* (Invalid)		*/ TCP_CLOSE,
  /* TCP_ESTABLISHED	*/ TCP_FIN_WAIT1 | TCP_ACTION_FIN,
  /* TCP_SYN_SENT	*/ TCP_CLOSE,
  /* TCP_SYN_RECV	*/ TCP_FIN_WAIT1 | TCP_ACTION_FIN,
  /* TCP_FIN_WAIT1	*/ TCP_FIN_WAIT1,
  /* TCP_FIN_WAIT2	*/ TCP_FIN_WAIT2,
  /* TCP_TIME_WAIT	*/ TCP_CLOSE,
  /* TCP_CLOSE		*/ TCP_CLOSE,
  /* TCP_CLOSE_WAIT	*/ TCP_LAST_ACK | TCP_ACTION_FIN,
  /* TCP_LAST_ACK	*/ TCP_LAST_ACK,
  /* TCP_LISTEN		*/ TCP_CLOSE,
  /* TCP_CLOSING	*/ TCP_CLOSING,
};

static int tcp_close_state(struct sock *sk)
{
	int next = (int)new_state[sk->sk_state];
	int ns = next & TCP_STATE_MASK;

	tcp_set_state(sk, ns);

	return next & TCP_ACTION_FIN;
}
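
/*
 * Worked example (illustrative): for a socket in TCP_ESTABLISHED,
 * new_state[TCP_ESTABLISHED] is TCP_FIN_WAIT1 | TCP_ACTION_FIN, so
 * tcp_close_state() moves the socket to FIN_WAIT1 and returns non-zero,
 * telling the caller to transmit a FIN.  For TCP_SYN_SENT the entry is
 * plain TCP_CLOSE: the socket is torn down and no FIN is sent.
 */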

/*
 *	Shutdown the sending side of a connection. Much like close except
 *	that we don't receive shut down or set_sock_flag(sk, SOCK_DEAD).
 */

void tcp_shutdown(struct sock *sk, int how)
{
	/*	We need to grab some memory, and put together a FIN,
	 *	and then put it into the queue to be sent.
	 *		Tim MacKenzie(tym@dibbler.cs.monash.edu.au) 4 Dec '92.
	 */
	if (!(how & SEND_SHUTDOWN))
		return;

	/* If we've already sent a FIN, or it's a closed state, skip this. */
	if ((1 << sk->sk_state) &
	    (TCPF_ESTABLISHED | TCPF_SYN_SENT |
	     TCPF_SYN_RECV | TCPF_CLOSE_WAIT)) {
		/* Clear out any half completed packets.  FIN if needed. */
		if (tcp_close_state(sk))
			tcp_send_fin(sk);
	}
}
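
/*
 * Illustrative user-space sketch (not part of the original file): this is
 * the SEND_SHUTDOWN half of shutdown(2), assuming a hypothetical "fd":
 *
 *	shutdown(fd, SHUT_WR);	// queue a FIN, keep receiving
 *
 * A read-only shutdown never reaches the FIN logic here, since only the
 * SEND_SHUTDOWN bit is examined above.
 */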

void tcp_close(struct sock *sk, long timeout)
{
	struct sk_buff *skb;
	int data_was_unread = 0;

	lock_sock(sk);
	sk->sk_shutdown = SHUTDOWN_MASK;

	if (sk->sk_state == TCP_LISTEN) {
		tcp_set_state(sk, TCP_CLOSE);

		/* Special case. */
		inet_csk_listen_stop(sk);

		goto adjudge_to_death;
	}

	/*  We need to flush the recv. buffs.  We do this only on the
	 *  descriptor close, not protocol-sourced closes, because the
	 *  reader process may not have drained the data yet!
	 */
	while ((skb = __skb_dequeue(&sk->sk_receive_queue)) != NULL) {
		u32 len = TCP_SKB_CB(skb)->end_seq - TCP_SKB_CB(skb)->seq -
			  skb->h.th->fin;
		data_was_unread += len;
		__kfree_skb(skb);
	}

	sk_stream_mem_reclaim(sk);

	/* As outlined in draft-ietf-tcpimpl-prob-03.txt, section
	 * 3.10, we send a RST here because data was lost. To
	 * witness the awful effects of the old behavior of always
	 * doing a FIN, run an older 2.1.x kernel or 2.0.x, start
	 * a bulk GET in an FTP client, suspend the process, wait
	 * for the client to advertise a zero window, then kill -9
	 * the FTP client, wheee...  Note: timeout is always zero
	 * in such a case.
	 */
	if (data_was_unread) {
		/* Unread data was tossed, zap the connection. */
		NET_INC_STATS_USER(LINUX_MIB_TCPABORTONCLOSE);
		tcp_set_state(sk, TCP_CLOSE);
		tcp_send_active_reset(sk, GFP_KERNEL);
	} else if (sock_flag(sk, SOCK_LINGER) && !sk->sk_lingertime) {
		/* Check zero linger _after_ checking for unread data. */
		sk->sk_prot->disconnect(sk, 0);
		NET_INC_STATS_USER(LINUX_MIB_TCPABORTONDATA);
	} else if (tcp_close_state(sk)) {
		/* We FIN if the application ate all the data before
		 * zapping the connection.
		 */

		/* RED-PEN. Formally speaking, we have broken TCP state
		 * machine. State transitions:
		 *
		 * TCP_ESTABLISHED -> TCP_FIN_WAIT1
		 * TCP_SYN_RECV	-> TCP_FIN_WAIT1 (forget it, it's impossible)
		 * TCP_CLOSE_WAIT -> TCP_LAST_ACK
		 *
		 * are legal only when FIN has been sent (i.e. in window),
		 * rather than queued out of window. Purists blame.
		 *
		 * F.e. "RFC state" is ESTABLISHED,
		 * if Linux state is FIN-WAIT-1, but FIN is still not sent.
		 *
		 * The visible declinations are that sometimes
		 * we enter time-wait state, when it is not required really
		 * (harmless), do not send active resets, when they are
		 * required by specs (TCP_ESTABLISHED, TCP_CLOSE_WAIT, when
		 * they look as CLOSING or LAST_ACK for Linux)
		 * Probably, I missed some more holelets.
		 * 						--ANK
		 */
		tcp_send_fin(sk);
	}

	sk_stream_wait_close(sk, timeout);

adjudge_to_death:
	/* It is the last release_sock in its life. It will remove backlog. */
	release_sock(sk);


	/* Now socket is owned by kernel and we acquire BH lock
	   to finish close. No need to check for user refs.
	 */
	local_bh_disable();
	bh_lock_sock(sk);
	BUG_TRAP(!sock_owned_by_user(sk));

	sock_hold(sk);
	sock_orphan(sk);

	/* This is a (useful) BSD violation of the RFC. There is a
	 * problem with TCP as specified in that the other end could
	 * keep a socket open forever with no application left this end.
	 * We use a 3 minute timeout (about the same as BSD) then kill
	 * our end. If they send after that then tough - BUT: long enough
	 * that we won't make the old 4*rto = almost no time - whoops
	 * reset mistake.
	 *
	 * Nope, it was not mistake. It is really desired behaviour
	 * f.e. on http servers, when such sockets are useless, but
	 * consume significant resources. Let's do it with special
	 * linger2 option.					--ANK
	 */

	if (sk->sk_state == TCP_FIN_WAIT2) {
		struct tcp_sock *tp = tcp_sk(sk);
		if (tp->linger2 < 0) {
			tcp_set_state(sk, TCP_CLOSE);
			tcp_send_active_reset(sk, GFP_ATOMIC);
			NET_INC_STATS_BH(LINUX_MIB_TCPABORTONLINGER);
		} else {
			const int tmo = tcp_fin_time(sk);

			if (tmo > TCP_TIMEWAIT_LEN) {
				inet_csk_reset_keepalive_timer(sk, tcp_fin_time(sk));
			} else {
				atomic_inc(sk->sk_prot->orphan_count);
				tcp_time_wait(sk, TCP_FIN_WAIT2, tmo);
				goto out;
			}
		}
	}
	if (sk->sk_state != TCP_CLOSE) {
		sk_stream_mem_reclaim(sk);
		if (atomic_read(sk->sk_prot->orphan_count) > sysctl_tcp_max_orphans ||
		    (sk->sk_wmem_queued > SOCK_MIN_SNDBUF &&
		     atomic_read(&tcp_memory_allocated) > sysctl_tcp_mem[2])) {
			if (net_ratelimit())
				printk(KERN_INFO "TCP: too many orphaned "
				       "sockets\n");
			tcp_set_state(sk, TCP_CLOSE);
			tcp_send_active_reset(sk, GFP_ATOMIC);
			NET_INC_STATS_BH(LINUX_MIB_TCPABORTONMEMORY);
		}
	}
	atomic_inc(sk->sk_prot->orphan_count);

	if (sk->sk_state == TCP_CLOSE)
		inet_csk_destroy_sock(sk);
	/* Otherwise, socket is reprieved until protocol close. */

out:
	bh_unlock_sock(sk);
	local_bh_enable();
	sock_put(sk);
}
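
/*
 * Illustrative user-space sketch (not part of the original file) of the
 * zero-linger branch above, assuming a hypothetical "fd":
 *
 *	struct linger lg = { .l_onoff = 1, .l_linger = 0 };
 *	setsockopt(fd, SOL_SOCKET, SO_LINGER, &lg, sizeof(lg));
 *	close(fd);	// disconnect immediately; peer sees RST, not FIN
 *
 * With unread data still in the receive queue the RST is sent regardless
 * of SO_LINGER, per the draft-ietf-tcpimpl-prob-03.txt rationale above.
 */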

/* These states need RST on ABORT according to RFC793 */

static inline int tcp_need_reset(int state)
{
	return (1 << state) &
	       (TCPF_ESTABLISHED | TCPF_CLOSE_WAIT | TCPF_FIN_WAIT1 |
		TCPF_FIN_WAIT2 | TCPF_SYN_RECV);
}

int tcp_disconnect(struct sock *sk, int flags)
{
	struct inet_sock *inet = inet_sk(sk);
	struct inet_connection_sock *icsk = inet_csk(sk);
	struct tcp_sock *tp = tcp_sk(sk);
	int err = 0;
	int old_state = sk->sk_state;

	if (old_state != TCP_CLOSE)
		tcp_set_state(sk, TCP_CLOSE);

	/* ABORT function of RFC793 */
	if (old_state == TCP_LISTEN) {
		inet_csk_listen_stop(sk);
	} else if (tcp_need_reset(old_state) ||
		   (tp->snd_nxt != tp->write_seq &&
		    (1 << old_state) & (TCPF_CLOSING | TCPF_LAST_ACK))) {
		/* The last check adjusts for the discrepancy between the
		 * Linux and RFC states.
		 */
		tcp_send_active_reset(sk, gfp_any());
		sk->sk_err = ECONNRESET;
	} else if (old_state == TCP_SYN_SENT)
		sk->sk_err = ECONNRESET;

	tcp_clear_xmit_timers(sk);
	__skb_queue_purge(&sk->sk_receive_queue);
	sk_stream_writequeue_purge(sk);
	__skb_queue_purge(&tp->out_of_order_queue);

	inet->dport = 0;

	if (!(sk->sk_userlocks & SOCK_BINDADDR_LOCK))
		inet_reset_saddr(sk);

	sk->sk_shutdown = 0;
	sock_reset_flag(sk, SOCK_DONE);
	tp->srtt = 0;
	if ((tp->write_seq += tp->max_window + 2) == 0)
		tp->write_seq = 1;
	icsk->icsk_backoff = 0;
	tp->snd_cwnd = 2;
	icsk->icsk_probes_out = 0;
	tp->packets_out = 0;
	tp->snd_ssthresh = 0x7fffffff;
	tp->snd_cwnd_cnt = 0;
	tcp_set_ca_state(sk, TCP_CA_Open);
	tcp_clear_retrans(tp);
	inet_csk_delack_init(sk);
	sk->sk_send_head = NULL;
	tp->rx_opt.saw_tstamp = 0;
	tcp_sack_reset(&tp->rx_opt);
	__sk_dst_reset(sk);

	BUG_TRAP(!inet->num || icsk->icsk_bind_hash);

	sk->sk_error_report(sk);
	return err;
}

/*
 *	Socket option code for TCP.
 */
int tcp_setsockopt(struct sock *sk, int level, int optname, char __user *optval,
		   int optlen)
{
	struct tcp_sock *tp = tcp_sk(sk);
	struct inet_connection_sock *icsk = inet_csk(sk);
	int val;
	int err = 0;

	if (level != SOL_TCP)
		return tp->af_specific->setsockopt(sk, level, optname,
						   optval, optlen);

	/* This is a string value; all the others are int's */
	if (optname == TCP_CONGESTION) {
		char name[TCP_CA_NAME_MAX];

		if (optlen < 1)
			return -EINVAL;

		val = strncpy_from_user(name, optval,
					min(TCP_CA_NAME_MAX - 1, optlen));
		if (val < 0)
			return -EFAULT;
		name[val] = 0;

		lock_sock(sk);
		err = tcp_set_congestion_control(sk, name);
		release_sock(sk);
		return err;
	}

	if (optlen < sizeof(int))
		return -EINVAL;

	if (get_user(val, (int __user *)optval))
		return -EFAULT;

	lock_sock(sk);

	switch (optname) {
	case TCP_MAXSEG:
		/* Values greater than interface MTU won't take effect. However
		 * at the point when this call is done we typically don't yet
		 * know which interface is going to be used */
		if (val < 8 || val > MAX_TCP_WINDOW) {
			err = -EINVAL;
			break;
		}
		tp->rx_opt.user_mss = val;
		break;

	case TCP_NODELAY:
		if (val) {
			/* TCP_NODELAY is weaker than TCP_CORK, so that
			 * this option on corked socket is remembered, but
			 * it is not activated until cork is cleared.
			 *
			 * However, when TCP_NODELAY is set we make
			 * an explicit push, which overrides even TCP_CORK
			 * for currently queued segments.
			 */
			tp->nonagle |= TCP_NAGLE_OFF|TCP_NAGLE_PUSH;
			tcp_push_pending_frames(sk, tp);
		} else {
			tp->nonagle &= ~TCP_NAGLE_OFF;
		}
		break;

	case TCP_CORK:
		/* When set, indicates to always queue non-full frames.
		 * Later the user clears this option and we transmit
		 * any pending partial frames in the queue. This is
		 * meant to be used alongside sendfile() to get properly
		 * filled frames when the user (for example) must write
		 * out headers with a write() call first and then use
		 * sendfile to send out the data parts.
		 *
		 * TCP_CORK can be set together with TCP_NODELAY and it is
		 * stronger than TCP_NODELAY.
		 */
		if (val) {
			tp->nonagle |= TCP_NAGLE_CORK;
		} else {
			tp->nonagle &= ~TCP_NAGLE_CORK;
			if (tp->nonagle & TCP_NAGLE_OFF)
				tp->nonagle |= TCP_NAGLE_PUSH;
			tcp_push_pending_frames(sk, tp);
		}
		break;

	case TCP_KEEPIDLE:
		if (val < 1 || val > MAX_TCP_KEEPIDLE)
			err = -EINVAL;
		else {
			tp->keepalive_time = val * HZ;
			if (sock_flag(sk, SOCK_KEEPOPEN) &&
			    !((1 << sk->sk_state) &
			      (TCPF_CLOSE | TCPF_LISTEN))) {
				__u32 elapsed = tcp_time_stamp - tp->rcv_tstamp;
				if (tp->keepalive_time > elapsed)
					elapsed = tp->keepalive_time - elapsed;
				else
					elapsed = 0;
				inet_csk_reset_keepalive_timer(sk, elapsed);
			}
		}
		break;
	case TCP_KEEPINTVL:
		if (val < 1 || val > MAX_TCP_KEEPINTVL)
			err = -EINVAL;
		else
			tp->keepalive_intvl = val * HZ;
		break;
	case TCP_KEEPCNT:
		if (val < 1 || val > MAX_TCP_KEEPCNT)
			err = -EINVAL;
		else
			tp->keepalive_probes = val;
		break;
	case TCP_SYNCNT:
		if (val < 1 || val > MAX_TCP_SYNCNT)
			err = -EINVAL;
		else
			icsk->icsk_syn_retries = val;
		break;

	case TCP_LINGER2:
		if (val < 0)
			tp->linger2 = -1;
		else if (val > sysctl_tcp_fin_timeout / HZ)
			tp->linger2 = 0;
		else
			tp->linger2 = val * HZ;
		break;

	case TCP_DEFER_ACCEPT:
		icsk->icsk_accept_queue.rskq_defer_accept = 0;
		if (val > 0) {
			/* Translate value in seconds to number of
			 * retransmits */
			while (icsk->icsk_accept_queue.rskq_defer_accept < 32 &&
			       val > ((TCP_TIMEOUT_INIT / HZ) <<
				      icsk->icsk_accept_queue.rskq_defer_accept))
				icsk->icsk_accept_queue.rskq_defer_accept++;
			icsk->icsk_accept_queue.rskq_defer_accept++;
		}
		break;

	case TCP_WINDOW_CLAMP:
		if (!val) {
			if (sk->sk_state != TCP_CLOSE) {
				err = -EINVAL;
				break;
			}
			tp->window_clamp = 0;
		} else
			tp->window_clamp = val < SOCK_MIN_RCVBUF / 2 ?
					   SOCK_MIN_RCVBUF / 2 : val;
		break;

	case TCP_QUICKACK:
		if (!val) {
			icsk->icsk_ack.pingpong = 1;
		} else {
			icsk->icsk_ack.pingpong = 0;
			if ((1 << sk->sk_state) &
			    (TCPF_ESTABLISHED | TCPF_CLOSE_WAIT) &&
			    inet_csk_ack_scheduled(sk)) {
				icsk->icsk_ack.pending |= ICSK_ACK_PUSHED;
				cleanup_rbuf(sk, 1);
				if (!(val & 1))
					icsk->icsk_ack.pingpong = 1;
			}
		}
		break;

	default:
		err = -ENOPROTOOPT;
		break;
	}
	release_sock(sk);
	return err;
}

/* Return information about state of tcp endpoint in API format. */
void tcp_get_info(struct sock *sk, struct tcp_info *info)
{
	struct tcp_sock *tp = tcp_sk(sk);
	const struct inet_connection_sock *icsk = inet_csk(sk);
	u32 now = tcp_time_stamp;

	memset(info, 0, sizeof(*info));

	info->tcpi_state = sk->sk_state;
	info->tcpi_ca_state = icsk->icsk_ca_state;
	info->tcpi_retransmits = icsk->icsk_retransmits;
	info->tcpi_probes = icsk->icsk_probes_out;
	info->tcpi_backoff = icsk->icsk_backoff;

	if (tp->rx_opt.tstamp_ok)
		info->tcpi_options |= TCPI_OPT_TIMESTAMPS;
	if (tp->rx_opt.sack_ok)
		info->tcpi_options |= TCPI_OPT_SACK;
	if (tp->rx_opt.wscale_ok) {
		info->tcpi_options |= TCPI_OPT_WSCALE;
		info->tcpi_snd_wscale = tp->rx_opt.snd_wscale;
		info->tcpi_rcv_wscale = tp->rx_opt.rcv_wscale;
	}

	if (tp->ecn_flags&TCP_ECN_OK)
		info->tcpi_options |= TCPI_OPT_ECN;

	info->tcpi_rto = jiffies_to_usecs(icsk->icsk_rto);
	info->tcpi_ato = jiffies_to_usecs(icsk->icsk_ack.ato);
	info->tcpi_snd_mss = tp->mss_cache;
	info->tcpi_rcv_mss = icsk->icsk_ack.rcv_mss;

	info->tcpi_unacked = tp->packets_out;
	info->tcpi_sacked = tp->sacked_out;
	info->tcpi_lost = tp->lost_out;
	info->tcpi_retrans = tp->retrans_out;
	info->tcpi_fackets = tp->fackets_out;

	info->tcpi_last_data_sent = jiffies_to_msecs(now - tp->lsndtime);
	info->tcpi_last_data_recv = jiffies_to_msecs(now - icsk->icsk_ack.lrcvtime);
	info->tcpi_last_ack_recv = jiffies_to_msecs(now - tp->rcv_tstamp);

	info->tcpi_pmtu = tp->pmtu_cookie;
	info->tcpi_rcv_ssthresh = tp->rcv_ssthresh;
	info->tcpi_rtt = jiffies_to_usecs(tp->srtt)>>3;
	info->tcpi_rttvar = jiffies_to_usecs(tp->mdev)>>2;
	info->tcpi_snd_ssthresh = tp->snd_ssthresh;
	info->tcpi_snd_cwnd = tp->snd_cwnd;
	info->tcpi_advmss = tp->advmss;
	info->tcpi_reordering = tp->reordering;

	info->tcpi_rcv_rtt = jiffies_to_usecs(tp->rcv_rtt_est.rtt)>>3;
	info->tcpi_rcv_space = tp->rcvq_space.space;

	info->tcpi_total_retrans = tp->total_retrans;
}

EXPORT_SYMBOL_GPL(tcp_get_info);
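/* Illustrative sketch (userspace, not part of this file): the usual
 * consumer of this structure is getsockopt(TCP_INFO), e.g.
 *
 *	struct tcp_info ti;
 *	socklen_t len = sizeof(ti);
 *	if (getsockopt(fd, IPPROTO_TCP, TCP_INFO, &ti, &len) == 0)
 *		printf("rtt %u us, cwnd %u\n", ti.tcpi_rtt, ti.tcpi_snd_cwnd);
 */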

int tcp_getsockopt(struct sock *sk, int level, int optname, char __user *optval,
		   int __user *optlen)
{
	struct inet_connection_sock *icsk = inet_csk(sk);
	struct tcp_sock *tp = tcp_sk(sk);
	int val, len;

	if (level != SOL_TCP)
		return tp->af_specific->getsockopt(sk, level, optname,
						   optval, optlen);

	if (get_user(len, optlen))
		return -EFAULT;

	len = min_t(unsigned int, len, sizeof(int));

	if (len < 0)
		return -EINVAL;

	switch (optname) {
	case TCP_MAXSEG:
		val = tp->mss_cache;
		if (!val && ((1 << sk->sk_state) & (TCPF_CLOSE | TCPF_LISTEN)))
			val = tp->rx_opt.user_mss;
		break;
	case TCP_NODELAY:
		val = !!(tp->nonagle&TCP_NAGLE_OFF);
		break;
	case TCP_CORK:
		val = !!(tp->nonagle&TCP_NAGLE_CORK);
		break;
	case TCP_KEEPIDLE:
		val = (tp->keepalive_time ? : sysctl_tcp_keepalive_time) / HZ;
		break;
	case TCP_KEEPINTVL:
		val = (tp->keepalive_intvl ? : sysctl_tcp_keepalive_intvl) / HZ;
		break;
	case TCP_KEEPCNT:
		val = tp->keepalive_probes ? : sysctl_tcp_keepalive_probes;
		break;
	case TCP_SYNCNT:
		val = icsk->icsk_syn_retries ? : sysctl_tcp_syn_retries;
		break;
	case TCP_LINGER2:
		val = tp->linger2;
		if (val >= 0)
			val = (val ? : sysctl_tcp_fin_timeout) / HZ;
		break;
	case TCP_DEFER_ACCEPT:
		val = !icsk->icsk_accept_queue.rskq_defer_accept ? 0 :
		      ((TCP_TIMEOUT_INIT / HZ) << (icsk->icsk_accept_queue.rskq_defer_accept - 1));
		break;
	case TCP_WINDOW_CLAMP:
		val = tp->window_clamp;
		break;
	case TCP_INFO: {
		struct tcp_info info;

		if (get_user(len, optlen))
			return -EFAULT;

		tcp_get_info(sk, &info);

		len = min_t(unsigned int, len, sizeof(info));
		if (put_user(len, optlen))
			return -EFAULT;
		if (copy_to_user(optval, &info, len))
			return -EFAULT;
		return 0;
	}
	case TCP_QUICKACK:
		val = !icsk->icsk_ack.pingpong;
		break;

	case TCP_CONGESTION:
		if (get_user(len, optlen))
			return -EFAULT;
		len = min_t(unsigned int, len, TCP_CA_NAME_MAX);
		if (put_user(len, optlen))
			return -EFAULT;
		if (copy_to_user(optval, icsk->icsk_ca_ops->name, len))
			return -EFAULT;
		return 0;
	default:
		return -ENOPROTOOPT;
	};

	if (put_user(len, optlen))
		return -EFAULT;
	if (copy_to_user(optval, &val, len))
		return -EFAULT;
	return 0;
}

extern void __skb_cb_too_small_for_tcp(int, int);
extern struct tcp_congestion_ops tcp_reno;

static __initdata unsigned long thash_entries;
static int __init set_thash_entries(char *str)
{
	if (!str)
		return 0;
	thash_entries = simple_strtoul(str, &str, 0);
	return 1;
}
__setup("thash_entries=", set_thash_entries);
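/* Usage note: "thash_entries" is parsed from the kernel command line,
 * so booting with e.g. "thash_entries=131072" overrides the automatic
 * sizing of the established hash table in tcp_init() below.
 */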

void __init tcp_init(void)
{
	struct sk_buff *skb = NULL;
	int order, i;

	if (sizeof(struct tcp_skb_cb) > sizeof(skb->cb))
		__skb_cb_too_small_for_tcp(sizeof(struct tcp_skb_cb),
					   sizeof(skb->cb));

	tcp_hashinfo.bind_bucket_cachep =
		kmem_cache_create("tcp_bind_bucket",
				  sizeof(struct inet_bind_bucket), 0,
				  SLAB_HWCACHE_ALIGN, NULL, NULL);
	if (!tcp_hashinfo.bind_bucket_cachep)
		panic("tcp_init: Cannot alloc tcp_bind_bucket cache.");

	/* Size and allocate the main established and bind bucket
	 * hash tables.
	 *
	 * The methodology is similar to that of the buffer cache.
	 */
	tcp_hashinfo.ehash =
		alloc_large_system_hash("TCP established",
					sizeof(struct inet_ehash_bucket),
					thash_entries,
					(num_physpages >= 128 * 1024) ?
					(25 - PAGE_SHIFT) :
					(27 - PAGE_SHIFT),
					HASH_HIGHMEM,
					&tcp_hashinfo.ehash_size,
					NULL,
					0);
	tcp_hashinfo.ehash_size = (1 << tcp_hashinfo.ehash_size) >> 1;
	for (i = 0; i < (tcp_hashinfo.ehash_size << 1); i++) {
		rwlock_init(&tcp_hashinfo.ehash[i].lock);
		INIT_HLIST_HEAD(&tcp_hashinfo.ehash[i].chain);
	}

	tcp_hashinfo.bhash =
		alloc_large_system_hash("TCP bind",
					sizeof(struct inet_bind_hashbucket),
					tcp_hashinfo.ehash_size,
					(num_physpages >= 128 * 1024) ?
					(25 - PAGE_SHIFT) :
					(27 - PAGE_SHIFT),
					HASH_HIGHMEM,
					&tcp_hashinfo.bhash_size,
					NULL,
					64 * 1024);
	tcp_hashinfo.bhash_size = 1 << tcp_hashinfo.bhash_size;
	for (i = 0; i < tcp_hashinfo.bhash_size; i++) {
		spin_lock_init(&tcp_hashinfo.bhash[i].lock);
		INIT_HLIST_HEAD(&tcp_hashinfo.bhash[i].chain);
	}

	/* Try to be a bit smarter and adjust defaults depending
	 * on available memory.
	 */
	for (order = 0; ((1 << order) << PAGE_SHIFT) <
			(tcp_hashinfo.bhash_size * sizeof(struct inet_bind_hashbucket));
	     order++)
		;
	if (order >= 4) {
		sysctl_local_port_range[0] = 32768;
		sysctl_local_port_range[1] = 61000;
		tcp_death_row.sysctl_max_tw_buckets = 180000;
		sysctl_tcp_max_orphans = 4096 << (order - 4);
		sysctl_max_syn_backlog = 1024;
	} else if (order < 3) {
		sysctl_local_port_range[0] = 1024 * (3 - order);
		tcp_death_row.sysctl_max_tw_buckets >>= (3 - order);
		sysctl_tcp_max_orphans >>= (3 - order);
		sysctl_max_syn_backlog = 128;
	}
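	/* Worked example (illustrative): with a bind hash occupying 16
	 * pages, the loop above stops at order = 4, so the larger
	 * defaults apply: port range 32768-61000, 180000 time-wait
	 * buckets, 4096 orphans.  If the hash fits in a single page
	 * (order = 0), the defaults shrink instead: the port range
	 * starts at 3072 and the time-wait/orphan limits are divided
	 * by 8.
	 */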
	tcp_hashinfo.port_rover = sysctl_local_port_range[0] - 1;

	sysctl_tcp_mem[0] = 768 << order;
	sysctl_tcp_mem[1] = 1024 << order;
	sysctl_tcp_mem[2] = 1536 << order;

	if (order < 3) {
		sysctl_tcp_wmem[2] = 64 * 1024;
		sysctl_tcp_rmem[0] = PAGE_SIZE;
		sysctl_tcp_rmem[1] = 43689;
		sysctl_tcp_rmem[2] = 2 * 43689;
	}

	printk(KERN_INFO "TCP: Hash tables configured "
	       "(established %d bind %d)\n",
	       tcp_hashinfo.ehash_size << 1, tcp_hashinfo.bhash_size);

	tcp_register_congestion_control(&tcp_reno);
}

EXPORT_SYMBOL(tcp_close);
EXPORT_SYMBOL(tcp_disconnect);
EXPORT_SYMBOL(tcp_getsockopt);
EXPORT_SYMBOL(tcp_ioctl);
EXPORT_SYMBOL(tcp_poll);
EXPORT_SYMBOL(tcp_read_sock);
EXPORT_SYMBOL(tcp_recvmsg);
EXPORT_SYMBOL(tcp_sendmsg);
EXPORT_SYMBOL(tcp_sendpage);
EXPORT_SYMBOL(tcp_setsockopt);
EXPORT_SYMBOL(tcp_shutdown);
EXPORT_SYMBOL(tcp_statistics);