/*
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system.  INET is implemented using the BSD Socket
 *		interface as the means of communication with the user level.
 *
 *		Implementation of the Transmission Control Protocol(TCP).
 *
 * Authors:	Ross Biro
 *		Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
 *		Mark Evans, <evansmp@uhura.aston.ac.uk>
 *		Corey Minyard <wf-rch!minyard@relay.EU.net>
 *		Florian La Roche, <flla@stud.uni-sb.de>
 *		Charles Hedrick, <hedrick@klinzhai.rutgers.edu>
 *		Linus Torvalds, <torvalds@cs.helsinki.fi>
 *		Alan Cox, <gw4pts@gw4pts.ampr.org>
 *		Matthew Dillon, <dillon@apollo.west.oic.com>
 *		Arnt Gulbrandsen, <agulbra@nvg.unit.no>
 *		Jorge Cwik, <jorge@laser.satlink.net>
 *
 * Fixes:
 *		Alan Cox	:	Numerous verify_area() calls
 *		Alan Cox	:	Set the ACK bit on a reset
 *		Alan Cox	:	Stopped it crashing if it closed while
 *					sk->inuse=1 and was trying to connect
 *					(tcp_err()).
 *		Alan Cox	:	All icmp error handling was broken
 *					pointers passed were wrong and the
 *					socket was looked up backwards. Nobody
 *					tested any icmp error code obviously.
 *		Alan Cox	:	tcp_err() now handled properly. It
 *					wakes people on errors. poll
 *					behaves and the icmp error race
 *					has gone by moving it into sock.c
 *		Alan Cox	:	tcp_send_reset() fixed to work for
 *					everything not just packets for
 *					unknown sockets.
 *		Alan Cox	:	tcp option processing.
 *		Alan Cox	:	Reset tweaked (still not 100%) [Had
 *					syn rule wrong]
 *		Herp Rosmanith	:	More reset fixes
 *		Alan Cox	:	No longer acks invalid rst frames.
 *					Acking any kind of RST is right out.
 *		Alan Cox	:	Sets an ignore me flag on an rst
 *					receive otherwise odd bits of prattle
 *					escape still
 *		Alan Cox	:	Fixed another acking RST frame bug.
 *					Should stop LAN workplace lockups.
 *		Alan Cox	:	Some tidyups using the new skb list
 *					facilities
 *		Alan Cox	:	sk->keepopen now seems to work
 *		Alan Cox	:	Pulls options out correctly on accepts
 *		Alan Cox	:	Fixed assorted sk->rqueue->next errors
 *		Alan Cox	:	PSH doesn't end a TCP read. Switched a
 *					bit to skb ops.
 *		Alan Cox	:	Tidied tcp_data to avoid a potential
 *					nasty.
 *		Alan Cox	:	Added some better commenting, as the
 *					tcp is hard to follow
 *		Alan Cox	:	Removed incorrect check for 20 * psh
 *		Michael O'Reilly	:	ack < copied bug fix.
 *		Johannes Stille		:	Misc tcp fixes (not all in yet).
 *		Alan Cox	:	FIN with no memory -> CRASH
 *		Alan Cox	:	Added socket option proto entries.
 *					Also added awareness of them to accept.
 *		Alan Cox	:	Added TCP options (SOL_TCP)
 *		Alan Cox	:	Switched wakeup calls to callbacks,
 *					so the kernel can layer network
 *					sockets.
 *		Alan Cox	:	Use ip_tos/ip_ttl settings.
 *		Alan Cox	:	Handle FIN (more) properly (we hope).
 *		Alan Cox	:	RST frames sent on unsynchronised
 *					state ack error.
 *		Alan Cox	:	Put in missing check for SYN bit.
 *		Alan Cox	:	Added tcp_select_window() aka NET2E
 *					window non shrink trick.
 *		Alan Cox	:	Added a couple of small NET2E timer
 *					fixes
 *		Charles Hedrick	:	TCP fixes
 *		Toomas Tamm	:	TCP window fixes
 *		Alan Cox	:	Small URG fix to rlogin ^C ack fight
 *		Charles Hedrick	:	Rewrote most of it to actually work
 *		Linus		:	Rewrote tcp_read() and URG handling
 *					completely
 *		Gerhard Koerting:	Fixed some missing timer handling
 *		Matthew Dillon	:	Reworked TCP machine states as per RFC
 *		Gerhard Koerting:	PC/TCP workarounds
 *		Adam Caldwell	:	Assorted timer/timing errors
 *		Matthew Dillon	:	Fixed another RST bug
 *		Alan Cox	:	Move to kernel side addressing changes.
 *		Alan Cox	:	Beginning work on TCP fastpathing
 *					(not yet usable)
 *		Arnt Gulbrandsen:	Turbocharged tcp_check() routine.
 *		Alan Cox	:	TCP fast path debugging
 *		Alan Cox	:	Window clamping
 *		Michael Riepe	:	Bug in tcp_check()
 *		Matt Dillon	:	More TCP improvements and RST bug fixes
 *		Matt Dillon	:	Yet more small nasties removed from the
 *					TCP code (Be very nice to this man if
 *					tcp finally works 100%) 8)
 *		Alan Cox	:	BSD accept semantics.
 *		Alan Cox	:	Reset on closedown bug.
 *	Peter De Schrijver	:	ENOTCONN check missing in tcp_sendto().
 *		Michael Pall	:	Handle poll() after URG properly in
 *					all cases.
 *		Michael Pall	:	Undo the last fix in tcp_read_urg()
 *					(multi URG PUSH broke rlogin).
 *		Michael Pall	:	Fix the multi URG PUSH problem in
 *					tcp_readable(), poll() after URG
 *					works now.
 *		Michael Pall	:	recv(...,MSG_OOB) never blocks in the
 *					BSD api.
 *		Alan Cox	:	Changed the semantics of sk->socket to
 *					fix a race and a signal problem with
 *					accept() and async I/O.
 *		Alan Cox	:	Relaxed the rules on tcp_sendto().
 *		Yury Shevchuk	:	Really fixed accept() blocking problem.
 *		Craig I. Hagan	:	Allow for BSD compatible TIME_WAIT for
 *					clients/servers which listen in on
 *					fixed ports.
 *		Alan Cox	:	Cleaned the above up and shrank it to
 *					a sensible code size.
 *		Alan Cox	:	Self connect lockup fix.
 *		Alan Cox	:	No connect to multicast.
 *		Ross Biro	:	Close unaccepted children on master
 *					socket close.
 *		Alan Cox	:	Reset tracing code.
 *		Alan Cox	:	Spurious resets on shutdown.
 *		Alan Cox	:	Giant 15 minute/60 second timer error
 *		Alan Cox	:	Small whoops in polling before an
 *					accept.
 *		Alan Cox	:	Kept the state trace facility since
 *					it's handy for debugging.
 *		Alan Cox	:	More reset handler fixes.
 *		Alan Cox	:	Started rewriting the code based on
 *					the RFC's for other useful protocol
 *					references see: Comer, KA9Q NOS, and
 *					for a reference on the difference
 *					between specifications and how BSD
 *					works see the 4.4lite source.
 *		A.N.Kuznetsov	:	Don't time wait on completion of tidy
 *					close.
 *		Linus Torvalds	:	Fin/Shutdown & copied_seq changes.
 *		Linus Torvalds	:	Fixed BSD port reuse to work first syn
 *		Alan Cox	:	Reimplemented timers as per the RFC
 *					and using multiple timers for sanity.
 *		Alan Cox	:	Small bug fixes, and a lot of new
 *					comments.
 *		Alan Cox	:	Fixed dual reader crash by locking
 *					the buffers (much like datagram.c)
 *		Alan Cox	:	Fixed stuck sockets in probe. A probe
 *					now gets fed up of retrying without
 *					(even a no space) answer.
 *		Alan Cox	:	Extracted closing code better
 *		Alan Cox	:	Fixed the closing state machine to
 *					resemble the RFC.
 *		Alan Cox	:	More 'per spec' fixes.
 *		Jorge Cwik	:	Even faster checksumming.
 *		Alan Cox	:	tcp_data() doesn't ack illegal PSH
 *					only frames. At least one pc tcp stack
 *					generates them.
 *		Alan Cox	:	Cache last socket.
 *		Alan Cox	:	Per route irtt.
 *		Matt Day	:	poll()->select() match BSD precisely on error
 *		Alan Cox	:	New buffers
 *		Marc Tamsky	:	Various sk->prot->retransmits and
 *					sk->retransmits misupdating fixed.
 *					Fixed tcp_write_timeout: stuck close,
 *					and TCP syn retries get used now.
 *		Mark Yarvis	:	In tcp_read_wakeup(), don't send an
 *					ack if state is TCP_CLOSED.
 *		Alan Cox	:	Look up device on a retransmit - routes may
 *					change. Doesn't yet cope with MSS shrink right
 *					but it's a start!
 *		Marc Tamsky	:	Closing in closing fixes.
 *		Mike Shaver	:	RFC1122 verifications.
 *		Alan Cox	:	rcv_saddr errors.
 *		Alan Cox	:	Block double connect().
 *		Alan Cox	:	Small hooks for enSKIP.
 *		Alexey Kuznetsov:	Path MTU discovery.
 *		Alan Cox	:	Support soft errors.
 *		Alan Cox	:	Fix MTU discovery pathological case
 *					when the remote claims no mtu!
 *		Marc Tamsky	:	TCP_CLOSE fix.
 *		Colin (G3TNE)	:	Send a reset on syn ack replies in
 *					window but wrong (fixes NT lpd problems)
 *		Pedro Roque	:	Better TCP window handling, delayed ack.
 *		Joerg Reuter	:	No modification of locked buffers in
 *					tcp_do_retransmit()
 *		Eric Schenk	:	Changed receiver side silly window
 *					avoidance algorithm to BSD style
 *					algorithm. This doubles throughput
 *					against machines running Solaris,
 *					and seems to result in general
 *					improvement.
 *	Stefan Magdalinski	:	adjusted tcp_readable() to fix FIONREAD
 *	Willy Konynenberg	:	Transparent proxying support.
 *	Mike McLagan		:	Routing by source
 *		Keith Owens	:	Do proper merging with partial SKB's in
 *					tcp_do_sendmsg to avoid burstiness.
 *		Eric Schenk	:	Fix fast close down bug with
 *					shutdown() followed by close().
 *		Andi Kleen	:	Make poll agree with SIGIO
 *	Salvatore Sanfilippo	:	Support SO_LINGER with linger == 1 and
 *					lingertime == 0 (RFC 793 ABORT Call)
 *	Hirokazu Takahashi	:	Use copy_from_user() instead of
 *					csum_and_copy_from_user() if possible.
 *
 *		This program is free software; you can redistribute it and/or
 *		modify it under the terms of the GNU General Public License
 *		as published by the Free Software Foundation; either version
 *		2 of the License, or (at your option) any later version.
 *
 * Description of States:
 *
 *	TCP_SYN_SENT		sent a connection request, waiting for ack
 *
 *	TCP_SYN_RECV		received a connection request, sent ack,
 *				waiting for final ack in three-way handshake.
 *
 *	TCP_ESTABLISHED		connection established
 *
 *	TCP_FIN_WAIT1		our side has shutdown, waiting to complete
 *				transmission of remaining buffered data
 *
 *	TCP_FIN_WAIT2		all buffered data sent, waiting for remote
 *				to shutdown
 *
 *	TCP_CLOSING		both sides have shutdown but we still have
 *				data we have to finish sending
 *
 *	TCP_TIME_WAIT		timeout to catch resent junk before entering
 *				closed, can only be entered from FIN_WAIT2
 *				or CLOSING.  Required because the other end
 *				may not have gotten our last ACK causing it
 *				to retransmit the data packet (which we ignore)
 *
 *	TCP_CLOSE_WAIT		remote side has shutdown and is waiting for
 *				us to finish writing our data and to shutdown
 *				(we have to close() to move on to LAST_ACK)
 *
 *	TCP_LAST_ACK		our side has shutdown after remote has
 *				shutdown.  There may still be data in our
 *				buffer that we have to finish sending
 *
 *	TCP_CLOSE		socket is finished
 */
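
/*
 * Illustrative example (not part of the original description above): an
 * active close normally walks ESTABLISHED -> FIN_WAIT1 -> FIN_WAIT2 ->
 * TIME_WAIT -> CLOSE, while the passive side walks ESTABLISHED ->
 * CLOSE_WAIT -> LAST_ACK -> CLOSE; if both ends send FIN at the same time,
 * each side passes through CLOSING instead of FIN_WAIT2.
 */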

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/fcntl.h>
#include <linux/poll.h>
#include <linux/init.h>
#include <linux/fs.h>
#include <linux/skbuff.h>
#include <linux/scatterlist.h>
#include <linux/splice.h>
#include <linux/net.h>
#include <linux/socket.h>
#include <linux/random.h>
#include <linux/bootmem.h>
#include <linux/highmem.h>
#include <linux/swap.h>
#include <linux/cache.h>
#include <linux/err.h>
#include <linux/crypto.h>
#include <linux/time.h>
#include <linux/slab.h>

#include <net/icmp.h>
#include <net/tcp.h>
#include <net/xfrm.h>
#include <net/ip.h>
#include <net/netdma.h>
#include <net/sock.h>

#include <asm/uaccess.h>
#include <asm/ioctls.h>

int sysctl_tcp_fin_timeout __read_mostly = TCP_FIN_TIMEOUT;

struct percpu_counter tcp_orphan_count;
EXPORT_SYMBOL_GPL(tcp_orphan_count);

long sysctl_tcp_mem[3] __read_mostly;
int sysctl_tcp_wmem[3] __read_mostly;
int sysctl_tcp_rmem[3] __read_mostly;

EXPORT_SYMBOL(sysctl_tcp_mem);
EXPORT_SYMBOL(sysctl_tcp_rmem);
EXPORT_SYMBOL(sysctl_tcp_wmem);

atomic_long_t tcp_memory_allocated;	/* Current allocated memory. */
EXPORT_SYMBOL(tcp_memory_allocated);

/*
 * Current number of TCP sockets.
 */
struct percpu_counter tcp_sockets_allocated;
EXPORT_SYMBOL(tcp_sockets_allocated);

/*
 * TCP splice context
 */
struct tcp_splice_state {
	struct pipe_inode_info *pipe;
	size_t len;
	unsigned int flags;
};

/*
 * Pressure flag: try to collapse.
 * Technical note: it is used by multiple contexts non atomically.
 * All the __sk_mem_schedule() is of this nature: accounting
 * is strict, actions are advisory and have some latency.
 */
int tcp_memory_pressure __read_mostly;
EXPORT_SYMBOL(tcp_memory_pressure);

void tcp_enter_memory_pressure(struct sock *sk)
{
	if (!tcp_memory_pressure) {
		NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPMEMORYPRESSURES);
		tcp_memory_pressure = 1;
	}
}
EXPORT_SYMBOL(tcp_enter_memory_pressure);

/* Convert seconds to retransmits based on initial and max timeout */
static u8 secs_to_retrans(int seconds, int timeout, int rto_max)
{
	u8 res = 0;

	if (seconds > 0) {
		int period = timeout;

		res = 1;
		while (seconds > period && res < 255) {
			res++;
			timeout <<= 1;
			if (timeout > rto_max)
				timeout = rto_max;
			period += timeout;
		}
	}
	return res;
}
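
/*
 * Worked example (illustrative, not from the original source): with an
 * initial timeout of 1s and rto_max of 60s, secs_to_retrans(7, 1, 60)
 * returns 3, because the exponentially backed-off periods 1s + 2s + 4s
 * are the first schedule that covers 7 seconds.
 */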

/* Convert retransmits to seconds based on initial and max timeout */
static int retrans_to_secs(u8 retrans, int timeout, int rto_max)
{
	int period = 0;

	if (retrans > 0) {
		period = timeout;
		while (--retrans) {
			timeout <<= 1;
			if (timeout > rto_max)
				timeout = rto_max;
			period += timeout;
		}
	}
	return period;
}
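
/*
 * Worked example (illustrative): retrans_to_secs(3, 1, 60) walks the same
 * schedule in the other direction, 1s + 2s + 4s, and returns 7 seconds,
 * the inverse of the secs_to_retrans() example above.
 */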

/*
 *	Wait for a TCP event.
 *
 *	Note that we don't need to lock the socket, as the upper poll layers
 *	take care of normal races (between the test and the event) and we don't
 *	go look at any of the socket buffers directly.
 */
unsigned int tcp_poll(struct file *file, struct socket *sock, poll_table *wait)
{
	unsigned int mask;
	struct sock *sk = sock->sk;
	struct tcp_sock *tp = tcp_sk(sk);

	sock_poll_wait(file, sk_sleep(sk), wait);
	if (sk->sk_state == TCP_LISTEN)
		return inet_csk_listen_poll(sk);

	/* Socket is not locked. We are protected from async events
	 * by poll logic and correct handling of state changes
	 * made by other threads is impossible in any case.
	 */

	mask = 0;

	/*
	 * POLLHUP is certainly not done right. But poll() doesn't
	 * have a notion of HUP in just one direction, and for a
	 * socket the read side is more interesting.
	 *
	 * Some poll() documentation says that POLLHUP is incompatible
	 * with the POLLOUT/POLLWR flags, so somebody should check this
	 * all. But careful, it tends to be safer to return too many
	 * bits than too few, and you can easily break real applications
	 * if you don't tell them that something has hung up!
	 *
	 * Check-me.
	 *
	 * Check number 1. POLLHUP is _UNMASKABLE_ event (see UNIX98 and
	 * our fs/select.c). It means that after we received EOF,
	 * poll always returns immediately, making impossible poll() on write()
	 * in state CLOSE_WAIT. One solution is evident --- to set POLLHUP
	 * if and only if shutdown has been made in both directions.
	 * Actually, it is interesting to look how Solaris and DUX
	 * solve this dilemma. I would prefer, if POLLHUP were maskable,
	 * then we could set it on SND_SHUTDOWN. BTW examples given
	 * in Stevens' books assume exactly this behaviour, it explains
	 * why POLLHUP is incompatible with POLLOUT.	--ANK
	 *
	 * NOTE. Check for TCP_CLOSE is added. The goal is to prevent
	 * blocking on fresh not-connected or disconnected socket. --ANK
	 */
	if (sk->sk_shutdown == SHUTDOWN_MASK || sk->sk_state == TCP_CLOSE)
		mask |= POLLHUP;
	if (sk->sk_shutdown & RCV_SHUTDOWN)
		mask |= POLLIN | POLLRDNORM | POLLRDHUP;

	/* Connected? */
	if ((1 << sk->sk_state) & ~(TCPF_SYN_SENT | TCPF_SYN_RECV)) {
		int target = sock_rcvlowat(sk, 0, INT_MAX);

		if (tp->urg_seq == tp->copied_seq &&
		    !sock_flag(sk, SOCK_URGINLINE) &&
		    tp->urg_data)
			target++;

		/* Potential race condition. If read of tp below will
		 * escape above sk->sk_state, we can be illegally awakened
		 * in SYN_* states. */
		if (tp->rcv_nxt - tp->copied_seq >= target)
			mask |= POLLIN | POLLRDNORM;

		if (!(sk->sk_shutdown & SEND_SHUTDOWN)) {
			if (sk_stream_wspace(sk) >= sk_stream_min_wspace(sk)) {
				mask |= POLLOUT | POLLWRNORM;
			} else {  /* send SIGIO later */
				set_bit(SOCK_ASYNC_NOSPACE,
					&sk->sk_socket->flags);
				set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);

				/* Race breaker. If space is freed after
				 * wspace test but before the flags are set,
				 * IO signal will be lost.
				 */
				if (sk_stream_wspace(sk) >= sk_stream_min_wspace(sk))
					mask |= POLLOUT | POLLWRNORM;
			}
		} else
			mask |= POLLOUT | POLLWRNORM;

		if (tp->urg_data & TCP_URG_VALID)
			mask |= POLLPRI;
	}
	/* This barrier is coupled with smp_wmb() in tcp_reset() */
	smp_rmb();
	if (sk->sk_err)
		mask |= POLLERR;

	return mask;
}
EXPORT_SYMBOL(tcp_poll);
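
/*
 * Illustrative example (an assumption about typical usage, not taken from
 * the original comments): for an ESTABLISHED socket whose peer has sent a
 * FIN (RCV_SHUTDOWN set) but which still has send buffer space, tcp_poll()
 * above returns POLLIN | POLLRDNORM | POLLRDHUP | POLLOUT | POLLWRNORM;
 * POLLHUP is only reported once both directions have been shut down or the
 * socket has reached TCP_CLOSE.
 */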

int tcp_ioctl(struct sock *sk, int cmd, unsigned long arg)
{
	struct tcp_sock *tp = tcp_sk(sk);
	int answ;

	switch (cmd) {
	case SIOCINQ:
		if (sk->sk_state == TCP_LISTEN)
			return -EINVAL;

		lock_sock(sk);
		if ((1 << sk->sk_state) & (TCPF_SYN_SENT | TCPF_SYN_RECV))
			answ = 0;
		else if (sock_flag(sk, SOCK_URGINLINE) ||
			 !tp->urg_data ||
			 before(tp->urg_seq, tp->copied_seq) ||
			 !before(tp->urg_seq, tp->rcv_nxt)) {
			struct sk_buff *skb;

			answ = tp->rcv_nxt - tp->copied_seq;

			/* Subtract 1, if FIN is in queue. */
			skb = skb_peek_tail(&sk->sk_receive_queue);
			if (answ && skb)
				answ -= tcp_hdr(skb)->fin;
		} else
			answ = tp->urg_seq - tp->copied_seq;
		release_sock(sk);
		break;
	case SIOCATMARK:
		answ = tp->urg_data && tp->urg_seq == tp->copied_seq;
		break;
	case SIOCOUTQ:
		if (sk->sk_state == TCP_LISTEN)
			return -EINVAL;

		if ((1 << sk->sk_state) & (TCPF_SYN_SENT | TCPF_SYN_RECV))
			answ = 0;
		else
			answ = tp->write_seq - tp->snd_una;
		break;
	default:
		return -ENOIOCTLCMD;
	}

	return put_user(answ, (int __user *)arg);
}
EXPORT_SYMBOL(tcp_ioctl);

static inline void tcp_mark_push(struct tcp_sock *tp, struct sk_buff *skb)
{
	TCP_SKB_CB(skb)->flags |= TCPHDR_PSH;
	tp->pushed_seq = tp->write_seq;
}

static inline int forced_push(struct tcp_sock *tp)
{
	return after(tp->write_seq, tp->pushed_seq + (tp->max_window >> 1));
}
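
/*
 * Illustrative example (assumed numbers): with max_window = 65535, the test
 * above forces a push once more than 32767 bytes have been queued since the
 * last marked PSH, i.e. whenever write_seq runs more than half a maximum
 * window ahead of pushed_seq.
 */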

static inline void skb_entail(struct sock *sk, struct sk_buff *skb)
{
	struct tcp_sock *tp = tcp_sk(sk);
	struct tcp_skb_cb *tcb = TCP_SKB_CB(skb);

	skb->csum = 0;
	tcb->seq = tcb->end_seq = tp->write_seq;
	tcb->flags = TCPHDR_ACK;
	tcb->sacked = 0;
	skb_header_release(skb);
	tcp_add_write_queue_tail(sk, skb);
	sk->sk_wmem_queued += skb->truesize;
	sk_mem_charge(sk, skb->truesize);
	if (tp->nonagle & TCP_NAGLE_PUSH)
		tp->nonagle &= ~TCP_NAGLE_PUSH;
}

static inline void tcp_mark_urg(struct tcp_sock *tp, int flags)
{
	if (flags & MSG_OOB)
		tp->snd_up = tp->write_seq;
}

static inline void tcp_push(struct sock *sk, int flags, int mss_now,
			    int nonagle)
{
	if (tcp_send_head(sk)) {
		struct tcp_sock *tp = tcp_sk(sk);

		if (!(flags & MSG_MORE) || forced_push(tp))
			tcp_mark_push(tp, tcp_write_queue_tail(sk));

		tcp_mark_urg(tp, flags);
		__tcp_push_pending_frames(sk, mss_now,
					  (flags & MSG_MORE) ? TCP_NAGLE_CORK : nonagle);
	}
}

static int tcp_splice_data_recv(read_descriptor_t *rd_desc, struct sk_buff *skb,
				unsigned int offset, size_t len)
{
	struct tcp_splice_state *tss = rd_desc->arg.data;
	int ret;

	ret = skb_splice_bits(skb, offset, tss->pipe, min(rd_desc->count, len),
			      tss->flags);
	if (ret > 0)
		rd_desc->count -= ret;
	return ret;
}

static int __tcp_splice_read(struct sock *sk, struct tcp_splice_state *tss)
{
	/* Store TCP splice context information in read_descriptor_t. */
	read_descriptor_t rd_desc = {
		.arg.data = tss,
		.count	  = tss->len,
	};

	return tcp_read_sock(sk, &rd_desc, tcp_splice_data_recv);
}

/**
 *  tcp_splice_read - splice data from TCP socket to a pipe
 * @sock:	socket to splice from
 * @ppos:	position (not valid)
 * @pipe:	pipe to splice to
 * @len:	number of bytes to splice
 * @flags:	splice modifier flags
 *
 * Description:
 *    Will read pages from given socket and fill them into a pipe.
 *
 **/
ssize_t tcp_splice_read(struct socket *sock, loff_t *ppos,
			struct pipe_inode_info *pipe, size_t len,
			unsigned int flags)
{
	struct sock *sk = sock->sk;
	struct tcp_splice_state tss = {
		.pipe = pipe,
		.len = len,
		.flags = flags,
	};
	long timeo;
	ssize_t spliced;
	int ret;

	sock_rps_record_flow(sk);
	/*
	 * We can't seek on a socket input
	 */
	if (unlikely(*ppos))
		return -ESPIPE;

	ret = spliced = 0;

	lock_sock(sk);

	timeo = sock_rcvtimeo(sk, sock->file->f_flags & O_NONBLOCK);
	while (tss.len) {
		ret = __tcp_splice_read(sk, &tss);
		if (ret < 0)
			break;
		else if (!ret) {
			if (spliced)
				break;
			if (sock_flag(sk, SOCK_DONE))
				break;
			if (sk->sk_err) {
				ret = sock_error(sk);
				break;
			}
			if (sk->sk_shutdown & RCV_SHUTDOWN)
				break;
			if (sk->sk_state == TCP_CLOSE) {
				/*
				 * This occurs when user tries to read
				 * from never connected socket.
				 */
				if (!sock_flag(sk, SOCK_DONE))
					ret = -ENOTCONN;
				break;
			}
			if (!timeo) {
				ret = -EAGAIN;
				break;
			}
			sk_wait_data(sk, &timeo);
			if (signal_pending(current)) {
				ret = sock_intr_errno(timeo);
				break;
			}
			continue;
		}
		tss.len -= ret;
		spliced += ret;

		if (!timeo)
			break;
		release_sock(sk);
		lock_sock(sk);

		if (sk->sk_err || sk->sk_state == TCP_CLOSE ||
		    (sk->sk_shutdown & RCV_SHUTDOWN) ||
		    signal_pending(current))
			break;
	}

	release_sock(sk);

	if (spliced)
		return spliced;

	return ret;
}
EXPORT_SYMBOL(tcp_splice_read);

struct sk_buff *sk_stream_alloc_skb(struct sock *sk, int size, gfp_t gfp)
{
	struct sk_buff *skb;

	/* The TCP header must be at least 32-bit aligned. */
	size = ALIGN(size, 4);

	skb = alloc_skb_fclone(size + sk->sk_prot->max_header, gfp);
	if (skb) {
		if (sk_wmem_schedule(sk, skb->truesize)) {
			/*
			 * Make sure that we have exactly size bytes
			 * available to the caller, no more, no less.
			 */
			skb_reserve(skb, skb_tailroom(skb) - size);
			return skb;
		}
		__kfree_skb(skb);
	} else {
		sk->sk_prot->enter_memory_pressure(sk);
		sk_stream_moderate_sndbuf(sk);
	}
	return NULL;
}

static unsigned int tcp_xmit_size_goal(struct sock *sk, u32 mss_now,
				       int large_allowed)
{
	struct tcp_sock *tp = tcp_sk(sk);
	u32 xmit_size_goal, old_size_goal;

	xmit_size_goal = mss_now;

	if (large_allowed && sk_can_gso(sk)) {
		xmit_size_goal = ((sk->sk_gso_max_size - 1) -
				  inet_csk(sk)->icsk_af_ops->net_header_len -
				  inet_csk(sk)->icsk_ext_hdr_len -
				  tp->tcp_header_len);

		xmit_size_goal = tcp_bound_to_half_wnd(tp, xmit_size_goal);

		/* We try hard to avoid divides here */
		old_size_goal = tp->xmit_size_goal_segs * mss_now;

		if (likely(old_size_goal <= xmit_size_goal &&
			   old_size_goal + mss_now > xmit_size_goal)) {
			xmit_size_goal = old_size_goal;
		} else {
			tp->xmit_size_goal_segs = xmit_size_goal / mss_now;
			xmit_size_goal = tp->xmit_size_goal_segs * mss_now;
		}
	}

	return max(xmit_size_goal, mss_now);
}
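
/*
 * Worked example (assumed numbers, for illustration only): with mss_now =
 * 1448, sk_gso_max_size = 65536 and roughly 60 bytes of protocol headers,
 * the raw goal comes out a little under 65536 bytes; the divide-avoidance
 * path then rounds it down to a whole number of segments, e.g.
 * 45 * 1448 = 65160 bytes, and keeps reusing that cached segment count on
 * later calls as long as the goal stays within one MSS of it.
 */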

static int tcp_send_mss(struct sock *sk, int *size_goal, int flags)
{
	int mss_now;

	mss_now = tcp_current_mss(sk);
	*size_goal = tcp_xmit_size_goal(sk, mss_now, !(flags & MSG_OOB));

	return mss_now;
}

static ssize_t do_tcp_sendpages(struct sock *sk, struct page **pages, int poffset,
				size_t psize, int flags)
{
	struct tcp_sock *tp = tcp_sk(sk);
	int mss_now, size_goal;
	int err;
	ssize_t copied;
	long timeo = sock_sndtimeo(sk, flags & MSG_DONTWAIT);

	/* Wait for a connection to finish. */
	if ((1 << sk->sk_state) & ~(TCPF_ESTABLISHED | TCPF_CLOSE_WAIT))
		if ((err = sk_stream_wait_connect(sk, &timeo)) != 0)
			goto out_err;

	clear_bit(SOCK_ASYNC_NOSPACE, &sk->sk_socket->flags);

	mss_now = tcp_send_mss(sk, &size_goal, flags);
	copied = 0;

	err = -EPIPE;
	if (sk->sk_err || (sk->sk_shutdown & SEND_SHUTDOWN))
		goto out_err;

	while (psize > 0) {
		struct sk_buff *skb = tcp_write_queue_tail(sk);
		struct page *page = pages[poffset / PAGE_SIZE];
		int copy, i, can_coalesce;
		int offset = poffset % PAGE_SIZE;
		int size = min_t(size_t, psize, PAGE_SIZE - offset);

		if (!tcp_send_head(sk) || (copy = size_goal - skb->len) <= 0) {
new_segment:
			if (!sk_stream_memory_free(sk))
				goto wait_for_sndbuf;

			skb = sk_stream_alloc_skb(sk, 0, sk->sk_allocation);
			if (!skb)
				goto wait_for_memory;

			skb_entail(sk, skb);
			copy = size_goal;
		}

		if (copy > size)
			copy = size;

		i = skb_shinfo(skb)->nr_frags;
		can_coalesce = skb_can_coalesce(skb, i, page, offset);
		if (!can_coalesce && i >= MAX_SKB_FRAGS) {
			tcp_mark_push(tp, skb);
			goto new_segment;
		}
		if (!sk_wmem_schedule(sk, copy))
			goto wait_for_memory;

		if (can_coalesce) {
			skb_shinfo(skb)->frags[i - 1].size += copy;
		} else {
			get_page(page);
			skb_fill_page_desc(skb, i, page, offset, copy);
		}

		skb->len += copy;
		skb->data_len += copy;
		skb->truesize += copy;
		sk->sk_wmem_queued += copy;
		sk_mem_charge(sk, copy);
		skb->ip_summed = CHECKSUM_PARTIAL;
		tp->write_seq += copy;
		TCP_SKB_CB(skb)->end_seq += copy;
		skb_shinfo(skb)->gso_segs = 0;

		if (!copied)
			TCP_SKB_CB(skb)->flags &= ~TCPHDR_PSH;

		copied += copy;
		poffset += copy;
		if (!(psize -= copy))
			goto out;

		if (skb->len < size_goal || (flags & MSG_OOB))
			continue;

		if (forced_push(tp)) {
			tcp_mark_push(tp, skb);
			__tcp_push_pending_frames(sk, mss_now, TCP_NAGLE_PUSH);
		} else if (skb == tcp_send_head(sk))
			tcp_push_one(sk, mss_now);
		continue;

wait_for_sndbuf:
		set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
wait_for_memory:
		if (copied)
			tcp_push(sk, flags & ~MSG_MORE, mss_now, TCP_NAGLE_PUSH);

		if ((err = sk_stream_wait_memory(sk, &timeo)) != 0)
			goto do_error;

		mss_now = tcp_send_mss(sk, &size_goal, flags);
	}

out:
	if (copied)
		tcp_push(sk, flags, mss_now, tp->nonagle);
	return copied;

do_error:
	if (copied)
		goto out;
out_err:
	return sk_stream_error(sk, flags, err);
}

int tcp_sendpage(struct sock *sk, struct page *page, int offset,
		 size_t size, int flags)
{
	ssize_t res;

	if (!(sk->sk_route_caps & NETIF_F_SG) ||
	    !(sk->sk_route_caps & NETIF_F_ALL_CSUM))
		return sock_no_sendpage(sk->sk_socket, page, offset, size,
					flags);

	lock_sock(sk);
	TCP_CHECK_TIMER(sk);
	res = do_tcp_sendpages(sk, &page, offset, size, flags);
	TCP_CHECK_TIMER(sk);
	release_sock(sk);
	return res;
}
EXPORT_SYMBOL(tcp_sendpage);

#define TCP_PAGE(sk)	(sk->sk_sndmsg_page)
#define TCP_OFF(sk)	(sk->sk_sndmsg_off)

static inline int select_size(struct sock *sk, int sg)
{
	struct tcp_sock *tp = tcp_sk(sk);
	int tmp = tp->mss_cache;

	if (sg) {
		if (sk_can_gso(sk))
			tmp = 0;
		else {
			int pgbreak = SKB_MAX_HEAD(MAX_TCP_HEADER);

			if (tmp >= pgbreak &&
			    tmp <= pgbreak + (MAX_SKB_FRAGS - 1) * PAGE_SIZE)
				tmp = pgbreak;
		}
	}

	return tmp;
}

int tcp_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
		size_t size)
{
	struct iovec *iov;
	struct tcp_sock *tp = tcp_sk(sk);
	struct sk_buff *skb;
	int iovlen, flags;
	int mss_now, size_goal;
	int sg, err, copied;
	long timeo;

	lock_sock(sk);
	TCP_CHECK_TIMER(sk);

	flags = msg->msg_flags;
	timeo = sock_sndtimeo(sk, flags & MSG_DONTWAIT);

	/* Wait for a connection to finish. */
	if ((1 << sk->sk_state) & ~(TCPF_ESTABLISHED | TCPF_CLOSE_WAIT))
		if ((err = sk_stream_wait_connect(sk, &timeo)) != 0)
			goto out_err;

	/* This should be in poll */
	clear_bit(SOCK_ASYNC_NOSPACE, &sk->sk_socket->flags);

	mss_now = tcp_send_mss(sk, &size_goal, flags);

	/* Ok commence sending. */
	iovlen = msg->msg_iovlen;
	iov = msg->msg_iov;
	copied = 0;

	err = -EPIPE;
	if (sk->sk_err || (sk->sk_shutdown & SEND_SHUTDOWN))
		goto out_err;

	sg = sk->sk_route_caps & NETIF_F_SG;

	while (--iovlen >= 0) {
		size_t seglen = iov->iov_len;
		unsigned char __user *from = iov->iov_base;

		iov++;

		while (seglen > 0) {
			int copy = 0;
			int max = size_goal;

			skb = tcp_write_queue_tail(sk);
			if (tcp_send_head(sk)) {
				if (skb->ip_summed == CHECKSUM_NONE)
					max = mss_now;
				copy = max - skb->len;
			}

			if (copy <= 0) {
new_segment:
				/* Allocate new segment. If the interface is SG,
				 * allocate skb fitting to single page.
				 */
				if (!sk_stream_memory_free(sk))
					goto wait_for_sndbuf;

				skb = sk_stream_alloc_skb(sk,
							  select_size(sk, sg),
							  sk->sk_allocation);
				if (!skb)
					goto wait_for_memory;

				/*
				 * Check whether we can use HW checksum.
				 */
				if (sk->sk_route_caps & NETIF_F_ALL_CSUM)
					skb->ip_summed = CHECKSUM_PARTIAL;

				skb_entail(sk, skb);
				copy = size_goal;
				max = size_goal;
			}

			/* Try to append data to the end of skb. */
			if (copy > seglen)
				copy = seglen;

			/* Where to copy to? */
			if (skb_tailroom(skb) > 0) {
				/* We have some space in skb head. Superb! */
				if (copy > skb_tailroom(skb))
					copy = skb_tailroom(skb);
				if ((err = skb_add_data(skb, from, copy)) != 0)
					goto do_fault;
			} else {
				int merge = 0;
				int i = skb_shinfo(skb)->nr_frags;
				struct page *page = TCP_PAGE(sk);
				int off = TCP_OFF(sk);

				if (skb_can_coalesce(skb, i, page, off) &&
				    off != PAGE_SIZE) {
					/* We can extend the last page
					 * fragment. */
					merge = 1;
				} else if (i == MAX_SKB_FRAGS || !sg) {
					/* Need to add new fragment and cannot
					 * do this because interface is non-SG,
					 * or because all the page slots are
					 * busy. */
					tcp_mark_push(tp, skb);
					goto new_segment;
				} else if (page) {
					if (off == PAGE_SIZE) {
						put_page(page);
						TCP_PAGE(sk) = page = NULL;
						off = 0;
					}
				} else
					off = 0;

				if (copy > PAGE_SIZE - off)
					copy = PAGE_SIZE - off;

				if (!sk_wmem_schedule(sk, copy))
					goto wait_for_memory;

				if (!page) {
					/* Allocate new cache page. */
					if (!(page = sk_stream_alloc_page(sk)))
						goto wait_for_memory;
				}

				/* Time to copy data. We are close to
				 * the end! */
				err = skb_copy_to_page(sk, from, skb, page,
						       off, copy);
				if (err) {
					/* If this page was new, give it to the
					 * socket so it does not get leaked.
					 */
					if (!TCP_PAGE(sk)) {
						TCP_PAGE(sk) = page;
						TCP_OFF(sk) = 0;
					}
					goto do_error;
				}

				/* Update the skb. */
				if (merge) {
					skb_shinfo(skb)->frags[i - 1].size +=
									copy;
				} else {
					skb_fill_page_desc(skb, i, page, off, copy);
					if (TCP_PAGE(sk)) {
						get_page(page);
					} else if (off + copy < PAGE_SIZE) {
						get_page(page);
						TCP_PAGE(sk) = page;
					}
				}

				TCP_OFF(sk) = off + copy;
			}

			if (!copied)
				TCP_SKB_CB(skb)->flags &= ~TCPHDR_PSH;

			tp->write_seq += copy;
			TCP_SKB_CB(skb)->end_seq += copy;
			skb_shinfo(skb)->gso_segs = 0;

			from += copy;
			copied += copy;
			if ((seglen -= copy) == 0 && iovlen == 0)
				goto out;

			if (skb->len < max || (flags & MSG_OOB))
				continue;

			if (forced_push(tp)) {
				tcp_mark_push(tp, skb);
				__tcp_push_pending_frames(sk, mss_now, TCP_NAGLE_PUSH);
			} else if (skb == tcp_send_head(sk))
				tcp_push_one(sk, mss_now);
			continue;

wait_for_sndbuf:
			set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
wait_for_memory:
			if (copied)
				tcp_push(sk, flags & ~MSG_MORE, mss_now, TCP_NAGLE_PUSH);

			if ((err = sk_stream_wait_memory(sk, &timeo)) != 0)
				goto do_error;

			mss_now = tcp_send_mss(sk, &size_goal, flags);
		}
	}

out:
	if (copied)
		tcp_push(sk, flags, mss_now, tp->nonagle);
	TCP_CHECK_TIMER(sk);
	release_sock(sk);
	return copied;

do_fault:
	if (!skb->len) {
		tcp_unlink_write_queue(skb, sk);
		/* It is the one place in all of TCP, except connection
		 * reset, where we can be unlinking the send_head.
		 */
		tcp_check_send_head(sk, skb);
		sk_wmem_free_skb(sk, skb);
	}

do_error:
	if (copied)
		goto out;
out_err:
	err = sk_stream_error(sk, flags, err);
	TCP_CHECK_TIMER(sk);
	release_sock(sk);
	return err;
}
EXPORT_SYMBOL(tcp_sendmsg);

/*
 *	Handle reading urgent data. BSD has very simple semantics for
 *	this, no blocking and very strange errors 8)
 */

static int tcp_recv_urg(struct sock *sk, struct msghdr *msg, int len, int flags)
{
	struct tcp_sock *tp = tcp_sk(sk);

	/* No URG data to read. */
	if (sock_flag(sk, SOCK_URGINLINE) || !tp->urg_data ||
	    tp->urg_data == TCP_URG_READ)
		return -EINVAL;	/* Yes this is right ! */

	if (sk->sk_state == TCP_CLOSE && !sock_flag(sk, SOCK_DONE))
		return -ENOTCONN;

	if (tp->urg_data & TCP_URG_VALID) {
		int err = 0;
		char c = tp->urg_data;

		if (!(flags & MSG_PEEK))
			tp->urg_data = TCP_URG_READ;

		/* Read urgent data. */
		msg->msg_flags |= MSG_OOB;

		if (len > 0) {
			if (!(flags & MSG_TRUNC))
				err = memcpy_toiovec(msg->msg_iov, &c, 1);
			len = 1;
		} else
			msg->msg_flags |= MSG_TRUNC;

		return err ? -EFAULT : len;
	}

	if (sk->sk_state == TCP_CLOSE || (sk->sk_shutdown & RCV_SHUTDOWN))
		return 0;

	/* Fixed the recv(..., MSG_OOB) behaviour. BSD docs and
	 * the available implementations agree in this case:
	 * this call should never block, independent of the
	 * blocking state of the socket.
	 * Mike <pall@rz.uni-karlsruhe.de>
	 */
	return -EAGAIN;
}

/* Clean up the receive buffer for full frames taken by the user,
 * then send an ACK if necessary.  COPIED is the number of bytes
 * tcp_recvmsg has given to the user so far, it speeds up the
 * calculation of whether or not we must ACK for the sake of
 * a window update.
 */
void tcp_cleanup_rbuf(struct sock *sk, int copied)
{
	struct tcp_sock *tp = tcp_sk(sk);
	int time_to_ack = 0;

#if TCP_DEBUG
	struct sk_buff *skb = skb_peek(&sk->sk_receive_queue);

	WARN(skb && !before(tp->copied_seq, TCP_SKB_CB(skb)->end_seq),
	     KERN_INFO "cleanup rbuf bug: copied %X seq %X rcvnxt %X\n",
	     tp->copied_seq, TCP_SKB_CB(skb)->end_seq, tp->rcv_nxt);
#endif

	if (inet_csk_ack_scheduled(sk)) {
		const struct inet_connection_sock *icsk = inet_csk(sk);
		/* Delayed ACKs frequently hit locked sockets during bulk
		 * receive. */
		if (icsk->icsk_ack.blocked ||
		    /* Once-per-two-segments ACK was not sent by tcp_input.c */
		    tp->rcv_nxt - tp->rcv_wup > icsk->icsk_ack.rcv_mss ||
		    /*
		     * If this read emptied read buffer, we send ACK, if
		     * connection is not bidirectional, user drained
		     * receive buffer and there was a small segment
		     * in queue.
		     */
		    (copied > 0 &&
		     ((icsk->icsk_ack.pending & ICSK_ACK_PUSHED2) ||
		      ((icsk->icsk_ack.pending & ICSK_ACK_PUSHED) &&
		       !icsk->icsk_ack.pingpong)) &&
		     !atomic_read(&sk->sk_rmem_alloc)))
			time_to_ack = 1;
	}

	/* We send an ACK if we can now advertise a non-zero window
	 * which has been raised "significantly".
	 *
	 * Even if window raised up to infinity, do not send window open ACK
	 * in states, where we will not receive more. It is useless.
	 */
	if (copied > 0 && !time_to_ack && !(sk->sk_shutdown & RCV_SHUTDOWN)) {
		__u32 rcv_window_now = tcp_receive_window(tp);

		/* Optimize, __tcp_select_window() is not cheap. */
		if (2*rcv_window_now <= tp->window_clamp) {
			__u32 new_window = __tcp_select_window(sk);

			/* Send ACK now, if this read freed lots of space
			 * in our buffer. Certainly, new_window is new window.
			 * We can advertise it now, if it is not less than current one.
			 * "Lots" means "at least twice" here.
			 */
			if (new_window && new_window >= 2 * rcv_window_now)
				time_to_ack = 1;
		}
	}
	if (time_to_ack)
		tcp_send_ack(sk);
}
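
/*
 * Worked example (assumed numbers, illustrative only): with window_clamp =
 * 65535 and a currently advertised window of 16384 bytes, 2 * 16384 <= 65535,
 * so __tcp_select_window() is consulted; if the space freed by the read lets
 * it offer at least 32768 bytes, the "at least twice" rule above triggers an
 * immediate window-update ACK.
 */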

static void tcp_prequeue_process(struct sock *sk)
{
	struct sk_buff *skb;
	struct tcp_sock *tp = tcp_sk(sk);

	NET_INC_STATS_USER(sock_net(sk), LINUX_MIB_TCPPREQUEUED);

	/* RX process wants to run with disabled BHs, though it is not
	 * necessary */
	local_bh_disable();
	while ((skb = __skb_dequeue(&tp->ucopy.prequeue)) != NULL)
		sk_backlog_rcv(sk, skb);
	local_bh_enable();

	/* Clear memory counter. */
	tp->ucopy.memory = 0;
}

#ifdef CONFIG_NET_DMA
static void tcp_service_net_dma(struct sock *sk, bool wait)
{
	dma_cookie_t done, used;
	dma_cookie_t last_issued;
	struct tcp_sock *tp = tcp_sk(sk);

	if (!tp->ucopy.dma_chan)
		return;

	last_issued = tp->ucopy.dma_cookie;
	dma_async_memcpy_issue_pending(tp->ucopy.dma_chan);

	do {
		if (dma_async_memcpy_complete(tp->ucopy.dma_chan,
					      last_issued, &done,
					      &used) == DMA_SUCCESS) {
			/* Safe to free early-copied skbs now */
			__skb_queue_purge(&sk->sk_async_wait_queue);
			break;
		} else {
			struct sk_buff *skb;
			while ((skb = skb_peek(&sk->sk_async_wait_queue)) &&
			       (dma_async_is_complete(skb->dma_cookie, done,
						      used) == DMA_SUCCESS)) {
				__skb_dequeue(&sk->sk_async_wait_queue);
				kfree_skb(skb);
			}
		}
	} while (wait);
}
#endif

static inline struct sk_buff *tcp_recv_skb(struct sock *sk, u32 seq, u32 *off)
{
	struct sk_buff *skb;
	u32 offset;

	skb_queue_walk(&sk->sk_receive_queue, skb) {
		offset = seq - TCP_SKB_CB(skb)->seq;
		if (tcp_hdr(skb)->syn)
			offset--;
		if (offset < skb->len || tcp_hdr(skb)->fin) {
			*off = offset;
			return skb;
		}
	}
	return NULL;
}

/*
 * This routine provides an alternative to tcp_recvmsg() for routines
 * that would like to handle copying from skbuffs directly in 'sendfile'
 * fashion.
 * Note:
 *	- It is assumed that the socket was locked by the caller.
 *	- The routine does not block.
 *	- At present, there is no support for reading OOB data
 *	  or for 'peeking' the socket using this routine
 *	  (although both would be easy to implement).
 */
int tcp_read_sock(struct sock *sk, read_descriptor_t *desc,
		  sk_read_actor_t recv_actor)
{
	struct sk_buff *skb;
	struct tcp_sock *tp = tcp_sk(sk);
	u32 seq = tp->copied_seq;
	u32 offset;
	int copied = 0;

	if (sk->sk_state == TCP_LISTEN)
		return -ENOTCONN;
	while ((skb = tcp_recv_skb(sk, seq, &offset)) != NULL) {
		if (offset < skb->len) {
			int used;
			size_t len;

			len = skb->len - offset;
			/* Stop reading if we hit a patch of urgent data */
			if (tp->urg_data) {
				u32 urg_offset = tp->urg_seq - seq;
				if (urg_offset < len)
					len = urg_offset;
				if (!len)
					break;
			}
			used = recv_actor(desc, skb, offset, len);
			if (used < 0) {
				if (!copied)
					copied = used;
				break;
			} else if (used <= len) {
				seq += used;
				copied += used;
				offset += used;
			}
			/*
			 * If recv_actor drops the lock (e.g. TCP splice
			 * receive) the skb pointer might be invalid when
			 * getting here: tcp_collapse might have deleted it
			 * while aggregating skbs from the socket queue.
			 */
			skb = tcp_recv_skb(sk, seq-1, &offset);
			if (!skb || (offset+1 != skb->len))
				break;
		}
		if (tcp_hdr(skb)->fin) {
			sk_eat_skb(sk, skb, 0);
			++seq;
			break;
		}
		sk_eat_skb(sk, skb, 0);
		if (!desc->count)
			break;
		tp->copied_seq = seq;
	}
	tp->copied_seq = seq;

	tcp_rcv_space_adjust(sk);

	/* Clean up data we have read: This will do ACK frames. */
	if (copied > 0)
		tcp_cleanup_rbuf(sk, copied);
	return copied;
}
EXPORT_SYMBOL(tcp_read_sock);
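
/*
 * Usage note (an observation, not part of the original comments): the
 * recv_actor callback follows the sk_read_actor_t signature used by
 * tcp_splice_data_recv() above; it is handed each in-sequence chunk of
 * receive-queue data and returns the number of bytes it consumed, with a
 * negative value aborting the walk and being returned to the caller.
 */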

/*
 *	This routine copies from a sock struct into the user buffer.
 *
 *	Technical note: in 2.3 we work on _locked_ socket, so that
 *	tricks with *seq access order and skb->users are not required.
 *	Probably, code can be easily improved even more.
 */

int tcp_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
		size_t len, int nonblock, int flags, int *addr_len)
{
	struct tcp_sock *tp = tcp_sk(sk);
	int copied = 0;
	u32 peek_seq;
	u32 *seq;
	unsigned long used;
	int err;
	int target;		/* Read at least this many bytes */
	long timeo;
	struct task_struct *user_recv = NULL;
	int copied_early = 0;
	struct sk_buff *skb;
	u32 urg_hole = 0;

	lock_sock(sk);

	TCP_CHECK_TIMER(sk);

	err = -ENOTCONN;
	if (sk->sk_state == TCP_LISTEN)
		goto out;

	timeo = sock_rcvtimeo(sk, nonblock);

	/* Urgent data needs to be handled specially. */
	if (flags & MSG_OOB)
		goto recv_urg;

	seq = &tp->copied_seq;
	if (flags & MSG_PEEK) {
		peek_seq = tp->copied_seq;
		seq = &peek_seq;
	}

	target = sock_rcvlowat(sk, flags & MSG_WAITALL, len);

#ifdef CONFIG_NET_DMA
	tp->ucopy.dma_chan = NULL;
	preempt_disable();
	skb = skb_peek_tail(&sk->sk_receive_queue);
	{
		int available = 0;

		if (skb)
			available = TCP_SKB_CB(skb)->seq + skb->len - (*seq);
		if ((available < target) &&
		    (len > sysctl_tcp_dma_copybreak) && !(flags & MSG_PEEK) &&
		    !sysctl_tcp_low_latency &&
		    dma_find_channel(DMA_MEMCPY)) {
			preempt_enable_no_resched();
			tp->ucopy.pinned_list =
					dma_pin_iovec_pages(msg->msg_iov, len);
		} else {
			preempt_enable_no_resched();
		}
	}
#endif

	do {
		u32 offset;

		/* Are we at urgent data? Stop if we have read anything or have SIGURG pending. */
		if (tp->urg_data && tp->urg_seq == *seq) {
			if (copied)
				break;
			if (signal_pending(current)) {
				copied = timeo ? sock_intr_errno(timeo) : -EAGAIN;
				break;
			}
		}

		/* Next get a buffer. */

		skb_queue_walk(&sk->sk_receive_queue, skb) {
			/* Now that we have two receive queues this
			 * shouldn't happen.
			 */
			if (WARN(before(*seq, TCP_SKB_CB(skb)->seq),
				 KERN_INFO "recvmsg bug: copied %X "
					   "seq %X rcvnxt %X fl %X\n", *seq,
					   TCP_SKB_CB(skb)->seq, tp->rcv_nxt,
					   flags))
				break;

			offset = *seq - TCP_SKB_CB(skb)->seq;
			if (tcp_hdr(skb)->syn)
				offset--;
			if (offset < skb->len)
				goto found_ok_skb;
			if (tcp_hdr(skb)->fin)
				goto found_fin_ok;
			WARN(!(flags & MSG_PEEK), KERN_INFO "recvmsg bug 2: "
					"copied %X seq %X rcvnxt %X fl %X\n",
					*seq, TCP_SKB_CB(skb)->seq,
					tp->rcv_nxt, flags);
		}

		/* Well, if we have backlog, try to process it now. */

		if (copied >= target && !sk->sk_backlog.tail)
			break;

		if (copied) {
			if (sk->sk_err ||
			    sk->sk_state == TCP_CLOSE ||
			    (sk->sk_shutdown & RCV_SHUTDOWN) ||
			    !timeo ||
			    signal_pending(current))
				break;
		} else {
			if (sock_flag(sk, SOCK_DONE))
				break;

			if (sk->sk_err) {
				copied = sock_error(sk);
				break;
			}

			if (sk->sk_shutdown & RCV_SHUTDOWN)
				break;

			if (sk->sk_state == TCP_CLOSE) {
				if (!sock_flag(sk, SOCK_DONE)) {
					/* This occurs when user tries to read
					 * from never connected socket.
					 */
					copied = -ENOTCONN;
					break;
				}
				break;
			}

			if (!timeo) {
				copied = -EAGAIN;
				break;
			}

			if (signal_pending(current)) {
				copied = sock_intr_errno(timeo);
				break;
			}
		}

		tcp_cleanup_rbuf(sk, copied);

		if (!sysctl_tcp_low_latency && tp->ucopy.task == user_recv) {
			/* Install new reader */
			if (!user_recv && !(flags & (MSG_TRUNC | MSG_PEEK))) {
				user_recv = current;
				tp->ucopy.task = user_recv;
				tp->ucopy.iov = msg->msg_iov;
			}

			tp->ucopy.len = len;

			WARN_ON(tp->copied_seq != tp->rcv_nxt &&
				!(flags & (MSG_PEEK | MSG_TRUNC)));

			/* Ugly... If prequeue is not empty, we have to
			 * process it before releasing socket, otherwise
			 * order will be broken at second iteration.
			 * More elegant solution is required!!!
			 *
			 * Look: we have the following (pseudo)queues:
			 *
			 * 1. packets in flight
			 * 2. backlog
			 * 3. prequeue
			 * 4. receive_queue
			 *
			 * Each queue can be processed only if the next ones
			 * are empty. At this point we have empty receive_queue.
			 * But prequeue _can_ be not empty after 2nd iteration,
			 * when we jumped to start of loop because backlog
			 * processing added something to receive_queue.
			 * We cannot release_sock(), because backlog contains
			 * packets arrived _after_ prequeued ones.
			 *
			 * Shortly, algorithm is clear --- to process all
			 * the queues in order. We could make it more directly,
			 * requeueing packets from backlog to prequeue, if it
			 * is not empty. It is more elegant, but eats cycles,
			 * unfortunately.
			 */
			if (!skb_queue_empty(&tp->ucopy.prequeue))
				goto do_prequeue;

			/* __ Set realtime policy in scheduler __ */
		}

#ifdef CONFIG_NET_DMA
		if (tp->ucopy.dma_chan)
			dma_async_memcpy_issue_pending(tp->ucopy.dma_chan);
#endif
		if (copied >= target) {
			/* Do not sleep, just process backlog. */
			release_sock(sk);
			lock_sock(sk);
		} else
			sk_wait_data(sk, &timeo);

#ifdef CONFIG_NET_DMA
		tcp_service_net_dma(sk, false);  /* Don't block */
		tp->ucopy.wakeup = 0;
#endif

		if (user_recv) {
			int chunk;

			/* __ Restore normal policy in scheduler __ */

			if ((chunk = len - tp->ucopy.len) != 0) {
				NET_ADD_STATS_USER(sock_net(sk), LINUX_MIB_TCPDIRECTCOPYFROMBACKLOG, chunk);
				len -= chunk;
				copied += chunk;
			}

			if (tp->rcv_nxt == tp->copied_seq &&
			    !skb_queue_empty(&tp->ucopy.prequeue)) {
do_prequeue:
				tcp_prequeue_process(sk);

				if ((chunk = len - tp->ucopy.len) != 0) {
					NET_ADD_STATS_USER(sock_net(sk), LINUX_MIB_TCPDIRECTCOPYFROMPREQUEUE, chunk);
					len -= chunk;
					copied += chunk;
				}
			}
		}
		if ((flags & MSG_PEEK) &&
		    (peek_seq - copied - urg_hole != tp->copied_seq)) {
			if (net_ratelimit())
				printk(KERN_DEBUG "TCP(%s:%d): Application bug, race in MSG_PEEK.\n",
				       current->comm, task_pid_nr(current));
			peek_seq = tp->copied_seq;
		}
		continue;

	found_ok_skb:
		/* Ok so how much can we use? */
		used = skb->len - offset;
		if (len < used)
			used = len;

		/* Do we have urgent data here? */
		if (tp->urg_data) {
			u32 urg_offset = tp->urg_seq - *seq;
			if (urg_offset < used) {
				if (!urg_offset) {
					if (!sock_flag(sk, SOCK_URGINLINE)) {
						++*seq;
						urg_hole++;
						offset++;
						used--;
						if (!used)
							goto skip_copy;
					}
				} else
					used = urg_offset;
			}
		}

		if (!(flags & MSG_TRUNC)) {
#ifdef CONFIG_NET_DMA
			if (!tp->ucopy.dma_chan && tp->ucopy.pinned_list)
				tp->ucopy.dma_chan = dma_find_channel(DMA_MEMCPY);

			if (tp->ucopy.dma_chan) {
				tp->ucopy.dma_cookie = dma_skb_copy_datagram_iovec(
					tp->ucopy.dma_chan, skb, offset,
					msg->msg_iov, used,
					tp->ucopy.pinned_list);

				if (tp->ucopy.dma_cookie < 0) {

					printk(KERN_ALERT "dma_cookie < 0\n");

					/* Exception. Bailout! */
					if (!copied)
						copied = -EFAULT;
					break;
				}

				dma_async_memcpy_issue_pending(tp->ucopy.dma_chan);

				if ((offset + used) == skb->len)
					copied_early = 1;

			} else
#endif
			{
				err = skb_copy_datagram_iovec(skb, offset,
						msg->msg_iov, used);
				if (err) {
					/* Exception. Bailout! */
					if (!copied)
						copied = -EFAULT;
					break;
				}
			}
		}

		*seq += used;
		copied += used;
		len -= used;

		tcp_rcv_space_adjust(sk);

skip_copy:
		if (tp->urg_data && after(tp->copied_seq, tp->urg_seq)) {
			tp->urg_data = 0;
			tcp_fast_path_check(sk);
		}
		if (used + offset < skb->len)
			continue;

		if (tcp_hdr(skb)->fin)
			goto found_fin_ok;
		if (!(flags & MSG_PEEK)) {
			sk_eat_skb(sk, skb, copied_early);
			copied_early = 0;
		}
		continue;

	found_fin_ok:
		/* Process the FIN. */
		++*seq;
		if (!(flags & MSG_PEEK)) {
			sk_eat_skb(sk, skb, copied_early);
			copied_early = 0;
		}
		break;
	} while (len > 0);

	if (user_recv) {
		if (!skb_queue_empty(&tp->ucopy.prequeue)) {
			int chunk;

			tp->ucopy.len = copied > 0 ? len : 0;

			tcp_prequeue_process(sk);

			if (copied > 0 && (chunk = len - tp->ucopy.len) != 0) {
				NET_ADD_STATS_USER(sock_net(sk), LINUX_MIB_TCPDIRECTCOPYFROMPREQUEUE, chunk);
1746 | len -= chunk; | |
1747 | copied += chunk; | |
1748 | } | |
1749 | } | |
1750 | ||
1751 | tp->ucopy.task = NULL; | |
1752 | tp->ucopy.len = 0; | |
1753 | } | |
1754 | ||
1755 | #ifdef CONFIG_NET_DMA | |
1756 | tcp_service_net_dma(sk, true); /* Wait for queue to drain */ | |
1757 | tp->ucopy.dma_chan = NULL; | |
1758 | ||
1759 | if (tp->ucopy.pinned_list) { | |
1760 | dma_unpin_iovec_pages(tp->ucopy.pinned_list); | |
1761 | tp->ucopy.pinned_list = NULL; | |
1762 | } | |
1763 | #endif | |
1764 | ||
1765 | /* According to UNIX98, msg_name/msg_namelen are ignored | |
1766 | * on a connected socket. I was just happy when I found this 8) --ANK | |
1767 | */ | |
1768 | ||
1769 | /* Clean up data we have read: This will do ACK frames. */ | |
1770 | tcp_cleanup_rbuf(sk, copied); | |
1771 | ||
1772 | TCP_CHECK_TIMER(sk); | |
1773 | release_sock(sk); | |
1774 | return copied; | |
1775 | ||
1776 | out: | |
1777 | TCP_CHECK_TIMER(sk); | |
1778 | release_sock(sk); | |
1779 | return err; | |
1780 | ||
1781 | recv_urg: | |
1782 | err = tcp_recv_urg(sk, msg, len, flags); | |
1783 | goto out; | |
1784 | } | |
1785 | EXPORT_SYMBOL(tcp_recvmsg); | |
1786 | ||
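The urgent-data handling above (the urg_hole accounting and the recv_urg path) is easiest to see from user space. Below is a minimal illustrative sketch, not part of this file, assuming an already-connected TCP descriptor fd. With SO_OOBINLINE set, the urgent byte stays in the normal stream (SOCK_URGINLINE on the kernel side); otherwise it is fetched out of band with MSG_OOB, which reaches tcp_recv_urg() via the recv_urg label.

#include <stdio.h>
#include <sys/socket.h>

/* Sketch: read one byte of TCP urgent data, inline or out of band. */
static void read_urgent(int fd, int inline_mode)
{
        char byte;

        if (inline_mode) {
                int one = 1;

                /* SOCK_URGINLINE: the urgent byte is left in the stream,
                 * so tcp_recvmsg() does not skip it (no urg_hole). */
                setsockopt(fd, SOL_SOCKET, SO_OOBINLINE, &one, sizeof(one));
                if (recv(fd, &byte, 1, 0) == 1)
                        printf("inline urgent byte: %c\n", byte);
        } else {
                /* MSG_OOB goes down the recv_urg path; the normal stream
                 * then shows a one-byte hole where the urgent byte was. */
                if (recv(fd, &byte, 1, MSG_OOB) == 1)
                        printf("out-of-band urgent byte: %c\n", byte);
        }
}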
1787 | void tcp_set_state(struct sock *sk, int state) | |
1788 | { | |
1789 | int oldstate = sk->sk_state; | |
1790 | ||
1791 | switch (state) { | |
1792 | case TCP_ESTABLISHED: | |
1793 | if (oldstate != TCP_ESTABLISHED) | |
1794 | TCP_INC_STATS(sock_net(sk), TCP_MIB_CURRESTAB); | |
1795 | break; | |
1796 | ||
1797 | case TCP_CLOSE: | |
1798 | if (oldstate == TCP_CLOSE_WAIT || oldstate == TCP_ESTABLISHED) | |
1799 | TCP_INC_STATS(sock_net(sk), TCP_MIB_ESTABRESETS); | |
1800 | ||
1801 | sk->sk_prot->unhash(sk); | |
1802 | if (inet_csk(sk)->icsk_bind_hash && | |
1803 | !(sk->sk_userlocks & SOCK_BINDPORT_LOCK)) | |
1804 | inet_put_port(sk); | |
1805 | /* fall through */ | |
1806 | default: | |
1807 | if (oldstate == TCP_ESTABLISHED) | |
1808 | TCP_DEC_STATS(sock_net(sk), TCP_MIB_CURRESTAB); | |
1809 | } | |
1810 | ||
1811 | /* Change state AFTER socket is unhashed to avoid closed | |
1812 | * socket sitting in hash tables. | |
1813 | */ | |
1814 | sk->sk_state = state; | |
1815 | ||
1816 | #ifdef STATE_TRACE | |
1817 | SOCK_DEBUG(sk, "TCP sk=%p, State %s -> %s\n", sk, statename[oldstate], statename[state]); | |
1818 | #endif | |
1819 | } | |
1820 | EXPORT_SYMBOL_GPL(tcp_set_state); | |
1821 | ||
1822 | /* | |
1823 | * State processing on a close. This implements the state shift for | |
1824 | * sending our FIN frame. Note that we only send a FIN for some | |
1825 | * states. A shutdown() may have already sent the FIN, or we may be | |
1826 | * closed. | |
1827 | */ | |
1828 | ||
1829 | static const unsigned char new_state[16] = { | |
1830 | /* current state: new state: action: */ | |
1831 | /* (Invalid) */ TCP_CLOSE, | |
1832 | /* TCP_ESTABLISHED */ TCP_FIN_WAIT1 | TCP_ACTION_FIN, | |
1833 | /* TCP_SYN_SENT */ TCP_CLOSE, | |
1834 | /* TCP_SYN_RECV */ TCP_FIN_WAIT1 | TCP_ACTION_FIN, | |
1835 | /* TCP_FIN_WAIT1 */ TCP_FIN_WAIT1, | |
1836 | /* TCP_FIN_WAIT2 */ TCP_FIN_WAIT2, | |
1837 | /* TCP_TIME_WAIT */ TCP_CLOSE, | |
1838 | /* TCP_CLOSE */ TCP_CLOSE, | |
1839 | /* TCP_CLOSE_WAIT */ TCP_LAST_ACK | TCP_ACTION_FIN, | |
1840 | /* TCP_LAST_ACK */ TCP_LAST_ACK, | |
1841 | /* TCP_LISTEN */ TCP_CLOSE, | |
1842 | /* TCP_CLOSING */ TCP_CLOSING, | |
1843 | }; | |
1844 | ||
1845 | static int tcp_close_state(struct sock *sk) | |
1846 | { | |
1847 | int next = (int)new_state[sk->sk_state]; | |
1848 | int ns = next & TCP_STATE_MASK; | |
1849 | ||
1850 | tcp_set_state(sk, ns); | |
1851 | ||
1852 | return next & TCP_ACTION_FIN; | |
1853 | } | |
1854 | ||
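The new_state[] table above packs two facts into each byte: the next state in the low bits and a "queue a FIN now" flag in a high bit, which tcp_close_state() splits apart with TCP_STATE_MASK and TCP_ACTION_FIN. The standalone sketch below illustrates that encoding; its local constants merely mirror the kernel's state numbering for the example and are assumptions of the sketch, not values defined in this file.

#include <stdio.h>

enum { S_ESTABLISHED = 1, S_FIN_WAIT1 = 4, S_CLOSE = 7, S_CLOSE_WAIT = 8, S_LAST_ACK = 9 };
#define STATE_MASK      0xF             /* low bits: next state */
#define ACTION_FIN      (1 << 7)        /* high bit: send a FIN now */

static const unsigned char demo_new_state[16] = {
        [S_ESTABLISHED] = S_FIN_WAIT1 | ACTION_FIN,
        [S_CLOSE_WAIT]  = S_LAST_ACK  | ACTION_FIN,
        [S_LAST_ACK]    = S_LAST_ACK,
};

int main(void)
{
        int next = demo_new_state[S_ESTABLISHED];

        /* tcp_close_state() performs exactly this split: enter the masked
         * state and tell the caller whether a FIN must be queued. */
        printf("next state %d, send FIN: %s\n",
               next & STATE_MASK, (next & ACTION_FIN) ? "yes" : "no");
        return 0;
}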
1855 | /* | |
1856 | * Shutdown the sending side of a connection. Much like close except | |
1857 |  * that we don't shut down the receive side or sock_set_flag(sk, SOCK_DEAD). | |
1858 | */ | |
1859 | ||
1860 | void tcp_shutdown(struct sock *sk, int how) | |
1861 | { | |
1862 | /* We need to grab some memory, and put together a FIN, | |
1863 | * and then put it into the queue to be sent. | |
1864 | * Tim MacKenzie(tym@dibbler.cs.monash.edu.au) 4 Dec '92. | |
1865 | */ | |
1866 | if (!(how & SEND_SHUTDOWN)) | |
1867 | return; | |
1868 | ||
1869 | /* If we've already sent a FIN, or it's a closed state, skip this. */ | |
1870 | if ((1 << sk->sk_state) & | |
1871 | (TCPF_ESTABLISHED | TCPF_SYN_SENT | | |
1872 | TCPF_SYN_RECV | TCPF_CLOSE_WAIT)) { | |
1873 | /* Clear out any half completed packets. FIN if needed. */ | |
1874 | if (tcp_close_state(sk)) | |
1875 | tcp_send_fin(sk); | |
1876 | } | |
1877 | } | |
1878 | EXPORT_SYMBOL(tcp_shutdown); | |
1879 | ||
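From user space, this is what shutdown(fd, SHUT_WR) requests: SEND_SHUTDOWN reaches tcp_shutdown(), which queues a FIN through tcp_close_state()/tcp_send_fin() while the receive side stays open for whatever the peer still has to send. An illustrative sketch (fd is assumed to be a connected TCP socket; not kernel code):

#include <stdio.h>
#include <sys/socket.h>
#include <unistd.h>

/* Half-close: send our FIN, then keep draining the peer's data. */
static void half_close(int fd)
{
        char buf[4096];
        ssize_t n;

        shutdown(fd, SHUT_WR);                  /* FIN goes out here */

        while ((n = read(fd, buf, sizeof(buf))) > 0)
                fwrite(buf, 1, n, stdout);      /* peer may still talk */

        close(fd);                              /* finally drop the fd */
}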
1880 | void tcp_close(struct sock *sk, long timeout) | |
1881 | { | |
1882 | struct sk_buff *skb; | |
1883 | int data_was_unread = 0; | |
1884 | int state; | |
1885 | ||
1886 | lock_sock(sk); | |
1887 | sk->sk_shutdown = SHUTDOWN_MASK; | |
1888 | ||
1889 | if (sk->sk_state == TCP_LISTEN) { | |
1890 | tcp_set_state(sk, TCP_CLOSE); | |
1891 | ||
1892 | /* Special case. */ | |
1893 | inet_csk_listen_stop(sk); | |
1894 | ||
1895 | goto adjudge_to_death; | |
1896 | } | |
1897 | ||
1898 | /* We need to flush the recv. buffs. We do this only on the | |
1899 | * descriptor close, not protocol-sourced closes, because the | |
1900 | * reader process may not have drained the data yet! | |
1901 | */ | |
1902 | while ((skb = __skb_dequeue(&sk->sk_receive_queue)) != NULL) { | |
1903 | u32 len = TCP_SKB_CB(skb)->end_seq - TCP_SKB_CB(skb)->seq - | |
1904 | tcp_hdr(skb)->fin; | |
1905 | data_was_unread += len; | |
1906 | __kfree_skb(skb); | |
1907 | } | |
1908 | ||
1909 | sk_mem_reclaim(sk); | |
1910 | ||
1911 | /* If the socket has already been reset (e.g. in tcp_reset()), kill it. */ | |
1912 | if (sk->sk_state == TCP_CLOSE) | |
1913 | goto adjudge_to_death; | |
1914 | ||
1915 | /* As outlined in RFC 2525, section 2.17, we send a RST here because | |
1916 | * data was lost. To witness the awful effects of the old behavior of | |
1917 | * always doing a FIN, run an older 2.1.x kernel or 2.0.x, start a bulk | |
1918 | * GET in an FTP client, suspend the process, wait for the client to | |
1919 | * advertise a zero window, then kill -9 the FTP client, wheee... | |
1920 | * Note: timeout is always zero in such a case. | |
1921 | */ | |
1922 | if (data_was_unread) { | |
1923 | /* Unread data was tossed, zap the connection. */ | |
1924 | NET_INC_STATS_USER(sock_net(sk), LINUX_MIB_TCPABORTONCLOSE); | |
1925 | tcp_set_state(sk, TCP_CLOSE); | |
1926 | tcp_send_active_reset(sk, sk->sk_allocation); | |
1927 | } else if (sock_flag(sk, SOCK_LINGER) && !sk->sk_lingertime) { | |
1928 | /* Check zero linger _after_ checking for unread data. */ | |
1929 | sk->sk_prot->disconnect(sk, 0); | |
1930 | NET_INC_STATS_USER(sock_net(sk), LINUX_MIB_TCPABORTONDATA); | |
1931 | } else if (tcp_close_state(sk)) { | |
1932 | /* We FIN if the application ate all the data before | |
1933 | * zapping the connection. | |
1934 | */ | |
1935 | ||
1936 | /* RED-PEN. Formally speaking, we have broken the TCP state | |
1937 | * machine. State transitions: | |
1938 | * | |
1939 | * TCP_ESTABLISHED -> TCP_FIN_WAIT1 | |
1940 | * TCP_SYN_RECV -> TCP_FIN_WAIT1 (forget it, it's impossible) | |
1941 | * TCP_CLOSE_WAIT -> TCP_LAST_ACK | |
1942 | * | |
1943 | * are legal only when FIN has been sent (i.e. in window), | |
1944 | * rather than queued out of window. Purists blame. | |
1945 | * | |
1946 | * F.e. "RFC state" is ESTABLISHED, | |
1947 | * if Linux state is FIN-WAIT-1, but FIN is still not sent. | |
1948 | * | |
1949 | * The visible deviations are that sometimes | |
1950 | * we enter the time-wait state when it is not really required | |
1951 | * (harmless), and do not send active resets when they are | |
1952 | * required by the specs (TCP_ESTABLISHED and TCP_CLOSE_WAIT, when | |
1953 | * they look like CLOSING or LAST_ACK to Linux). | |
1954 | * Probably, I missed some more holelets. | |
1955 | * --ANK | |
1956 | */ | |
1957 | tcp_send_fin(sk); | |
1958 | } | |
1959 | ||
1960 | sk_stream_wait_close(sk, timeout); | |
1961 | ||
1962 | adjudge_to_death: | |
1963 | state = sk->sk_state; | |
1964 | sock_hold(sk); | |
1965 | sock_orphan(sk); | |
1966 | ||
1967 | /* It is the last release_sock in its life. It will remove backlog. */ | |
1968 | release_sock(sk); | |
1969 | ||
1970 | ||
1971 | /* Now socket is owned by kernel and we acquire BH lock | |
1972 | to finish close. No need to check for user refs. | |
1973 | */ | |
1974 | local_bh_disable(); | |
1975 | bh_lock_sock(sk); | |
1976 | WARN_ON(sock_owned_by_user(sk)); | |
1977 | ||
1978 | percpu_counter_inc(sk->sk_prot->orphan_count); | |
1979 | ||
1980 | /* Have we already been destroyed by a softirq or backlog? */ | |
1981 | if (state != TCP_CLOSE && sk->sk_state == TCP_CLOSE) | |
1982 | goto out; | |
1983 | ||
1984 | /* This is a (useful) BSD violation of the RFC. There is a | |
1985 | * problem with TCP as specified, in that the other end could | |
1986 | * keep a socket open forever with no application left at this end. | |
1987 | * We use a 3 minute timeout (about the same as BSD) then kill | |
1988 | * our end. If they send after that then tough - BUT: long enough | |
1989 | * that we won't make the old 4*rto = almost no time - whoops | |
1990 | * reset mistake. | |
1991 | * | |
1992 | * Nope, it was not a mistake. It is really the desired behaviour, | |
1993 | * e.g. on HTTP servers, where such sockets are useless, but | |
1994 | * consume significant resources. Let's do it with special | |
1995 | * linger2 option. --ANK | |
1996 | */ | |
1997 | ||
1998 | if (sk->sk_state == TCP_FIN_WAIT2) { | |
1999 | struct tcp_sock *tp = tcp_sk(sk); | |
2000 | if (tp->linger2 < 0) { | |
2001 | tcp_set_state(sk, TCP_CLOSE); | |
2002 | tcp_send_active_reset(sk, GFP_ATOMIC); | |
2003 | NET_INC_STATS_BH(sock_net(sk), | |
2004 | LINUX_MIB_TCPABORTONLINGER); | |
2005 | } else { | |
2006 | const int tmo = tcp_fin_time(sk); | |
2007 | ||
2008 | if (tmo > TCP_TIMEWAIT_LEN) { | |
2009 | inet_csk_reset_keepalive_timer(sk, | |
2010 | tmo - TCP_TIMEWAIT_LEN); | |
2011 | } else { | |
2012 | tcp_time_wait(sk, TCP_FIN_WAIT2, tmo); | |
2013 | goto out; | |
2014 | } | |
2015 | } | |
2016 | } | |
2017 | if (sk->sk_state != TCP_CLOSE) { | |
2018 | sk_mem_reclaim(sk); | |
2019 | if (tcp_too_many_orphans(sk, 0)) { | |
2020 | if (net_ratelimit()) | |
2021 | printk(KERN_INFO "TCP: too many orphaned " | |
2022 | "sockets\n"); | |
2023 | tcp_set_state(sk, TCP_CLOSE); | |
2024 | tcp_send_active_reset(sk, GFP_ATOMIC); | |
2025 | NET_INC_STATS_BH(sock_net(sk), | |
2026 | LINUX_MIB_TCPABORTONMEMORY); | |
2027 | } | |
2028 | } | |
2029 | ||
2030 | if (sk->sk_state == TCP_CLOSE) | |
2031 | inet_csk_destroy_sock(sk); | |
2032 | /* Otherwise, socket is reprieved until protocol close. */ | |
2033 | ||
2034 | out: | |
2035 | bh_unlock_sock(sk); | |
2036 | local_bh_enable(); | |
2037 | sock_put(sk); | |
2038 | } | |
2039 | EXPORT_SYMBOL(tcp_close); | |
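Two of the branches above are directly visible to applications: closing with unread receive data takes the RFC 2525 section 2.17 abort path, so the peer sees a RST instead of an orderly FIN, and SO_LINGER with l_linger == 0 selects the zero-linger disconnect branch. A hedged user-space sketch of the latter (fd is assumed to be a connected TCP socket):

#include <sys/socket.h>
#include <unistd.h>

/* Abortive close: ask tcp_close() for the zero-linger path. */
static void abortive_close(int fd)
{
        struct linger lin = { .l_onoff = 1, .l_linger = 0 };

        /* With zero linger, tcp_close() disconnects instead of running the
         * normal FIN handshake, so the remote end receives a reset. */
        setsockopt(fd, SOL_SOCKET, SO_LINGER, &lin, sizeof(lin));
        close(fd);
}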
2040 | ||
2041 | /* These states need RST on ABORT according to RFC793 */ | |
2042 | ||
2043 | static inline int tcp_need_reset(int state) | |
2044 | { | |
2045 | return (1 << state) & | |
2046 | (TCPF_ESTABLISHED | TCPF_CLOSE_WAIT | TCPF_FIN_WAIT1 | | |
2047 | TCPF_FIN_WAIT2 | TCPF_SYN_RECV); | |
2048 | } | |
2049 | ||
2050 | int tcp_disconnect(struct sock *sk, int flags) | |
2051 | { | |
2052 | struct inet_sock *inet = inet_sk(sk); | |
2053 | struct inet_connection_sock *icsk = inet_csk(sk); | |
2054 | struct tcp_sock *tp = tcp_sk(sk); | |
2055 | int err = 0; | |
2056 | int old_state = sk->sk_state; | |
2057 | ||
2058 | if (old_state != TCP_CLOSE) | |
2059 | tcp_set_state(sk, TCP_CLOSE); | |
2060 | ||
2061 | /* ABORT function of RFC793 */ | |
2062 | if (old_state == TCP_LISTEN) { | |
2063 | inet_csk_listen_stop(sk); | |
2064 | } else if (tcp_need_reset(old_state) || | |
2065 | (tp->snd_nxt != tp->write_seq && | |
2066 | (1 << old_state) & (TCPF_CLOSING | TCPF_LAST_ACK))) { | |
2067 | /* The last check adjusts for discrepancy of Linux wrt. RFC | |
2068 | * states | |
2069 | */ | |
2070 | tcp_send_active_reset(sk, gfp_any()); | |
2071 | sk->sk_err = ECONNRESET; | |
2072 | } else if (old_state == TCP_SYN_SENT) | |
2073 | sk->sk_err = ECONNRESET; | |
2074 | ||
2075 | tcp_clear_xmit_timers(sk); | |
2076 | __skb_queue_purge(&sk->sk_receive_queue); | |
2077 | tcp_write_queue_purge(sk); | |
2078 | __skb_queue_purge(&tp->out_of_order_queue); | |
2079 | #ifdef CONFIG_NET_DMA | |
2080 | __skb_queue_purge(&sk->sk_async_wait_queue); | |
2081 | #endif | |
2082 | ||
2083 | inet->inet_dport = 0; | |
2084 | ||
2085 | if (!(sk->sk_userlocks & SOCK_BINDADDR_LOCK)) | |
2086 | inet_reset_saddr(sk); | |
2087 | ||
2088 | sk->sk_shutdown = 0; | |
2089 | sock_reset_flag(sk, SOCK_DONE); | |
2090 | tp->srtt = 0; | |
2091 | if ((tp->write_seq += tp->max_window + 2) == 0) | |
2092 | tp->write_seq = 1; | |
2093 | icsk->icsk_backoff = 0; | |
2094 | tp->snd_cwnd = 2; | |
2095 | icsk->icsk_probes_out = 0; | |
2096 | tp->packets_out = 0; | |
2097 | tp->snd_ssthresh = TCP_INFINITE_SSTHRESH; | |
2098 | tp->snd_cwnd_cnt = 0; | |
2099 | tp->bytes_acked = 0; | |
2100 | tp->window_clamp = 0; | |
2101 | tcp_set_ca_state(sk, TCP_CA_Open); | |
2102 | tcp_clear_retrans(tp); | |
2103 | inet_csk_delack_init(sk); | |
2104 | tcp_init_send_head(sk); | |
2105 | memset(&tp->rx_opt, 0, sizeof(tp->rx_opt)); | |
2106 | __sk_dst_reset(sk); | |
2107 | ||
2108 | WARN_ON(inet->inet_num && !icsk->icsk_bind_hash); | |
2109 | ||
2110 | sk->sk_error_report(sk); | |
2111 | return err; | |
2112 | } | |
2113 | EXPORT_SYMBOL(tcp_disconnect); | |
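tcp_disconnect() is normally reached when a connected TCP socket is connect()ed again with sa_family == AF_UNSPEC, which the stream-socket code routes to sk->sk_prot->disconnect. A user-space sketch of that call (illustrative; fd is assumed to be a connected TCP socket):

#include <string.h>
#include <sys/socket.h>

/* Ask the stack to abort the association and reset the socket state. */
static int tcp_disassociate(int fd)
{
        struct sockaddr unspec;

        memset(&unspec, 0, sizeof(unspec));
        unspec.sa_family = AF_UNSPEC;

        /* Ends up in sk->sk_prot->disconnect == tcp_disconnect() above. */
        return connect(fd, &unspec, sizeof(unspec));
}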
2114 | ||
2115 | /* | |
2116 | * Socket option code for TCP. | |
2117 | */ | |
2118 | static int do_tcp_setsockopt(struct sock *sk, int level, | |
2119 | int optname, char __user *optval, unsigned int optlen) | |
2120 | { | |
2121 | struct tcp_sock *tp = tcp_sk(sk); | |
2122 | struct inet_connection_sock *icsk = inet_csk(sk); | |
2123 | int val; | |
2124 | int err = 0; | |
2125 | ||
2126 | /* These are data/string values, all the others are ints */ | |
2127 | switch (optname) { | |
2128 | case TCP_CONGESTION: { | |
2129 | char name[TCP_CA_NAME_MAX]; | |
2130 | ||
2131 | if (optlen < 1) | |
2132 | return -EINVAL; | |
2133 | ||
2134 | val = strncpy_from_user(name, optval, | |
2135 | min_t(long, TCP_CA_NAME_MAX-1, optlen)); | |
2136 | if (val < 0) | |
2137 | return -EFAULT; | |
2138 | name[val] = 0; | |
2139 | ||
2140 | lock_sock(sk); | |
2141 | err = tcp_set_congestion_control(sk, name); | |
2142 | release_sock(sk); | |
2143 | return err; | |
2144 | } | |
2145 | case TCP_COOKIE_TRANSACTIONS: { | |
2146 | struct tcp_cookie_transactions ctd; | |
2147 | struct tcp_cookie_values *cvp = NULL; | |
2148 | ||
2149 | if (sizeof(ctd) > optlen) | |
2150 | return -EINVAL; | |
2151 | if (copy_from_user(&ctd, optval, sizeof(ctd))) | |
2152 | return -EFAULT; | |
2153 | ||
2154 | if (ctd.tcpct_used > sizeof(ctd.tcpct_value) || | |
2155 | ctd.tcpct_s_data_desired > TCP_MSS_DESIRED) | |
2156 | return -EINVAL; | |
2157 | ||
2158 | if (ctd.tcpct_cookie_desired == 0) { | |
2159 | /* default to global value */ | |
2160 | } else if ((0x1 & ctd.tcpct_cookie_desired) || | |
2161 | ctd.tcpct_cookie_desired > TCP_COOKIE_MAX || | |
2162 | ctd.tcpct_cookie_desired < TCP_COOKIE_MIN) { | |
2163 | return -EINVAL; | |
2164 | } | |
2165 | ||
2166 | if (TCP_COOKIE_OUT_NEVER & ctd.tcpct_flags) { | |
2167 | /* Supersedes all other values */ | |
2168 | lock_sock(sk); | |
2169 | if (tp->cookie_values != NULL) { | |
2170 | kref_put(&tp->cookie_values->kref, | |
2171 | tcp_cookie_values_release); | |
2172 | tp->cookie_values = NULL; | |
2173 | } | |
2174 | tp->rx_opt.cookie_in_always = 0; /* false */ | |
2175 | tp->rx_opt.cookie_out_never = 1; /* true */ | |
2176 | release_sock(sk); | |
2177 | return err; | |
2178 | } | |
2179 | ||
2180 | /* Allocate ancillary memory before locking. | |
2181 | */ | |
2182 | if (ctd.tcpct_used > 0 || | |
2183 | (tp->cookie_values == NULL && | |
2184 | (sysctl_tcp_cookie_size > 0 || | |
2185 | ctd.tcpct_cookie_desired > 0 || | |
2186 | ctd.tcpct_s_data_desired > 0))) { | |
2187 | cvp = kzalloc(sizeof(*cvp) + ctd.tcpct_used, | |
2188 | GFP_KERNEL); | |
2189 | if (cvp == NULL) | |
2190 | return -ENOMEM; | |
2191 | ||
2192 | kref_init(&cvp->kref); | |
2193 | } | |
2194 | lock_sock(sk); | |
2195 | tp->rx_opt.cookie_in_always = | |
2196 | (TCP_COOKIE_IN_ALWAYS & ctd.tcpct_flags); | |
2197 | tp->rx_opt.cookie_out_never = 0; /* false */ | |
2198 | ||
2199 | if (tp->cookie_values != NULL) { | |
2200 | if (cvp != NULL) { | |
2201 | /* Changed values are recorded by a changed | |
2202 | * pointer, ensuring the cookie will differ, | |
2203 | * without separately hashing each value later. | |
2204 | */ | |
2205 | kref_put(&tp->cookie_values->kref, | |
2206 | tcp_cookie_values_release); | |
2207 | } else { | |
2208 | cvp = tp->cookie_values; | |
2209 | } | |
2210 | } | |
2211 | ||
2212 | if (cvp != NULL) { | |
2213 | cvp->cookie_desired = ctd.tcpct_cookie_desired; | |
2214 | ||
2215 | if (ctd.tcpct_used > 0) { | |
2216 | memcpy(cvp->s_data_payload, ctd.tcpct_value, | |
2217 | ctd.tcpct_used); | |
2218 | cvp->s_data_desired = ctd.tcpct_used; | |
2219 | cvp->s_data_constant = 1; /* true */ | |
2220 | } else { | |
2221 | /* No constant payload data. */ | |
2222 | cvp->s_data_desired = ctd.tcpct_s_data_desired; | |
2223 | cvp->s_data_constant = 0; /* false */ | |
2224 | } | |
2225 | ||
2226 | tp->cookie_values = cvp; | |
2227 | } | |
2228 | release_sock(sk); | |
2229 | return err; | |
2230 | } | |
2231 | default: | |
2232 | /* fallthru */ | |
2233 | break; | |
2234 | } | |
2235 | ||
2236 | if (optlen < sizeof(int)) | |
2237 | return -EINVAL; | |
2238 | ||
2239 | if (get_user(val, (int __user *)optval)) | |
2240 | return -EFAULT; | |
2241 | ||
2242 | lock_sock(sk); | |
2243 | ||
2244 | switch (optname) { | |
2245 | case TCP_MAXSEG: | |
2246 | /* Values greater than the interface MTU won't take effect. However, | |
2247 | * at the point when this call is made we typically don't yet | |
2248 | * know which interface is going to be used. */ | |
2249 | if (val < 64 || val > MAX_TCP_WINDOW) { | |
2250 | err = -EINVAL; | |
2251 | break; | |
2252 | } | |
2253 | tp->rx_opt.user_mss = val; | |
2254 | break; | |
2255 | ||
2256 | case TCP_NODELAY: | |
2257 | if (val) { | |
2258 | /* TCP_NODELAY is weaker than TCP_CORK, so that | |
2259 | * this option on a corked socket is remembered, but | |
2260 | * it is not activated until cork is cleared. | |
2261 | * | |
2262 | * However, when TCP_NODELAY is set we make | |
2263 | * an explicit push, which overrides even TCP_CORK | |
2264 | * for currently queued segments. | |
2265 | */ | |
2266 | tp->nonagle |= TCP_NAGLE_OFF|TCP_NAGLE_PUSH; | |
2267 | tcp_push_pending_frames(sk); | |
2268 | } else { | |
2269 | tp->nonagle &= ~TCP_NAGLE_OFF; | |
2270 | } | |
2271 | break; | |
2272 | ||
2273 | case TCP_THIN_LINEAR_TIMEOUTS: | |
2274 | if (val < 0 || val > 1) | |
2275 | err = -EINVAL; | |
2276 | else | |
2277 | tp->thin_lto = val; | |
2278 | break; | |
2279 | ||
2280 | case TCP_THIN_DUPACK: | |
2281 | if (val < 0 || val > 1) | |
2282 | err = -EINVAL; | |
2283 | else | |
2284 | tp->thin_dupack = val; | |
2285 | break; | |
2286 | ||
2287 | case TCP_CORK: | |
2288 | /* When set, indicates that we should always queue non-full frames. | |
2289 | * Later the user clears this option and we transmit | |
2290 | * any pending partial frames in the queue. This is | |
2291 | * meant to be used alongside sendfile() to get properly | |
2292 | * filled frames when the user (for example) must write | |
2293 | * out headers with a write() call first and then use | |
2294 | * sendfile to send out the data parts. | |
2295 | * | |
2296 | * TCP_CORK can be set together with TCP_NODELAY and it is | |
2297 | * stronger than TCP_NODELAY. | |
2298 | */ | |
2299 | if (val) { | |
2300 | tp->nonagle |= TCP_NAGLE_CORK; | |
2301 | } else { | |
2302 | tp->nonagle &= ~TCP_NAGLE_CORK; | |
2303 | if (tp->nonagle&TCP_NAGLE_OFF) | |
2304 | tp->nonagle |= TCP_NAGLE_PUSH; | |
2305 | tcp_push_pending_frames(sk); | |
2306 | } | |
2307 | break; | |
2308 | ||
2309 | case TCP_KEEPIDLE: | |
2310 | if (val < 1 || val > MAX_TCP_KEEPIDLE) | |
2311 | err = -EINVAL; | |
2312 | else { | |
2313 | tp->keepalive_time = val * HZ; | |
2314 | if (sock_flag(sk, SOCK_KEEPOPEN) && | |
2315 | !((1 << sk->sk_state) & | |
2316 | (TCPF_CLOSE | TCPF_LISTEN))) { | |
2317 | u32 elapsed = keepalive_time_elapsed(tp); | |
2318 | if (tp->keepalive_time > elapsed) | |
2319 | elapsed = tp->keepalive_time - elapsed; | |
2320 | else | |
2321 | elapsed = 0; | |
2322 | inet_csk_reset_keepalive_timer(sk, elapsed); | |
2323 | } | |
2324 | } | |
2325 | break; | |
2326 | case TCP_KEEPINTVL: | |
2327 | if (val < 1 || val > MAX_TCP_KEEPINTVL) | |
2328 | err = -EINVAL; | |
2329 | else | |
2330 | tp->keepalive_intvl = val * HZ; | |
2331 | break; | |
2332 | case TCP_KEEPCNT: | |
2333 | if (val < 1 || val > MAX_TCP_KEEPCNT) | |
2334 | err = -EINVAL; | |
2335 | else | |
2336 | tp->keepalive_probes = val; | |
2337 | break; | |
2338 | case TCP_SYNCNT: | |
2339 | if (val < 1 || val > MAX_TCP_SYNCNT) | |
2340 | err = -EINVAL; | |
2341 | else | |
2342 | icsk->icsk_syn_retries = val; | |
2343 | break; | |
2344 | ||
2345 | case TCP_LINGER2: | |
2346 | if (val < 0) | |
2347 | tp->linger2 = -1; | |
2348 | else if (val > sysctl_tcp_fin_timeout / HZ) | |
2349 | tp->linger2 = 0; | |
2350 | else | |
2351 | tp->linger2 = val * HZ; | |
2352 | break; | |
2353 | ||
2354 | case TCP_DEFER_ACCEPT: | |
2355 | /* Translate value in seconds to number of retransmits */ | |
2356 | icsk->icsk_accept_queue.rskq_defer_accept = | |
2357 | secs_to_retrans(val, TCP_TIMEOUT_INIT / HZ, | |
2358 | TCP_RTO_MAX / HZ); | |
2359 | break; | |
2360 | ||
2361 | case TCP_WINDOW_CLAMP: | |
2362 | if (!val) { | |
2363 | if (sk->sk_state != TCP_CLOSE) { | |
2364 | err = -EINVAL; | |
2365 | break; | |
2366 | } | |
2367 | tp->window_clamp = 0; | |
2368 | } else | |
2369 | tp->window_clamp = val < SOCK_MIN_RCVBUF / 2 ? | |
2370 | SOCK_MIN_RCVBUF / 2 : val; | |
2371 | break; | |
2372 | ||
2373 | case TCP_QUICKACK: | |
2374 | if (!val) { | |
2375 | icsk->icsk_ack.pingpong = 1; | |
2376 | } else { | |
2377 | icsk->icsk_ack.pingpong = 0; | |
2378 | if ((1 << sk->sk_state) & | |
2379 | (TCPF_ESTABLISHED | TCPF_CLOSE_WAIT) && | |
2380 | inet_csk_ack_scheduled(sk)) { | |
2381 | icsk->icsk_ack.pending |= ICSK_ACK_PUSHED; | |
2382 | tcp_cleanup_rbuf(sk, 1); | |
2383 | if (!(val & 1)) | |
2384 | icsk->icsk_ack.pingpong = 1; | |
2385 | } | |
2386 | } | |
2387 | break; | |
2388 | ||
2389 | #ifdef CONFIG_TCP_MD5SIG | |
2390 | case TCP_MD5SIG: | |
2391 | /* Read the IP->Key mappings from userspace */ | |
2392 | err = tp->af_specific->md5_parse(sk, optval, optlen); | |
2393 | break; | |
2394 | #endif | |
2395 | case TCP_USER_TIMEOUT: | |
2396 | /* Cap the maximum time in ms that TCP will retry/retransmit | |
2397 | * before giving up and aborting (ETIMEDOUT) a connection. | |
2398 | */ | |
2399 | icsk->icsk_user_timeout = msecs_to_jiffies(val); | |
2400 | break; | |
2401 | default: | |
2402 | err = -ENOPROTOOPT; | |
2403 | break; | |
2404 | } | |
2405 | ||
2406 | release_sock(sk); | |
2407 | return err; | |
2408 | } | |
2409 | ||
2410 | int tcp_setsockopt(struct sock *sk, int level, int optname, char __user *optval, | |
2411 | unsigned int optlen) | |
2412 | { | |
2413 | struct inet_connection_sock *icsk = inet_csk(sk); | |
2414 | ||
2415 | if (level != SOL_TCP) | |
2416 | return icsk->icsk_af_ops->setsockopt(sk, level, optname, | |
2417 | optval, optlen); | |
2418 | return do_tcp_setsockopt(sk, level, optname, optval, optlen); | |
2419 | } | |
2420 | EXPORT_SYMBOL(tcp_setsockopt); | |
2421 | ||
2422 | #ifdef CONFIG_COMPAT | |
2423 | int compat_tcp_setsockopt(struct sock *sk, int level, int optname, | |
2424 | char __user *optval, unsigned int optlen) | |
2425 | { | |
2426 | if (level != SOL_TCP) | |
2427 | return inet_csk_compat_setsockopt(sk, level, optname, | |
2428 | optval, optlen); | |
2429 | return do_tcp_setsockopt(sk, level, optname, optval, optlen); | |
2430 | } | |
2431 | EXPORT_SYMBOL(compat_tcp_setsockopt); | |
2432 | #endif | |
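The TCP_CORK comment above describes the intended usage pattern; the user-space sketch below follows it (illustrative only: sock, filefd, hdr, hdrlen and len are assumed to exist and be valid). Clearing TCP_CORK at the end is what pushes out the final, possibly partial, frame; TCP_NODELAY may be set alongside it, with the precedence described above.

#include <netinet/in.h>
#include <netinet/tcp.h>
#include <sys/sendfile.h>
#include <sys/socket.h>
#include <unistd.h>

/* Cork, write the headers, stream the body, uncork to flush. */
static void send_response(int sock, int filefd, size_t len,
                          const char *hdr, size_t hdrlen)
{
        int on = 1, off = 0;

        setsockopt(sock, IPPROTO_TCP, TCP_CORK, &on, sizeof(on));
        write(sock, hdr, hdrlen);               /* queued, not yet pushed */
        sendfile(sock, filefd, NULL, len);      /* body fills full frames */
        setsockopt(sock, IPPROTO_TCP, TCP_CORK, &off, sizeof(off));
}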
2433 | ||
2434 | /* Return information about state of tcp endpoint in API format. */ | |
2435 | void tcp_get_info(struct sock *sk, struct tcp_info *info) | |
2436 | { | |
2437 | struct tcp_sock *tp = tcp_sk(sk); | |
2438 | const struct inet_connection_sock *icsk = inet_csk(sk); | |
2439 | u32 now = tcp_time_stamp; | |
2440 | ||
2441 | memset(info, 0, sizeof(*info)); | |
2442 | ||
2443 | info->tcpi_state = sk->sk_state; | |
2444 | info->tcpi_ca_state = icsk->icsk_ca_state; | |
2445 | info->tcpi_retransmits = icsk->icsk_retransmits; | |
2446 | info->tcpi_probes = icsk->icsk_probes_out; | |
2447 | info->tcpi_backoff = icsk->icsk_backoff; | |
2448 | ||
2449 | if (tp->rx_opt.tstamp_ok) | |
2450 | info->tcpi_options |= TCPI_OPT_TIMESTAMPS; | |
2451 | if (tcp_is_sack(tp)) | |
2452 | info->tcpi_options |= TCPI_OPT_SACK; | |
2453 | if (tp->rx_opt.wscale_ok) { | |
2454 | info->tcpi_options |= TCPI_OPT_WSCALE; | |
2455 | info->tcpi_snd_wscale = tp->rx_opt.snd_wscale; | |
2456 | info->tcpi_rcv_wscale = tp->rx_opt.rcv_wscale; | |
2457 | } | |
2458 | ||
2459 | if (tp->ecn_flags&TCP_ECN_OK) | |
2460 | info->tcpi_options |= TCPI_OPT_ECN; | |
2461 | ||
2462 | info->tcpi_rto = jiffies_to_usecs(icsk->icsk_rto); | |
2463 | info->tcpi_ato = jiffies_to_usecs(icsk->icsk_ack.ato); | |
2464 | info->tcpi_snd_mss = tp->mss_cache; | |
2465 | info->tcpi_rcv_mss = icsk->icsk_ack.rcv_mss; | |
2466 | ||
2467 | if (sk->sk_state == TCP_LISTEN) { | |
2468 | info->tcpi_unacked = sk->sk_ack_backlog; | |
2469 | info->tcpi_sacked = sk->sk_max_ack_backlog; | |
2470 | } else { | |
2471 | info->tcpi_unacked = tp->packets_out; | |
2472 | info->tcpi_sacked = tp->sacked_out; | |
2473 | } | |
2474 | info->tcpi_lost = tp->lost_out; | |
2475 | info->tcpi_retrans = tp->retrans_out; | |
2476 | info->tcpi_fackets = tp->fackets_out; | |
2477 | ||
2478 | info->tcpi_last_data_sent = jiffies_to_msecs(now - tp->lsndtime); | |
2479 | info->tcpi_last_data_recv = jiffies_to_msecs(now - icsk->icsk_ack.lrcvtime); | |
2480 | info->tcpi_last_ack_recv = jiffies_to_msecs(now - tp->rcv_tstamp); | |
2481 | ||
2482 | info->tcpi_pmtu = icsk->icsk_pmtu_cookie; | |
2483 | info->tcpi_rcv_ssthresh = tp->rcv_ssthresh; | |
2484 | info->tcpi_rtt = jiffies_to_usecs(tp->srtt)>>3; | |
2485 | info->tcpi_rttvar = jiffies_to_usecs(tp->mdev)>>2; | |
2486 | info->tcpi_snd_ssthresh = tp->snd_ssthresh; | |
2487 | info->tcpi_snd_cwnd = tp->snd_cwnd; | |
2488 | info->tcpi_advmss = tp->advmss; | |
2489 | info->tcpi_reordering = tp->reordering; | |
2490 | ||
2491 | info->tcpi_rcv_rtt = jiffies_to_usecs(tp->rcv_rtt_est.rtt)>>3; | |
2492 | info->tcpi_rcv_space = tp->rcvq_space.space; | |
2493 | ||
2494 | info->tcpi_total_retrans = tp->total_retrans; | |
2495 | } | |
2496 | EXPORT_SYMBOL_GPL(tcp_get_info); | |
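The consumer of tcp_get_info() is getsockopt(TCP_INFO). Below is a user-space sketch (assuming a connected TCP socket fd) that reads back a few of the fields filled in above; note that tcpi_rtt and tcpi_rttvar are reported in microseconds, as converted with jiffies_to_usecs() above.

#include <stdio.h>
#include <netinet/in.h>
#include <netinet/tcp.h>
#include <sys/socket.h>

/* Dump a few counters from the kernel's tcp_info snapshot. */
static void dump_tcp_info(int fd)
{
        struct tcp_info info;
        socklen_t len = sizeof(info);

        if (getsockopt(fd, IPPROTO_TCP, TCP_INFO, &info, &len) == 0)
                printf("rtt=%uus rttvar=%uus snd_cwnd=%u retrans=%u\n",
                       info.tcpi_rtt, info.tcpi_rttvar,
                       info.tcpi_snd_cwnd, info.tcpi_total_retrans);
}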
2497 | ||
2498 | static int do_tcp_getsockopt(struct sock *sk, int level, | |
2499 | int optname, char __user *optval, int __user *optlen) | |
2500 | { | |
2501 | struct inet_connection_sock *icsk = inet_csk(sk); | |
2502 | struct tcp_sock *tp = tcp_sk(sk); | |
2503 | int val, len; | |
2504 | ||
2505 | if (get_user(len, optlen)) | |
2506 | return -EFAULT; | |
2507 | ||
2508 | len = min_t(unsigned int, len, sizeof(int)); | |
2509 | ||
2510 | if (len < 0) | |
2511 | return -EINVAL; | |
2512 | ||
2513 | switch (optname) { | |
2514 | case TCP_MAXSEG: | |
2515 | val = tp->mss_cache; | |
2516 | if (!val && ((1 << sk->sk_state) & (TCPF_CLOSE | TCPF_LISTEN))) | |
2517 | val = tp->rx_opt.user_mss; | |
2518 | break; | |
2519 | case TCP_NODELAY: | |
2520 | val = !!(tp->nonagle&TCP_NAGLE_OFF); | |
2521 | break; | |
2522 | case TCP_CORK: | |
2523 | val = !!(tp->nonagle&TCP_NAGLE_CORK); | |
2524 | break; | |
2525 | case TCP_KEEPIDLE: | |
2526 | val = keepalive_time_when(tp) / HZ; | |
2527 | break; | |
2528 | case TCP_KEEPINTVL: | |
2529 | val = keepalive_intvl_when(tp) / HZ; | |
2530 | break; | |
2531 | case TCP_KEEPCNT: | |
2532 | val = keepalive_probes(tp); | |
2533 | break; | |
2534 | case TCP_SYNCNT: | |
2535 | val = icsk->icsk_syn_retries ? : sysctl_tcp_syn_retries; | |
2536 | break; | |
2537 | case TCP_LINGER2: | |
2538 | val = tp->linger2; | |
2539 | if (val >= 0) | |
2540 | val = (val ? : sysctl_tcp_fin_timeout) / HZ; | |
2541 | break; | |
2542 | case TCP_DEFER_ACCEPT: | |
2543 | val = retrans_to_secs(icsk->icsk_accept_queue.rskq_defer_accept, | |
2544 | TCP_TIMEOUT_INIT / HZ, TCP_RTO_MAX / HZ); | |
2545 | break; | |
2546 | case TCP_WINDOW_CLAMP: | |
2547 | val = tp->window_clamp; | |
2548 | break; | |
2549 | case TCP_INFO: { | |
2550 | struct tcp_info info; | |
2551 | ||
2552 | if (get_user(len, optlen)) | |
2553 | return -EFAULT; | |
2554 | ||
2555 | tcp_get_info(sk, &info); | |
2556 | ||
2557 | len = min_t(unsigned int, len, sizeof(info)); | |
2558 | if (put_user(len, optlen)) | |
2559 | return -EFAULT; | |
2560 | if (copy_to_user(optval, &info, len)) | |
2561 | return -EFAULT; | |
2562 | return 0; | |
2563 | } | |
2564 | case TCP_QUICKACK: | |
2565 | val = !icsk->icsk_ack.pingpong; | |
2566 | break; | |
2567 | ||
2568 | case TCP_CONGESTION: | |
2569 | if (get_user(len, optlen)) | |
2570 | return -EFAULT; | |
2571 | len = min_t(unsigned int, len, TCP_CA_NAME_MAX); | |
2572 | if (put_user(len, optlen)) | |
2573 | return -EFAULT; | |
2574 | if (copy_to_user(optval, icsk->icsk_ca_ops->name, len)) | |
2575 | return -EFAULT; | |
2576 | return 0; | |
2577 | ||
2578 | case TCP_COOKIE_TRANSACTIONS: { | |
2579 | struct tcp_cookie_transactions ctd; | |
2580 | struct tcp_cookie_values *cvp = tp->cookie_values; | |
2581 | ||
2582 | if (get_user(len, optlen)) | |
2583 | return -EFAULT; | |
2584 | if (len < sizeof(ctd)) | |
2585 | return -EINVAL; | |
2586 | ||
2587 | memset(&ctd, 0, sizeof(ctd)); | |
2588 | ctd.tcpct_flags = (tp->rx_opt.cookie_in_always ? | |
2589 | TCP_COOKIE_IN_ALWAYS : 0) | |
2590 | | (tp->rx_opt.cookie_out_never ? | |
2591 | TCP_COOKIE_OUT_NEVER : 0); | |
2592 | ||
2593 | if (cvp != NULL) { | |
2594 | ctd.tcpct_flags |= (cvp->s_data_in ? | |
2595 | TCP_S_DATA_IN : 0) | |
2596 | | (cvp->s_data_out ? | |
2597 | TCP_S_DATA_OUT : 0); | |
2598 | ||
2599 | ctd.tcpct_cookie_desired = cvp->cookie_desired; | |
2600 | ctd.tcpct_s_data_desired = cvp->s_data_desired; | |
2601 | ||
2602 | memcpy(&ctd.tcpct_value[0], &cvp->cookie_pair[0], | |
2603 | cvp->cookie_pair_size); | |
2604 | ctd.tcpct_used = cvp->cookie_pair_size; | |
2605 | } | |
2606 | ||
2607 | if (put_user(sizeof(ctd), optlen)) | |
2608 | return -EFAULT; | |
2609 | if (copy_to_user(optval, &ctd, sizeof(ctd))) | |
2610 | return -EFAULT; | |
2611 | return 0; | |
2612 | } | |
2613 | case TCP_THIN_LINEAR_TIMEOUTS: | |
2614 | val = tp->thin_lto; | |
2615 | break; | |
2616 | case TCP_THIN_DUPACK: | |
2617 | val = tp->thin_dupack; | |
2618 | break; | |
2619 | ||
2620 | case TCP_USER_TIMEOUT: | |
2621 | val = jiffies_to_msecs(icsk->icsk_user_timeout); | |
2622 | break; | |
2623 | default: | |
2624 | return -ENOPROTOOPT; | |
2625 | } | |
2626 | ||
2627 | if (put_user(len, optlen)) | |
2628 | return -EFAULT; | |
2629 | if (copy_to_user(optval, &val, len)) | |
2630 | return -EFAULT; | |
2631 | return 0; | |
2632 | } | |
2633 | ||
2634 | int tcp_getsockopt(struct sock *sk, int level, int optname, char __user *optval, | |
2635 | int __user *optlen) | |
2636 | { | |
2637 | struct inet_connection_sock *icsk = inet_csk(sk); | |
2638 | ||
2639 | if (level != SOL_TCP) | |
2640 | return icsk->icsk_af_ops->getsockopt(sk, level, optname, | |
2641 | optval, optlen); | |
2642 | return do_tcp_getsockopt(sk, level, optname, optval, optlen); | |
2643 | } | |
2644 | EXPORT_SYMBOL(tcp_getsockopt); | |
2645 | ||
2646 | #ifdef CONFIG_COMPAT | |
2647 | int compat_tcp_getsockopt(struct sock *sk, int level, int optname, | |
2648 | char __user *optval, int __user *optlen) | |
2649 | { | |
2650 | if (level != SOL_TCP) | |
2651 | return inet_csk_compat_getsockopt(sk, level, optname, | |
2652 | optval, optlen); | |
2653 | return do_tcp_getsockopt(sk, level, optname, optval, optlen); | |
2654 | } | |
2655 | EXPORT_SYMBOL(compat_tcp_getsockopt); | |
2656 | #endif | |
2657 | ||
2658 | struct sk_buff *tcp_tso_segment(struct sk_buff *skb, int features) | |
2659 | { | |
2660 | struct sk_buff *segs = ERR_PTR(-EINVAL); | |
2661 | struct tcphdr *th; | |
2662 | unsigned thlen; | |
2663 | unsigned int seq; | |
2664 | __be32 delta; | |
2665 | unsigned int oldlen; | |
2666 | unsigned int mss; | |
2667 | ||
2668 | if (!pskb_may_pull(skb, sizeof(*th))) | |
2669 | goto out; | |
2670 | ||
2671 | th = tcp_hdr(skb); | |
2672 | thlen = th->doff * 4; | |
2673 | if (thlen < sizeof(*th)) | |
2674 | goto out; | |
2675 | ||
2676 | if (!pskb_may_pull(skb, thlen)) | |
2677 | goto out; | |
2678 | ||
2679 | oldlen = (u16)~skb->len; | |
2680 | __skb_pull(skb, thlen); | |
2681 | ||
2682 | mss = skb_shinfo(skb)->gso_size; | |
2683 | if (unlikely(skb->len <= mss)) | |
2684 | goto out; | |
2685 | ||
2686 | if (skb_gso_ok(skb, features | NETIF_F_GSO_ROBUST)) { | |
2687 | /* Packet is from an untrusted source, reset gso_segs. */ | |
2688 | int type = skb_shinfo(skb)->gso_type; | |
2689 | ||
2690 | if (unlikely(type & | |
2691 | ~(SKB_GSO_TCPV4 | | |
2692 | SKB_GSO_DODGY | | |
2693 | SKB_GSO_TCP_ECN | | |
2694 | SKB_GSO_TCPV6 | | |
2695 | 0) || | |
2696 | !(type & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6)))) | |
2697 | goto out; | |
2698 | ||
2699 | skb_shinfo(skb)->gso_segs = DIV_ROUND_UP(skb->len, mss); | |
2700 | ||
2701 | segs = NULL; | |
2702 | goto out; | |
2703 | } | |
2704 | ||
2705 | segs = skb_segment(skb, features); | |
2706 | if (IS_ERR(segs)) | |
2707 | goto out; | |
2708 | ||
2709 | delta = htonl(oldlen + (thlen + mss)); | |
2710 | ||
2711 | skb = segs; | |
2712 | th = tcp_hdr(skb); | |
2713 | seq = ntohl(th->seq); | |
2714 | ||
2715 | do { | |
2716 | th->fin = th->psh = 0; | |
2717 | ||
2718 | th->check = ~csum_fold((__force __wsum)((__force u32)th->check + | |
2719 | (__force u32)delta)); | |
2720 | if (skb->ip_summed != CHECKSUM_PARTIAL) | |
2721 | th->check = | |
2722 | csum_fold(csum_partial(skb_transport_header(skb), | |
2723 | thlen, skb->csum)); | |
2724 | ||
2725 | seq += mss; | |
2726 | skb = skb->next; | |
2727 | th = tcp_hdr(skb); | |
2728 | ||
2729 | th->seq = htonl(seq); | |
2730 | th->cwr = 0; | |
2731 | } while (skb->next); | |
2732 | ||
2733 | delta = htonl(oldlen + (skb->tail - skb->transport_header) + | |
2734 | skb->data_len); | |
2735 | th->check = ~csum_fold((__force __wsum)((__force u32)th->check + | |
2736 | (__force u32)delta)); | |
2737 | if (skb->ip_summed != CHECKSUM_PARTIAL) | |
2738 | th->check = csum_fold(csum_partial(skb_transport_header(skb), | |
2739 | thlen, skb->csum)); | |
2740 | ||
2741 | out: | |
2742 | return segs; | |
2743 | } | |
2744 | EXPORT_SYMBOL(tcp_tso_segment); | |
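The checksum arithmetic above is the standard ones-complement incremental update (RFC 1624): oldlen = (u16)~skb->len precomputes the complement of the old length, and each segment's stored check is then patched with the delta for its new covered length instead of being recomputed over the payload. A standalone illustration follows, with made-up numbers; it assumes only that the stored checksum is the complement of the ones-complement sum, as in TCP.

#include <stdint.h>
#include <stdio.h>

/* Patch a checksum for a changed 16-bit length field (RFC 1624 style). */
static uint16_t csum_adjust(uint16_t check, uint16_t old_len, uint16_t new_len)
{
        uint32_t sum = (uint16_t)~check;        /* back to the raw sum */

        sum += (uint16_t)~old_len;              /* remove the old length */
        sum += new_len;                         /* account for the new one */
        sum = (sum & 0xffff) + (sum >> 16);     /* fold the carries */
        sum = (sum & 0xffff) + (sum >> 16);

        return (uint16_t)~sum;
}

int main(void)
{
        /* e.g. a 64000-byte superpacket re-segmented into 1448-byte pieces */
        printf("patched check: 0x%04x\n", csum_adjust(0x1c46, 64000, 1448));
        return 0;
}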
2745 | ||
2746 | struct sk_buff **tcp_gro_receive(struct sk_buff **head, struct sk_buff *skb) | |
2747 | { | |
2748 | struct sk_buff **pp = NULL; | |
2749 | struct sk_buff *p; | |
2750 | struct tcphdr *th; | |
2751 | struct tcphdr *th2; | |
2752 | unsigned int len; | |
2753 | unsigned int thlen; | |
2754 | __be32 flags; | |
2755 | unsigned int mss = 1; | |
2756 | unsigned int hlen; | |
2757 | unsigned int off; | |
2758 | int flush = 1; | |
2759 | int i; | |
2760 | ||
2761 | off = skb_gro_offset(skb); | |
2762 | hlen = off + sizeof(*th); | |
2763 | th = skb_gro_header_fast(skb, off); | |
2764 | if (skb_gro_header_hard(skb, hlen)) { | |
2765 | th = skb_gro_header_slow(skb, hlen, off); | |
2766 | if (unlikely(!th)) | |
2767 | goto out; | |
2768 | } | |
2769 | ||
2770 | thlen = th->doff * 4; | |
2771 | if (thlen < sizeof(*th)) | |
2772 | goto out; | |
2773 | ||
2774 | hlen = off + thlen; | |
2775 | if (skb_gro_header_hard(skb, hlen)) { | |
2776 | th = skb_gro_header_slow(skb, hlen, off); | |
2777 | if (unlikely(!th)) | |
2778 | goto out; | |
2779 | } | |
2780 | ||
2781 | skb_gro_pull(skb, thlen); | |
2782 | ||
2783 | len = skb_gro_len(skb); | |
2784 | flags = tcp_flag_word(th); | |
2785 | ||
2786 | for (; (p = *head); head = &p->next) { | |
2787 | if (!NAPI_GRO_CB(p)->same_flow) | |
2788 | continue; | |
2789 | ||
2790 | th2 = tcp_hdr(p); | |
2791 | ||
2792 | if (*(u32 *)&th->source ^ *(u32 *)&th2->source) { | |
2793 | NAPI_GRO_CB(p)->same_flow = 0; | |
2794 | continue; | |
2795 | } | |
2796 | ||
2797 | goto found; | |
2798 | } | |
2799 | ||
2800 | goto out_check_final; | |
2801 | ||
2802 | found: | |
2803 | flush = NAPI_GRO_CB(p)->flush; | |
2804 | flush |= (__force int)(flags & TCP_FLAG_CWR); | |
2805 | flush |= (__force int)((flags ^ tcp_flag_word(th2)) & | |
2806 | ~(TCP_FLAG_CWR | TCP_FLAG_FIN | TCP_FLAG_PSH)); | |
2807 | flush |= (__force int)(th->ack_seq ^ th2->ack_seq); | |
2808 | for (i = sizeof(*th); i < thlen; i += 4) | |
2809 | flush |= *(u32 *)((u8 *)th + i) ^ | |
2810 | *(u32 *)((u8 *)th2 + i); | |
2811 | ||
2812 | mss = skb_shinfo(p)->gso_size; | |
2813 | ||
2814 | flush |= (len - 1) >= mss; | |
2815 | flush |= (ntohl(th2->seq) + skb_gro_len(p)) ^ ntohl(th->seq); | |
2816 | ||
2817 | if (flush || skb_gro_receive(head, skb)) { | |
2818 | mss = 1; | |
2819 | goto out_check_final; | |
2820 | } | |
2821 | ||
2822 | p = *head; | |
2823 | th2 = tcp_hdr(p); | |
2824 | tcp_flag_word(th2) |= flags & (TCP_FLAG_FIN | TCP_FLAG_PSH); | |
2825 | ||
2826 | out_check_final: | |
2827 | flush = len < mss; | |
2828 | flush |= (__force int)(flags & (TCP_FLAG_URG | TCP_FLAG_PSH | | |
2829 | TCP_FLAG_RST | TCP_FLAG_SYN | | |
2830 | TCP_FLAG_FIN)); | |
2831 | ||
2832 | if (p && (!NAPI_GRO_CB(skb)->same_flow || flush)) | |
2833 | pp = head; | |
2834 | ||
2835 | out: | |
2836 | NAPI_GRO_CB(skb)->flush |= flush; | |
2837 | ||
2838 | return pp; | |
2839 | } | |
2840 | EXPORT_SYMBOL(tcp_gro_receive); | |
2841 | ||
2842 | int tcp_gro_complete(struct sk_buff *skb) | |
2843 | { | |
2844 | struct tcphdr *th = tcp_hdr(skb); | |
2845 | ||
2846 | skb->csum_start = skb_transport_header(skb) - skb->head; | |
2847 | skb->csum_offset = offsetof(struct tcphdr, check); | |
2848 | skb->ip_summed = CHECKSUM_PARTIAL; | |
2849 | ||
2850 | skb_shinfo(skb)->gso_segs = NAPI_GRO_CB(skb)->count; | |
2851 | ||
2852 | if (th->cwr) | |
2853 | skb_shinfo(skb)->gso_type |= SKB_GSO_TCP_ECN; | |
2854 | ||
2855 | return 0; | |
2856 | } | |
2857 | EXPORT_SYMBOL(tcp_gro_complete); | |
2858 | ||
2859 | #ifdef CONFIG_TCP_MD5SIG | |
2860 | static unsigned long tcp_md5sig_users; | |
2861 | static struct tcp_md5sig_pool * __percpu *tcp_md5sig_pool; | |
2862 | static DEFINE_SPINLOCK(tcp_md5sig_pool_lock); | |
2863 | ||
2864 | static void __tcp_free_md5sig_pool(struct tcp_md5sig_pool * __percpu *pool) | |
2865 | { | |
2866 | int cpu; | |
2867 | for_each_possible_cpu(cpu) { | |
2868 | struct tcp_md5sig_pool *p = *per_cpu_ptr(pool, cpu); | |
2869 | if (p) { | |
2870 | if (p->md5_desc.tfm) | |
2871 | crypto_free_hash(p->md5_desc.tfm); | |
2872 | kfree(p); | |
2873 | } | |
2874 | } | |
2875 | free_percpu(pool); | |
2876 | } | |
2877 | ||
2878 | void tcp_free_md5sig_pool(void) | |
2879 | { | |
2880 | struct tcp_md5sig_pool * __percpu *pool = NULL; | |
2881 | ||
2882 | spin_lock_bh(&tcp_md5sig_pool_lock); | |
2883 | if (--tcp_md5sig_users == 0) { | |
2884 | pool = tcp_md5sig_pool; | |
2885 | tcp_md5sig_pool = NULL; | |
2886 | } | |
2887 | spin_unlock_bh(&tcp_md5sig_pool_lock); | |
2888 | if (pool) | |
2889 | __tcp_free_md5sig_pool(pool); | |
2890 | } | |
2891 | EXPORT_SYMBOL(tcp_free_md5sig_pool); | |
2892 | ||
2893 | static struct tcp_md5sig_pool * __percpu * | |
2894 | __tcp_alloc_md5sig_pool(struct sock *sk) | |
2895 | { | |
2896 | int cpu; | |
2897 | struct tcp_md5sig_pool * __percpu *pool; | |
2898 | ||
2899 | pool = alloc_percpu(struct tcp_md5sig_pool *); | |
2900 | if (!pool) | |
2901 | return NULL; | |
2902 | ||
2903 | for_each_possible_cpu(cpu) { | |
2904 | struct tcp_md5sig_pool *p; | |
2905 | struct crypto_hash *hash; | |
2906 | ||
2907 | p = kzalloc(sizeof(*p), sk->sk_allocation); | |
2908 | if (!p) | |
2909 | goto out_free; | |
2910 | *per_cpu_ptr(pool, cpu) = p; | |
2911 | ||
2912 | hash = crypto_alloc_hash("md5", 0, CRYPTO_ALG_ASYNC); | |
2913 | if (!hash || IS_ERR(hash)) | |
2914 | goto out_free; | |
2915 | ||
2916 | p->md5_desc.tfm = hash; | |
2917 | } | |
2918 | return pool; | |
2919 | out_free: | |
2920 | __tcp_free_md5sig_pool(pool); | |
2921 | return NULL; | |
2922 | } | |
2923 | ||
2924 | struct tcp_md5sig_pool * __percpu *tcp_alloc_md5sig_pool(struct sock *sk) | |
2925 | { | |
2926 | struct tcp_md5sig_pool * __percpu *pool; | |
2927 | int alloc = 0; | |
2928 | ||
2929 | retry: | |
2930 | spin_lock_bh(&tcp_md5sig_pool_lock); | |
2931 | pool = tcp_md5sig_pool; | |
2932 | if (tcp_md5sig_users++ == 0) { | |
2933 | alloc = 1; | |
2934 | spin_unlock_bh(&tcp_md5sig_pool_lock); | |
2935 | } else if (!pool) { | |
2936 | tcp_md5sig_users--; | |
2937 | spin_unlock_bh(&tcp_md5sig_pool_lock); | |
2938 | cpu_relax(); | |
2939 | goto retry; | |
2940 | } else | |
2941 | spin_unlock_bh(&tcp_md5sig_pool_lock); | |
2942 | ||
2943 | if (alloc) { | |
2944 | /* we cannot hold spinlock here because this may sleep. */ | |
2945 | struct tcp_md5sig_pool * __percpu *p; | |
2946 | ||
2947 | p = __tcp_alloc_md5sig_pool(sk); | |
2948 | spin_lock_bh(&tcp_md5sig_pool_lock); | |
2949 | if (!p) { | |
2950 | tcp_md5sig_users--; | |
2951 | spin_unlock_bh(&tcp_md5sig_pool_lock); | |
2952 | return NULL; | |
2953 | } | |
2954 | pool = tcp_md5sig_pool; | |
2955 | if (pool) { | |
2956 | /* oops, it has already been assigned. */ | |
2957 | spin_unlock_bh(&tcp_md5sig_pool_lock); | |
2958 | __tcp_free_md5sig_pool(p); | |
2959 | } else { | |
2960 | tcp_md5sig_pool = pool = p; | |
2961 | spin_unlock_bh(&tcp_md5sig_pool_lock); | |
2962 | } | |
2963 | } | |
2964 | return pool; | |
2965 | } | |
2966 | EXPORT_SYMBOL(tcp_alloc_md5sig_pool); | |
2967 | ||
2968 | ||
2969 | /** | |
2970 | * tcp_get_md5sig_pool - get md5sig_pool for this user | |
2971 | * | |
2972 | * We use a percpu structure, so if we succeed, we exit with preemption | |
2973 | * and BH disabled, to make sure that another thread or softirq handler | |
2974 | * won't try to get the same context. | |
2975 | */ | |
2976 | struct tcp_md5sig_pool *tcp_get_md5sig_pool(void) | |
2977 | { | |
2978 | struct tcp_md5sig_pool * __percpu *p; | |
2979 | ||
2980 | local_bh_disable(); | |
2981 | ||
2982 | spin_lock(&tcp_md5sig_pool_lock); | |
2983 | p = tcp_md5sig_pool; | |
2984 | if (p) | |
2985 | tcp_md5sig_users++; | |
2986 | spin_unlock(&tcp_md5sig_pool_lock); | |
2987 | ||
2988 | if (p) | |
2989 | return *this_cpu_ptr(p); | |
2990 | ||
2991 | local_bh_enable(); | |
2992 | return NULL; | |
2993 | } | |
2994 | EXPORT_SYMBOL(tcp_get_md5sig_pool); | |
2995 | ||
2996 | void tcp_put_md5sig_pool(void) | |
2997 | { | |
2998 | local_bh_enable(); | |
2999 | tcp_free_md5sig_pool(); | |
3000 | } | |
3001 | EXPORT_SYMBOL(tcp_put_md5sig_pool); | |
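A hedged sketch of how a caller is expected to combine the helpers in this block: take the per-cpu pool (tcp_get_md5sig_pool() returns with BH disabled), run one MD5 pass over header, payload and key, then drop the reference. The real signature code also mixes in an address pseudo-header before the TCP header; that step is omitted here, so this is an outline of the calling pattern, not the kernel's actual per-family hashing function.

/* Sketch only: MD5 over TCP header, skb data and key using the pool above. */
static int example_md5_hash(u8 *md5_hash, struct tcp_md5sig_key *key,
                            struct sk_buff *skb)
{
        struct tcp_md5sig_pool *hp;
        struct tcphdr *th = tcp_hdr(skb);

        hp = tcp_get_md5sig_pool();             /* BH now disabled */
        if (!hp)
                return 1;

        if (crypto_hash_init(&hp->md5_desc) ||
            tcp_md5_hash_header(hp, th) ||
            tcp_md5_hash_skb_data(hp, skb, th->doff << 2) ||
            tcp_md5_hash_key(hp, key) ||
            crypto_hash_final(&hp->md5_desc, md5_hash)) {
                tcp_put_md5sig_pool();
                return 1;
        }

        tcp_put_md5sig_pool();                  /* re-enables BH */
        return 0;
}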
3002 | ||
3003 | int tcp_md5_hash_header(struct tcp_md5sig_pool *hp, | |
3004 | struct tcphdr *th) | |
3005 | { | |
3006 | struct scatterlist sg; | |
3007 | int err; | |
3008 | ||
3009 | __sum16 old_checksum = th->check; | |
3010 | th->check = 0; | |
3011 | /* TCP options are not included in the hash (only the base header) */ | |
3012 | sg_init_one(&sg, th, sizeof(struct tcphdr)); | |
3013 | err = crypto_hash_update(&hp->md5_desc, &sg, sizeof(struct tcphdr)); | |
3014 | th->check = old_checksum; | |
3015 | return err; | |
3016 | } | |
3017 | EXPORT_SYMBOL(tcp_md5_hash_header); | |
3018 | ||
3019 | int tcp_md5_hash_skb_data(struct tcp_md5sig_pool *hp, | |
3020 | struct sk_buff *skb, unsigned header_len) | |
3021 | { | |
3022 | struct scatterlist sg; | |
3023 | const struct tcphdr *tp = tcp_hdr(skb); | |
3024 | struct hash_desc *desc = &hp->md5_desc; | |
3025 | unsigned i; | |
3026 | const unsigned head_data_len = skb_headlen(skb) > header_len ? | |
3027 | skb_headlen(skb) - header_len : 0; | |
3028 | const struct skb_shared_info *shi = skb_shinfo(skb); | |
3029 | struct sk_buff *frag_iter; | |
3030 | ||
3031 | sg_init_table(&sg, 1); | |
3032 | ||
3033 | sg_set_buf(&sg, ((u8 *) tp) + header_len, head_data_len); | |
3034 | if (crypto_hash_update(desc, &sg, head_data_len)) | |
3035 | return 1; | |
3036 | ||
3037 | for (i = 0; i < shi->nr_frags; ++i) { | |
3038 | const struct skb_frag_struct *f = &shi->frags[i]; | |
3039 | sg_set_page(&sg, f->page, f->size, f->page_offset); | |
3040 | if (crypto_hash_update(desc, &sg, f->size)) | |
3041 | return 1; | |
3042 | } | |
3043 | ||
3044 | skb_walk_frags(skb, frag_iter) | |
3045 | if (tcp_md5_hash_skb_data(hp, frag_iter, 0)) | |
3046 | return 1; | |
3047 | ||
3048 | return 0; | |
3049 | } | |
3050 | EXPORT_SYMBOL(tcp_md5_hash_skb_data); | |
3051 | ||
3052 | int tcp_md5_hash_key(struct tcp_md5sig_pool *hp, struct tcp_md5sig_key *key) | |
3053 | { | |
3054 | struct scatterlist sg; | |
3055 | ||
3056 | sg_init_one(&sg, key->key, key->keylen); | |
3057 | return crypto_hash_update(&hp->md5_desc, &sg, key->keylen); | |
3058 | } | |
3059 | EXPORT_SYMBOL(tcp_md5_hash_key); | |
3060 | ||
3061 | #endif | |
3062 | ||
3063 | /** | |
3064 | * Each Responder maintains up to two secret values concurrently for | |
3065 | * efficient secret rollover. Each secret value has 4 states: | |
3066 | * | |
3067 | * Generating. (tcp_secret_generating != tcp_secret_primary) | |
3068 | * Generates new Responder-Cookies, but not yet used for primary | |
3069 | * verification. This is a short-term state, typically lasting only | |
3070 | * one round trip time (RTT). | |
3071 | * | |
3072 | * Primary. (tcp_secret_generating == tcp_secret_primary) | |
3073 | * Used both for generation and primary verification. | |
3074 | * | |
3075 | * Retiring. (tcp_secret_retiring != tcp_secret_secondary) | |
3076 | * Used for verification, until the first failure that can be | |
3077 | * verified by the newer Generating secret. At that time, this | |
3078 | * cookie's state is changed to Secondary, and the Generating | |
3079 | * cookie's state is changed to Primary. This is a short-term state, | |
3080 | * typically lasting only one round trip time (RTT). | |
3081 | * | |
3082 | * Secondary. (tcp_secret_retiring == tcp_secret_secondary) | |
3083 | * Used for secondary verification, after primary verification | |
3084 | * failures. This state lasts no more than twice the Maximum Segment | |
3085 | * Lifetime (2MSL). Then, the secret is discarded. | |
3086 | */ | |
3087 | struct tcp_cookie_secret { | |
3088 | /* The secret is divided into two parts. The digest part is the | |
3089 | * equivalent of previously hashing a secret and saving the state, | |
3090 | * and serves as an initialization vector (IV). The message part | |
3091 | * serves as the trailing secret. | |
3092 | */ | |
3093 | u32 secrets[COOKIE_WORKSPACE_WORDS]; | |
3094 | unsigned long expires; | |
3095 | }; | |
3096 | ||
3097 | #define TCP_SECRET_1MSL (HZ * TCP_PAWS_MSL) | |
3098 | #define TCP_SECRET_2MSL (HZ * TCP_PAWS_MSL * 2) | |
3099 | #define TCP_SECRET_LIFE (HZ * 600) | |
3100 | ||
3101 | static struct tcp_cookie_secret tcp_secret_one; | |
3102 | static struct tcp_cookie_secret tcp_secret_two; | |
3103 | ||
3104 | /* Essentially a circular list, without dynamic allocation. */ | |
3105 | static struct tcp_cookie_secret *tcp_secret_generating; | |
3106 | static struct tcp_cookie_secret *tcp_secret_primary; | |
3107 | static struct tcp_cookie_secret *tcp_secret_retiring; | |
3108 | static struct tcp_cookie_secret *tcp_secret_secondary; | |
3109 | ||
3110 | static DEFINE_SPINLOCK(tcp_secret_locker); | |
3111 | ||
3112 | /* Select a pseudo-random word in the cookie workspace. | |
3113 | */ | |
3114 | static inline u32 tcp_cookie_work(const u32 *ws, const int n) | |
3115 | { | |
3116 | return ws[COOKIE_DIGEST_WORDS + ((COOKIE_MESSAGE_WORDS-1) & ws[n])]; | |
3117 | } | |
3118 | ||
3119 | /* Fill bakery[COOKIE_WORKSPACE_WORDS] with generator, updating as needed. | |
3120 | * Called in softirq context. | |
3121 | * Returns: 0 for success. | |
3122 | */ | |
3123 | int tcp_cookie_generator(u32 *bakery) | |
3124 | { | |
3125 | unsigned long jiffy = jiffies; | |
3126 | ||
3127 | if (unlikely(time_after_eq(jiffy, tcp_secret_generating->expires))) { | |
3128 | spin_lock_bh(&tcp_secret_locker); | |
3129 | if (!time_after_eq(jiffy, tcp_secret_generating->expires)) { | |
3130 | /* refreshed by another */ | |
3131 | memcpy(bakery, | |
3132 | &tcp_secret_generating->secrets[0], | |
3133 | COOKIE_WORKSPACE_WORDS); | |
3134 | } else { | |
3135 | /* still needs refreshing */ | |
3136 | get_random_bytes(bakery, COOKIE_WORKSPACE_WORDS); | |
3137 | ||
3138 | /* The first time, paranoia assumes that the | |
3139 | * randomization function isn't as strong. But, | |
3140 | * this secret initialization is delayed until | |
3141 | * the last possible moment (packet arrival). | |
3142 | * Although that time is observable, it is | |
3143 | * unpredictably variable. Mash in the most | |
3144 | * volatile clock bits available, and expire the | |
3145 | * secret extra quickly. | |
3146 | */ | |
3147 | if (unlikely(tcp_secret_primary->expires == | |
3148 | tcp_secret_secondary->expires)) { | |
3149 | struct timespec tv; | |
3150 | ||
3151 | getnstimeofday(&tv); | |
3152 | bakery[COOKIE_DIGEST_WORDS+0] ^= | |
3153 | (u32)tv.tv_nsec; | |
3154 | ||
3155 | tcp_secret_secondary->expires = jiffy | |
3156 | + TCP_SECRET_1MSL | |
3157 | + (0x0f & tcp_cookie_work(bakery, 0)); | |
3158 | } else { | |
3159 | tcp_secret_secondary->expires = jiffy | |
3160 | + TCP_SECRET_LIFE | |
3161 | + (0xff & tcp_cookie_work(bakery, 1)); | |
3162 | tcp_secret_primary->expires = jiffy | |
3163 | + TCP_SECRET_2MSL | |
3164 | + (0x1f & tcp_cookie_work(bakery, 2)); | |
3165 | } | |
3166 | memcpy(&tcp_secret_secondary->secrets[0], | |
3167 | bakery, COOKIE_WORKSPACE_WORDS); | |
3168 | ||
3169 | rcu_assign_pointer(tcp_secret_generating, | |
3170 | tcp_secret_secondary); | |
3171 | rcu_assign_pointer(tcp_secret_retiring, | |
3172 | tcp_secret_primary); | |
3173 | /* | |
3174 | * Neither call_rcu() nor synchronize_rcu() needed. | |
3175 | * Retiring data is not freed. It is replaced after | |
3176 | * further (locked) pointer updates, and a quiet time | |
3177 | * (minimum 1MSL, maximum LIFE - 2MSL). | |
3178 | */ | |
3179 | } | |
3180 | spin_unlock_bh(&tcp_secret_locker); | |
3181 | } else { | |
3182 | rcu_read_lock_bh(); | |
3183 | memcpy(bakery, | |
3184 | &rcu_dereference(tcp_secret_generating)->secrets[0], | |
3185 | COOKIE_WORKSPACE_WORDS); | |
3186 | rcu_read_unlock_bh(); | |
3187 | } | |
3188 | return 0; | |
3189 | } | |
3190 | EXPORT_SYMBOL(tcp_cookie_generator); | |
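A standalone illustration (outside the kernel, with arbitrary time units) of the rotation the refresh branch above performs: the secondary slot receives the new secret and becomes the generating one, while the old primary keeps verifying as the retiring one until roughly 2MSL has passed; the primary/secondary roles themselves swap later, as described in the state comment further up. The names and numbers below are assumptions of the sketch.

#include <stdio.h>

struct demo_secret { const char *name; unsigned long expires; };

static struct demo_secret one = { "one", 0 }, two = { "two", 0 };
static struct demo_secret *generating = &one, *primary = &one;
static struct demo_secret *retiring = &two, *secondary = &two;

/* Mirror of the pointer shuffle done under tcp_secret_locker above. */
static void rotate(unsigned long now, unsigned long life, unsigned long twomsl)
{
        secondary->expires = now + life;        /* fresh generating secret */
        primary->expires = now + twomsl;        /* old one verifies a while */

        generating = secondary;
        retiring = primary;
        /* primary/secondary themselves swap later, once the retiring
         * secret fails a verification that the generating one passes. */
}

int main(void)
{
        rotate(1000, 600, 120);
        printf("generating=%s retiring=%s\n", generating->name, retiring->name);
        return 0;
}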
3191 | ||
3192 | void tcp_done(struct sock *sk) | |
3193 | { | |
3194 | if (sk->sk_state == TCP_SYN_SENT || sk->sk_state == TCP_SYN_RECV) | |
3195 | TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_ATTEMPTFAILS); | |
3196 | ||
3197 | tcp_set_state(sk, TCP_CLOSE); | |
3198 | tcp_clear_xmit_timers(sk); | |
3199 | ||
3200 | sk->sk_shutdown = SHUTDOWN_MASK; | |
3201 | ||
3202 | if (!sock_flag(sk, SOCK_DEAD)) | |
3203 | sk->sk_state_change(sk); | |
3204 | else | |
3205 | inet_csk_destroy_sock(sk); | |
3206 | } | |
3207 | EXPORT_SYMBOL_GPL(tcp_done); | |
3208 | ||
3209 | extern struct tcp_congestion_ops tcp_reno; | |
3210 | ||
3211 | static __initdata unsigned long thash_entries; | |
3212 | static int __init set_thash_entries(char *str) | |
3213 | { | |
3214 | if (!str) | |
3215 | return 0; | |
3216 | thash_entries = simple_strtoul(str, &str, 0); | |
3217 | return 1; | |
3218 | } | |
3219 | __setup("thash_entries=", set_thash_entries); | |
3220 | ||
3221 | void __init tcp_init(void) | |
3222 | { | |
3223 | struct sk_buff *skb = NULL; | |
3224 | unsigned long nr_pages, limit; | |
3225 | int i, max_share, cnt; | |
3226 | unsigned long jiffy = jiffies; | |
3227 | ||
3228 | BUILD_BUG_ON(sizeof(struct tcp_skb_cb) > sizeof(skb->cb)); | |
3229 | ||
3230 | percpu_counter_init(&tcp_sockets_allocated, 0); | |
3231 | percpu_counter_init(&tcp_orphan_count, 0); | |
3232 | tcp_hashinfo.bind_bucket_cachep = | |
3233 | kmem_cache_create("tcp_bind_bucket", | |
3234 | sizeof(struct inet_bind_bucket), 0, | |
3235 | SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL); | |
3236 | ||
3237 | /* Size and allocate the main established and bind bucket | |
3238 | * hash tables. | |
3239 | * | |
3240 | * The methodology is similar to that of the buffer cache. | |
3241 | */ | |
3242 | tcp_hashinfo.ehash = | |
3243 | alloc_large_system_hash("TCP established", | |
3244 | sizeof(struct inet_ehash_bucket), | |
3245 | thash_entries, | |
3246 | (totalram_pages >= 128 * 1024) ? | |
3247 | 13 : 15, | |
3248 | 0, | |
3249 | NULL, | |
3250 | &tcp_hashinfo.ehash_mask, | |
3251 | thash_entries ? 0 : 512 * 1024); | |
3252 | for (i = 0; i <= tcp_hashinfo.ehash_mask; i++) { | |
3253 | INIT_HLIST_NULLS_HEAD(&tcp_hashinfo.ehash[i].chain, i); | |
3254 | INIT_HLIST_NULLS_HEAD(&tcp_hashinfo.ehash[i].twchain, i); | |
3255 | } | |
3256 | if (inet_ehash_locks_alloc(&tcp_hashinfo)) | |
3257 | panic("TCP: failed to alloc ehash_locks"); | |
3258 | tcp_hashinfo.bhash = | |
3259 | alloc_large_system_hash("TCP bind", | |
3260 | sizeof(struct inet_bind_hashbucket), | |
3261 | tcp_hashinfo.ehash_mask + 1, | |
3262 | (totalram_pages >= 128 * 1024) ? | |
3263 | 13 : 15, | |
3264 | 0, | |
3265 | &tcp_hashinfo.bhash_size, | |
3266 | NULL, | |
3267 | 64 * 1024); | |
3268 | tcp_hashinfo.bhash_size = 1 << tcp_hashinfo.bhash_size; | |
3269 | for (i = 0; i < tcp_hashinfo.bhash_size; i++) { | |
3270 | spin_lock_init(&tcp_hashinfo.bhash[i].lock); | |
3271 | INIT_HLIST_HEAD(&tcp_hashinfo.bhash[i].chain); | |
3272 | } | |
3273 | ||
3274 | ||
3275 | cnt = tcp_hashinfo.ehash_mask + 1; | |
3276 | ||
3277 | tcp_death_row.sysctl_max_tw_buckets = cnt / 2; | |
3278 | sysctl_tcp_max_orphans = cnt / 2; | |
3279 | sysctl_max_syn_backlog = max(128, cnt / 256); | |
3280 | ||
3281 | /* Set the pressure threshold to be a fraction of global memory that | |
3282 | * is up to 1/2 at 256 MB, decreasing toward zero with the amount of | |
3283 | * memory, with a floor of 128 pages. | |
3284 | */ | |
3285 | nr_pages = totalram_pages - totalhigh_pages; | |
3286 | limit = min(nr_pages, 1UL<<(28-PAGE_SHIFT)) >> (20-PAGE_SHIFT); | |
3287 | limit = (limit * (nr_pages >> (20-PAGE_SHIFT))) >> (PAGE_SHIFT-11); | |
3288 | limit = max(limit, 128UL); | |
3289 | sysctl_tcp_mem[0] = limit / 4 * 3; | |
3290 | sysctl_tcp_mem[1] = limit; | |
3291 | sysctl_tcp_mem[2] = sysctl_tcp_mem[0] * 2; | |
3292 | ||
3293 | /* Set per-socket limits to no more than 1/128 the pressure threshold */ | |
3294 | limit = ((unsigned long)sysctl_tcp_mem[1]) << (PAGE_SHIFT - 7); | |
3295 | max_share = min(4UL*1024*1024, limit); | |
3296 | ||
3297 | sysctl_tcp_wmem[0] = SK_MEM_QUANTUM; | |
3298 | sysctl_tcp_wmem[1] = 16*1024; | |
3299 | sysctl_tcp_wmem[2] = max(64*1024, max_share); | |
3300 | ||
3301 | sysctl_tcp_rmem[0] = SK_MEM_QUANTUM; | |
3302 | sysctl_tcp_rmem[1] = 87380; | |
3303 | sysctl_tcp_rmem[2] = max(87380, max_share); | |
3304 | ||
3305 | printk(KERN_INFO "TCP: Hash tables configured " | |
3306 | "(established %u bind %u)\n", | |
3307 | tcp_hashinfo.ehash_mask + 1, tcp_hashinfo.bhash_size); | |
3308 | ||
3309 | tcp_register_congestion_control(&tcp_reno); | |
3310 | ||
3311 | memset(&tcp_secret_one.secrets[0], 0, sizeof(tcp_secret_one.secrets)); | |
3312 | memset(&tcp_secret_two.secrets[0], 0, sizeof(tcp_secret_two.secrets)); | |
3313 | tcp_secret_one.expires = jiffy; /* past due */ | |
3314 | tcp_secret_two.expires = jiffy; /* past due */ | |
3315 | tcp_secret_generating = &tcp_secret_one; | |
3316 | tcp_secret_primary = &tcp_secret_one; | |
3317 | tcp_secret_retiring = &tcp_secret_two; | |
3318 | tcp_secret_secondary = &tcp_secret_two; | |
3319 | } |
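A worked, standalone example of the pressure-threshold arithmetic above, for a hypothetical machine with 4 KB pages and 4 GB of low memory; nr_pages = 1048576 is an assumed input for the illustration, not a value taken from this file.

#include <stdio.h>

#define PAGE_SHIFT 12   /* assumed 4 KB pages */

int main(void)
{
        unsigned long nr_pages = 1048576;       /* assumed: 4 GB / 4 KB */
        unsigned long cap = 1UL << (28 - PAGE_SHIFT);   /* 256 MB of pages */
        unsigned long limit, mem[3];

        limit = (nr_pages < cap ? nr_pages : cap) >> (20 - PAGE_SHIFT);
        limit = (limit * (nr_pages >> (20 - PAGE_SHIFT))) >> (PAGE_SHIFT - 11);
        if (limit < 128)
                limit = 128;

        mem[0] = limit / 4 * 3;         /* sysctl_tcp_mem[0]: low */
        mem[1] = limit;                 /* sysctl_tcp_mem[1]: pressure */
        mem[2] = mem[0] * 2;            /* sysctl_tcp_mem[2]: high */

        printf("tcp_mem (pages): low=%lu pressure=%lu high=%lu\n",
               mem[0], mem[1], mem[2]);
        return 0;
}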