bbs.cooldavid.org Git - net-next-2.6.git/blame - net/core/sock.c
net/core: add lock context change annotations in net/core/sock.c
[net-next-2.6.git] / net / core / sock.c
1da177e4
LT
1/*
2 * INET An implementation of the TCP/IP protocol suite for the LINUX
3 * operating system. INET is implemented using the BSD Socket
4 * interface as the means of communication with the user level.
5 *
6 * Generic socket support routines. Memory allocators, socket lock/release
7 * handler for protocols to use and generic option handler.
8 *
9 *
02c30a84 10 * Authors: Ross Biro
1da177e4
LT
11 * Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
12 * Florian La Roche, <flla@stud.uni-sb.de>
13 * Alan Cox, <A.Cox@swansea.ac.uk>
14 *
15 * Fixes:
16 * Alan Cox : Numerous verify_area() problems
17 * Alan Cox : Connecting on a connecting socket
18 * now returns an error for tcp.
19 * Alan Cox : sock->protocol is set correctly.
20 * and is not sometimes left as 0.
21 * Alan Cox : connect handles icmp errors on a
22 * connect properly. Unfortunately there
23 * is a restart syscall nasty there. I
24 * can't match BSD without hacking the C
25 * library. Ideas urgently sought!
26 * Alan Cox : Disallow bind() to addresses that are
27 * not ours - especially broadcast ones!!
28 * Alan Cox : Socket 1024 _IS_ ok for users. (fencepost)
29 * Alan Cox : sock_wfree/sock_rfree don't destroy sockets,
30 * instead they leave that for the DESTROY timer.
31 * Alan Cox : Clean up error flag in accept
32 * Alan Cox : TCP ack handling is buggy, the DESTROY timer
33 * was buggy. Put a remove_sock() in the handler
34 * for memory when we hit 0. Also altered the timer
4ec93edb 35 * code. The ACK stuff can wait and needs major
1da177e4
LT
36 * TCP layer surgery.
37 * Alan Cox : Fixed TCP ack bug, removed remove sock
38 * and fixed timer/inet_bh race.
39 * Alan Cox : Added zapped flag for TCP
40 * Alan Cox : Move kfree_skb into skbuff.c and tidied up surplus code
41 * Alan Cox : for new sk_buff allocations wmalloc/rmalloc now call alloc_skb
42 * Alan Cox : kfree_s calls now are kfree_skbmem so we can track skb resources
43 * Alan Cox : Supports socket option broadcast now as does udp. Packet and raw need fixing.
44 * Alan Cox : Added RCVBUF,SNDBUF size setting. It suddenly occurred to me how easy it was so...
45 * Rick Sladkey : Relaxed UDP rules for matching packets.
46 * C.E.Hawkins : IFF_PROMISC/SIOCGHWADDR support
47 * Pauline Middelink : identd support
48 * Alan Cox : Fixed connect() taking signals I think.
49 * Alan Cox : SO_LINGER supported
50 * Alan Cox : Error reporting fixes
51 * Anonymous : inet_create tidied up (sk->reuse setting)
52 * Alan Cox : inet sockets don't set sk->type!
53 * Alan Cox : Split socket option code
54 * Alan Cox : Callbacks
55 * Alan Cox : Nagle flag for Charles & Johannes stuff
56 * Alex : Removed restriction on inet fioctl
57 * Alan Cox : Splitting INET from NET core
58 * Alan Cox : Fixed bogus SO_TYPE handling in getsockopt()
59 * Adam Caldwell : Missing return in SO_DONTROUTE/SO_DEBUG code
60 * Alan Cox : Split IP from generic code
61 * Alan Cox : New kfree_skbmem()
62 * Alan Cox : Make SO_DEBUG superuser only.
63 * Alan Cox : Allow anyone to clear SO_DEBUG
64 * (compatibility fix)
65 * Alan Cox : Added optimistic memory grabbing for AF_UNIX throughput.
66 * Alan Cox : Allocator for a socket is settable.
67 * Alan Cox : SO_ERROR includes soft errors.
68 * Alan Cox : Allow NULL arguments on some SO_ opts
69 * Alan Cox : Generic socket allocation to make hooks
70 * easier (suggested by Craig Metz).
71 * Michael Pall : SO_ERROR returns positive errno again
72 * Steve Whitehouse: Added default destructor to free
73 * protocol private data.
74 * Steve Whitehouse: Added various other default routines
75 * common to several socket families.
76 * Chris Evans : Call suser() check last on F_SETOWN
77 * Jay Schulist : Added SO_ATTACH_FILTER and SO_DETACH_FILTER.
78 * Andi Kleen : Add sock_kmalloc()/sock_kfree_s()
79 * Andi Kleen : Fix write_space callback
80 * Chris Evans : Security fixes - signedness again
81 * Arnaldo C. Melo : cleanups, use skb_queue_purge
82 *
83 * To Fix:
84 *
85 *
86 * This program is free software; you can redistribute it and/or
87 * modify it under the terms of the GNU General Public License
88 * as published by the Free Software Foundation; either version
89 * 2 of the License, or (at your option) any later version.
90 */
91
4fc268d2 92#include <linux/capability.h>
1da177e4
LT
93#include <linux/errno.h>
94#include <linux/types.h>
95#include <linux/socket.h>
96#include <linux/in.h>
97#include <linux/kernel.h>
1da177e4
LT
98#include <linux/module.h>
99#include <linux/proc_fs.h>
100#include <linux/seq_file.h>
101#include <linux/sched.h>
102#include <linux/timer.h>
103#include <linux/string.h>
104#include <linux/sockios.h>
105#include <linux/net.h>
106#include <linux/mm.h>
107#include <linux/slab.h>
108#include <linux/interrupt.h>
109#include <linux/poll.h>
110#include <linux/tcp.h>
111#include <linux/init.h>
a1f8e7f7 112#include <linux/highmem.h>
3f551f94 113#include <linux/user_namespace.h>
1da177e4
LT
114
115#include <asm/uaccess.h>
116#include <asm/system.h>
117
118#include <linux/netdevice.h>
119#include <net/protocol.h>
120#include <linux/skbuff.h>
457c4cbc 121#include <net/net_namespace.h>
2e6599cb 122#include <net/request_sock.h>
1da177e4 123#include <net/sock.h>
20d49473 124#include <linux/net_tstamp.h>
1da177e4
LT
125#include <net/xfrm.h>
126#include <linux/ipsec.h>
f8451725 127#include <net/cls_cgroup.h>
1da177e4
LT
128
129#include <linux/filter.h>
130
131#ifdef CONFIG_INET
132#include <net/tcp.h>
133#endif
134
da21f24d
IM
135/*
136 * Each address family might have different locking rules, so we have
137 * one slock key per address family:
138 */
a5b5bb9a
IM
139static struct lock_class_key af_family_keys[AF_MAX];
140static struct lock_class_key af_family_slock_keys[AF_MAX];
141
a5b5bb9a
IM
142/*
143 * Make lock validator output more readable. (we pre-construct these
144 * strings build-time, so that runtime initialization of socket
145 * locks is fast):
146 */
36cbd3dc 147static const char *const af_family_key_strings[AF_MAX+1] = {
a5b5bb9a
IM
148 "sk_lock-AF_UNSPEC", "sk_lock-AF_UNIX" , "sk_lock-AF_INET" ,
149 "sk_lock-AF_AX25" , "sk_lock-AF_IPX" , "sk_lock-AF_APPLETALK",
150 "sk_lock-AF_NETROM", "sk_lock-AF_BRIDGE" , "sk_lock-AF_ATMPVC" ,
151 "sk_lock-AF_X25" , "sk_lock-AF_INET6" , "sk_lock-AF_ROSE" ,
152 "sk_lock-AF_DECnet", "sk_lock-AF_NETBEUI" , "sk_lock-AF_SECURITY" ,
153 "sk_lock-AF_KEY" , "sk_lock-AF_NETLINK" , "sk_lock-AF_PACKET" ,
154 "sk_lock-AF_ASH" , "sk_lock-AF_ECONET" , "sk_lock-AF_ATMSVC" ,
cbd151bf 155 "sk_lock-AF_RDS" , "sk_lock-AF_SNA" , "sk_lock-AF_IRDA" ,
a5b5bb9a 156 "sk_lock-AF_PPPOX" , "sk_lock-AF_WANPIPE" , "sk_lock-AF_LLC" ,
cd05acfe 157 "sk_lock-27" , "sk_lock-28" , "sk_lock-AF_CAN" ,
17926a79 158 "sk_lock-AF_TIPC" , "sk_lock-AF_BLUETOOTH", "sk_lock-AF_IUCV" ,
bce7b154 159 "sk_lock-AF_RXRPC" , "sk_lock-AF_ISDN" , "sk_lock-AF_PHONET" ,
fe33147a 160 "sk_lock-AF_IEEE802154", "sk_lock-AF_CAIF" ,
bce7b154 161 "sk_lock-AF_MAX"
a5b5bb9a 162};
36cbd3dc 163static const char *const af_family_slock_key_strings[AF_MAX+1] = {
a5b5bb9a
IM
164 "slock-AF_UNSPEC", "slock-AF_UNIX" , "slock-AF_INET" ,
165 "slock-AF_AX25" , "slock-AF_IPX" , "slock-AF_APPLETALK",
166 "slock-AF_NETROM", "slock-AF_BRIDGE" , "slock-AF_ATMPVC" ,
167 "slock-AF_X25" , "slock-AF_INET6" , "slock-AF_ROSE" ,
168 "slock-AF_DECnet", "slock-AF_NETBEUI" , "slock-AF_SECURITY" ,
169 "slock-AF_KEY" , "slock-AF_NETLINK" , "slock-AF_PACKET" ,
170 "slock-AF_ASH" , "slock-AF_ECONET" , "slock-AF_ATMSVC" ,
cbd151bf 171 "slock-AF_RDS" , "slock-AF_SNA" , "slock-AF_IRDA" ,
a5b5bb9a 172 "slock-AF_PPPOX" , "slock-AF_WANPIPE" , "slock-AF_LLC" ,
cd05acfe 173 "slock-27" , "slock-28" , "slock-AF_CAN" ,
17926a79 174 "slock-AF_TIPC" , "slock-AF_BLUETOOTH", "slock-AF_IUCV" ,
bce7b154 175 "slock-AF_RXRPC" , "slock-AF_ISDN" , "slock-AF_PHONET" ,
fe33147a 176 "slock-AF_IEEE802154", "slock-AF_CAIF" ,
bce7b154 177 "slock-AF_MAX"
a5b5bb9a 178};
36cbd3dc 179static const char *const af_family_clock_key_strings[AF_MAX+1] = {
443aef0e
PZ
180 "clock-AF_UNSPEC", "clock-AF_UNIX" , "clock-AF_INET" ,
181 "clock-AF_AX25" , "clock-AF_IPX" , "clock-AF_APPLETALK",
182 "clock-AF_NETROM", "clock-AF_BRIDGE" , "clock-AF_ATMPVC" ,
183 "clock-AF_X25" , "clock-AF_INET6" , "clock-AF_ROSE" ,
184 "clock-AF_DECnet", "clock-AF_NETBEUI" , "clock-AF_SECURITY" ,
185 "clock-AF_KEY" , "clock-AF_NETLINK" , "clock-AF_PACKET" ,
186 "clock-AF_ASH" , "clock-AF_ECONET" , "clock-AF_ATMSVC" ,
cbd151bf 187 "clock-AF_RDS" , "clock-AF_SNA" , "clock-AF_IRDA" ,
443aef0e 188 "clock-AF_PPPOX" , "clock-AF_WANPIPE" , "clock-AF_LLC" ,
b4942af6 189 "clock-27" , "clock-28" , "clock-AF_CAN" ,
e51f802b 190 "clock-AF_TIPC" , "clock-AF_BLUETOOTH", "clock-AF_IUCV" ,
bce7b154 191 "clock-AF_RXRPC" , "clock-AF_ISDN" , "clock-AF_PHONET" ,
fe33147a 192 "clock-AF_IEEE802154", "clock-AF_CAIF" ,
bce7b154 193 "clock-AF_MAX"
443aef0e 194};
da21f24d
IM
195
196/*
197 * sk_callback_lock locking rules are per-address-family,
198 * so split the lock classes by using a per-AF key:
199 */
200static struct lock_class_key af_callback_keys[AF_MAX];
201
1da177e4
LT
202/* Take into consideration the size of the struct sk_buff overhead in the
203 * determination of these values, since that is non-constant across
204 * platforms. This makes socket queueing behavior and performance
205 * not depend upon such differences.
206 */
207#define _SK_MEM_PACKETS 256
208#define _SK_MEM_OVERHEAD (sizeof(struct sk_buff) + 256)
209#define SK_WMEM_MAX (_SK_MEM_OVERHEAD * _SK_MEM_PACKETS)
210#define SK_RMEM_MAX (_SK_MEM_OVERHEAD * _SK_MEM_PACKETS)
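/*
 * Worked example (a sketch; sizeof(struct sk_buff) is arch- and
 * config-dependent, 232 bytes is only a hypothetical 64-bit figure):
 *
 *	_SK_MEM_OVERHEAD = 232 + 256 = 488 bytes
 *	SK_WMEM_MAX = SK_RMEM_MAX = 488 * 256 = 124928 bytes (~122 KiB)
 *
 * i.e. the compiled-in defaults scale with the platform's sk_buff size
 * instead of being a fixed byte count.
 */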
211
212/* Run time adjustable parameters. */
ab32ea5d
BH
213__u32 sysctl_wmem_max __read_mostly = SK_WMEM_MAX;
214__u32 sysctl_rmem_max __read_mostly = SK_RMEM_MAX;
215__u32 sysctl_wmem_default __read_mostly = SK_WMEM_MAX;
216__u32 sysctl_rmem_default __read_mostly = SK_RMEM_MAX;
1da177e4
LT
217
218/* Maximal space eaten by iovec or ancillary data plus some space */
ab32ea5d 219int sysctl_optmem_max __read_mostly = sizeof(unsigned long)*(2*UIO_MAXIOV+512);
2a91525c 220EXPORT_SYMBOL(sysctl_optmem_max);
1da177e4 221
f8451725
HX
222#if defined(CONFIG_CGROUPS) && !defined(CONFIG_NET_CLS_CGROUP)
223int net_cls_subsys_id = -1;
224EXPORT_SYMBOL_GPL(net_cls_subsys_id);
225#endif
226
1da177e4
LT
227static int sock_set_timeout(long *timeo_p, char __user *optval, int optlen)
228{
229 struct timeval tv;
230
231 if (optlen < sizeof(tv))
232 return -EINVAL;
233 if (copy_from_user(&tv, optval, sizeof(tv)))
234 return -EFAULT;
ba78073e
VA
235 if (tv.tv_usec < 0 || tv.tv_usec >= USEC_PER_SEC)
236 return -EDOM;
1da177e4 237
ba78073e 238 if (tv.tv_sec < 0) {
6f11df83
AM
239 static int warned __read_mostly;
240
ba78073e 241 *timeo_p = 0;
50aab54f 242 if (warned < 10 && net_ratelimit()) {
ba78073e
VA
243 warned++;
244 printk(KERN_INFO "sock_set_timeout: `%s' (pid %d) "
245 "tries to set negative timeout\n",
ba25f9dc 246 current->comm, task_pid_nr(current));
50aab54f 247 }
ba78073e
VA
248 return 0;
249 }
1da177e4
LT
250 *timeo_p = MAX_SCHEDULE_TIMEOUT;
251 if (tv.tv_sec == 0 && tv.tv_usec == 0)
252 return 0;
253 if (tv.tv_sec < (MAX_SCHEDULE_TIMEOUT/HZ - 1))
254 *timeo_p = tv.tv_sec*HZ + (tv.tv_usec+(1000000/HZ-1))/(1000000/HZ);
255 return 0;
256}
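/*
 * Illustrative user-space sketch (not part of this file; "fd" is a
 * hypothetical socket): sock_set_timeout() is the kernel side of a
 * plain SO_RCVTIMEO/SO_SNDTIMEO setsockopt call.
 *
 *	struct timeval tv = { .tv_sec = 5, .tv_usec = 0 };
 *
 *	if (setsockopt(fd, SOL_SOCKET, SO_RCVTIMEO, &tv, sizeof(tv)) < 0)
 *		perror("setsockopt");
 *
 * A tv_usec outside [0, USEC_PER_SEC) fails with EDOM; a negative
 * tv_sec is accepted, logged, and treated as a zero timeout.
 */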
257
258static void sock_warn_obsolete_bsdism(const char *name)
259{
260 static int warned;
261 static char warncomm[TASK_COMM_LEN];
4ec93edb
YH
262 if (strcmp(warncomm, current->comm) && warned < 5) {
263 strcpy(warncomm, current->comm);
1da177e4
LT
264 printk(KERN_WARNING "process `%s' is using obsolete "
265 "%s SO_BSDCOMPAT\n", warncomm, name);
266 warned++;
267 }
268}
269
20d49473 270static void sock_disable_timestamp(struct sock *sk, int flag)
4ec93edb 271{
20d49473
PO
272 if (sock_flag(sk, flag)) {
273 sock_reset_flag(sk, flag);
274 if (!sock_flag(sk, SOCK_TIMESTAMP) &&
275 !sock_flag(sk, SOCK_TIMESTAMPING_RX_SOFTWARE)) {
276 net_disable_timestamp();
277 }
1da177e4
LT
278 }
279}
280
281
f0088a50
DV
282int sock_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
283{
766e9037 284 int err;
f0088a50 285 int skb_len;
3b885787
NH
286 unsigned long flags;
287 struct sk_buff_head *list = &sk->sk_receive_queue;
f0088a50 288
9ee6b7f1 289 /* Cast sk->rcvbuf to unsigned... It's pointless, but reduces
f0088a50
DV
290 the number of warnings when compiling with -W --ANK
291 */
292 if (atomic_read(&sk->sk_rmem_alloc) + skb->truesize >=
293 (unsigned)sk->sk_rcvbuf) {
766e9037
ED
294 atomic_inc(&sk->sk_drops);
295 return -ENOMEM;
f0088a50
DV
296 }
297
fda9ef5d 298 err = sk_filter(sk, skb);
f0088a50 299 if (err)
766e9037 300 return err;
f0088a50 301
3ab224be 302 if (!sk_rmem_schedule(sk, skb->truesize)) {
766e9037
ED
303 atomic_inc(&sk->sk_drops);
304 return -ENOBUFS;
3ab224be
HA
305 }
306
f0088a50
DV
307 skb->dev = NULL;
308 skb_set_owner_r(skb, sk);
49ad9599 309
f0088a50
DV
310 /* Cache the SKB length before we tack it onto the receive
311 * queue. Once it is added it no longer belongs to us and
312 * may be freed by other threads of control pulling packets
313 * from the queue.
314 */
315 skb_len = skb->len;
316
7fee226a
ED
317 /* we escape from the rcu protected region, make sure we don't leak
318 * a non-refcounted dst
319 */
320 skb_dst_force(skb);
321
3b885787
NH
322 spin_lock_irqsave(&list->lock, flags);
323 skb->dropcount = atomic_read(&sk->sk_drops);
324 __skb_queue_tail(list, skb);
325 spin_unlock_irqrestore(&list->lock, flags);
f0088a50
DV
326
327 if (!sock_flag(sk, SOCK_DEAD))
328 sk->sk_data_ready(sk, skb_len);
766e9037 329 return 0;
f0088a50
DV
330}
331EXPORT_SYMBOL(sock_queue_rcv_skb);
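/*
 * Illustrative caller sketch (hypothetical protocol code, not from this
 * file): a typical receive path hands the skb to the socket and frees
 * it itself only when queueing fails.
 *
 *	static int myproto_rcv(struct sock *sk, struct sk_buff *skb)
 *	{
 *		if (sock_queue_rcv_skb(sk, skb) < 0) {
 *			kfree_skb(skb);
 *			return NET_RX_DROP;
 *		}
 *		return NET_RX_SUCCESS;
 *	}
 */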
332
58a5a7b9 333int sk_receive_skb(struct sock *sk, struct sk_buff *skb, const int nested)
f0088a50
DV
334{
335 int rc = NET_RX_SUCCESS;
336
fda9ef5d 337 if (sk_filter(sk, skb))
f0088a50
DV
338 goto discard_and_relse;
339
340 skb->dev = NULL;
341
c377411f
ED
342 if (sk_rcvqueues_full(sk, skb)) {
343 atomic_inc(&sk->sk_drops);
344 goto discard_and_relse;
345 }
58a5a7b9
ACM
346 if (nested)
347 bh_lock_sock_nested(sk);
348 else
349 bh_lock_sock(sk);
a5b5bb9a
IM
350 if (!sock_owned_by_user(sk)) {
351 /*
352 * trylock + unlock semantics:
353 */
354 mutex_acquire(&sk->sk_lock.dep_map, 0, 1, _RET_IP_);
355
c57943a1 356 rc = sk_backlog_rcv(sk, skb);
a5b5bb9a
IM
357
358 mutex_release(&sk->sk_lock.dep_map, 1, _RET_IP_);
a3a858ff 359 } else if (sk_add_backlog(sk, skb)) {
8eae939f
ZY
360 bh_unlock_sock(sk);
361 atomic_inc(&sk->sk_drops);
362 goto discard_and_relse;
363 }
364
f0088a50
DV
365 bh_unlock_sock(sk);
366out:
367 sock_put(sk);
368 return rc;
369discard_and_relse:
370 kfree_skb(skb);
371 goto out;
372}
373EXPORT_SYMBOL(sk_receive_skb);
374
ea94ff3b
KK
375void sk_reset_txq(struct sock *sk)
376{
377 sk_tx_queue_clear(sk);
378}
379EXPORT_SYMBOL(sk_reset_txq);
380
f0088a50
DV
381struct dst_entry *__sk_dst_check(struct sock *sk, u32 cookie)
382{
b6c6712a 383 struct dst_entry *dst = __sk_dst_get(sk);
f0088a50
DV
384
385 if (dst && dst->obsolete && dst->ops->check(dst, cookie) == NULL) {
e022f0b4 386 sk_tx_queue_clear(sk);
b6c6712a 387 rcu_assign_pointer(sk->sk_dst_cache, NULL);
f0088a50
DV
388 dst_release(dst);
389 return NULL;
390 }
391
392 return dst;
393}
394EXPORT_SYMBOL(__sk_dst_check);
395
396struct dst_entry *sk_dst_check(struct sock *sk, u32 cookie)
397{
398 struct dst_entry *dst = sk_dst_get(sk);
399
400 if (dst && dst->obsolete && dst->ops->check(dst, cookie) == NULL) {
401 sk_dst_reset(sk);
402 dst_release(dst);
403 return NULL;
404 }
405
406 return dst;
407}
408EXPORT_SYMBOL(sk_dst_check);
409
4878809f
DM
410static int sock_bindtodevice(struct sock *sk, char __user *optval, int optlen)
411{
412 int ret = -ENOPROTOOPT;
413#ifdef CONFIG_NETDEVICES
3b1e0a65 414 struct net *net = sock_net(sk);
4878809f
DM
415 char devname[IFNAMSIZ];
416 int index;
417
418 /* Sorry... */
419 ret = -EPERM;
420 if (!capable(CAP_NET_RAW))
421 goto out;
422
423 ret = -EINVAL;
424 if (optlen < 0)
425 goto out;
426
427 /* Bind this socket to a particular device like "eth0",
428 * as specified in the passed interface name. If the
429 * name is "" or the option length is zero the socket
430 * is not bound.
431 */
432 if (optlen > IFNAMSIZ - 1)
433 optlen = IFNAMSIZ - 1;
434 memset(devname, 0, sizeof(devname));
435
436 ret = -EFAULT;
437 if (copy_from_user(devname, optval, optlen))
438 goto out;
439
000ba2e4
DM
440 index = 0;
441 if (devname[0] != '\0') {
bf8e56bf 442 struct net_device *dev;
4878809f 443
bf8e56bf
ED
444 rcu_read_lock();
445 dev = dev_get_by_name_rcu(net, devname);
446 if (dev)
447 index = dev->ifindex;
448 rcu_read_unlock();
4878809f
DM
449 ret = -ENODEV;
450 if (!dev)
451 goto out;
4878809f
DM
452 }
453
454 lock_sock(sk);
455 sk->sk_bound_dev_if = index;
456 sk_dst_reset(sk);
457 release_sock(sk);
458
459 ret = 0;
460
461out:
462#endif
463
464 return ret;
465}
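/*
 * Illustrative user-space sketch (hypothetical fd; the caller needs
 * CAP_NET_RAW): bind the socket to one interface, then clear the
 * binding again with an empty name.
 *
 *	setsockopt(fd, SOL_SOCKET, SO_BINDTODEVICE, "eth0", sizeof("eth0"));
 *	...
 *	setsockopt(fd, SOL_SOCKET, SO_BINDTODEVICE, "", 0);
 */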
466
c0ef877b
PE
467static inline void sock_valbool_flag(struct sock *sk, int bit, int valbool)
468{
469 if (valbool)
470 sock_set_flag(sk, bit);
471 else
472 sock_reset_flag(sk, bit);
473}
474
1da177e4
LT
475/*
476 * This is meant for all protocols to use and covers goings on
477 * at the socket level. Everything here is generic.
478 */
479
480int sock_setsockopt(struct socket *sock, int level, int optname,
b7058842 481 char __user *optval, unsigned int optlen)
1da177e4 482{
2a91525c 483 struct sock *sk = sock->sk;
1da177e4
LT
484 int val;
485 int valbool;
486 struct linger ling;
487 int ret = 0;
4ec93edb 488
1da177e4
LT
489 /*
490 * Options without arguments
491 */
492
4878809f
DM
493 if (optname == SO_BINDTODEVICE)
494 return sock_bindtodevice(sk, optval, optlen);
495
e71a4783
SH
496 if (optlen < sizeof(int))
497 return -EINVAL;
4ec93edb 498
1da177e4
LT
499 if (get_user(val, (int __user *)optval))
500 return -EFAULT;
4ec93edb 501
2a91525c 502 valbool = val ? 1 : 0;
1da177e4
LT
503
504 lock_sock(sk);
505
2a91525c 506 switch (optname) {
e71a4783 507 case SO_DEBUG:
2a91525c 508 if (val && !capable(CAP_NET_ADMIN))
e71a4783 509 ret = -EACCES;
2a91525c 510 else
c0ef877b 511 sock_valbool_flag(sk, SOCK_DBG, valbool);
e71a4783
SH
512 break;
513 case SO_REUSEADDR:
514 sk->sk_reuse = valbool;
515 break;
516 case SO_TYPE:
49c794e9 517 case SO_PROTOCOL:
0d6038ee 518 case SO_DOMAIN:
e71a4783
SH
519 case SO_ERROR:
520 ret = -ENOPROTOOPT;
521 break;
522 case SO_DONTROUTE:
c0ef877b 523 sock_valbool_flag(sk, SOCK_LOCALROUTE, valbool);
e71a4783
SH
524 break;
525 case SO_BROADCAST:
526 sock_valbool_flag(sk, SOCK_BROADCAST, valbool);
527 break;
528 case SO_SNDBUF:
529 /* Don't return an error on this; BSD doesn't, and if you
530 think about it this is right. Otherwise apps have to
531 play 'guess the biggest size' games. RCVBUF/SNDBUF
532 are treated in BSD as hints */
533
534 if (val > sysctl_wmem_max)
535 val = sysctl_wmem_max;
b0573dea 536set_sndbuf:
e71a4783
SH
537 sk->sk_userlocks |= SOCK_SNDBUF_LOCK;
538 if ((val * 2) < SOCK_MIN_SNDBUF)
539 sk->sk_sndbuf = SOCK_MIN_SNDBUF;
540 else
541 sk->sk_sndbuf = val * 2;
1da177e4 542
e71a4783
SH
543 /*
544 * Wake up sending tasks if we
545 * upped the value.
546 */
547 sk->sk_write_space(sk);
548 break;
1da177e4 549
e71a4783
SH
550 case SO_SNDBUFFORCE:
551 if (!capable(CAP_NET_ADMIN)) {
552 ret = -EPERM;
553 break;
554 }
555 goto set_sndbuf;
b0573dea 556
e71a4783
SH
557 case SO_RCVBUF:
558 /* Don't return an error on this; BSD doesn't, and if you
559 think about it this is right. Otherwise apps have to
560 play 'guess the biggest size' games. RCVBUF/SNDBUF
561 are treated in BSD as hints */
4ec93edb 562
e71a4783
SH
563 if (val > sysctl_rmem_max)
564 val = sysctl_rmem_max;
b0573dea 565set_rcvbuf:
e71a4783
SH
566 sk->sk_userlocks |= SOCK_RCVBUF_LOCK;
567 /*
568 * We double it on the way in to account for
569 * "struct sk_buff" etc. overhead. Applications
570 * assume that the SO_RCVBUF setting they make will
571 * allow that much actual data to be received on that
572 * socket.
573 *
574 * Applications are unaware that "struct sk_buff" and
575 * other overheads allocate from the receive buffer
576 * during socket buffer allocation.
577 *
578 * And after considering the possible alternatives,
579 * returning the value we actually used in getsockopt
580 * is the most desirable behavior.
581 */
582 if ((val * 2) < SOCK_MIN_RCVBUF)
583 sk->sk_rcvbuf = SOCK_MIN_RCVBUF;
584 else
585 sk->sk_rcvbuf = val * 2;
586 break;
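	/* Illustrative effect (user-space sketch, hypothetical fd):
	 * because of the doubling above, reading the option back
	 * returns twice the requested value:
	 *
	 *	int val = 65536, out;
	 *	socklen_t len = sizeof(out);
	 *	setsockopt(fd, SOL_SOCKET, SO_RCVBUF, &val, sizeof(val));
	 *	getsockopt(fd, SOL_SOCKET, SO_RCVBUF, &out, &len);
	 *	// out == 131072, assuming val <= sysctl_rmem_max
	 */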
587
588 case SO_RCVBUFFORCE:
589 if (!capable(CAP_NET_ADMIN)) {
590 ret = -EPERM;
1da177e4 591 break;
e71a4783
SH
592 }
593 goto set_rcvbuf;
1da177e4 594
e71a4783 595 case SO_KEEPALIVE:
1da177e4 596#ifdef CONFIG_INET
e71a4783
SH
597 if (sk->sk_protocol == IPPROTO_TCP)
598 tcp_set_keepalive(sk, valbool);
1da177e4 599#endif
e71a4783
SH
600 sock_valbool_flag(sk, SOCK_KEEPOPEN, valbool);
601 break;
602
603 case SO_OOBINLINE:
604 sock_valbool_flag(sk, SOCK_URGINLINE, valbool);
605 break;
606
607 case SO_NO_CHECK:
608 sk->sk_no_check = valbool;
609 break;
610
611 case SO_PRIORITY:
612 if ((val >= 0 && val <= 6) || capable(CAP_NET_ADMIN))
613 sk->sk_priority = val;
614 else
615 ret = -EPERM;
616 break;
617
618 case SO_LINGER:
619 if (optlen < sizeof(ling)) {
620 ret = -EINVAL; /* 1003.1g */
1da177e4 621 break;
e71a4783 622 }
2a91525c 623 if (copy_from_user(&ling, optval, sizeof(ling))) {
e71a4783 624 ret = -EFAULT;
1da177e4 625 break;
e71a4783
SH
626 }
627 if (!ling.l_onoff)
628 sock_reset_flag(sk, SOCK_LINGER);
629 else {
1da177e4 630#if (BITS_PER_LONG == 32)
e71a4783
SH
631 if ((unsigned int)ling.l_linger >= MAX_SCHEDULE_TIMEOUT/HZ)
632 sk->sk_lingertime = MAX_SCHEDULE_TIMEOUT;
1da177e4 633 else
e71a4783
SH
634#endif
635 sk->sk_lingertime = (unsigned int)ling.l_linger * HZ;
636 sock_set_flag(sk, SOCK_LINGER);
637 }
638 break;
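	/* Illustrative user-space sketch (hypothetical fd): with this set,
	 * close() blocks for up to 10 seconds while unsent data drains:
	 *
	 *	struct linger ling = { .l_onoff = 1, .l_linger = 10 };
	 *	setsockopt(fd, SOL_SOCKET, SO_LINGER, &ling, sizeof(ling));
	 */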
639
640 case SO_BSDCOMPAT:
641 sock_warn_obsolete_bsdism("setsockopt");
642 break;
643
644 case SO_PASSCRED:
645 if (valbool)
646 set_bit(SOCK_PASSCRED, &sock->flags);
647 else
648 clear_bit(SOCK_PASSCRED, &sock->flags);
649 break;
650
651 case SO_TIMESTAMP:
92f37fd2 652 case SO_TIMESTAMPNS:
e71a4783 653 if (valbool) {
92f37fd2
ED
654 if (optname == SO_TIMESTAMP)
655 sock_reset_flag(sk, SOCK_RCVTSTAMPNS);
656 else
657 sock_set_flag(sk, SOCK_RCVTSTAMPNS);
e71a4783 658 sock_set_flag(sk, SOCK_RCVTSTAMP);
20d49473 659 sock_enable_timestamp(sk, SOCK_TIMESTAMP);
92f37fd2 660 } else {
e71a4783 661 sock_reset_flag(sk, SOCK_RCVTSTAMP);
92f37fd2
ED
662 sock_reset_flag(sk, SOCK_RCVTSTAMPNS);
663 }
e71a4783
SH
664 break;
665
20d49473
PO
666 case SO_TIMESTAMPING:
667 if (val & ~SOF_TIMESTAMPING_MASK) {
f249fb78 668 ret = -EINVAL;
20d49473
PO
669 break;
670 }
671 sock_valbool_flag(sk, SOCK_TIMESTAMPING_TX_HARDWARE,
672 val & SOF_TIMESTAMPING_TX_HARDWARE);
673 sock_valbool_flag(sk, SOCK_TIMESTAMPING_TX_SOFTWARE,
674 val & SOF_TIMESTAMPING_TX_SOFTWARE);
675 sock_valbool_flag(sk, SOCK_TIMESTAMPING_RX_HARDWARE,
676 val & SOF_TIMESTAMPING_RX_HARDWARE);
677 if (val & SOF_TIMESTAMPING_RX_SOFTWARE)
678 sock_enable_timestamp(sk,
679 SOCK_TIMESTAMPING_RX_SOFTWARE);
680 else
681 sock_disable_timestamp(sk,
682 SOCK_TIMESTAMPING_RX_SOFTWARE);
683 sock_valbool_flag(sk, SOCK_TIMESTAMPING_SOFTWARE,
684 val & SOF_TIMESTAMPING_SOFTWARE);
685 sock_valbool_flag(sk, SOCK_TIMESTAMPING_SYS_HARDWARE,
686 val & SOF_TIMESTAMPING_SYS_HARDWARE);
687 sock_valbool_flag(sk, SOCK_TIMESTAMPING_RAW_HARDWARE,
688 val & SOF_TIMESTAMPING_RAW_HARDWARE);
689 break;
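	/* Illustrative user-space sketch (hypothetical fd): request software
	 * receive timestamps plus the flag that reports them:
	 *
	 *	int flags = SOF_TIMESTAMPING_RX_SOFTWARE |
	 *		    SOF_TIMESTAMPING_SOFTWARE;
	 *	setsockopt(fd, SOL_SOCKET, SO_TIMESTAMPING,
	 *		   &flags, sizeof(flags));
	 */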
690
e71a4783
SH
691 case SO_RCVLOWAT:
692 if (val < 0)
693 val = INT_MAX;
694 sk->sk_rcvlowat = val ? : 1;
695 break;
696
697 case SO_RCVTIMEO:
698 ret = sock_set_timeout(&sk->sk_rcvtimeo, optval, optlen);
699 break;
700
701 case SO_SNDTIMEO:
702 ret = sock_set_timeout(&sk->sk_sndtimeo, optval, optlen);
703 break;
1da177e4 704
e71a4783
SH
705 case SO_ATTACH_FILTER:
706 ret = -EINVAL;
707 if (optlen == sizeof(struct sock_fprog)) {
708 struct sock_fprog fprog;
1da177e4 709
e71a4783
SH
710 ret = -EFAULT;
711 if (copy_from_user(&fprog, optval, sizeof(fprog)))
1da177e4 712 break;
e71a4783
SH
713
714 ret = sk_attach_filter(&fprog, sk);
715 }
716 break;
717
718 case SO_DETACH_FILTER:
55b33325 719 ret = sk_detach_filter(sk);
e71a4783 720 break;
1da177e4 721
e71a4783
SH
722 case SO_PASSSEC:
723 if (valbool)
724 set_bit(SOCK_PASSSEC, &sock->flags);
725 else
726 clear_bit(SOCK_PASSSEC, &sock->flags);
727 break;
4a19ec58
LAT
728 case SO_MARK:
729 if (!capable(CAP_NET_ADMIN))
730 ret = -EPERM;
2a91525c 731 else
4a19ec58 732 sk->sk_mark = val;
4a19ec58 733 break;
877ce7c1 734
1da177e4
LT
735 /* We implement the SO_SNDLOWAT etc to
736 not be settable (1003.1g 5.3) */
3b885787
NH
737 case SO_RXQ_OVFL:
738 if (valbool)
739 sock_set_flag(sk, SOCK_RXQ_OVFL);
740 else
741 sock_reset_flag(sk, SOCK_RXQ_OVFL);
742 break;
e71a4783
SH
743 default:
744 ret = -ENOPROTOOPT;
745 break;
4ec93edb 746 }
1da177e4
LT
747 release_sock(sk);
748 return ret;
749}
2a91525c 750EXPORT_SYMBOL(sock_setsockopt);
1da177e4
LT
751
752
3f551f94
EB
753void cred_to_ucred(struct pid *pid, const struct cred *cred,
754 struct ucred *ucred)
755{
756 ucred->pid = pid_vnr(pid);
757 ucred->uid = ucred->gid = -1;
758 if (cred) {
759 struct user_namespace *current_ns = current_user_ns();
760
761 ucred->uid = user_ns_map_uid(current_ns, cred, cred->euid);
762 ucred->gid = user_ns_map_gid(current_ns, cred, cred->egid);
763 }
764}
3924773a 765EXPORT_SYMBOL_GPL(cred_to_ucred);
3f551f94 766
1da177e4
LT
767int sock_getsockopt(struct socket *sock, int level, int optname,
768 char __user *optval, int __user *optlen)
769{
770 struct sock *sk = sock->sk;
4ec93edb 771
e71a4783 772 union {
4ec93edb
YH
773 int val;
774 struct linger ling;
1da177e4
LT
775 struct timeval tm;
776 } v;
4ec93edb 777
4d0392be 778 int lv = sizeof(int);
1da177e4 779 int len;
4ec93edb 780
e71a4783 781 if (get_user(len, optlen))
4ec93edb 782 return -EFAULT;
e71a4783 783 if (len < 0)
1da177e4 784 return -EINVAL;
4ec93edb 785
50fee1de 786 memset(&v, 0, sizeof(v));
df0bca04 787
2a91525c 788 switch (optname) {
e71a4783
SH
789 case SO_DEBUG:
790 v.val = sock_flag(sk, SOCK_DBG);
791 break;
792
793 case SO_DONTROUTE:
794 v.val = sock_flag(sk, SOCK_LOCALROUTE);
795 break;
796
797 case SO_BROADCAST:
798 v.val = !!sock_flag(sk, SOCK_BROADCAST);
799 break;
800
801 case SO_SNDBUF:
802 v.val = sk->sk_sndbuf;
803 break;
804
805 case SO_RCVBUF:
806 v.val = sk->sk_rcvbuf;
807 break;
808
809 case SO_REUSEADDR:
810 v.val = sk->sk_reuse;
811 break;
812
813 case SO_KEEPALIVE:
814 v.val = !!sock_flag(sk, SOCK_KEEPOPEN);
815 break;
816
817 case SO_TYPE:
818 v.val = sk->sk_type;
819 break;
820
49c794e9
JE
821 case SO_PROTOCOL:
822 v.val = sk->sk_protocol;
823 break;
824
0d6038ee
JE
825 case SO_DOMAIN:
826 v.val = sk->sk_family;
827 break;
828
e71a4783
SH
829 case SO_ERROR:
830 v.val = -sock_error(sk);
2a91525c 831 if (v.val == 0)
e71a4783
SH
832 v.val = xchg(&sk->sk_err_soft, 0);
833 break;
834
835 case SO_OOBINLINE:
836 v.val = !!sock_flag(sk, SOCK_URGINLINE);
837 break;
838
839 case SO_NO_CHECK:
840 v.val = sk->sk_no_check;
841 break;
842
843 case SO_PRIORITY:
844 v.val = sk->sk_priority;
845 break;
846
847 case SO_LINGER:
848 lv = sizeof(v.ling);
849 v.ling.l_onoff = !!sock_flag(sk, SOCK_LINGER);
850 v.ling.l_linger = sk->sk_lingertime / HZ;
851 break;
852
853 case SO_BSDCOMPAT:
854 sock_warn_obsolete_bsdism("getsockopt");
855 break;
856
857 case SO_TIMESTAMP:
92f37fd2
ED
858 v.val = sock_flag(sk, SOCK_RCVTSTAMP) &&
859 !sock_flag(sk, SOCK_RCVTSTAMPNS);
860 break;
861
862 case SO_TIMESTAMPNS:
863 v.val = sock_flag(sk, SOCK_RCVTSTAMPNS);
e71a4783
SH
864 break;
865
20d49473
PO
866 case SO_TIMESTAMPING:
867 v.val = 0;
868 if (sock_flag(sk, SOCK_TIMESTAMPING_TX_HARDWARE))
869 v.val |= SOF_TIMESTAMPING_TX_HARDWARE;
870 if (sock_flag(sk, SOCK_TIMESTAMPING_TX_SOFTWARE))
871 v.val |= SOF_TIMESTAMPING_TX_SOFTWARE;
872 if (sock_flag(sk, SOCK_TIMESTAMPING_RX_HARDWARE))
873 v.val |= SOF_TIMESTAMPING_RX_HARDWARE;
874 if (sock_flag(sk, SOCK_TIMESTAMPING_RX_SOFTWARE))
875 v.val |= SOF_TIMESTAMPING_RX_SOFTWARE;
876 if (sock_flag(sk, SOCK_TIMESTAMPING_SOFTWARE))
877 v.val |= SOF_TIMESTAMPING_SOFTWARE;
878 if (sock_flag(sk, SOCK_TIMESTAMPING_SYS_HARDWARE))
879 v.val |= SOF_TIMESTAMPING_SYS_HARDWARE;
880 if (sock_flag(sk, SOCK_TIMESTAMPING_RAW_HARDWARE))
881 v.val |= SOF_TIMESTAMPING_RAW_HARDWARE;
882 break;
883
e71a4783 884 case SO_RCVTIMEO:
2a91525c 885 lv = sizeof(struct timeval);
e71a4783
SH
886 if (sk->sk_rcvtimeo == MAX_SCHEDULE_TIMEOUT) {
887 v.tm.tv_sec = 0;
888 v.tm.tv_usec = 0;
889 } else {
890 v.tm.tv_sec = sk->sk_rcvtimeo / HZ;
891 v.tm.tv_usec = ((sk->sk_rcvtimeo % HZ) * 1000000) / HZ;
892 }
893 break;
894
895 case SO_SNDTIMEO:
2a91525c 896 lv = sizeof(struct timeval);
e71a4783
SH
897 if (sk->sk_sndtimeo == MAX_SCHEDULE_TIMEOUT) {
898 v.tm.tv_sec = 0;
899 v.tm.tv_usec = 0;
900 } else {
901 v.tm.tv_sec = sk->sk_sndtimeo / HZ;
902 v.tm.tv_usec = ((sk->sk_sndtimeo % HZ) * 1000000) / HZ;
903 }
904 break;
1da177e4 905
e71a4783
SH
906 case SO_RCVLOWAT:
907 v.val = sk->sk_rcvlowat;
908 break;
1da177e4 909
e71a4783 910 case SO_SNDLOWAT:
2a91525c 911 v.val = 1;
e71a4783 912 break;
1da177e4 913
e71a4783
SH
914 case SO_PASSCRED:
915 v.val = test_bit(SOCK_PASSCRED, &sock->flags) ? 1 : 0;
916 break;
1da177e4 917
e71a4783 918 case SO_PEERCRED:
109f6e39
EB
919 {
920 struct ucred peercred;
921 if (len > sizeof(peercred))
922 len = sizeof(peercred);
923 cred_to_ucred(sk->sk_peer_pid, sk->sk_peer_cred, &peercred);
924 if (copy_to_user(optval, &peercred, len))
e71a4783
SH
925 return -EFAULT;
926 goto lenout;
109f6e39 927 }
1da177e4 928
e71a4783
SH
929 case SO_PEERNAME:
930 {
931 char address[128];
932
933 if (sock->ops->getname(sock, (struct sockaddr *)address, &lv, 2))
934 return -ENOTCONN;
935 if (lv < len)
936 return -EINVAL;
937 if (copy_to_user(optval, address, len))
938 return -EFAULT;
939 goto lenout;
940 }
1da177e4 941
e71a4783
SH
942 /* Dubious BSD thing... Probably nobody even uses it, but
943 * the UNIX standard wants it for whatever reason... -DaveM
944 */
945 case SO_ACCEPTCONN:
946 v.val = sk->sk_state == TCP_LISTEN;
947 break;
1da177e4 948
e71a4783
SH
949 case SO_PASSSEC:
950 v.val = test_bit(SOCK_PASSSEC, &sock->flags) ? 1 : 0;
951 break;
877ce7c1 952
e71a4783
SH
953 case SO_PEERSEC:
954 return security_socket_getpeersec_stream(sock, optval, optlen, len);
1da177e4 955
4a19ec58
LAT
956 case SO_MARK:
957 v.val = sk->sk_mark;
958 break;
959
3b885787
NH
960 case SO_RXQ_OVFL:
961 v.val = !!sock_flag(sk, SOCK_RXQ_OVFL);
962 break;
963
e71a4783
SH
964 default:
965 return -ENOPROTOOPT;
1da177e4 966 }
e71a4783 967
1da177e4
LT
968 if (len > lv)
969 len = lv;
970 if (copy_to_user(optval, &v, len))
971 return -EFAULT;
972lenout:
4ec93edb
YH
973 if (put_user(len, optlen))
974 return -EFAULT;
975 return 0;
1da177e4
LT
976}
977
a5b5bb9a
IM
978/*
979 * Initialize an sk_lock.
980 *
981 * (We also register the sk_lock with the lock validator.)
982 */
b6f99a21 983static inline void sock_lock_init(struct sock *sk)
a5b5bb9a 984{
ed07536e
PZ
985 sock_lock_init_class_and_name(sk,
986 af_family_slock_key_strings[sk->sk_family],
987 af_family_slock_keys + sk->sk_family,
988 af_family_key_strings[sk->sk_family],
989 af_family_keys + sk->sk_family);
a5b5bb9a
IM
990}
991
4dc6dc71
ED
992/*
993 * Copy all fields from osk to nsk, but nsk->sk_refcnt must not change yet,
994 * even temporarily, because of RCU lookups. sk_node should also be left as is.
995 */
f1a6c4da
PE
996static void sock_copy(struct sock *nsk, const struct sock *osk)
997{
998#ifdef CONFIG_SECURITY_NETWORK
999 void *sptr = nsk->sk_security;
1000#endif
4dc6dc71 1001 BUILD_BUG_ON(offsetof(struct sock, sk_copy_start) !=
e022f0b4
KK
1002 sizeof(osk->sk_node) + sizeof(osk->sk_refcnt) +
1003 sizeof(osk->sk_tx_queue_mapping));
4dc6dc71
ED
1004 memcpy(&nsk->sk_copy_start, &osk->sk_copy_start,
1005 osk->sk_prot->obj_size - offsetof(struct sock, sk_copy_start));
f1a6c4da
PE
1006#ifdef CONFIG_SECURITY_NETWORK
1007 nsk->sk_security = sptr;
1008 security_sk_clone(osk, nsk);
1009#endif
1010}
1011
2e4afe7b
PE
1012static struct sock *sk_prot_alloc(struct proto *prot, gfp_t priority,
1013 int family)
c308c1b2
PE
1014{
1015 struct sock *sk;
1016 struct kmem_cache *slab;
1017
1018 slab = prot->slab;
e912b114
ED
1019 if (slab != NULL) {
1020 sk = kmem_cache_alloc(slab, priority & ~__GFP_ZERO);
1021 if (!sk)
1022 return sk;
1023 if (priority & __GFP_ZERO) {
1024 /*
1025 * caches using SLAB_DESTROY_BY_RCU should let
1026 * sk_node.next un-modified. Special care is taken
1027 * when initializing object to zero.
1028 */
1029 if (offsetof(struct sock, sk_node.next) != 0)
1030 memset(sk, 0, offsetof(struct sock, sk_node.next));
1031 memset(&sk->sk_node.pprev, 0,
1032 prot->obj_size - offsetof(struct sock,
1033 sk_node.pprev));
1034 }
1035 }
c308c1b2
PE
1036 else
1037 sk = kmalloc(prot->obj_size, priority);
1038
2e4afe7b 1039 if (sk != NULL) {
a98b65a3
VN
1040 kmemcheck_annotate_bitfield(sk, flags);
1041
2e4afe7b
PE
1042 if (security_sk_alloc(sk, family, priority))
1043 goto out_free;
1044
1045 if (!try_module_get(prot->owner))
1046 goto out_free_sec;
e022f0b4 1047 sk_tx_queue_clear(sk);
2e4afe7b
PE
1048 }
1049
c308c1b2 1050 return sk;
2e4afe7b
PE
1051
1052out_free_sec:
1053 security_sk_free(sk);
1054out_free:
1055 if (slab != NULL)
1056 kmem_cache_free(slab, sk);
1057 else
1058 kfree(sk);
1059 return NULL;
c308c1b2
PE
1060}
1061
1062static void sk_prot_free(struct proto *prot, struct sock *sk)
1063{
1064 struct kmem_cache *slab;
2e4afe7b 1065 struct module *owner;
c308c1b2 1066
2e4afe7b 1067 owner = prot->owner;
c308c1b2 1068 slab = prot->slab;
2e4afe7b
PE
1069
1070 security_sk_free(sk);
c308c1b2
PE
1071 if (slab != NULL)
1072 kmem_cache_free(slab, sk);
1073 else
1074 kfree(sk);
2e4afe7b 1075 module_put(owner);
c308c1b2
PE
1076}
1077
f8451725
HX
1078#ifdef CONFIG_CGROUPS
1079void sock_update_classid(struct sock *sk)
1080{
1081 u32 classid = task_cls_classid(current);
1082
1083 if (classid && classid != sk->sk_classid)
1084 sk->sk_classid = classid;
1085}
82862742 1086EXPORT_SYMBOL(sock_update_classid);
f8451725
HX
1087#endif
1088
1da177e4
LT
1089/**
1090 * sk_alloc - All socket objects are allocated here
c4ea43c5 1091 * @net: the applicable net namespace
4dc3b16b
PP
1092 * @family: protocol family
1093 * @priority: for allocation (%GFP_KERNEL, %GFP_ATOMIC, etc)
1094 * @prot: struct proto associated with this new sock instance
1da177e4 1095 */
1b8d7ae4 1096struct sock *sk_alloc(struct net *net, int family, gfp_t priority,
6257ff21 1097 struct proto *prot)
1da177e4 1098{
c308c1b2 1099 struct sock *sk;
1da177e4 1100
154adbc8 1101 sk = sk_prot_alloc(prot, priority | __GFP_ZERO, family);
1da177e4 1102 if (sk) {
154adbc8
PE
1103 sk->sk_family = family;
1104 /*
1105 * See comment in struct sock definition to understand
1106 * why we need sk_prot_creator -acme
1107 */
1108 sk->sk_prot = sk->sk_prot_creator = prot;
1109 sock_lock_init(sk);
3b1e0a65 1110 sock_net_set(sk, get_net(net));
d66ee058 1111 atomic_set(&sk->sk_wmem_alloc, 1);
f8451725
HX
1112
1113 sock_update_classid(sk);
1da177e4 1114 }
a79af59e 1115
2e4afe7b 1116 return sk;
1da177e4 1117}
2a91525c 1118EXPORT_SYMBOL(sk_alloc);
1da177e4 1119
2b85a34e 1120static void __sk_free(struct sock *sk)
1da177e4
LT
1121{
1122 struct sk_filter *filter;
1da177e4
LT
1123
1124 if (sk->sk_destruct)
1125 sk->sk_destruct(sk);
1126
a898def2
PM
1127 filter = rcu_dereference_check(sk->sk_filter,
1128 atomic_read(&sk->sk_wmem_alloc) == 0);
1da177e4 1129 if (filter) {
309dd5fc 1130 sk_filter_uncharge(sk, filter);
fda9ef5d 1131 rcu_assign_pointer(sk->sk_filter, NULL);
1da177e4
LT
1132 }
1133
20d49473
PO
1134 sock_disable_timestamp(sk, SOCK_TIMESTAMP);
1135 sock_disable_timestamp(sk, SOCK_TIMESTAMPING_RX_SOFTWARE);
1da177e4
LT
1136
1137 if (atomic_read(&sk->sk_omem_alloc))
1138 printk(KERN_DEBUG "%s: optmem leakage (%d bytes) detected.\n",
0dc47877 1139 __func__, atomic_read(&sk->sk_omem_alloc));
1da177e4 1140
109f6e39
EB
1141 if (sk->sk_peer_cred)
1142 put_cred(sk->sk_peer_cred);
1143 put_pid(sk->sk_peer_pid);
3b1e0a65 1144 put_net(sock_net(sk));
c308c1b2 1145 sk_prot_free(sk->sk_prot_creator, sk);
1da177e4 1146}
2b85a34e
ED
1147
1148void sk_free(struct sock *sk)
1149{
1150 /*
1151 * We subtract one from sk_wmem_alloc and can know if
1152 * some packets are still in some tx queue.
1153 * If not zero, sock_wfree() will call __sk_free(sk) later
1154 */
1155 if (atomic_dec_and_test(&sk->sk_wmem_alloc))
1156 __sk_free(sk);
1157}
2a91525c 1158EXPORT_SYMBOL(sk_free);
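/*
 * Illustrative sequence (a sketch of the accounting above):
 *
 *	sk_alloc()             sk_wmem_alloc = 1
 *	skb_set_owner_w(skb)   sk_wmem_alloc += skb->truesize
 *	sk_free(sk)            sk_wmem_alloc -= 1  (packet still queued in a
 *	                                            qdisc, so no free yet)
 *	kfree_skb(skb)         sock_wfree() drops the remainder and, on
 *	                       reaching zero, calls __sk_free(sk)
 */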
1da177e4 1159
edf02087
DL
1160/*
1161 * The last sock_put should drop the reference to sk->sk_net. It has already
1162 * been dropped in sk_change_net. Taking a reference to the stopping namespace
1163 * is not an option.
1164 * Take a reference to the socket to remove it from the hash _alive_ and after
1165 * that destroy it in the context of init_net.
1165 * destroy it in the context of init_net.
1166 */
1167void sk_release_kernel(struct sock *sk)
1168{
1169 if (sk == NULL || sk->sk_socket == NULL)
1170 return;
1171
1172 sock_hold(sk);
1173 sock_release(sk->sk_socket);
65a18ec5 1174 release_net(sock_net(sk));
3b1e0a65 1175 sock_net_set(sk, get_net(&init_net));
edf02087
DL
1176 sock_put(sk);
1177}
45af1754 1178EXPORT_SYMBOL(sk_release_kernel);
edf02087 1179
dd0fc66f 1180struct sock *sk_clone(const struct sock *sk, const gfp_t priority)
87d11ceb 1181{
8fd1d178 1182 struct sock *newsk;
87d11ceb 1183
8fd1d178 1184 newsk = sk_prot_alloc(sk->sk_prot, priority, sk->sk_family);
87d11ceb
ACM
1185 if (newsk != NULL) {
1186 struct sk_filter *filter;
1187
892c141e 1188 sock_copy(newsk, sk);
87d11ceb
ACM
1189
1190 /* SANITY */
3b1e0a65 1191 get_net(sock_net(newsk));
87d11ceb
ACM
1192 sk_node_init(&newsk->sk_node);
1193 sock_lock_init(newsk);
1194 bh_lock_sock(newsk);
fa438ccf 1195 newsk->sk_backlog.head = newsk->sk_backlog.tail = NULL;
8eae939f 1196 newsk->sk_backlog.len = 0;
87d11ceb
ACM
1197
1198 atomic_set(&newsk->sk_rmem_alloc, 0);
2b85a34e
ED
1199 /*
1200 * sk_wmem_alloc set to one (see sk_free() and sock_wfree())
1201 */
1202 atomic_set(&newsk->sk_wmem_alloc, 1);
87d11ceb
ACM
1203 atomic_set(&newsk->sk_omem_alloc, 0);
1204 skb_queue_head_init(&newsk->sk_receive_queue);
1205 skb_queue_head_init(&newsk->sk_write_queue);
97fc2f08
CL
1206#ifdef CONFIG_NET_DMA
1207 skb_queue_head_init(&newsk->sk_async_wait_queue);
1208#endif
87d11ceb 1209
b6c6712a 1210 spin_lock_init(&newsk->sk_dst_lock);
87d11ceb 1211 rwlock_init(&newsk->sk_callback_lock);
443aef0e
PZ
1212 lockdep_set_class_and_name(&newsk->sk_callback_lock,
1213 af_callback_keys + newsk->sk_family,
1214 af_family_clock_key_strings[newsk->sk_family]);
87d11ceb
ACM
1215
1216 newsk->sk_dst_cache = NULL;
1217 newsk->sk_wmem_queued = 0;
1218 newsk->sk_forward_alloc = 0;
1219 newsk->sk_send_head = NULL;
87d11ceb
ACM
1220 newsk->sk_userlocks = sk->sk_userlocks & ~SOCK_BINDPORT_LOCK;
1221
1222 sock_reset_flag(newsk, SOCK_DONE);
1223 skb_queue_head_init(&newsk->sk_error_queue);
1224
1225 filter = newsk->sk_filter;
1226 if (filter != NULL)
1227 sk_filter_charge(newsk, filter);
1228
1229 if (unlikely(xfrm_sk_clone_policy(newsk))) {
1230 /* It is still raw copy of parent, so invalidate
1231 * destructor and make plain sk_free() */
1232 newsk->sk_destruct = NULL;
1233 sk_free(newsk);
1234 newsk = NULL;
1235 goto out;
1236 }
1237
1238 newsk->sk_err = 0;
1239 newsk->sk_priority = 0;
4dc6dc71
ED
1240 /*
1241 * Before updating sk_refcnt, we must commit prior changes to memory
1242 * (Documentation/RCU/rculist_nulls.txt for details)
1243 */
1244 smp_wmb();
87d11ceb
ACM
1245 atomic_set(&newsk->sk_refcnt, 2);
1246
1247 /*
1248 * Increment the counter in the same struct proto as the master
1249 * sock (sk_refcnt_debug_inc uses newsk->sk_prot->socks, that
1250 * is the same as sk->sk_prot->socks, as this field was copied
1251 * with memcpy).
1252 *
1253 * This _changes_ the previous behaviour, where
1254 * tcp_create_openreq_child always was incrementing the
1255 * equivalent to tcp_prot->socks (inet_sock_nr), so this have
1256 * to be taken into account in all callers. -acme
1257 */
1258 sk_refcnt_debug_inc(newsk);
972692e0 1259 sk_set_socket(newsk, NULL);
43815482 1260 newsk->sk_wq = NULL;
87d11ceb
ACM
1261
1262 if (newsk->sk_prot->sockets_allocated)
1748376b 1263 percpu_counter_inc(newsk->sk_prot->sockets_allocated);
704da560
OP
1264
1265 if (sock_flag(newsk, SOCK_TIMESTAMP) ||
1266 sock_flag(newsk, SOCK_TIMESTAMPING_RX_SOFTWARE))
1267 net_enable_timestamp();
87d11ceb
ACM
1268 }
1269out:
1270 return newsk;
1271}
87d11ceb
ACM
1272EXPORT_SYMBOL_GPL(sk_clone);
1273
9958089a
AK
1274void sk_setup_caps(struct sock *sk, struct dst_entry *dst)
1275{
1276 __sk_dst_set(sk, dst);
1277 sk->sk_route_caps = dst->dev->features;
1278 if (sk->sk_route_caps & NETIF_F_GSO)
4fcd6b99 1279 sk->sk_route_caps |= NETIF_F_GSO_SOFTWARE;
a465419b 1280 sk->sk_route_caps &= ~sk->sk_route_nocaps;
9958089a 1281 if (sk_can_gso(sk)) {
82cc1a7a 1282 if (dst->header_len) {
9958089a 1283 sk->sk_route_caps &= ~NETIF_F_GSO_MASK;
82cc1a7a 1284 } else {
9958089a 1285 sk->sk_route_caps |= NETIF_F_SG | NETIF_F_HW_CSUM;
82cc1a7a
PWJ
1286 sk->sk_gso_max_size = dst->dev->gso_max_size;
1287 }
9958089a
AK
1288 }
1289}
1290EXPORT_SYMBOL_GPL(sk_setup_caps);
1291
1da177e4
LT
1292void __init sk_init(void)
1293{
4481374c 1294 if (totalram_pages <= 4096) {
1da177e4
LT
1295 sysctl_wmem_max = 32767;
1296 sysctl_rmem_max = 32767;
1297 sysctl_wmem_default = 32767;
1298 sysctl_rmem_default = 32767;
4481374c 1299 } else if (totalram_pages >= 131072) {
1da177e4
LT
1300 sysctl_wmem_max = 131071;
1301 sysctl_rmem_max = 131071;
1302 }
1303}
1304
1305/*
1306 * Simple resource managers for sockets.
1307 */
1308
1309
4ec93edb
YH
1310/*
1311 * Write buffer destructor automatically called from kfree_skb.
1da177e4
LT
1312 */
1313void sock_wfree(struct sk_buff *skb)
1314{
1315 struct sock *sk = skb->sk;
d99927f4 1316 unsigned int len = skb->truesize;
1da177e4 1317
d99927f4
ED
1318 if (!sock_flag(sk, SOCK_USE_WRITE_QUEUE)) {
1319 /*
1320 * Keep a reference on sk_wmem_alloc, this will be released
1321 * after sk_write_space() call
1322 */
1323 atomic_sub(len - 1, &sk->sk_wmem_alloc);
1da177e4 1324 sk->sk_write_space(sk);
d99927f4
ED
1325 len = 1;
1326 }
2b85a34e 1327 /*
d99927f4
ED
1328 * if sk_wmem_alloc reaches 0, we must finish what sk_free()
1329 * could not do because of in-flight packets
2b85a34e 1330 */
d99927f4 1331 if (atomic_sub_and_test(len, &sk->sk_wmem_alloc))
2b85a34e 1332 __sk_free(sk);
1da177e4 1333}
2a91525c 1334EXPORT_SYMBOL(sock_wfree);
1da177e4 1335
4ec93edb
YH
1336/*
1337 * Read buffer destructor automatically called from kfree_skb.
1da177e4
LT
1338 */
1339void sock_rfree(struct sk_buff *skb)
1340{
1341 struct sock *sk = skb->sk;
d361fd59 1342 unsigned int len = skb->truesize;
1da177e4 1343
d361fd59
ED
1344 atomic_sub(len, &sk->sk_rmem_alloc);
1345 sk_mem_uncharge(sk, len);
1da177e4 1346}
2a91525c 1347EXPORT_SYMBOL(sock_rfree);
1da177e4
LT
1348
1349
1350int sock_i_uid(struct sock *sk)
1351{
1352 int uid;
1353
1354 read_lock(&sk->sk_callback_lock);
1355 uid = sk->sk_socket ? SOCK_INODE(sk->sk_socket)->i_uid : 0;
1356 read_unlock(&sk->sk_callback_lock);
1357 return uid;
1358}
2a91525c 1359EXPORT_SYMBOL(sock_i_uid);
1da177e4
LT
1360
1361unsigned long sock_i_ino(struct sock *sk)
1362{
1363 unsigned long ino;
1364
1365 read_lock(&sk->sk_callback_lock);
1366 ino = sk->sk_socket ? SOCK_INODE(sk->sk_socket)->i_ino : 0;
1367 read_unlock(&sk->sk_callback_lock);
1368 return ino;
1369}
2a91525c 1370EXPORT_SYMBOL(sock_i_ino);
1da177e4
LT
1371
1372/*
1373 * Allocate a skb from the socket's send buffer.
1374 */
86a76caf 1375struct sk_buff *sock_wmalloc(struct sock *sk, unsigned long size, int force,
dd0fc66f 1376 gfp_t priority)
1da177e4
LT
1377{
1378 if (force || atomic_read(&sk->sk_wmem_alloc) < sk->sk_sndbuf) {
2a91525c 1379 struct sk_buff *skb = alloc_skb(size, priority);
1da177e4
LT
1380 if (skb) {
1381 skb_set_owner_w(skb, sk);
1382 return skb;
1383 }
1384 }
1385 return NULL;
1386}
2a91525c 1387EXPORT_SYMBOL(sock_wmalloc);
1da177e4
LT
1388
1389/*
1390 * Allocate a skb from the socket's receive buffer.
4ec93edb 1391 */
86a76caf 1392struct sk_buff *sock_rmalloc(struct sock *sk, unsigned long size, int force,
dd0fc66f 1393 gfp_t priority)
1da177e4
LT
1394{
1395 if (force || atomic_read(&sk->sk_rmem_alloc) < sk->sk_rcvbuf) {
1396 struct sk_buff *skb = alloc_skb(size, priority);
1397 if (skb) {
1398 skb_set_owner_r(skb, sk);
1399 return skb;
1400 }
1401 }
1402 return NULL;
1403}
1404
4ec93edb 1405/*
1da177e4 1406 * Allocate a memory block from the socket's option memory buffer.
4ec93edb 1407 */
dd0fc66f 1408void *sock_kmalloc(struct sock *sk, int size, gfp_t priority)
1da177e4
LT
1409{
1410 if ((unsigned)size <= sysctl_optmem_max &&
1411 atomic_read(&sk->sk_omem_alloc) + size < sysctl_optmem_max) {
1412 void *mem;
1413 /* First do the add, to avoid the race if kmalloc
4ec93edb 1414 * might sleep.
1da177e4
LT
1415 */
1416 atomic_add(size, &sk->sk_omem_alloc);
1417 mem = kmalloc(size, priority);
1418 if (mem)
1419 return mem;
1420 atomic_sub(size, &sk->sk_omem_alloc);
1421 }
1422 return NULL;
1423}
2a91525c 1424EXPORT_SYMBOL(sock_kmalloc);
1da177e4
LT
1425
1426/*
1427 * Free an option memory block.
1428 */
1429void sock_kfree_s(struct sock *sk, void *mem, int size)
1430{
1431 kfree(mem);
1432 atomic_sub(size, &sk->sk_omem_alloc);
1433}
2a91525c 1434EXPORT_SYMBOL(sock_kfree_s);
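/*
 * Illustrative caller sketch (hypothetical protocol code): memory taken
 * with sock_kmalloc() must be returned through sock_kfree_s() with the
 * same size, so that sk_omem_alloc stays balanced.
 *
 *	struct my_opt *opt = sock_kmalloc(sk, sizeof(*opt), GFP_KERNEL);
 *
 *	if (!opt)
 *		return -ENOBUFS;
 *	...
 *	sock_kfree_s(sk, opt, sizeof(*opt));
 */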
1da177e4
LT
1435
1436/* It is almost wait_for_tcp_memory minus release_sock/lock_sock.
1437 I think these locks should be removed for datagram sockets.
1438 */
2a91525c 1439static long sock_wait_for_wmem(struct sock *sk, long timeo)
1da177e4
LT
1440{
1441 DEFINE_WAIT(wait);
1442
1443 clear_bit(SOCK_ASYNC_NOSPACE, &sk->sk_socket->flags);
1444 for (;;) {
1445 if (!timeo)
1446 break;
1447 if (signal_pending(current))
1448 break;
1449 set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
aa395145 1450 prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
1da177e4
LT
1451 if (atomic_read(&sk->sk_wmem_alloc) < sk->sk_sndbuf)
1452 break;
1453 if (sk->sk_shutdown & SEND_SHUTDOWN)
1454 break;
1455 if (sk->sk_err)
1456 break;
1457 timeo = schedule_timeout(timeo);
1458 }
aa395145 1459 finish_wait(sk_sleep(sk), &wait);
1da177e4
LT
1460 return timeo;
1461}
1462
1463
1464/*
1465 * Generic send/receive buffer handlers
1466 */
1467
4cc7f68d
HX
1468struct sk_buff *sock_alloc_send_pskb(struct sock *sk, unsigned long header_len,
1469 unsigned long data_len, int noblock,
1470 int *errcode)
1da177e4
LT
1471{
1472 struct sk_buff *skb;
7d877f3b 1473 gfp_t gfp_mask;
1da177e4
LT
1474 long timeo;
1475 int err;
1476
1477 gfp_mask = sk->sk_allocation;
1478 if (gfp_mask & __GFP_WAIT)
1479 gfp_mask |= __GFP_REPEAT;
1480
1481 timeo = sock_sndtimeo(sk, noblock);
1482 while (1) {
1483 err = sock_error(sk);
1484 if (err != 0)
1485 goto failure;
1486
1487 err = -EPIPE;
1488 if (sk->sk_shutdown & SEND_SHUTDOWN)
1489 goto failure;
1490
1491 if (atomic_read(&sk->sk_wmem_alloc) < sk->sk_sndbuf) {
db38c179 1492 skb = alloc_skb(header_len, gfp_mask);
1da177e4
LT
1493 if (skb) {
1494 int npages;
1495 int i;
1496
1497 /* No pages, we're done... */
1498 if (!data_len)
1499 break;
1500
1501 npages = (data_len + (PAGE_SIZE - 1)) >> PAGE_SHIFT;
1502 skb->truesize += data_len;
1503 skb_shinfo(skb)->nr_frags = npages;
1504 for (i = 0; i < npages; i++) {
1505 struct page *page;
1506 skb_frag_t *frag;
1507
1508 page = alloc_pages(sk->sk_allocation, 0);
1509 if (!page) {
1510 err = -ENOBUFS;
1511 skb_shinfo(skb)->nr_frags = i;
1512 kfree_skb(skb);
1513 goto failure;
1514 }
1515
1516 frag = &skb_shinfo(skb)->frags[i];
1517 frag->page = page;
1518 frag->page_offset = 0;
1519 frag->size = (data_len >= PAGE_SIZE ?
1520 PAGE_SIZE :
1521 data_len);
1522 data_len -= PAGE_SIZE;
1523 }
1524
1525 /* Full success... */
1526 break;
1527 }
1528 err = -ENOBUFS;
1529 goto failure;
1530 }
1531 set_bit(SOCK_ASYNC_NOSPACE, &sk->sk_socket->flags);
1532 set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
1533 err = -EAGAIN;
1534 if (!timeo)
1535 goto failure;
1536 if (signal_pending(current))
1537 goto interrupted;
1538 timeo = sock_wait_for_wmem(sk, timeo);
1539 }
1540
1541 skb_set_owner_w(skb, sk);
1542 return skb;
1543
1544interrupted:
1545 err = sock_intr_errno(timeo);
1546failure:
1547 *errcode = err;
1548 return NULL;
1549}
4cc7f68d 1550EXPORT_SYMBOL(sock_alloc_send_pskb);
1da177e4 1551
4ec93edb 1552struct sk_buff *sock_alloc_send_skb(struct sock *sk, unsigned long size,
1da177e4
LT
1553 int noblock, int *errcode)
1554{
1555 return sock_alloc_send_pskb(sk, size, 0, noblock, errcode);
1556}
2a91525c 1557EXPORT_SYMBOL(sock_alloc_send_skb);
1da177e4
LT
1558
1559static void __lock_sock(struct sock *sk)
f39234d6
NK
1560 __releases(&sk->sk_lock.slock)
1561 __acquires(&sk->sk_lock.slock)
1da177e4
LT
1562{
1563 DEFINE_WAIT(wait);
1564
e71a4783 1565 for (;;) {
1da177e4
LT
1566 prepare_to_wait_exclusive(&sk->sk_lock.wq, &wait,
1567 TASK_UNINTERRUPTIBLE);
1568 spin_unlock_bh(&sk->sk_lock.slock);
1569 schedule();
1570 spin_lock_bh(&sk->sk_lock.slock);
e71a4783 1571 if (!sock_owned_by_user(sk))
1da177e4
LT
1572 break;
1573 }
1574 finish_wait(&sk->sk_lock.wq, &wait);
1575}
1576
1577static void __release_sock(struct sock *sk)
f39234d6
NK
1578 __releases(&sk->sk_lock.slock)
1579 __acquires(&sk->sk_lock.slock)
1da177e4
LT
1580{
1581 struct sk_buff *skb = sk->sk_backlog.head;
1582
1583 do {
1584 sk->sk_backlog.head = sk->sk_backlog.tail = NULL;
1585 bh_unlock_sock(sk);
1586
1587 do {
1588 struct sk_buff *next = skb->next;
1589
7fee226a 1590 WARN_ON_ONCE(skb_dst_is_noref(skb));
1da177e4 1591 skb->next = NULL;
c57943a1 1592 sk_backlog_rcv(sk, skb);
1da177e4
LT
1593
1594 /*
1595 * We are in process context here with softirqs
1596 * disabled, use cond_resched_softirq() to preempt.
1597 * This is safe to do because we've taken the backlog
1598 * queue private:
1599 */
1600 cond_resched_softirq();
1601
1602 skb = next;
1603 } while (skb != NULL);
1604
1605 bh_lock_sock(sk);
e71a4783 1606 } while ((skb = sk->sk_backlog.head) != NULL);
8eae939f
ZY
1607
1608 /*
1609 * Doing the zeroing here guarantee we can not loop forever
1610 * while a wild producer attempts to flood us.
1611 */
1612 sk->sk_backlog.len = 0;
1da177e4
LT
1613}
1614
1615/**
1616 * sk_wait_data - wait for data to arrive at sk_receive_queue
4dc3b16b
PP
1617 * @sk: sock to wait on
1618 * @timeo: for how long
1da177e4
LT
1619 *
1620 * Now socket state including sk->sk_err is changed only under lock,
1621 * hence we may omit checks after joining wait queue.
1622 * We check receive queue before schedule() only as optimization;
1623 * it is very likely that release_sock() added new data.
1624 */
1625int sk_wait_data(struct sock *sk, long *timeo)
1626{
1627 int rc;
1628 DEFINE_WAIT(wait);
1629
aa395145 1630 prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
1da177e4
LT
1631 set_bit(SOCK_ASYNC_WAITDATA, &sk->sk_socket->flags);
1632 rc = sk_wait_event(sk, timeo, !skb_queue_empty(&sk->sk_receive_queue));
1633 clear_bit(SOCK_ASYNC_WAITDATA, &sk->sk_socket->flags);
aa395145 1634 finish_wait(sk_sleep(sk), &wait);
1da177e4
LT
1635 return rc;
1636}
1da177e4
LT
1637EXPORT_SYMBOL(sk_wait_data);
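/*
 * Illustrative caller sketch (a hypothetical datagram recvmsg path, not
 * from this file):
 *
 *	long timeo = sock_rcvtimeo(sk, flags & MSG_DONTWAIT);
 *
 *	while (skb_queue_empty(&sk->sk_receive_queue)) {
 *		if (!timeo)
 *			return -EAGAIN;
 *		if (signal_pending(current))
 *			return sock_intr_errno(timeo);
 *		sk_wait_data(sk, &timeo);
 *	}
 */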
1638
3ab224be
HA
1639/**
1640 * __sk_mem_schedule - increase sk_forward_alloc and memory_allocated
1641 * @sk: socket
1642 * @size: memory size to allocate
1643 * @kind: allocation type
1644 *
1645 * If kind is SK_MEM_SEND, it means wmem allocation. Otherwise it means
1646 * rmem allocation. This function assumes that protocols which have
1647 * memory_pressure use sk_wmem_queued as write buffer accounting.
1648 */
1649int __sk_mem_schedule(struct sock *sk, int size, int kind)
1650{
1651 struct proto *prot = sk->sk_prot;
1652 int amt = sk_mem_pages(size);
1653 int allocated;
1654
1655 sk->sk_forward_alloc += amt * SK_MEM_QUANTUM;
1656 allocated = atomic_add_return(amt, prot->memory_allocated);
1657
1658 /* Under limit. */
1659 if (allocated <= prot->sysctl_mem[0]) {
1660 if (prot->memory_pressure && *prot->memory_pressure)
1661 *prot->memory_pressure = 0;
1662 return 1;
1663 }
1664
1665 /* Under pressure. */
1666 if (allocated > prot->sysctl_mem[1])
1667 if (prot->enter_memory_pressure)
5c52ba17 1668 prot->enter_memory_pressure(sk);
3ab224be
HA
1669
1670 /* Over hard limit. */
1671 if (allocated > prot->sysctl_mem[2])
1672 goto suppress_allocation;
1673
1674 /* guarantee minimum buffer size under pressure */
1675 if (kind == SK_MEM_RECV) {
1676 if (atomic_read(&sk->sk_rmem_alloc) < prot->sysctl_rmem[0])
1677 return 1;
1678 } else { /* SK_MEM_SEND */
1679 if (sk->sk_type == SOCK_STREAM) {
1680 if (sk->sk_wmem_queued < prot->sysctl_wmem[0])
1681 return 1;
1682 } else if (atomic_read(&sk->sk_wmem_alloc) <
1683 prot->sysctl_wmem[0])
1684 return 1;
1685 }
1686
1687 if (prot->memory_pressure) {
1748376b
ED
1688 int alloc;
1689
1690 if (!*prot->memory_pressure)
1691 return 1;
1692 alloc = percpu_counter_read_positive(prot->sockets_allocated);
1693 if (prot->sysctl_mem[2] > alloc *
3ab224be
HA
1694 sk_mem_pages(sk->sk_wmem_queued +
1695 atomic_read(&sk->sk_rmem_alloc) +
1696 sk->sk_forward_alloc))
1697 return 1;
1698 }
1699
1700suppress_allocation:
1701
1702 if (kind == SK_MEM_SEND && sk->sk_type == SOCK_STREAM) {
1703 sk_stream_moderate_sndbuf(sk);
1704
1705 /* Fail only if socket is _under_ its sndbuf.
1706 * In this case we cannot block, so that we have to fail.
1707 */
1708 if (sk->sk_wmem_queued + size >= sk->sk_sndbuf)
1709 return 1;
1710 }
1711
1712 /* Alas. Undo changes. */
1713 sk->sk_forward_alloc -= amt * SK_MEM_QUANTUM;
1714 atomic_sub(amt, prot->memory_allocated);
1715 return 0;
1716}
3ab224be
HA
1717EXPORT_SYMBOL(__sk_mem_schedule);
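/*
 * Worked example (SK_MEM_QUANTUM is PAGE_SIZE; a 4096-byte page is
 * assumed): charging a 1500-byte skb,
 *
 *	amt = sk_mem_pages(1500) = 1 page
 *	sk->sk_forward_alloc += 4096
 *	*prot->memory_allocated += 1
 *
 * and subsequent charges are served from sk_forward_alloc until the
 * per-socket reserve runs out.
 */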
1718
1719/**
1720 * __sk_mem_reclaim - reclaim memory_allocated
1721 * @sk: socket
1722 */
1723void __sk_mem_reclaim(struct sock *sk)
1724{
1725 struct proto *prot = sk->sk_prot;
1726
680a5a50 1727 atomic_sub(sk->sk_forward_alloc >> SK_MEM_QUANTUM_SHIFT,
3ab224be
HA
1728 prot->memory_allocated);
1729 sk->sk_forward_alloc &= SK_MEM_QUANTUM - 1;
1730
1731 if (prot->memory_pressure && *prot->memory_pressure &&
1732 (atomic_read(prot->memory_allocated) < prot->sysctl_mem[0]))
1733 *prot->memory_pressure = 0;
1734}
3ab224be
HA
1735EXPORT_SYMBOL(__sk_mem_reclaim);
1736
1737
1da177e4
LT
1738/*
1739 * Set of default routines for initialising struct proto_ops when
1740 * the protocol does not support a particular function. In certain
1741 * cases where it makes no sense for a protocol to have a "do nothing"
1742 * function, some default processing is provided.
1743 */
1744
int sock_no_bind(struct socket *sock, struct sockaddr *saddr, int len)
{
        return -EOPNOTSUPP;
}
EXPORT_SYMBOL(sock_no_bind);

int sock_no_connect(struct socket *sock, struct sockaddr *saddr,
                    int len, int flags)
{
        return -EOPNOTSUPP;
}
EXPORT_SYMBOL(sock_no_connect);

int sock_no_socketpair(struct socket *sock1, struct socket *sock2)
{
        return -EOPNOTSUPP;
}
EXPORT_SYMBOL(sock_no_socketpair);

int sock_no_accept(struct socket *sock, struct socket *newsock, int flags)
{
        return -EOPNOTSUPP;
}
EXPORT_SYMBOL(sock_no_accept);

int sock_no_getname(struct socket *sock, struct sockaddr *saddr,
                    int *len, int peer)
{
        return -EOPNOTSUPP;
}
EXPORT_SYMBOL(sock_no_getname);

unsigned int sock_no_poll(struct file *file, struct socket *sock, poll_table *pt)
{
        return 0;
}
EXPORT_SYMBOL(sock_no_poll);

int sock_no_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
{
        return -EOPNOTSUPP;
}
EXPORT_SYMBOL(sock_no_ioctl);

int sock_no_listen(struct socket *sock, int backlog)
{
        return -EOPNOTSUPP;
}
EXPORT_SYMBOL(sock_no_listen);

int sock_no_shutdown(struct socket *sock, int how)
{
        return -EOPNOTSUPP;
}
EXPORT_SYMBOL(sock_no_shutdown);

int sock_no_setsockopt(struct socket *sock, int level, int optname,
                       char __user *optval, unsigned int optlen)
{
        return -EOPNOTSUPP;
}
EXPORT_SYMBOL(sock_no_setsockopt);

int sock_no_getsockopt(struct socket *sock, int level, int optname,
                       char __user *optval, int __user *optlen)
{
        return -EOPNOTSUPP;
}
EXPORT_SYMBOL(sock_no_getsockopt);

int sock_no_sendmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *m,
                    size_t len)
{
        return -EOPNOTSUPP;
}
EXPORT_SYMBOL(sock_no_sendmsg);

int sock_no_recvmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *m,
                    size_t len, int flags)
{
        return -EOPNOTSUPP;
}
EXPORT_SYMBOL(sock_no_recvmsg);

int sock_no_mmap(struct file *file, struct socket *sock, struct vm_area_struct *vma)
{
        /* Mirror missing mmap method error code */
        return -ENODEV;
}
EXPORT_SYMBOL(sock_no_mmap);

ssize_t sock_no_sendpage(struct socket *sock, struct page *page, int offset, size_t size, int flags)
{
        ssize_t res;
        struct msghdr msg = {.msg_flags = flags};
        struct kvec iov;
        char *kaddr = kmap(page);
        iov.iov_base = kaddr + offset;
        iov.iov_len = size;
        res = kernel_sendmsg(sock, &msg, &iov, 1, size);
        kunmap(page);
        return res;
}
EXPORT_SYMBOL(sock_no_sendpage);
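
/*
 * Illustrative sketch (not part of the original file): a protocol that
 * supports only a subset of operations can plug these stubs straight
 * into its proto_ops; "example_proto_ops" and the PF_INET family used
 * here are hypothetical.  A real protocol must still supply .release
 * and the operations it actually implements.
 *
 *      static const struct proto_ops example_proto_ops = {
 *              .family     = PF_INET,
 *              .owner      = THIS_MODULE,
 *              .bind       = sock_no_bind,
 *              .connect    = sock_no_connect,
 *              .socketpair = sock_no_socketpair,
 *              .accept     = sock_no_accept,
 *              .getname    = sock_no_getname,
 *              .poll       = sock_no_poll,
 *              .ioctl      = sock_no_ioctl,
 *              .listen     = sock_no_listen,
 *              .shutdown   = sock_no_shutdown,
 *              .setsockopt = sock_no_setsockopt,
 *              .getsockopt = sock_no_getsockopt,
 *              .sendmsg    = sock_no_sendmsg,
 *              .recvmsg    = sock_no_recvmsg,
 *              .mmap       = sock_no_mmap,
 *              .sendpage   = sock_no_sendpage,
 *      };
 */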

/*
 *      Default Socket Callbacks
 */

static void sock_def_wakeup(struct sock *sk)
{
        struct socket_wq *wq;

        rcu_read_lock();
        wq = rcu_dereference(sk->sk_wq);
        if (wq_has_sleeper(wq))
                wake_up_interruptible_all(&wq->wait);
        rcu_read_unlock();
}

static void sock_def_error_report(struct sock *sk)
{
        struct socket_wq *wq;

        rcu_read_lock();
        wq = rcu_dereference(sk->sk_wq);
        if (wq_has_sleeper(wq))
                wake_up_interruptible_poll(&wq->wait, POLLERR);
        sk_wake_async(sk, SOCK_WAKE_IO, POLL_ERR);
        rcu_read_unlock();
}

static void sock_def_readable(struct sock *sk, int len)
{
        struct socket_wq *wq;

        rcu_read_lock();
        wq = rcu_dereference(sk->sk_wq);
        if (wq_has_sleeper(wq))
                wake_up_interruptible_sync_poll(&wq->wait, POLLIN |
                                                POLLRDNORM | POLLRDBAND);
        sk_wake_async(sk, SOCK_WAKE_WAITD, POLL_IN);
        rcu_read_unlock();
}

static void sock_def_write_space(struct sock *sk)
{
        struct socket_wq *wq;

        rcu_read_lock();

        /* Do not wake up a writer until he can make "significant"
         * progress.  --DaveM
         */
        if ((atomic_read(&sk->sk_wmem_alloc) << 1) <= sk->sk_sndbuf) {
                wq = rcu_dereference(sk->sk_wq);
                if (wq_has_sleeper(wq))
                        wake_up_interruptible_sync_poll(&wq->wait, POLLOUT |
                                                POLLWRNORM | POLLWRBAND);

                /* Should agree with poll, otherwise some programs break */
                if (sock_writeable(sk))
                        sk_wake_async(sk, SOCK_WAKE_SPACE, POLL_OUT);
        }

        rcu_read_unlock();
}
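
/*
 * Worked example (informational): with the test above, a socket whose
 * sk_sndbuf is 16384 bytes wakes waiting writers only once
 * sk_wmem_alloc has drained to 8192 bytes or less, i.e. once at least
 * half of the send buffer is free again.
 */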

static void sock_def_destruct(struct sock *sk)
{
        kfree(sk->sk_protinfo);
}

void sk_send_sigurg(struct sock *sk)
{
        if (sk->sk_socket && sk->sk_socket->file)
                if (send_sigurg(&sk->sk_socket->file->f_owner))
                        sk_wake_async(sk, SOCK_WAKE_URG, POLL_PRI);
}
EXPORT_SYMBOL(sk_send_sigurg);

void sk_reset_timer(struct sock *sk, struct timer_list *timer,
                    unsigned long expires)
{
        if (!mod_timer(timer, expires))
                sock_hold(sk);
}
EXPORT_SYMBOL(sk_reset_timer);

void sk_stop_timer(struct sock *sk, struct timer_list *timer)
{
        if (timer_pending(timer) && del_timer(timer))
                __sock_put(sk);
}
EXPORT_SYMBOL(sk_stop_timer);
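
/*
 * Refcounting note (informational): sk_reset_timer() takes a socket
 * reference only when it arms a timer that was not already pending
 * (mod_timer() returned 0), and sk_stop_timer() drops a reference only
 * when it actually deactivates a pending timer, so arm/re-arm/cancel
 * sequences keep sk_refcnt balanced.
 */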

void sock_init_data(struct socket *sock, struct sock *sk)
{
        skb_queue_head_init(&sk->sk_receive_queue);
        skb_queue_head_init(&sk->sk_write_queue);
        skb_queue_head_init(&sk->sk_error_queue);
#ifdef CONFIG_NET_DMA
        skb_queue_head_init(&sk->sk_async_wait_queue);
#endif

        sk->sk_send_head = NULL;

        init_timer(&sk->sk_timer);

        sk->sk_allocation = GFP_KERNEL;
        sk->sk_rcvbuf = sysctl_rmem_default;
        sk->sk_sndbuf = sysctl_wmem_default;
        sk->sk_state = TCP_CLOSE;
        sk_set_socket(sk, sock);

        sock_set_flag(sk, SOCK_ZAPPED);

        if (sock) {
                sk->sk_type = sock->type;
                sk->sk_wq = sock->wq;
                sock->sk = sk;
        } else
                sk->sk_wq = NULL;

        spin_lock_init(&sk->sk_dst_lock);
        rwlock_init(&sk->sk_callback_lock);
        lockdep_set_class_and_name(&sk->sk_callback_lock,
                        af_callback_keys + sk->sk_family,
                        af_family_clock_key_strings[sk->sk_family]);

        sk->sk_state_change = sock_def_wakeup;
        sk->sk_data_ready = sock_def_readable;
        sk->sk_write_space = sock_def_write_space;
        sk->sk_error_report = sock_def_error_report;
        sk->sk_destruct = sock_def_destruct;

        sk->sk_sndmsg_page = NULL;
        sk->sk_sndmsg_off = 0;

        sk->sk_peer_pid = NULL;
        sk->sk_peer_cred = NULL;
        sk->sk_write_pending = 0;
        sk->sk_rcvlowat = 1;
        sk->sk_rcvtimeo = MAX_SCHEDULE_TIMEOUT;
        sk->sk_sndtimeo = MAX_SCHEDULE_TIMEOUT;

        sk->sk_stamp = ktime_set(-1L, 0);

        /*
         * Before updating sk_refcnt, we must commit prior changes to memory
         * (Documentation/RCU/rculist_nulls.txt for details)
         */
        smp_wmb();
        atomic_set(&sk->sk_refcnt, 1);
        atomic_set(&sk->sk_drops, 0);
}
EXPORT_SYMBOL(sock_init_data);
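
/*
 * Usage note (informational): protocol families call this from their
 * create routines once a struct sock exists, roughly
 *
 *      sk = sk_alloc(net, PF_INET, GFP_KERNEL, &prot);
 *      ...
 *      sock_init_data(sock, sk);
 *
 * after which the default sk_* callbacks above are in place and may be
 * overridden by the protocol.
 */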

void lock_sock_nested(struct sock *sk, int subclass)
{
        might_sleep();
        spin_lock_bh(&sk->sk_lock.slock);
        if (sk->sk_lock.owned)
                __lock_sock(sk);
        sk->sk_lock.owned = 1;
        spin_unlock(&sk->sk_lock.slock);
        /*
         * The sk_lock has mutex_lock() semantics here:
         */
        mutex_acquire(&sk->sk_lock.dep_map, subclass, 0, _RET_IP_);
        local_bh_enable();
}
EXPORT_SYMBOL(lock_sock_nested);

void release_sock(struct sock *sk)
{
        /*
         * The sk_lock has mutex_unlock() semantics:
         */
        mutex_release(&sk->sk_lock.dep_map, 1, _RET_IP_);

        spin_lock_bh(&sk->sk_lock.slock);
        if (sk->sk_backlog.tail)
                __release_sock(sk);
        sk->sk_lock.owned = 0;
        if (waitqueue_active(&sk->sk_lock.wq))
                wake_up(&sk->sk_lock.wq);
        spin_unlock_bh(&sk->sk_lock.slock);
}
EXPORT_SYMBOL(release_sock);
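
/*
 * Pairing sketch (informational): process-context code brackets socket
 * state changes with
 *
 *      lock_sock(sk);
 *      ... modify socket state; softirq input is queued to the backlog ...
 *      release_sock(sk);
 *
 * and release_sock() then processes whatever accumulated on the
 * backlog while the lock was owned.
 */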

/**
 * lock_sock_fast - fast version of lock_sock
 * @sk: socket
 *
 * This version should be used for very small sections, where the process
 * won't block.
 *
 * Returns false if the fast path was taken:
 *      sk_lock.slock is locked, owned = 0, BHs are disabled.
 * Returns true if the slow path was taken:
 *      sk_lock.slock is unlocked, owned = 1, BHs are enabled.
 */
bool lock_sock_fast(struct sock *sk)
{
        might_sleep();
        spin_lock_bh(&sk->sk_lock.slock);

        if (!sk->sk_lock.owned)
                /*
                 * Fast path: return with slock held and BHs still
                 * disabled; unlock_sock_fast() undoes both.
                 */
                return false;

        __lock_sock(sk);
        sk->sk_lock.owned = 1;
        spin_unlock(&sk->sk_lock.slock);
        /*
         * The sk_lock has mutex_lock() semantics here:
         */
        mutex_acquire(&sk->sk_lock.dep_map, 0, 0, _RET_IP_);
        local_bh_enable();
        return true;
}
EXPORT_SYMBOL(lock_sock_fast);
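
/*
 * Usage sketch (illustrative only): callers pair lock_sock_fast() with
 * unlock_sock_fast(), handing back the slow-path indication so the
 * matching unlock is performed:
 *
 *      bool slow = lock_sock_fast(sk);
 *
 *      ... short, non-blocking critical section ...
 *
 *      unlock_sock_fast(sk, slow);
 */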

int sock_get_timestamp(struct sock *sk, struct timeval __user *userstamp)
{
        struct timeval tv;
        if (!sock_flag(sk, SOCK_TIMESTAMP))
                sock_enable_timestamp(sk, SOCK_TIMESTAMP);
        tv = ktime_to_timeval(sk->sk_stamp);
        if (tv.tv_sec == -1)
                return -ENOENT;
        if (tv.tv_sec == 0) {
                sk->sk_stamp = ktime_get_real();
                tv = ktime_to_timeval(sk->sk_stamp);
        }
        return copy_to_user(userstamp, &tv, sizeof(tv)) ? -EFAULT : 0;
}
EXPORT_SYMBOL(sock_get_timestamp);

int sock_get_timestampns(struct sock *sk, struct timespec __user *userstamp)
{
        struct timespec ts;
        if (!sock_flag(sk, SOCK_TIMESTAMP))
                sock_enable_timestamp(sk, SOCK_TIMESTAMP);
        ts = ktime_to_timespec(sk->sk_stamp);
        if (ts.tv_sec == -1)
                return -ENOENT;
        if (ts.tv_sec == 0) {
                sk->sk_stamp = ktime_get_real();
                ts = ktime_to_timespec(sk->sk_stamp);
        }
        return copy_to_user(userstamp, &ts, sizeof(ts)) ? -EFAULT : 0;
}
EXPORT_SYMBOL(sock_get_timestampns);

void sock_enable_timestamp(struct sock *sk, int flag)
{
        if (!sock_flag(sk, flag)) {
                sock_set_flag(sk, flag);
                /*
                 * we just set one of the two flags which require net
                 * time stamping, but time stamping might have been on
                 * already because of the other one
                 */
                if (!sock_flag(sk,
                                flag == SOCK_TIMESTAMP ?
                                SOCK_TIMESTAMPING_RX_SOFTWARE :
                                SOCK_TIMESTAMP))
                        net_enable_timestamp();
        }
}

/*
 *      Get a socket option on a socket.
 *
 *      FIX: POSIX 1003.1g is very ambiguous here. It states that
 *      asynchronous errors should be reported by getsockopt. We assume
 *      this means if you specify SO_ERROR (otherwise what's the point of it).
 */
int sock_common_getsockopt(struct socket *sock, int level, int optname,
                           char __user *optval, int __user *optlen)
{
        struct sock *sk = sock->sk;

        return sk->sk_prot->getsockopt(sk, level, optname, optval, optlen);
}
EXPORT_SYMBOL(sock_common_getsockopt);

#ifdef CONFIG_COMPAT
int compat_sock_common_getsockopt(struct socket *sock, int level, int optname,
                                  char __user *optval, int __user *optlen)
{
        struct sock *sk = sock->sk;

        if (sk->sk_prot->compat_getsockopt != NULL)
                return sk->sk_prot->compat_getsockopt(sk, level, optname,
                                                      optval, optlen);
        return sk->sk_prot->getsockopt(sk, level, optname, optval, optlen);
}
EXPORT_SYMBOL(compat_sock_common_getsockopt);
#endif

int sock_common_recvmsg(struct kiocb *iocb, struct socket *sock,
                        struct msghdr *msg, size_t size, int flags)
{
        struct sock *sk = sock->sk;
        int addr_len = 0;
        int err;

        err = sk->sk_prot->recvmsg(iocb, sk, msg, size, flags & MSG_DONTWAIT,
                                   flags & ~MSG_DONTWAIT, &addr_len);
        if (err >= 0)
                msg->msg_namelen = addr_len;
        return err;
}
EXPORT_SYMBOL(sock_common_recvmsg);

/*
 *      Set socket options on an inet socket.
 */
int sock_common_setsockopt(struct socket *sock, int level, int optname,
                           char __user *optval, unsigned int optlen)
{
        struct sock *sk = sock->sk;

        return sk->sk_prot->setsockopt(sk, level, optname, optval, optlen);
}
EXPORT_SYMBOL(sock_common_setsockopt);

#ifdef CONFIG_COMPAT
int compat_sock_common_setsockopt(struct socket *sock, int level, int optname,
                                  char __user *optval, unsigned int optlen)
{
        struct sock *sk = sock->sk;

        if (sk->sk_prot->compat_setsockopt != NULL)
                return sk->sk_prot->compat_setsockopt(sk, level, optname,
                                                      optval, optlen);
        return sk->sk_prot->setsockopt(sk, level, optname, optval, optlen);
}
EXPORT_SYMBOL(compat_sock_common_setsockopt);
#endif

void sk_common_release(struct sock *sk)
{
        if (sk->sk_prot->destroy)
                sk->sk_prot->destroy(sk);

        /*
         * Observation: when sk_common_release is called, processes have
         * no access to the socket, but the net still has.
         *
         * Step one, detach it from networking:
         *
         * A. Remove it from the hash tables.
         */

        sk->sk_prot->unhash(sk);

        /*
         * At this point the socket cannot receive new packets, but it is
         * possible that some packets are in flight because some CPU runs
         * the receiver and did a hash table lookup before we unhashed the
         * socket. They will reach the receive queue and will be purged by
         * the socket destructor.
         *
         * Also, we still have packets pending on the receive queue and,
         * probably, our own packets waiting in device queues. sock_destroy
         * will drain the receive queue, but transmitted packets will delay
         * socket destruction until the last reference is released.
         */

        sock_orphan(sk);

        xfrm_sk_free_policy(sk);

        sk_refcnt_debug_release(sk);
        sock_put(sk);
}
EXPORT_SYMBOL(sk_common_release);

static DEFINE_RWLOCK(proto_list_lock);
static LIST_HEAD(proto_list);

#ifdef CONFIG_PROC_FS
#define PROTO_INUSE_NR  64      /* should be enough for the first time */
struct prot_inuse {
        int val[PROTO_INUSE_NR];
};

static DECLARE_BITMAP(proto_inuse_idx, PROTO_INUSE_NR);

#ifdef CONFIG_NET_NS
void sock_prot_inuse_add(struct net *net, struct proto *prot, int val)
{
        __this_cpu_add(net->core.inuse->val[prot->inuse_idx], val);
}
EXPORT_SYMBOL_GPL(sock_prot_inuse_add);

int sock_prot_inuse_get(struct net *net, struct proto *prot)
{
        int cpu, idx = prot->inuse_idx;
        int res = 0;

        for_each_possible_cpu(cpu)
                res += per_cpu_ptr(net->core.inuse, cpu)->val[idx];

        return res >= 0 ? res : 0;
}
EXPORT_SYMBOL_GPL(sock_prot_inuse_get);

static int __net_init sock_inuse_init_net(struct net *net)
{
        net->core.inuse = alloc_percpu(struct prot_inuse);
        return net->core.inuse ? 0 : -ENOMEM;
}

static void __net_exit sock_inuse_exit_net(struct net *net)
{
        free_percpu(net->core.inuse);
}

static struct pernet_operations net_inuse_ops = {
        .init = sock_inuse_init_net,
        .exit = sock_inuse_exit_net,
};

static __init int net_inuse_init(void)
{
        if (register_pernet_subsys(&net_inuse_ops))
                panic("Cannot initialize net inuse counters");

        return 0;
}

core_initcall(net_inuse_init);
#else
static DEFINE_PER_CPU(struct prot_inuse, prot_inuse);

void sock_prot_inuse_add(struct net *net, struct proto *prot, int val)
{
        __this_cpu_add(prot_inuse.val[prot->inuse_idx], val);
}
EXPORT_SYMBOL_GPL(sock_prot_inuse_add);

int sock_prot_inuse_get(struct net *net, struct proto *prot)
{
        int cpu, idx = prot->inuse_idx;
        int res = 0;

        for_each_possible_cpu(cpu)
                res += per_cpu(prot_inuse, cpu).val[idx];

        return res >= 0 ? res : 0;
}
EXPORT_SYMBOL_GPL(sock_prot_inuse_get);
#endif
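
/*
 * Usage sketch (informational): protocols account sockets from their
 * hash/unhash callbacks, e.g.
 *
 *      sock_prot_inuse_add(sock_net(sk), sk->sk_prot, 1);      on hash
 *      sock_prot_inuse_add(sock_net(sk), sk->sk_prot, -1);     on unhash
 *
 * and /proc/net/protocols sums the per-cpu counters via
 * sock_prot_inuse_get().
 */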

static void assign_proto_idx(struct proto *prot)
{
        prot->inuse_idx = find_first_zero_bit(proto_inuse_idx, PROTO_INUSE_NR);

        if (unlikely(prot->inuse_idx == PROTO_INUSE_NR - 1)) {
                printk(KERN_ERR "PROTO_INUSE_NR exhausted\n");
                return;
        }

        set_bit(prot->inuse_idx, proto_inuse_idx);
}

static void release_proto_idx(struct proto *prot)
{
        if (prot->inuse_idx != PROTO_INUSE_NR - 1)
                clear_bit(prot->inuse_idx, proto_inuse_idx);
}
#else
static inline void assign_proto_idx(struct proto *prot)
{
}

static inline void release_proto_idx(struct proto *prot)
{
}
#endif

int proto_register(struct proto *prot, int alloc_slab)
{
        if (alloc_slab) {
                prot->slab = kmem_cache_create(prot->name, prot->obj_size, 0,
                                        SLAB_HWCACHE_ALIGN | prot->slab_flags,
                                        NULL);

                if (prot->slab == NULL) {
                        printk(KERN_CRIT "%s: Can't create sock SLAB cache!\n",
                               prot->name);
                        goto out;
                }

                if (prot->rsk_prot != NULL) {
                        prot->rsk_prot->slab_name = kasprintf(GFP_KERNEL, "request_sock_%s", prot->name);
                        if (prot->rsk_prot->slab_name == NULL)
                                goto out_free_sock_slab;

                        prot->rsk_prot->slab = kmem_cache_create(prot->rsk_prot->slab_name,
                                                                 prot->rsk_prot->obj_size, 0,
                                                                 SLAB_HWCACHE_ALIGN, NULL);

                        if (prot->rsk_prot->slab == NULL) {
                                printk(KERN_CRIT "%s: Can't create request sock SLAB cache!\n",
                                       prot->name);
                                goto out_free_request_sock_slab_name;
                        }
                }

                if (prot->twsk_prot != NULL) {
                        prot->twsk_prot->twsk_slab_name = kasprintf(GFP_KERNEL, "tw_sock_%s", prot->name);

                        if (prot->twsk_prot->twsk_slab_name == NULL)
                                goto out_free_request_sock_slab;

                        prot->twsk_prot->twsk_slab =
                                kmem_cache_create(prot->twsk_prot->twsk_slab_name,
                                                  prot->twsk_prot->twsk_obj_size,
                                                  0,
                                                  SLAB_HWCACHE_ALIGN |
                                                        prot->slab_flags,
                                                  NULL);
                        if (prot->twsk_prot->twsk_slab == NULL)
                                goto out_free_timewait_sock_slab_name;
                }
        }

        write_lock(&proto_list_lock);
        list_add(&prot->node, &proto_list);
        assign_proto_idx(prot);
        write_unlock(&proto_list_lock);
        return 0;

out_free_timewait_sock_slab_name:
        kfree(prot->twsk_prot->twsk_slab_name);
out_free_request_sock_slab:
        if (prot->rsk_prot && prot->rsk_prot->slab) {
                kmem_cache_destroy(prot->rsk_prot->slab);
                prot->rsk_prot->slab = NULL;
        }
out_free_request_sock_slab_name:
        if (prot->rsk_prot)
                kfree(prot->rsk_prot->slab_name);
out_free_sock_slab:
        kmem_cache_destroy(prot->slab);
        prot->slab = NULL;
out:
        return -ENOBUFS;
}
EXPORT_SYMBOL(proto_register);
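
/*
 * Registration sketch (illustrative; "example_proto" and its module
 * init/exit are hypothetical, not part of this file).  Passing
 * alloc_slab as 1 asks proto_register() to create the per-protocol
 * kmem cache:
 *
 *      static struct proto example_proto = {
 *              .name     = "EXAMPLE",
 *              .owner    = THIS_MODULE,
 *              .obj_size = sizeof(struct sock),
 *      };
 *
 *      static int __init example_init(void)
 *      {
 *              return proto_register(&example_proto, 1);
 *      }
 *
 *      static void __exit example_exit(void)
 *      {
 *              proto_unregister(&example_proto);
 *      }
 */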

void proto_unregister(struct proto *prot)
{
        write_lock(&proto_list_lock);
        release_proto_idx(prot);
        list_del(&prot->node);
        write_unlock(&proto_list_lock);

        if (prot->slab != NULL) {
                kmem_cache_destroy(prot->slab);
                prot->slab = NULL;
        }

        if (prot->rsk_prot != NULL && prot->rsk_prot->slab != NULL) {
                kmem_cache_destroy(prot->rsk_prot->slab);
                kfree(prot->rsk_prot->slab_name);
                prot->rsk_prot->slab = NULL;
        }

        if (prot->twsk_prot != NULL && prot->twsk_prot->twsk_slab != NULL) {
                kmem_cache_destroy(prot->twsk_prot->twsk_slab);
                kfree(prot->twsk_prot->twsk_slab_name);
                prot->twsk_prot->twsk_slab = NULL;
        }
}
EXPORT_SYMBOL(proto_unregister);

#ifdef CONFIG_PROC_FS
static void *proto_seq_start(struct seq_file *seq, loff_t *pos)
        __acquires(proto_list_lock)
{
        read_lock(&proto_list_lock);
        return seq_list_start_head(&proto_list, *pos);
}

static void *proto_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
        return seq_list_next(v, &proto_list, pos);
}

static void proto_seq_stop(struct seq_file *seq, void *v)
        __releases(proto_list_lock)
{
        read_unlock(&proto_list_lock);
}

static char proto_method_implemented(const void *method)
{
        return method == NULL ? 'n' : 'y';
}

static void proto_seq_printf(struct seq_file *seq, struct proto *proto)
{
        seq_printf(seq, "%-9s %4u %6d %6d %-3s %6u %-3s %-10s "
                        "%2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c\n",
                   proto->name,
                   proto->obj_size,
                   sock_prot_inuse_get(seq_file_net(seq), proto),
                   proto->memory_allocated != NULL ? atomic_read(proto->memory_allocated) : -1,
                   proto->memory_pressure != NULL ? *proto->memory_pressure ? "yes" : "no" : "NI",
                   proto->max_header,
                   proto->slab == NULL ? "no" : "yes",
                   module_name(proto->owner),
                   proto_method_implemented(proto->close),
                   proto_method_implemented(proto->connect),
                   proto_method_implemented(proto->disconnect),
                   proto_method_implemented(proto->accept),
                   proto_method_implemented(proto->ioctl),
                   proto_method_implemented(proto->init),
                   proto_method_implemented(proto->destroy),
                   proto_method_implemented(proto->shutdown),
                   proto_method_implemented(proto->setsockopt),
                   proto_method_implemented(proto->getsockopt),
                   proto_method_implemented(proto->sendmsg),
                   proto_method_implemented(proto->recvmsg),
                   proto_method_implemented(proto->sendpage),
                   proto_method_implemented(proto->bind),
                   proto_method_implemented(proto->backlog_rcv),
                   proto_method_implemented(proto->hash),
                   proto_method_implemented(proto->unhash),
                   proto_method_implemented(proto->get_port),
                   proto_method_implemented(proto->enter_memory_pressure));
}

static int proto_seq_show(struct seq_file *seq, void *v)
{
        if (v == &proto_list)
                seq_printf(seq, "%-9s %-4s %-8s %-6s %-5s %-7s %-4s %-10s %s",
                           "protocol",
                           "size",
                           "sockets",
                           "memory",
                           "press",
                           "maxhdr",
                           "slab",
                           "module",
                           "cl co di ac io in de sh ss gs se re sp bi br ha uh gp em\n");
        else
                proto_seq_printf(seq, list_entry(v, struct proto, node));
        return 0;
}

static const struct seq_operations proto_seq_ops = {
        .start  = proto_seq_start,
        .next   = proto_seq_next,
        .stop   = proto_seq_stop,
        .show   = proto_seq_show,
};

static int proto_seq_open(struct inode *inode, struct file *file)
{
        return seq_open_net(inode, file, &proto_seq_ops,
                            sizeof(struct seq_net_private));
}

static const struct file_operations proto_seq_fops = {
        .owner          = THIS_MODULE,
        .open           = proto_seq_open,
        .read           = seq_read,
        .llseek         = seq_lseek,
        .release        = seq_release_net,
};

static __net_init int proto_init_net(struct net *net)
{
        if (!proc_net_fops_create(net, "protocols", S_IRUGO, &proto_seq_fops))
                return -ENOMEM;

        return 0;
}

static __net_exit void proto_exit_net(struct net *net)
{
        proc_net_remove(net, "protocols");
}

static __net_initdata struct pernet_operations proto_net_ops = {
        .init = proto_init_net,
        .exit = proto_exit_net,
};

static int __init proto_init(void)
{
        return register_pernet_subsys(&proto_net_ops);
}

subsys_initcall(proto_init);

#endif /* PROC_FS */