net/unix/af_unix.c
1/*
2 * NET4: Implementation of BSD Unix domain sockets.
3 *
4 * Authors: Alan Cox, <alan@lxorguk.ukuu.org.uk>
5 *
6 * This program is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU General Public License
8 * as published by the Free Software Foundation; either version
9 * 2 of the License, or (at your option) any later version.
10 *
11 * Fixes:
12 * Linus Torvalds : Assorted bug cures.
13 * Niibe Yutaka : async I/O support.
14 * Carsten Paeth : PF_UNIX check, address fixes.
15 * Alan Cox : Limit size of allocated blocks.
16 * Alan Cox : Fixed the stupid socketpair bug.
17 * Alan Cox : BSD compatibility fine tuning.
18 * Alan Cox : Fixed a bug in connect when interrupted.
19 * Alan Cox : Sorted out a proper draft version of
20 * file descriptor passing hacked up from
21 * Mike Shaver's work.
22 * Marty Leisner : Fixes to fd passing
23 * Nick Nevin : recvmsg bugfix.
24 * Alan Cox : Started proper garbage collector
25 * Heiko EiBfeldt : Missing verify_area check
26 * Alan Cox : Started POSIXisms
27 * Andreas Schwab : Replace inode by dentry for proper
28 * reference counting
29 * Kirk Petersen : Made this a module
30 * Christoph Rohland : Elegant non-blocking accept/connect algorithm.
31 * Lots of bug fixes.
32 * Alexey Kuznetsov : Repaired (I hope) bugs introduced
33 * by above two patches.
34 * Andrea Arcangeli : If possible we block in connect(2)
35 * if the max backlog of the listen socket
36 * has been reached. This won't break
37 * old apps and it will avoid a huge amount
38 * of socks hashed (this for unix_gc()
39 * performance reasons).
40 * Security fix that limits the max
41 * number of socks to 2*max_files and
42 * the number of skb queueable in the
43 * dgram receiver.
44 * Artur Skawina : Hash function optimizations
45 * Alexey Kuznetsov : Full scale SMP. Lot of bugs are introduced 8)
46 * Malcolm Beattie : Set peercred for socketpair
47 * Michal Ostrowski : Module initialization cleanup.
48 * Arnaldo C. Melo : Remove MOD_{INC,DEC}_USE_COUNT,
49 * the core infrastructure is doing that
50 * for all net proto families now (2.5.69+)
51 *
52 *
53 * Known differences from reference BSD that was tested:
54 *
55 * [TO FIX]
56 * ECONNREFUSED is not returned from one end of a connected() socket to the
57 * other the moment one end closes.
58 * fstat() doesn't return st_dev=0, and give the blksize as high water mark
59 * and a fake inode identifier (nor the BSD first socket fstat twice bug).
60 * [NOT TO FIX]
61 * accept() returns a path name even if the connecting socket has closed
62 * in the meantime (BSD loses the path and gives up).
63 * accept() returns 0 length path for an unbound connector. BSD returns 16
64 * and a null first byte in the path (but not for gethost/peername - BSD bug ??)
65 * socketpair(...SOCK_RAW..) doesn't panic the kernel.
66 * BSD af_unix apparently has connect forgetting to block properly.
67 * (need to check this with the POSIX spec in detail)
68 *
69 * Differences from 2.0.0-11-... (ANK)
70 * Bug fixes and improvements.
71 * - client shutdown killed server socket.
72 * - removed all useless cli/sti pairs.
73 *
74 * Semantic changes/extensions.
75 * - generic control message passing.
76 * - SCM_CREDENTIALS control message.
77 * - "Abstract" (not FS based) socket bindings.
78 * Abstract names are sequences of bytes (not zero terminated)
79 * started by 0, so that this name space does not intersect
80 * with BSD names.
81 */
82
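/*
 * Illustrative sketch (not part of this file's code): how a userspace
 * program binds an abstract-namespace socket as described above.  The
 * name "example" and the lengths used are example assumptions only.
 *
 *	struct sockaddr_un a;
 *	int fd = socket(AF_UNIX, SOCK_STREAM, 0);
 *
 *	memset(&a, 0, sizeof(a));
 *	a.sun_family = AF_UNIX;
 *	a.sun_path[0] = '\0';                  -- leading zero byte marks an abstract name
 *	memcpy(a.sun_path + 1, "example", 7);  -- a byte sequence, not a C string
 *	bind(fd, (struct sockaddr *)&a,
 *	     offsetof(struct sockaddr_un, sun_path) + 1 + 7);
 */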
83#include <linux/module.h>
84#include <linux/kernel.h>
85#include <linux/signal.h>
86#include <linux/sched.h>
87#include <linux/errno.h>
88#include <linux/string.h>
89#include <linux/stat.h>
90#include <linux/dcache.h>
91#include <linux/namei.h>
92#include <linux/socket.h>
93#include <linux/un.h>
94#include <linux/fcntl.h>
95#include <linux/termios.h>
96#include <linux/sockios.h>
97#include <linux/net.h>
98#include <linux/in.h>
99#include <linux/fs.h>
100#include <linux/slab.h>
101#include <asm/uaccess.h>
102#include <linux/skbuff.h>
103#include <linux/netdevice.h>
104#include <net/net_namespace.h>
105#include <net/sock.h>
106#include <net/tcp_states.h>
107#include <net/af_unix.h>
108#include <linux/proc_fs.h>
109#include <linux/seq_file.h>
110#include <net/scm.h>
111#include <linux/init.h>
112#include <linux/poll.h>
113#include <linux/rtnetlink.h>
114#include <linux/mount.h>
115#include <net/checksum.h>
116#include <linux/security.h>
117
118static struct hlist_head unix_socket_table[UNIX_HASH_SIZE + 1];
119static DEFINE_SPINLOCK(unix_table_lock);
120static atomic_t unix_nr_socks = ATOMIC_INIT(0);
121
122#define unix_sockets_unbound (&unix_socket_table[UNIX_HASH_SIZE])
123
124#define UNIX_ABSTRACT(sk) (unix_sk(sk)->addr->hash != UNIX_HASH_SIZE)
125
126#ifdef CONFIG_SECURITY_NETWORK
127static void unix_get_secdata(struct scm_cookie *scm, struct sk_buff *skb)
128{
129 memcpy(UNIXSID(skb), &scm->secid, sizeof(u32));
130}
131
132static inline void unix_set_secdata(struct scm_cookie *scm, struct sk_buff *skb)
133{
134 scm->secid = *UNIXSID(skb);
135}
136#else
137static inline void unix_get_secdata(struct scm_cookie *scm, struct sk_buff *skb)
138{ }
139
140static inline void unix_set_secdata(struct scm_cookie *scm, struct sk_buff *skb)
141{ }
142#endif /* CONFIG_SECURITY_NETWORK */
143
144/*
145 * SMP locking strategy:
146 * hash table is protected with spinlock unix_table_lock
147 * each socket state is protected by separate rwlock.
148 */
149
150static inline unsigned unix_hash_fold(__wsum n)
151{
152 unsigned hash = (__force unsigned)n;
153 hash ^= hash>>16;
154 hash ^= hash>>8;
155 return hash&(UNIX_HASH_SIZE-1);
156}
157
158#define unix_peer(sk) (unix_sk(sk)->peer)
159
160static inline int unix_our_peer(struct sock *sk, struct sock *osk)
161{
162 return unix_peer(osk) == sk;
163}
164
165static inline int unix_may_send(struct sock *sk, struct sock *osk)
166{
167 return (unix_peer(osk) == NULL || unix_our_peer(sk, osk));
168}
169
170static inline int unix_recvq_full(struct sock const *sk)
171{
172 return skb_queue_len(&sk->sk_receive_queue) > sk->sk_max_ack_backlog;
173}
174
175static struct sock *unix_peer_get(struct sock *s)
176{
177 struct sock *peer;
178
179 unix_state_lock(s);
180 peer = unix_peer(s);
181 if (peer)
182 sock_hold(peer);
183 unix_state_unlock(s);
184 return peer;
185}
186
187static inline void unix_release_addr(struct unix_address *addr)
188{
189 if (atomic_dec_and_test(&addr->refcnt))
190 kfree(addr);
191}
192
193/*
194 * Check unix socket name:
195 * - should not be zero length.
196 * - if it does not start with a zero byte, it should be NULL terminated (FS object)
197 * - if it starts with a zero byte, it is an abstract name.
198 */
199
200static int unix_mkname(struct sockaddr_un * sunaddr, int len, unsigned *hashp)
201{
202 if (len <= sizeof(short) || len > sizeof(*sunaddr))
203 return -EINVAL;
204 if (!sunaddr || sunaddr->sun_family != AF_UNIX)
205 return -EINVAL;
206 if (sunaddr->sun_path[0]) {
207 /*
208 * This may look like an off by one error but it is a bit more
209 * subtle. 108 is the longest valid AF_UNIX path for a binding.
210 * sun_path[108] doesn't as such exist. However in kernel space
211 * we are guaranteed that it is a valid memory location in our
212 * kernel address buffer.
213 */
214 ((char *)sunaddr)[len]=0;
215 len = strlen(sunaddr->sun_path)+1+sizeof(short);
216 return len;
217 }
218
219 *hashp = unix_hash_fold(csum_partial((char*)sunaddr, len, 0));
220 return len;
221}
222
223static void __unix_remove_socket(struct sock *sk)
224{
225 sk_del_node_init(sk);
226}
227
228static void __unix_insert_socket(struct hlist_head *list, struct sock *sk)
229{
230 WARN_ON(!sk_unhashed(sk));
231 sk_add_node(sk, list);
232}
233
234static inline void unix_remove_socket(struct sock *sk)
235{
236 spin_lock(&unix_table_lock);
237 __unix_remove_socket(sk);
238 spin_unlock(&unix_table_lock);
239}
240
241static inline void unix_insert_socket(struct hlist_head *list, struct sock *sk)
242{
243 spin_lock(&unix_table_lock);
244 __unix_insert_socket(list, sk);
245 spin_unlock(&unix_table_lock);
246}
247
248static struct sock *__unix_find_socket_byname(struct net *net,
249 struct sockaddr_un *sunname,
250 int len, int type, unsigned hash)
251{
252 struct sock *s;
253 struct hlist_node *node;
254
255 sk_for_each(s, node, &unix_socket_table[hash ^ type]) {
256 struct unix_sock *u = unix_sk(s);
257
258 if (!net_eq(sock_net(s), net))
259 continue;
260
261 if (u->addr->len == len &&
262 !memcmp(u->addr->name, sunname, len))
263 goto found;
264 }
265 s = NULL;
266found:
267 return s;
268}
269
270static inline struct sock *unix_find_socket_byname(struct net *net,
271 struct sockaddr_un *sunname,
272 int len, int type,
273 unsigned hash)
274{
275 struct sock *s;
276
277 spin_lock(&unix_table_lock);
278 s = __unix_find_socket_byname(net, sunname, len, type, hash);
279 if (s)
280 sock_hold(s);
281 spin_unlock(&unix_table_lock);
282 return s;
283}
284
285static struct sock *unix_find_socket_byinode(struct net *net, struct inode *i)
286{
287 struct sock *s;
288 struct hlist_node *node;
289
290 spin_lock(&unix_table_lock);
291 sk_for_each(s, node,
292 &unix_socket_table[i->i_ino & (UNIX_HASH_SIZE - 1)]) {
293 struct dentry *dentry = unix_sk(s)->dentry;
294
295 if (!net_eq(sock_net(s), net))
296 continue;
297
298 if(dentry && dentry->d_inode == i)
299 {
300 sock_hold(s);
301 goto found;
302 }
303 }
304 s = NULL;
305found:
306 spin_unlock(&unix_table_lock);
307 return s;
308}
309
310static inline int unix_writable(struct sock *sk)
311{
312 return (atomic_read(&sk->sk_wmem_alloc) << 2) <= sk->sk_sndbuf;
313}
314
315static void unix_write_space(struct sock *sk)
316{
317 read_lock(&sk->sk_callback_lock);
318 if (unix_writable(sk)) {
319 if (sk->sk_sleep && waitqueue_active(sk->sk_sleep))
320 wake_up_interruptible_sync(sk->sk_sleep);
321 sk_wake_async(sk, SOCK_WAKE_SPACE, POLL_OUT);
322 }
323 read_unlock(&sk->sk_callback_lock);
324}
325
326/* When a dgram socket disconnects (or changes its peer), we clear its receive
327 * queue of packets that arrived from the previous peer. First, this allows
328 * flow control based only on wmem_alloc; second, an sk connected to a peer
329 * may receive messages only from that peer. */
330static void unix_dgram_disconnected(struct sock *sk, struct sock *other)
331{
332 if (!skb_queue_empty(&sk->sk_receive_queue)) {
333 skb_queue_purge(&sk->sk_receive_queue);
334 wake_up_interruptible_all(&unix_sk(sk)->peer_wait);
335
336 /* If one link of bidirectional dgram pipe is disconnected,
337 * we signal error. Messages are lost. Do not do this
338 * when the peer was not connected to us.
339 */
340 if (!sock_flag(other, SOCK_DEAD) && unix_peer(other) == sk) {
341 other->sk_err = ECONNRESET;
342 other->sk_error_report(other);
343 }
344 }
345}
346
347static void unix_sock_destructor(struct sock *sk)
348{
349 struct unix_sock *u = unix_sk(sk);
350
351 skb_queue_purge(&sk->sk_receive_queue);
352
353 WARN_ON(atomic_read(&sk->sk_wmem_alloc));
354 WARN_ON(!sk_unhashed(sk));
355 WARN_ON(sk->sk_socket);
356 if (!sock_flag(sk, SOCK_DEAD)) {
357 printk("Attempt to release alive unix socket: %p\n", sk);
358 return;
359 }
360
361 if (u->addr)
362 unix_release_addr(u->addr);
363
364 atomic_dec(&unix_nr_socks);
365#ifdef UNIX_REFCNT_DEBUG
366 printk(KERN_DEBUG "UNIX %p is destroyed, %d are still alive.\n", sk, atomic_read(&unix_nr_socks));
367#endif
368}
369
370static int unix_release_sock (struct sock *sk, int embrion)
371{
372 struct unix_sock *u = unix_sk(sk);
373 struct dentry *dentry;
374 struct vfsmount *mnt;
375 struct sock *skpair;
376 struct sk_buff *skb;
377 int state;
378
379 unix_remove_socket(sk);
380
381 /* Clear state */
382 unix_state_lock(sk);
383 sock_orphan(sk);
384 sk->sk_shutdown = SHUTDOWN_MASK;
385 dentry = u->dentry;
386 u->dentry = NULL;
387 mnt = u->mnt;
388 u->mnt = NULL;
389 state = sk->sk_state;
390 sk->sk_state = TCP_CLOSE;
391 unix_state_unlock(sk);
392
393 wake_up_interruptible_all(&u->peer_wait);
394
395 skpair=unix_peer(sk);
396
397 if (skpair!=NULL) {
398 if (sk->sk_type == SOCK_STREAM || sk->sk_type == SOCK_SEQPACKET) {
399 unix_state_lock(skpair);
400 /* No more writes */
401 skpair->sk_shutdown = SHUTDOWN_MASK;
402 if (!skb_queue_empty(&sk->sk_receive_queue) || embrion)
403 skpair->sk_err = ECONNRESET;
404 unix_state_unlock(skpair);
405 skpair->sk_state_change(skpair);
406 read_lock(&skpair->sk_callback_lock);
407 sk_wake_async(skpair, SOCK_WAKE_WAITD, POLL_HUP);
408 read_unlock(&skpair->sk_callback_lock);
409 }
410 sock_put(skpair); /* It may now die */
411 unix_peer(sk) = NULL;
412 }
413
414 /* Try to flush out this socket. Throw out buffers at least */
415
416 while ((skb = skb_dequeue(&sk->sk_receive_queue)) != NULL) {
417 if (state==TCP_LISTEN)
418 unix_release_sock(skb->sk, 1);
419 /* passed fds are erased in the kfree_skb hook */
420 kfree_skb(skb);
421 }
422
423 if (dentry) {
424 dput(dentry);
425 mntput(mnt);
426 }
427
428 sock_put(sk);
429
430 /* ---- Socket is dead now and most probably destroyed ---- */
431
432 /*
433 * Fixme: BSD difference: In BSD all sockets connected to us get
434 * ECONNRESET and we die on the spot. In Linux we behave
435 * like files and pipes do and wait for the last
436 * dereference.
437 *
438 * Can't we simply set sock->err?
439 *
440 * What does the above comment talk about? --ANK(980817)
441 */
442
443 if (unix_tot_inflight)
444 unix_gc(); /* Garbage collect fds */
445
446 return 0;
447}
448
449static int unix_listen(struct socket *sock, int backlog)
450{
451 int err;
452 struct sock *sk = sock->sk;
453 struct unix_sock *u = unix_sk(sk);
454
455 err = -EOPNOTSUPP;
456 if (sock->type!=SOCK_STREAM && sock->type!=SOCK_SEQPACKET)
457 goto out; /* Only stream/seqpacket sockets accept */
458 err = -EINVAL;
459 if (!u->addr)
460 goto out; /* No listens on an unbound socket */
461 unix_state_lock(sk);
462 if (sk->sk_state != TCP_CLOSE && sk->sk_state != TCP_LISTEN)
463 goto out_unlock;
464 if (backlog > sk->sk_max_ack_backlog)
465 wake_up_interruptible_all(&u->peer_wait);
466 sk->sk_max_ack_backlog = backlog;
467 sk->sk_state = TCP_LISTEN;
468 /* set credentials so connect can copy them */
469 sk->sk_peercred.pid = task_tgid_vnr(current);
470 sk->sk_peercred.uid = current->euid;
471 sk->sk_peercred.gid = current->egid;
472 err = 0;
473
474out_unlock:
475 unix_state_unlock(sk);
476out:
477 return err;
478}
479
480static int unix_release(struct socket *);
481static int unix_bind(struct socket *, struct sockaddr *, int);
482static int unix_stream_connect(struct socket *, struct sockaddr *,
483 int addr_len, int flags);
484static int unix_socketpair(struct socket *, struct socket *);
485static int unix_accept(struct socket *, struct socket *, int);
486static int unix_getname(struct socket *, struct sockaddr *, int *, int);
487static unsigned int unix_poll(struct file *, struct socket *, poll_table *);
488static unsigned int unix_dgram_poll(struct file *, struct socket *,
489 poll_table *);
490static int unix_ioctl(struct socket *, unsigned int, unsigned long);
491static int unix_shutdown(struct socket *, int);
492static int unix_stream_sendmsg(struct kiocb *, struct socket *,
493 struct msghdr *, size_t);
494static int unix_stream_recvmsg(struct kiocb *, struct socket *,
495 struct msghdr *, size_t, int);
496static int unix_dgram_sendmsg(struct kiocb *, struct socket *,
497 struct msghdr *, size_t);
498static int unix_dgram_recvmsg(struct kiocb *, struct socket *,
499 struct msghdr *, size_t, int);
500static int unix_dgram_connect(struct socket *, struct sockaddr *,
501 int, int);
502static int unix_seqpacket_sendmsg(struct kiocb *, struct socket *,
503 struct msghdr *, size_t);
504
505static const struct proto_ops unix_stream_ops = {
506 .family = PF_UNIX,
507 .owner = THIS_MODULE,
508 .release = unix_release,
509 .bind = unix_bind,
510 .connect = unix_stream_connect,
511 .socketpair = unix_socketpair,
512 .accept = unix_accept,
513 .getname = unix_getname,
514 .poll = unix_poll,
515 .ioctl = unix_ioctl,
516 .listen = unix_listen,
517 .shutdown = unix_shutdown,
518 .setsockopt = sock_no_setsockopt,
519 .getsockopt = sock_no_getsockopt,
520 .sendmsg = unix_stream_sendmsg,
521 .recvmsg = unix_stream_recvmsg,
522 .mmap = sock_no_mmap,
523 .sendpage = sock_no_sendpage,
524};
525
526static const struct proto_ops unix_dgram_ops = {
527 .family = PF_UNIX,
528 .owner = THIS_MODULE,
529 .release = unix_release,
530 .bind = unix_bind,
531 .connect = unix_dgram_connect,
532 .socketpair = unix_socketpair,
533 .accept = sock_no_accept,
534 .getname = unix_getname,
535 .poll = unix_dgram_poll,
536 .ioctl = unix_ioctl,
537 .listen = sock_no_listen,
538 .shutdown = unix_shutdown,
539 .setsockopt = sock_no_setsockopt,
540 .getsockopt = sock_no_getsockopt,
541 .sendmsg = unix_dgram_sendmsg,
542 .recvmsg = unix_dgram_recvmsg,
543 .mmap = sock_no_mmap,
544 .sendpage = sock_no_sendpage,
545};
546
547static const struct proto_ops unix_seqpacket_ops = {
548 .family = PF_UNIX,
549 .owner = THIS_MODULE,
550 .release = unix_release,
551 .bind = unix_bind,
552 .connect = unix_stream_connect,
553 .socketpair = unix_socketpair,
554 .accept = unix_accept,
555 .getname = unix_getname,
556 .poll = unix_dgram_poll,
557 .ioctl = unix_ioctl,
558 .listen = unix_listen,
559 .shutdown = unix_shutdown,
560 .setsockopt = sock_no_setsockopt,
561 .getsockopt = sock_no_getsockopt,
562 .sendmsg = unix_seqpacket_sendmsg,
563 .recvmsg = unix_dgram_recvmsg,
564 .mmap = sock_no_mmap,
565 .sendpage = sock_no_sendpage,
566};
567
568static struct proto unix_proto = {
569 .name = "UNIX",
570 .owner = THIS_MODULE,
571 .obj_size = sizeof(struct unix_sock),
572};
573
574/*
575 * AF_UNIX sockets do not interact with hardware, hence they
576 * don't trigger interrupts - so it's safe for them to have
577 * bh-unsafe locking for their sk_receive_queue.lock. Split off
578 * this special lock-class by reinitializing the spinlock key:
579 */
580static struct lock_class_key af_unix_sk_receive_queue_lock_key;
581
582static struct sock * unix_create1(struct net *net, struct socket *sock)
583{
584 struct sock *sk = NULL;
585 struct unix_sock *u;
586
587 atomic_inc(&unix_nr_socks);
588 if (atomic_read(&unix_nr_socks) > 2 * get_max_files())
589 goto out;
590
591 sk = sk_alloc(net, PF_UNIX, GFP_KERNEL, &unix_proto);
592 if (!sk)
593 goto out;
594
595 sock_init_data(sock,sk);
596 lockdep_set_class(&sk->sk_receive_queue.lock,
597 &af_unix_sk_receive_queue_lock_key);
598
599 sk->sk_write_space = unix_write_space;
600 sk->sk_max_ack_backlog = net->unx.sysctl_max_dgram_qlen;
601 sk->sk_destruct = unix_sock_destructor;
602 u = unix_sk(sk);
603 u->dentry = NULL;
604 u->mnt = NULL;
605 spin_lock_init(&u->lock);
606 atomic_long_set(&u->inflight, 0);
607 INIT_LIST_HEAD(&u->link);
608 mutex_init(&u->readlock); /* single task reading lock */
609 init_waitqueue_head(&u->peer_wait);
610 unix_insert_socket(unix_sockets_unbound, sk);
611out:
612 if (sk == NULL)
613 atomic_dec(&unix_nr_socks);
614 return sk;
615}
616
617static int unix_create(struct net *net, struct socket *sock, int protocol)
618{
619 if (protocol && protocol != PF_UNIX)
620 return -EPROTONOSUPPORT;
621
622 sock->state = SS_UNCONNECTED;
623
624 switch (sock->type) {
625 case SOCK_STREAM:
626 sock->ops = &unix_stream_ops;
627 break;
628 /*
629 * Believe it or not BSD has AF_UNIX, SOCK_RAW though
630 * nothing uses it.
631 */
632 case SOCK_RAW:
633 sock->type=SOCK_DGRAM;
634 case SOCK_DGRAM:
635 sock->ops = &unix_dgram_ops;
636 break;
637 case SOCK_SEQPACKET:
638 sock->ops = &unix_seqpacket_ops;
639 break;
640 default:
641 return -ESOCKTNOSUPPORT;
642 }
643
644 return unix_create1(net, sock) ? 0 : -ENOMEM;
645}
646
647static int unix_release(struct socket *sock)
648{
649 struct sock *sk = sock->sk;
650
651 if (!sk)
652 return 0;
653
654 sock->sk = NULL;
655
656 return unix_release_sock (sk, 0);
657}
658
659static int unix_autobind(struct socket *sock)
660{
661 struct sock *sk = sock->sk;
662 struct net *net = sock_net(sk);
663 struct unix_sock *u = unix_sk(sk);
664 static u32 ordernum = 1;
665 struct unix_address * addr;
666 int err;
667
668 mutex_lock(&u->readlock);
669
670 err = 0;
671 if (u->addr)
672 goto out;
673
674 err = -ENOMEM;
675 addr = kzalloc(sizeof(*addr) + sizeof(short) + 16, GFP_KERNEL);
676 if (!addr)
677 goto out;
678
679 addr->name->sun_family = AF_UNIX;
680 atomic_set(&addr->refcnt, 1);
681
682retry:
683 addr->len = sprintf(addr->name->sun_path+1, "%05x", ordernum) + 1 + sizeof(short);
684 addr->hash = unix_hash_fold(csum_partial((void*)addr->name, addr->len, 0));
685
686 spin_lock(&unix_table_lock);
687 ordernum = (ordernum+1)&0xFFFFF;
688
689 if (__unix_find_socket_byname(net, addr->name, addr->len, sock->type,
690 addr->hash)) {
691 spin_unlock(&unix_table_lock);
692 /* Sanity yield. It is unusual case, but yet... */
693 if (!(ordernum&0xFF))
694 yield();
695 goto retry;
696 }
697 addr->hash ^= sk->sk_type;
698
699 __unix_remove_socket(sk);
700 u->addr = addr;
701 __unix_insert_socket(&unix_socket_table[addr->hash], sk);
702 spin_unlock(&unix_table_lock);
703 err = 0;
704
705out: mutex_unlock(&u->readlock);
706 return err;
707}
708
709static struct sock *unix_find_other(struct net *net,
710 struct sockaddr_un *sunname, int len,
711 int type, unsigned hash, int *error)
712{
713 struct sock *u;
714 struct path path;
715 int err = 0;
716
717 if (sunname->sun_path[0]) {
718 struct inode *inode;
719 err = kern_path(sunname->sun_path, LOOKUP_FOLLOW, &path);
720 if (err)
721 goto fail;
722 inode = path.dentry->d_inode;
723 err = inode_permission(inode, MAY_WRITE);
724 if (err)
725 goto put_fail;
726
727 err = -ECONNREFUSED;
728 if (!S_ISSOCK(inode->i_mode))
729 goto put_fail;
730 u = unix_find_socket_byinode(net, inode);
731 if (!u)
732 goto put_fail;
733
734 if (u->sk_type == type)
735 touch_atime(path.mnt, path.dentry);
736
737 path_put(&path);
738
739 err=-EPROTOTYPE;
740 if (u->sk_type != type) {
741 sock_put(u);
742 goto fail;
743 }
744 } else {
745 err = -ECONNREFUSED;
746 u=unix_find_socket_byname(net, sunname, len, type, hash);
747 if (u) {
748 struct dentry *dentry;
749 dentry = unix_sk(u)->dentry;
750 if (dentry)
751 touch_atime(unix_sk(u)->mnt, dentry);
752 } else
753 goto fail;
754 }
755 return u;
756
757put_fail:
758 path_put(&path);
759fail:
760 *error=err;
761 return NULL;
762}
763
764
765static int unix_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
766{
767 struct sock *sk = sock->sk;
768 struct net *net = sock_net(sk);
769 struct unix_sock *u = unix_sk(sk);
770 struct sockaddr_un *sunaddr=(struct sockaddr_un *)uaddr;
771 struct dentry * dentry = NULL;
772 struct nameidata nd;
773 int err;
774 unsigned hash;
775 struct unix_address *addr;
776 struct hlist_head *list;
777
778 err = -EINVAL;
779 if (sunaddr->sun_family != AF_UNIX)
780 goto out;
781
782 if (addr_len==sizeof(short)) {
783 err = unix_autobind(sock);
784 goto out;
785 }
786
787 err = unix_mkname(sunaddr, addr_len, &hash);
788 if (err < 0)
789 goto out;
790 addr_len = err;
791
792 mutex_lock(&u->readlock);
793
794 err = -EINVAL;
795 if (u->addr)
796 goto out_up;
797
798 err = -ENOMEM;
799 addr = kmalloc(sizeof(*addr)+addr_len, GFP_KERNEL);
800 if (!addr)
801 goto out_up;
802
803 memcpy(addr->name, sunaddr, addr_len);
804 addr->len = addr_len;
805 addr->hash = hash ^ sk->sk_type;
806 atomic_set(&addr->refcnt, 1);
807
808 if (sunaddr->sun_path[0]) {
809 unsigned int mode;
810 err = 0;
811 /*
812 * Get the parent directory, calculate the hash for last
813 * component.
814 */
815 err = path_lookup(sunaddr->sun_path, LOOKUP_PARENT, &nd);
816 if (err)
817 goto out_mknod_parent;
818
819 dentry = lookup_create(&nd, 0);
820 err = PTR_ERR(dentry);
821 if (IS_ERR(dentry))
822 goto out_mknod_unlock;
823
824 /*
825 * All right, let's create it.
826 */
827 mode = S_IFSOCK |
828 (SOCK_INODE(sock)->i_mode & ~current->fs->umask);
829 err = mnt_want_write(nd.path.mnt);
830 if (err)
831 goto out_mknod_dput;
832 err = vfs_mknod(nd.path.dentry->d_inode, dentry, mode, 0);
833 mnt_drop_write(nd.path.mnt);
834 if (err)
835 goto out_mknod_dput;
836 mutex_unlock(&nd.path.dentry->d_inode->i_mutex);
837 dput(nd.path.dentry);
838 nd.path.dentry = dentry;
839
840 addr->hash = UNIX_HASH_SIZE;
841 }
842
843 spin_lock(&unix_table_lock);
844
845 if (!sunaddr->sun_path[0]) {
846 err = -EADDRINUSE;
847 if (__unix_find_socket_byname(net, sunaddr, addr_len,
848 sk->sk_type, hash)) {
849 unix_release_addr(addr);
850 goto out_unlock;
851 }
852
853 list = &unix_socket_table[addr->hash];
854 } else {
855 list = &unix_socket_table[dentry->d_inode->i_ino & (UNIX_HASH_SIZE-1)];
856 u->dentry = nd.path.dentry;
857 u->mnt = nd.path.mnt;
858 }
859
860 err = 0;
861 __unix_remove_socket(sk);
862 u->addr = addr;
863 __unix_insert_socket(list, sk);
864
865out_unlock:
866 spin_unlock(&unix_table_lock);
867out_up:
868 mutex_unlock(&u->readlock);
869out:
870 return err;
871
872out_mknod_dput:
873 dput(dentry);
874out_mknod_unlock:
875 mutex_unlock(&nd.path.dentry->d_inode->i_mutex);
876 path_put(&nd.path);
877out_mknod_parent:
878 if (err==-EEXIST)
879 err=-EADDRINUSE;
880 unix_release_addr(addr);
881 goto out_up;
882}
883
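/*
 * Lock the state of two unix sockets without risking an ABBA deadlock:
 * when both sockets are present and distinct, the two locks are always
 * taken in ascending pointer order; a NULL or identical peer degenerates
 * to locking just sk1.
 */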
884static void unix_state_double_lock(struct sock *sk1, struct sock *sk2)
885{
886 if (unlikely(sk1 == sk2) || !sk2) {
887 unix_state_lock(sk1);
888 return;
889 }
890 if (sk1 < sk2) {
891 unix_state_lock(sk1);
892 unix_state_lock_nested(sk2);
893 } else {
894 unix_state_lock(sk2);
895 unix_state_lock_nested(sk1);
896 }
897}
898
899static void unix_state_double_unlock(struct sock *sk1, struct sock *sk2)
900{
901 if (unlikely(sk1 == sk2) || !sk2) {
902 unix_state_unlock(sk1);
903 return;
904 }
905 unix_state_unlock(sk1);
906 unix_state_unlock(sk2);
907}
908
909static int unix_dgram_connect(struct socket *sock, struct sockaddr *addr,
910 int alen, int flags)
911{
912 struct sock *sk = sock->sk;
913 struct net *net = sock_net(sk);
914 struct sockaddr_un *sunaddr=(struct sockaddr_un*)addr;
915 struct sock *other;
916 unsigned hash;
917 int err;
918
919 if (addr->sa_family != AF_UNSPEC) {
920 err = unix_mkname(sunaddr, alen, &hash);
921 if (err < 0)
922 goto out;
923 alen = err;
924
925 if (test_bit(SOCK_PASSCRED, &sock->flags) &&
926 !unix_sk(sk)->addr && (err = unix_autobind(sock)) != 0)
927 goto out;
928
929restart:
930 other=unix_find_other(net, sunaddr, alen, sock->type, hash, &err);
931 if (!other)
932 goto out;
933
934 unix_state_double_lock(sk, other);
935
936 /* Apparently VFS overslept socket death. Retry. */
937 if (sock_flag(other, SOCK_DEAD)) {
938 unix_state_double_unlock(sk, other);
939 sock_put(other);
940 goto restart;
941 }
942
943 err = -EPERM;
944 if (!unix_may_send(sk, other))
945 goto out_unlock;
946
947 err = security_unix_may_send(sk->sk_socket, other->sk_socket);
948 if (err)
949 goto out_unlock;
950
951 } else {
952 /*
953 * 1003.1g breaking connected state with AF_UNSPEC
954 */
955 other = NULL;
956 unix_state_double_lock(sk, other);
957 }
958
959 /*
960 * If it was connected, reconnect.
961 */
962 if (unix_peer(sk)) {
963 struct sock *old_peer = unix_peer(sk);
964 unix_peer(sk)=other;
965 unix_state_double_unlock(sk, other);
966
967 if (other != old_peer)
968 unix_dgram_disconnected(sk, old_peer);
969 sock_put(old_peer);
970 } else {
971 unix_peer(sk)=other;
972 unix_state_double_unlock(sk, other);
973 }
974 return 0;
975
976out_unlock:
977 unix_state_double_unlock(sk, other);
978 sock_put(other);
979out:
980 return err;
981}
982
983static long unix_wait_for_peer(struct sock *other, long timeo)
984{
985 struct unix_sock *u = unix_sk(other);
986 int sched;
987 DEFINE_WAIT(wait);
988
989 prepare_to_wait_exclusive(&u->peer_wait, &wait, TASK_INTERRUPTIBLE);
990
991 sched = !sock_flag(other, SOCK_DEAD) &&
992 !(other->sk_shutdown & RCV_SHUTDOWN) &&
993 unix_recvq_full(other);
994
995 unix_state_unlock(other);
996
997 if (sched)
998 timeo = schedule_timeout(timeo);
999
1000 finish_wait(&u->peer_wait, &wait);
1001 return timeo;
1002}
1003
1004static int unix_stream_connect(struct socket *sock, struct sockaddr *uaddr,
1005 int addr_len, int flags)
1006{
1007 struct sockaddr_un *sunaddr=(struct sockaddr_un *)uaddr;
1008 struct sock *sk = sock->sk;
1009 struct net *net = sock_net(sk);
1010 struct unix_sock *u = unix_sk(sk), *newu, *otheru;
1011 struct sock *newsk = NULL;
1012 struct sock *other = NULL;
1013 struct sk_buff *skb = NULL;
1014 unsigned hash;
1015 int st;
1016 int err;
1017 long timeo;
1018
1019 err = unix_mkname(sunaddr, addr_len, &hash);
1020 if (err < 0)
1021 goto out;
1022 addr_len = err;
1023
1024 if (test_bit(SOCK_PASSCRED, &sock->flags)
1025 && !u->addr && (err = unix_autobind(sock)) != 0)
1026 goto out;
1027
1028 timeo = sock_sndtimeo(sk, flags & O_NONBLOCK);
1029
1030 /* First of all allocate resources.
1031 If we will make it after state is locked,
1032 we will have to recheck all again in any case.
1033 */
1034
1035 err = -ENOMEM;
1036
1037 /* create new sock for complete connection */
1038 newsk = unix_create1(sock_net(sk), NULL);
1039 if (newsk == NULL)
1040 goto out;
1041
1042 /* Allocate skb for sending to listening sock */
1043 skb = sock_wmalloc(newsk, 1, 0, GFP_KERNEL);
1044 if (skb == NULL)
1045 goto out;
1046
1047restart:
1048 /* Find listening sock. */
1049 other = unix_find_other(net, sunaddr, addr_len, sk->sk_type, hash, &err);
1050 if (!other)
1051 goto out;
1052
1053 /* Latch state of peer */
1054 unix_state_lock(other);
1055
1056 /* Apparently VFS overslept socket death. Retry. */
1057 if (sock_flag(other, SOCK_DEAD)) {
1058 unix_state_unlock(other);
1059 sock_put(other);
1060 goto restart;
1061 }
1062
1063 err = -ECONNREFUSED;
1064 if (other->sk_state != TCP_LISTEN)
1065 goto out_unlock;
1066
1067 if (unix_recvq_full(other)) {
1068 err = -EAGAIN;
1069 if (!timeo)
1070 goto out_unlock;
1071
1072 timeo = unix_wait_for_peer(other, timeo);
1073
1074 err = sock_intr_errno(timeo);
1075 if (signal_pending(current))
1076 goto out;
1077 sock_put(other);
1078 goto restart;
1079 }
1080
1081 /* Latch our state.
1082
1083 It is a tricky place. We need to grab the write lock and cannot
1084 drop lock on peer. It is dangerous because deadlock is
1085 possible. Connect to self case and simultaneous
1086 attempt to connect are eliminated by checking socket
1087 state. other is TCP_LISTEN, if sk is TCP_LISTEN we
1088 check this before attempt to grab lock.
1089
1090 Well, and we have to recheck the state after socket locked.
1091 */
1092 st = sk->sk_state;
1093
1094 switch (st) {
1095 case TCP_CLOSE:
1096 /* This is ok... continue with connect */
1097 break;
1098 case TCP_ESTABLISHED:
1099 /* Socket is already connected */
1100 err = -EISCONN;
1101 goto out_unlock;
1102 default:
1103 err = -EINVAL;
1104 goto out_unlock;
1105 }
1106
1107 unix_state_lock_nested(sk);
1108
1109 if (sk->sk_state != st) {
1110 unix_state_unlock(sk);
1111 unix_state_unlock(other);
1112 sock_put(other);
1113 goto restart;
1114 }
1115
1116 err = security_unix_stream_connect(sock, other->sk_socket, newsk);
1117 if (err) {
1118 unix_state_unlock(sk);
1119 goto out_unlock;
1120 }
1121
1122 /* The way is open! Quickly set all the necessary fields... */
1123
1124 sock_hold(sk);
1125 unix_peer(newsk) = sk;
1126 newsk->sk_state = TCP_ESTABLISHED;
1127 newsk->sk_type = sk->sk_type;
1128 newsk->sk_peercred.pid = task_tgid_vnr(current);
1129 newsk->sk_peercred.uid = current->euid;
1130 newsk->sk_peercred.gid = current->egid;
1131 newu = unix_sk(newsk);
1132 newsk->sk_sleep = &newu->peer_wait;
1133 otheru = unix_sk(other);
1134
1135 /* copy address information from listening to new sock*/
1136 if (otheru->addr) {
1137 atomic_inc(&otheru->addr->refcnt);
1138 newu->addr = otheru->addr;
1139 }
1140 if (otheru->dentry) {
1141 newu->dentry = dget(otheru->dentry);
1142 newu->mnt = mntget(otheru->mnt);
1143 }
1144
1145 /* Set credentials */
1146 sk->sk_peercred = other->sk_peercred;
1147
1148 sock->state = SS_CONNECTED;
1149 sk->sk_state = TCP_ESTABLISHED;
1150 sock_hold(newsk);
1151
1152 smp_mb__after_atomic_inc(); /* sock_hold() does an atomic_inc() */
1153 unix_peer(sk) = newsk;
1154
1155 unix_state_unlock(sk);
1156
1157 /* take ten and send info to listening sock */
1158 spin_lock(&other->sk_receive_queue.lock);
1159 __skb_queue_tail(&other->sk_receive_queue, skb);
1160 spin_unlock(&other->sk_receive_queue.lock);
1161 unix_state_unlock(other);
1162 other->sk_data_ready(other, 0);
1163 sock_put(other);
1164 return 0;
1165
1166out_unlock:
1167 if (other)
1168 unix_state_unlock(other);
1169
1170out:
1171 if (skb)
1172 kfree_skb(skb);
1173 if (newsk)
1174 unix_release_sock(newsk, 0);
1175 if (other)
1176 sock_put(other);
1177 return err;
1178}
1179
1180static int unix_socketpair(struct socket *socka, struct socket *sockb)
1181{
1182 struct sock *ska=socka->sk, *skb = sockb->sk;
1183
1184 /* Join our sockets back to back */
1185 sock_hold(ska);
1186 sock_hold(skb);
1187 unix_peer(ska)=skb;
1188 unix_peer(skb)=ska;
1189 ska->sk_peercred.pid = skb->sk_peercred.pid = task_tgid_vnr(current);
1190 ska->sk_peercred.uid = skb->sk_peercred.uid = current->euid;
1191 ska->sk_peercred.gid = skb->sk_peercred.gid = current->egid;
1192
1193 if (ska->sk_type != SOCK_DGRAM) {
1194 ska->sk_state = TCP_ESTABLISHED;
1195 skb->sk_state = TCP_ESTABLISHED;
1196 socka->state = SS_CONNECTED;
1197 sockb->state = SS_CONNECTED;
1198 }
1199 return 0;
1200}
1201
1202static int unix_accept(struct socket *sock, struct socket *newsock, int flags)
1203{
1204 struct sock *sk = sock->sk;
1205 struct sock *tsk;
1206 struct sk_buff *skb;
1207 int err;
1208
1209 err = -EOPNOTSUPP;
1210 if (sock->type!=SOCK_STREAM && sock->type!=SOCK_SEQPACKET)
1211 goto out;
1212
1213 err = -EINVAL;
1214 if (sk->sk_state != TCP_LISTEN)
1215 goto out;
1216
1217 /* If socket state is TCP_LISTEN it cannot change (for now...),
1218 * so that no locks are necessary.
1219 */
1220
1221 skb = skb_recv_datagram(sk, 0, flags&O_NONBLOCK, &err);
1222 if (!skb) {
1223 /* This means receive shutdown. */
1224 if (err == 0)
1225 err = -EINVAL;
1226 goto out;
1227 }
1228
1229 tsk = skb->sk;
1230 skb_free_datagram(sk, skb);
1231 wake_up_interruptible(&unix_sk(sk)->peer_wait);
1232
1233 /* attach accepted sock to socket */
1234 unix_state_lock(tsk);
1235 newsock->state = SS_CONNECTED;
1236 sock_graft(tsk, newsock);
1237 unix_state_unlock(tsk);
1238 return 0;
1239
1240out:
1241 return err;
1242}
1243
1244
1245static int unix_getname(struct socket *sock, struct sockaddr *uaddr, int *uaddr_len, int peer)
1246{
1247 struct sock *sk = sock->sk;
1248 struct unix_sock *u;
1249 struct sockaddr_un *sunaddr=(struct sockaddr_un *)uaddr;
1250 int err = 0;
1251
1252 if (peer) {
1253 sk = unix_peer_get(sk);
1254
1255 err = -ENOTCONN;
1256 if (!sk)
1257 goto out;
1258 err = 0;
1259 } else {
1260 sock_hold(sk);
1261 }
1262
1263 u = unix_sk(sk);
1264 unix_state_lock(sk);
1265 if (!u->addr) {
1266 sunaddr->sun_family = AF_UNIX;
1267 sunaddr->sun_path[0] = 0;
1268 *uaddr_len = sizeof(short);
1269 } else {
1270 struct unix_address *addr = u->addr;
1271
1272 *uaddr_len = addr->len;
1273 memcpy(sunaddr, addr->name, *uaddr_len);
1274 }
1275 unix_state_unlock(sk);
1276 sock_put(sk);
1277out:
1278 return err;
1279}
1280
1281static void unix_detach_fds(struct scm_cookie *scm, struct sk_buff *skb)
1282{
1283 int i;
1284
1285 scm->fp = UNIXCB(skb).fp;
1286 skb->destructor = sock_wfree;
1287 UNIXCB(skb).fp = NULL;
1288
1289 for (i=scm->fp->count-1; i>=0; i--)
1290 unix_notinflight(scm->fp->fp[i]);
1291}
1292
1293static void unix_destruct_fds(struct sk_buff *skb)
1294{
1295 struct scm_cookie scm;
1296 memset(&scm, 0, sizeof(scm));
1297 unix_detach_fds(&scm, skb);
1298
1299 /* Alas, it calls VFS */
1300 /* So fscking what? fput() had been SMP-safe since the last Summer */
1301 scm_destroy(&scm);
1302 sock_wfree(skb);
1303}
1304
1305static int unix_attach_fds(struct scm_cookie *scm, struct sk_buff *skb)
1306{
1307 int i;
1308
1309 /*
1310 * Need to duplicate file references for the sake of garbage
1311 * collection. Otherwise a socket in the fps might become a
1312 * candidate for GC while the skb is not yet queued.
1313 */
1314 UNIXCB(skb).fp = scm_fp_dup(scm->fp);
1315 if (!UNIXCB(skb).fp)
1316 return -ENOMEM;
1317
1318 for (i=scm->fp->count-1; i>=0; i--)
1319 unix_inflight(scm->fp->fp[i]);
1320 skb->destructor = unix_destruct_fds;
1321 return 0;
1322}
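/*
 * Illustrative sketch (not part of this file's code): the userspace side
 * of the SCM_RIGHTS fd passing that unix_attach_fds()/unix_detach_fds()
 * service.  The names fd_to_pass and unix_sock_fd are example
 * assumptions only.
 *
 *	char cbuf[CMSG_SPACE(sizeof(int))];
 *	char byte = 'x';
 *	struct iovec iov = { .iov_base = &byte, .iov_len = 1 };
 *	struct msghdr msg = { .msg_iov = &iov, .msg_iovlen = 1,
 *			      .msg_control = cbuf, .msg_controllen = sizeof(cbuf) };
 *	struct cmsghdr *cm = CMSG_FIRSTHDR(&msg);
 *
 *	cm->cmsg_level = SOL_SOCKET;
 *	cm->cmsg_type  = SCM_RIGHTS;
 *	cm->cmsg_len   = CMSG_LEN(sizeof(int));
 *	memcpy(CMSG_DATA(cm), &fd_to_pass, sizeof(int));
 *	sendmsg(unix_sock_fd, &msg, 0);
 */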
1323
1324/*
1325 * Send AF_UNIX data.
1326 */
1327
1328static int unix_dgram_sendmsg(struct kiocb *kiocb, struct socket *sock,
1329 struct msghdr *msg, size_t len)
1330{
1331 struct sock_iocb *siocb = kiocb_to_siocb(kiocb);
1332 struct sock *sk = sock->sk;
1333 struct net *net = sock_net(sk);
1334 struct unix_sock *u = unix_sk(sk);
1335 struct sockaddr_un *sunaddr=msg->msg_name;
1336 struct sock *other = NULL;
1337 int namelen = 0; /* fake GCC */
1338 int err;
1339 unsigned hash;
1340 struct sk_buff *skb;
1341 long timeo;
1342 struct scm_cookie tmp_scm;
1343
1344 if (NULL == siocb->scm)
1345 siocb->scm = &tmp_scm;
1346 err = scm_send(sock, msg, siocb->scm);
1347 if (err < 0)
1348 return err;
1349
1350 err = -EOPNOTSUPP;
1351 if (msg->msg_flags&MSG_OOB)
1352 goto out;
1353
1354 if (msg->msg_namelen) {
1355 err = unix_mkname(sunaddr, msg->msg_namelen, &hash);
1356 if (err < 0)
1357 goto out;
1358 namelen = err;
1359 } else {
1360 sunaddr = NULL;
1361 err = -ENOTCONN;
1362 other = unix_peer_get(sk);
1363 if (!other)
1364 goto out;
1365 }
1366
1367 if (test_bit(SOCK_PASSCRED, &sock->flags)
1368 && !u->addr && (err = unix_autobind(sock)) != 0)
1369 goto out;
1370
1371 err = -EMSGSIZE;
1372 if (len > sk->sk_sndbuf - 32)
1373 goto out;
1374
1375 skb = sock_alloc_send_skb(sk, len, msg->msg_flags&MSG_DONTWAIT, &err);
1376 if (skb==NULL)
1377 goto out;
1378
1379 memcpy(UNIXCREDS(skb), &siocb->scm->creds, sizeof(struct ucred));
1380 if (siocb->scm->fp) {
1381 err = unix_attach_fds(siocb->scm, skb);
1382 if (err)
1383 goto out_free;
1384 }
1385 unix_get_secdata(siocb->scm, skb);
1386
1387 skb_reset_transport_header(skb);
1388 err = memcpy_fromiovec(skb_put(skb,len), msg->msg_iov, len);
1389 if (err)
1390 goto out_free;
1391
1392 timeo = sock_sndtimeo(sk, msg->msg_flags & MSG_DONTWAIT);
1393
1394restart:
1395 if (!other) {
1396 err = -ECONNRESET;
1397 if (sunaddr == NULL)
1398 goto out_free;
1399
1400 other = unix_find_other(net, sunaddr, namelen, sk->sk_type,
1401 hash, &err);
1402 if (other==NULL)
1403 goto out_free;
1404 }
1405
1406 unix_state_lock(other);
1407 err = -EPERM;
1408 if (!unix_may_send(sk, other))
1409 goto out_unlock;
1410
1411 if (sock_flag(other, SOCK_DEAD)) {
1412 /*
1413 * Check with 1003.1g - what should
1414 * datagram error
1415 */
1416 unix_state_unlock(other);
1417 sock_put(other);
1418
1419 err = 0;
1420 unix_state_lock(sk);
1421 if (unix_peer(sk) == other) {
1422 unix_peer(sk)=NULL;
1423 unix_state_unlock(sk);
1424
1425 unix_dgram_disconnected(sk, other);
1426 sock_put(other);
1427 err = -ECONNREFUSED;
1428 } else {
1429 unix_state_unlock(sk);
1430 }
1431
1432 other = NULL;
1433 if (err)
1434 goto out_free;
1435 goto restart;
1436 }
1437
1438 err = -EPIPE;
1439 if (other->sk_shutdown & RCV_SHUTDOWN)
1440 goto out_unlock;
1441
1442 if (sk->sk_type != SOCK_SEQPACKET) {
1443 err = security_unix_may_send(sk->sk_socket, other->sk_socket);
1444 if (err)
1445 goto out_unlock;
1446 }
1447
1448 if (unix_peer(other) != sk && unix_recvq_full(other)) {
1449 if (!timeo) {
1450 err = -EAGAIN;
1451 goto out_unlock;
1452 }
1453
1454 timeo = unix_wait_for_peer(other, timeo);
1455
1456 err = sock_intr_errno(timeo);
1457 if (signal_pending(current))
1458 goto out_free;
1459
1460 goto restart;
1461 }
1462
1463 skb_queue_tail(&other->sk_receive_queue, skb);
1464 unix_state_unlock(other);
1465 other->sk_data_ready(other, len);
1466 sock_put(other);
1467 scm_destroy(siocb->scm);
1468 return len;
1469
1470out_unlock:
1471 unix_state_unlock(other);
1472out_free:
1473 kfree_skb(skb);
1474out:
1475 if (other)
1476 sock_put(other);
1477 scm_destroy(siocb->scm);
1478 return err;
1479}
1480
1481
1482static int unix_stream_sendmsg(struct kiocb *kiocb, struct socket *sock,
1483 struct msghdr *msg, size_t len)
1484{
1485 struct sock_iocb *siocb = kiocb_to_siocb(kiocb);
1486 struct sock *sk = sock->sk;
1487 struct sock *other = NULL;
1488 struct sockaddr_un *sunaddr=msg->msg_name;
1489 int err,size;
1490 struct sk_buff *skb;
1491 int sent=0;
1492 struct scm_cookie tmp_scm;
1493
1494 if (NULL == siocb->scm)
1495 siocb->scm = &tmp_scm;
1496 err = scm_send(sock, msg, siocb->scm);
1497 if (err < 0)
1498 return err;
1499
1500 err = -EOPNOTSUPP;
1501 if (msg->msg_flags&MSG_OOB)
1502 goto out_err;
1503
1504 if (msg->msg_namelen) {
1505 err = sk->sk_state == TCP_ESTABLISHED ? -EISCONN : -EOPNOTSUPP;
1506 goto out_err;
1507 } else {
1508 sunaddr = NULL;
1509 err = -ENOTCONN;
1510 other = unix_peer(sk);
1511 if (!other)
1512 goto out_err;
1513 }
1514
1515 if (sk->sk_shutdown & SEND_SHUTDOWN)
1516 goto pipe_err;
1517
1518 while(sent < len)
1519 {
1520 /*
1521 * Optimisation for the fact that under 0.01% of X
1522 * messages typically need breaking up.
1523 */
1524
1525 size = len-sent;
1526
1527 /* Keep two messages in the pipe so it schedules better */
1528 if (size > ((sk->sk_sndbuf >> 1) - 64))
1529 size = (sk->sk_sndbuf >> 1) - 64;
1530
1531 if (size > SKB_MAX_ALLOC)
1532 size = SKB_MAX_ALLOC;
1533
1534 /*
1535 * Grab a buffer
1536 */
1537
1538 skb=sock_alloc_send_skb(sk,size,msg->msg_flags&MSG_DONTWAIT, &err);
1539
1540 if (skb==NULL)
1541 goto out_err;
1542
1543 /*
1544 * If you pass two values to the sock_alloc_send_skb
1545 * it tries to grab the large buffer with GFP_NOFS
1546 * (which can fail easily), and if it fails grab the
1547 * fallback size buffer which is under a page and will
1548 * succeed. [Alan]
1549 */
1550 size = min_t(int, size, skb_tailroom(skb));
1551
1552 memcpy(UNIXCREDS(skb), &siocb->scm->creds, sizeof(struct ucred));
1553 if (siocb->scm->fp) {
1554 err = unix_attach_fds(siocb->scm, skb);
1555 if (err) {
1556 kfree_skb(skb);
1557 goto out_err;
1558 }
1559 }
1560
1561 if ((err = memcpy_fromiovec(skb_put(skb,size), msg->msg_iov, size)) != 0) {
1562 kfree_skb(skb);
1563 goto out_err;
1564 }
1565
1566 unix_state_lock(other);
1567
1568 if (sock_flag(other, SOCK_DEAD) ||
1569 (other->sk_shutdown & RCV_SHUTDOWN))
1570 goto pipe_err_free;
1571
1572 skb_queue_tail(&other->sk_receive_queue, skb);
1573 unix_state_unlock(other);
1574 other->sk_data_ready(other, size);
1575 sent+=size;
1576 }
1577
1578 scm_destroy(siocb->scm);
1579 siocb->scm = NULL;
1580
1581 return sent;
1582
1583pipe_err_free:
1584 unix_state_unlock(other);
1585 kfree_skb(skb);
1586pipe_err:
1587 if (sent==0 && !(msg->msg_flags&MSG_NOSIGNAL))
1588 send_sig(SIGPIPE,current,0);
1589 err = -EPIPE;
1590out_err:
1591 scm_destroy(siocb->scm);
1592 siocb->scm = NULL;
1593 return sent ? : err;
1594}
1595
1596static int unix_seqpacket_sendmsg(struct kiocb *kiocb, struct socket *sock,
1597 struct msghdr *msg, size_t len)
1598{
1599 int err;
1600 struct sock *sk = sock->sk;
1601
1602 err = sock_error(sk);
1603 if (err)
1604 return err;
1605
1606 if (sk->sk_state != TCP_ESTABLISHED)
1607 return -ENOTCONN;
1608
1609 if (msg->msg_namelen)
1610 msg->msg_namelen = 0;
1611
1612 return unix_dgram_sendmsg(kiocb, sock, msg, len);
1613}
1614
1615static void unix_copy_addr(struct msghdr *msg, struct sock *sk)
1616{
1617 struct unix_sock *u = unix_sk(sk);
1618
1619 msg->msg_namelen = 0;
1620 if (u->addr) {
1621 msg->msg_namelen = u->addr->len;
1622 memcpy(msg->msg_name, u->addr->name, u->addr->len);
1623 }
1624}
1625
1626static int unix_dgram_recvmsg(struct kiocb *iocb, struct socket *sock,
1627 struct msghdr *msg, size_t size,
1628 int flags)
1629{
1630 struct sock_iocb *siocb = kiocb_to_siocb(iocb);
1631 struct scm_cookie tmp_scm;
1632 struct sock *sk = sock->sk;
1633 struct unix_sock *u = unix_sk(sk);
1634 int noblock = flags & MSG_DONTWAIT;
1635 struct sk_buff *skb;
1636 int err;
1637
1638 err = -EOPNOTSUPP;
1639 if (flags&MSG_OOB)
1640 goto out;
1641
1642 msg->msg_namelen = 0;
1643
1644 mutex_lock(&u->readlock);
1645
1646 skb = skb_recv_datagram(sk, flags, noblock, &err);
1647 if (!skb) {
1648 unix_state_lock(sk);
1649 /* Signal EOF on disconnected non-blocking SEQPACKET socket. */
1650 if (sk->sk_type == SOCK_SEQPACKET && err == -EAGAIN &&
1651 (sk->sk_shutdown & RCV_SHUTDOWN))
1652 err = 0;
1653 unix_state_unlock(sk);
1654 goto out_unlock;
1655 }
1656
1657 wake_up_interruptible_sync(&u->peer_wait);
1658
1659 if (msg->msg_name)
1660 unix_copy_addr(msg, skb->sk);
1661
1662 if (size > skb->len)
1663 size = skb->len;
1664 else if (size < skb->len)
1665 msg->msg_flags |= MSG_TRUNC;
1666
1667 err = skb_copy_datagram_iovec(skb, 0, msg->msg_iov, size);
1668 if (err)
1669 goto out_free;
1670
1671 if (!siocb->scm) {
1672 siocb->scm = &tmp_scm;
1673 memset(&tmp_scm, 0, sizeof(tmp_scm));
1674 }
1675 siocb->scm->creds = *UNIXCREDS(skb);
1676 unix_set_secdata(siocb->scm, skb);
1677
1678 if (!(flags & MSG_PEEK))
1679 {
1680 if (UNIXCB(skb).fp)
1681 unix_detach_fds(siocb->scm, skb);
1682 }
1683 else
1684 {
1685 /* It is questionable: on PEEK we could:
1686 - do not return fds - good, but too simple 8)
1687 - return fds, and do not return them on read (old strategy,
1688 apparently wrong)
1689 - clone fds (I chose it for now, it is the most universal
1690 solution)
1691
1692 POSIX 1003.1g does not actually define this clearly
1693 at all. POSIX 1003.1g doesn't define a lot of things
1694 clearly however!
1695
1696 */
1697 if (UNIXCB(skb).fp)
1698 siocb->scm->fp = scm_fp_dup(UNIXCB(skb).fp);
1699 }
1700 err = size;
1701
1702 scm_recv(sock, msg, siocb->scm, flags);
1703
1704out_free:
1705 skb_free_datagram(sk,skb);
1706out_unlock:
1707 mutex_unlock(&u->readlock);
1708out:
1709 return err;
1710}
1711
1712/*
1713 * Sleep until data has arrived. But check for races..
1714 */
1715
1716static long unix_stream_data_wait(struct sock * sk, long timeo)
1717{
1718 DEFINE_WAIT(wait);
1719
1720 unix_state_lock(sk);
1721
1722 for (;;) {
1723 prepare_to_wait(sk->sk_sleep, &wait, TASK_INTERRUPTIBLE);
1724
1725 if (!skb_queue_empty(&sk->sk_receive_queue) ||
1726 sk->sk_err ||
1727 (sk->sk_shutdown & RCV_SHUTDOWN) ||
1728 signal_pending(current) ||
1729 !timeo)
1730 break;
1731
1732 set_bit(SOCK_ASYNC_WAITDATA, &sk->sk_socket->flags);
1733 unix_state_unlock(sk);
1734 timeo = schedule_timeout(timeo);
1735 unix_state_lock(sk);
1736 clear_bit(SOCK_ASYNC_WAITDATA, &sk->sk_socket->flags);
1737 }
1738
1739 finish_wait(sk->sk_sleep, &wait);
1740 unix_state_unlock(sk);
1741 return timeo;
1742}
1743
1744
1745
1746static int unix_stream_recvmsg(struct kiocb *iocb, struct socket *sock,
1747 struct msghdr *msg, size_t size,
1748 int flags)
1749{
1750 struct sock_iocb *siocb = kiocb_to_siocb(iocb);
1751 struct scm_cookie tmp_scm;
1752 struct sock *sk = sock->sk;
1753 struct unix_sock *u = unix_sk(sk);
1754 struct sockaddr_un *sunaddr=msg->msg_name;
1755 int copied = 0;
1756 int check_creds = 0;
1757 int target;
1758 int err = 0;
1759 long timeo;
1760
1761 err = -EINVAL;
1762 if (sk->sk_state != TCP_ESTABLISHED)
1763 goto out;
1764
1765 err = -EOPNOTSUPP;
1766 if (flags&MSG_OOB)
1767 goto out;
1768
1769 target = sock_rcvlowat(sk, flags&MSG_WAITALL, size);
1770 timeo = sock_rcvtimeo(sk, flags&MSG_DONTWAIT);
1771
1772 msg->msg_namelen = 0;
1773
1774 /* Lock the socket to prevent queue disordering
1775 * while we sleep in memcpy_tomsg
1776 */
1777
1778 if (!siocb->scm) {
1779 siocb->scm = &tmp_scm;
1780 memset(&tmp_scm, 0, sizeof(tmp_scm));
1781 }
1782
1783 mutex_lock(&u->readlock);
1784
1785 do
1786 {
1787 int chunk;
1788 struct sk_buff *skb;
1789
1790 unix_state_lock(sk);
1791 skb = skb_dequeue(&sk->sk_receive_queue);
1792 if (skb==NULL)
1793 {
1794 if (copied >= target)
1795 goto unlock;
1796
1797 /*
1798 * POSIX 1003.1g mandates this order.
1799 */
1800
1801 if ((err = sock_error(sk)) != 0)
1802 goto unlock;
1803 if (sk->sk_shutdown & RCV_SHUTDOWN)
1804 goto unlock;
1805
1806 unix_state_unlock(sk);
1807 err = -EAGAIN;
1808 if (!timeo)
1809 break;
1810 mutex_unlock(&u->readlock);
1811
1812 timeo = unix_stream_data_wait(sk, timeo);
1813
1814 if (signal_pending(current)) {
1815 err = sock_intr_errno(timeo);
1816 goto out;
1817 }
1818 mutex_lock(&u->readlock);
1819 continue;
1820 unlock:
1821 unix_state_unlock(sk);
1822 break;
1823 }
1824 unix_state_unlock(sk);
1825
1826 if (check_creds) {
1827 /* Never glue messages from different writers */
1828 if (memcmp(UNIXCREDS(skb), &siocb->scm->creds, sizeof(siocb->scm->creds)) != 0) {
1829 skb_queue_head(&sk->sk_receive_queue, skb);
1830 break;
1831 }
1832 } else {
1833 /* Copy credentials */
1834 siocb->scm->creds = *UNIXCREDS(skb);
1835 check_creds = 1;
1836 }
1837
1838 /* Copy address just once */
1839 if (sunaddr)
1840 {
1841 unix_copy_addr(msg, skb->sk);
1842 sunaddr = NULL;
1843 }
1844
1845 chunk = min_t(unsigned int, skb->len, size);
1846 if (memcpy_toiovec(msg->msg_iov, skb->data, chunk)) {
1847 skb_queue_head(&sk->sk_receive_queue, skb);
1848 if (copied == 0)
1849 copied = -EFAULT;
1850 break;
1851 }
1852 copied += chunk;
1853 size -= chunk;
1854
1855 /* Mark read part of skb as used */
1856 if (!(flags & MSG_PEEK))
1857 {
1858 skb_pull(skb, chunk);
1859
1860 if (UNIXCB(skb).fp)
1861 unix_detach_fds(siocb->scm, skb);
1862
1863 /* put the skb back if we didn't use it up.. */
1864 if (skb->len)
1865 {
1866 skb_queue_head(&sk->sk_receive_queue, skb);
1867 break;
1868 }
1869
1870 kfree_skb(skb);
1871
1872 if (siocb->scm->fp)
1873 break;
1874 }
1875 else
1876 {
1877 /* It is questionable, see note in unix_dgram_recvmsg.
1878 */
1879 if (UNIXCB(skb).fp)
1880 siocb->scm->fp = scm_fp_dup(UNIXCB(skb).fp);
1881
1882 /* put message back and return */
1883 skb_queue_head(&sk->sk_receive_queue, skb);
1884 break;
1885 }
1886 } while (size);
1887
1888 mutex_unlock(&u->readlock);
1889 scm_recv(sock, msg, siocb->scm, flags);
1890out:
1891 return copied ? : err;
1892}
1893
1894static int unix_shutdown(struct socket *sock, int mode)
1895{
1896 struct sock *sk = sock->sk;
1897 struct sock *other;
1898
1899 mode = (mode+1)&(RCV_SHUTDOWN|SEND_SHUTDOWN);
1900
1901 if (mode) {
1902 unix_state_lock(sk);
1903 sk->sk_shutdown |= mode;
1904 other=unix_peer(sk);
1905 if (other)
1906 sock_hold(other);
1907 unix_state_unlock(sk);
1908 sk->sk_state_change(sk);
1909
1910 if (other &&
1911 (sk->sk_type == SOCK_STREAM || sk->sk_type == SOCK_SEQPACKET)) {
1912
1913 int peer_mode = 0;
1914
1915 if (mode&RCV_SHUTDOWN)
1916 peer_mode |= SEND_SHUTDOWN;
1917 if (mode&SEND_SHUTDOWN)
1918 peer_mode |= RCV_SHUTDOWN;
1919 unix_state_lock(other);
1920 other->sk_shutdown |= peer_mode;
1921 unix_state_unlock(other);
1922 other->sk_state_change(other);
1923 read_lock(&other->sk_callback_lock);
1924 if (peer_mode == SHUTDOWN_MASK)
1925 sk_wake_async(other, SOCK_WAKE_WAITD, POLL_HUP);
1926 else if (peer_mode & RCV_SHUTDOWN)
1927 sk_wake_async(other, SOCK_WAKE_WAITD, POLL_IN);
1928 read_unlock(&other->sk_callback_lock);
1929 }
1930 if (other)
1931 sock_put(other);
1932 }
1933 return 0;
1934}
1935
1936static int unix_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
1937{
1938 struct sock *sk = sock->sk;
1939 long amount=0;
1940 int err;
1941
1942 switch(cmd)
1943 {
1944 case SIOCOUTQ:
1945 amount = atomic_read(&sk->sk_wmem_alloc);
1946 err = put_user(amount, (int __user *)arg);
1947 break;
1948 case SIOCINQ:
1949 {
1950 struct sk_buff *skb;
1951
1952 if (sk->sk_state == TCP_LISTEN) {
1953 err = -EINVAL;
1954 break;
1955 }
1956
1957 spin_lock(&sk->sk_receive_queue.lock);
1958 if (sk->sk_type == SOCK_STREAM ||
1959 sk->sk_type == SOCK_SEQPACKET) {
1960 skb_queue_walk(&sk->sk_receive_queue, skb)
1961 amount += skb->len;
1962 } else {
1963 skb = skb_peek(&sk->sk_receive_queue);
1964 if (skb)
1965 amount=skb->len;
1966 }
1967 spin_unlock(&sk->sk_receive_queue.lock);
1968 err = put_user(amount, (int __user *)arg);
1969 break;
1970 }
1971
1972 default:
1973 err = -ENOIOCTLCMD;
1974 break;
1975 }
1976 return err;
1977}
1978
1979static unsigned int unix_poll(struct file * file, struct socket *sock, poll_table *wait)
1980{
1981 struct sock *sk = sock->sk;
1982 unsigned int mask;
1983
1984 poll_wait(file, sk->sk_sleep, wait);
1985 mask = 0;
1986
1987 /* exceptional events? */
1988 if (sk->sk_err)
1989 mask |= POLLERR;
1990 if (sk->sk_shutdown == SHUTDOWN_MASK)
1991 mask |= POLLHUP;
1992 if (sk->sk_shutdown & RCV_SHUTDOWN)
1993 mask |= POLLRDHUP;
1994
1995 /* readable? */
1996 if (!skb_queue_empty(&sk->sk_receive_queue) ||
1997 (sk->sk_shutdown & RCV_SHUTDOWN))
1998 mask |= POLLIN | POLLRDNORM;
1999
2000 /* Connection-based need to check for termination and startup */
2001 if ((sk->sk_type == SOCK_STREAM || sk->sk_type == SOCK_SEQPACKET) && sk->sk_state == TCP_CLOSE)
2002 mask |= POLLHUP;
2003
2004 /*
2005 * We also report the socket as writable when the other side has
2006 * shut down the connection. This prevents stuck sockets.
2007 */
2008 if (unix_writable(sk))
2009 mask |= POLLOUT | POLLWRNORM | POLLWRBAND;
2010
2011 return mask;
2012}
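/*
 * Editor's illustrative sketch, not part of af_unix.c: observing the mask
 * computed by unix_poll() from user space.  POLLRDHUP needs _GNU_SOURCE and
 * is reported once the peer has shut down its sending side.  The helper
 * name wait_readable_or_hup() is hypothetical.
 */
#if 0	/* user-space example only */
#define _GNU_SOURCE
#include <poll.h>

static int wait_readable_or_hup(int sock, int timeout_ms)
{
	struct pollfd pfd = { .fd = sock, .events = POLLIN | POLLRDHUP };

	if (poll(&pfd, 1, timeout_ms) <= 0)
		return 0;			/* timeout or error */
	if (pfd.revents & (POLLHUP | POLLRDHUP))
		return -1;			/* peer closed or done sending */
	return pfd.revents & POLLIN;		/* data is ready */
}
#endif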
2013
ec0d215f
RW
2014static unsigned int unix_dgram_poll(struct file *file, struct socket *sock,
2015 poll_table *wait)
3c73419c 2016{
ec0d215f
RW
2017 struct sock *sk = sock->sk, *other;
2018 unsigned int mask, writable;
3c73419c
RW
2019
2020 poll_wait(file, sk->sk_sleep, wait);
3c73419c
RW
2021 mask = 0;
2022
2023 /* exceptional events? */
2024 if (sk->sk_err || !skb_queue_empty(&sk->sk_error_queue))
2025 mask |= POLLERR;
2026 if (sk->sk_shutdown & RCV_SHUTDOWN)
2027 mask |= POLLRDHUP;
2028 if (sk->sk_shutdown == SHUTDOWN_MASK)
2029 mask |= POLLHUP;
2030
2031 /* readable? */
2032 if (!skb_queue_empty(&sk->sk_receive_queue) ||
2033 (sk->sk_shutdown & RCV_SHUTDOWN))
2034 mask |= POLLIN | POLLRDNORM;
2035
2036 /* Connection-based sockets need to check for termination and startup */
2037 if (sk->sk_type == SOCK_SEQPACKET) {
2038 if (sk->sk_state == TCP_CLOSE)
2039 mask |= POLLHUP;
2040 /* connection hasn't started yet? */
2041 if (sk->sk_state == TCP_SYN_SENT)
2042 return mask;
2043 }
2044
2045 /* writable? */
ec0d215f
RW
2046 writable = unix_writable(sk);
2047 if (writable) {
2048 other = unix_peer_get(sk);
2049 if (other) {
2050 if (unix_peer(other) != sk) {
2051 poll_wait(file, &unix_sk(other)->peer_wait,
2052 wait);
2053 if (unix_recvq_full(other))
2054 writable = 0;
2055 }
2056
2057 sock_put(other);
2058 }
2059 }
2060
2061 if (writable)
3c73419c
RW
2062 mask |= POLLOUT | POLLWRNORM | POLLWRBAND;
2063 else
2064 set_bit(SOCK_ASYNC_NOSPACE, &sk->sk_socket->flags);
2065
3c73419c
RW
2066 return mask;
2067}
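/*
 * Editor's illustrative sketch, not part of af_unix.c: because the function
 * above also polls the peer's receive queue, a connected non-blocking
 * SOCK_DGRAM sender can use poll() for backpressure instead of spinning on
 * EAGAIN.  send_dgram_blocking() is a hypothetical helper.
 */
#if 0	/* user-space example only */
#include <errno.h>
#include <poll.h>
#include <sys/socket.h>
#include <sys/types.h>

static ssize_t send_dgram_blocking(int sock, const void *buf, size_t len)
{
	struct pollfd pfd = { .fd = sock, .events = POLLOUT };
	ssize_t n;

	while ((n = send(sock, buf, len, 0)) < 0 && errno == EAGAIN)
		if (poll(&pfd, 1, -1) < 0)	/* wait for the peer's
						 * receive queue to drain */
			break;

	return n;
}
#endif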
1da177e4
LT
2068
2069#ifdef CONFIG_PROC_FS
a53eb3fe
PE
2070static struct sock *first_unix_socket(int *i)
2071{
2072 for (*i = 0; *i <= UNIX_HASH_SIZE; (*i)++) {
2073 if (!hlist_empty(&unix_socket_table[*i]))
2074 return __sk_head(&unix_socket_table[*i]);
2075 }
2076 return NULL;
2077}
2078
2079static struct sock *next_unix_socket(int *i, struct sock *s)
2080{
2081 struct sock *next = sk_next(s);
2082 /* More in this chain? */
2083 if (next)
2084 return next;
2085 /* Look for next non-empty chain. */
2086 for ((*i)++; *i <= UNIX_HASH_SIZE; (*i)++) {
2087 if (!hlist_empty(&unix_socket_table[*i]))
2088 return __sk_head(&unix_socket_table[*i]);
2089 }
2090 return NULL;
2091}
2092
097e66c5 2093struct unix_iter_state {
e372c414 2094 struct seq_net_private p;
097e66c5
DL
2095 int i;
2096};
1218854a 2097static struct sock *unix_seq_idx(struct seq_file *seq, loff_t pos)
1da177e4 2098{
1218854a 2099 struct unix_iter_state *iter = seq->private;
1da177e4
LT
2100 loff_t off = 0;
2101 struct sock *s;
2102
097e66c5 2103 for (s = first_unix_socket(&iter->i); s; s = next_unix_socket(&iter->i, s)) {
1218854a 2104 if (sock_net(s) != seq_file_net(seq))
097e66c5 2105 continue;
ac7bfa62 2106 if (off == pos)
1da177e4
LT
2107 return s;
2108 ++off;
2109 }
2110 return NULL;
2111}
2112
2113
2114static void *unix_seq_start(struct seq_file *seq, loff_t *pos)
9a429c49 2115 __acquires(unix_table_lock)
1da177e4 2116{
fbe9cc4a 2117 spin_lock(&unix_table_lock);
b9f3124f 2118 return *pos ? unix_seq_idx(seq, *pos - 1) : SEQ_START_TOKEN;
1da177e4
LT
2119}
2120
2121static void *unix_seq_next(struct seq_file *seq, void *v, loff_t *pos)
2122{
097e66c5
DL
2123 struct unix_iter_state *iter = seq->private;
2124 struct sock *sk = v;
1da177e4
LT
2125 ++*pos;
2126
b9f3124f 2127 if (v == SEQ_START_TOKEN)
097e66c5
DL
2128 sk = first_unix_socket(&iter->i);
2129 else
2130 sk = next_unix_socket(&iter->i, sk);
1218854a 2131 while (sk && (sock_net(sk) != seq_file_net(seq)))
097e66c5
DL
2132 sk = next_unix_socket(&iter->i, sk);
2133 return sk;
1da177e4
LT
2134}
2135
2136static void unix_seq_stop(struct seq_file *seq, void *v)
9a429c49 2137 __releases(unix_table_lock)
1da177e4 2138{
fbe9cc4a 2139 spin_unlock(&unix_table_lock);
1da177e4
LT
2140}
2141
2142static int unix_seq_show(struct seq_file *seq, void *v)
2143{
ac7bfa62 2144
b9f3124f 2145 if (v == SEQ_START_TOKEN)
1da177e4
LT
2146 seq_puts(seq, "Num RefCount Protocol Flags Type St "
2147 "Inode Path\n");
2148 else {
2149 struct sock *s = v;
2150 struct unix_sock *u = unix_sk(s);
1c92b4e5 2151 unix_state_lock(s);
1da177e4
LT
2152
2153 seq_printf(seq, "%p: %08X %08X %08X %04X %02X %5lu",
2154 s,
2155 atomic_read(&s->sk_refcnt),
2156 0,
2157 s->sk_state == TCP_LISTEN ? __SO_ACCEPTCON : 0,
2158 s->sk_type,
2159 s->sk_socket ?
2160 (s->sk_state == TCP_ESTABLISHED ? SS_CONNECTED : SS_UNCONNECTED) :
2161 (s->sk_state == TCP_ESTABLISHED ? SS_CONNECTING : SS_DISCONNECTING),
2162 sock_i_ino(s));
2163
2164 if (u->addr) {
2165 int i, len;
2166 seq_putc(seq, ' ');
2167
2168 i = 0;
2169 len = u->addr->len - sizeof(short);
2170 if (!UNIX_ABSTRACT(s))
2171 len--;
2172 else {
2173 seq_putc(seq, '@');
2174 i++;
2175 }
2176 for ( ; i < len; i++)
2177 seq_putc(seq, u->addr->name->sun_path[i]);
2178 }
1c92b4e5 2179 unix_state_unlock(s);
1da177e4
LT
2180 seq_putc(seq, '\n');
2181 }
2182
2183 return 0;
2184}
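/*
 * Editor's illustrative note, not part of af_unix.c: unix_seq_show() above
 * emits /proc/net/unix lines such as the made-up sample below; a leading
 * '@' marks an abstract-namespace name, while filesystem sockets print
 * their bound path.
 *
 *   Num       RefCount Protocol Flags    Type St Inode Path
 *   ffff88003a4f2a80: 00000002 00000000 00010000 0001 01 12345 /var/run/example.sock
 *
 * dump_unix_sockets() is a hypothetical user-space reader:
 */
#if 0	/* user-space example only */
#include <stdio.h>

static void dump_unix_sockets(void)
{
	char line[512];
	FILE *f = fopen("/proc/net/unix", "r");

	if (!f)
		return;
	while (fgets(line, sizeof(line), f))
		fputs(line, stdout);
	fclose(f);
}
#endif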
2185
56b3d975 2186static const struct seq_operations unix_seq_ops = {
1da177e4
LT
2187 .start = unix_seq_start,
2188 .next = unix_seq_next,
2189 .stop = unix_seq_stop,
2190 .show = unix_seq_show,
2191};
2192
2193
2194static int unix_seq_open(struct inode *inode, struct file *file)
2195{
e372c414
DL
2196 return seq_open_net(inode, file, &unix_seq_ops,
2197 sizeof(struct unix_iter_state));
1da177e4
LT
2198}
2199
da7071d7 2200static const struct file_operations unix_seq_fops = {
1da177e4
LT
2201 .owner = THIS_MODULE,
2202 .open = unix_seq_open,
2203 .read = seq_read,
2204 .llseek = seq_lseek,
e372c414 2205 .release = seq_release_net,
1da177e4
LT
2206};
2207
2208#endif
2209
2210static struct net_proto_family unix_family_ops = {
2211 .family = PF_UNIX,
2212 .create = unix_create,
2213 .owner = THIS_MODULE,
2214};
2215
097e66c5
DL
2216
2217static int unix_net_init(struct net *net)
2218{
2219 int error = -ENOMEM;
2220
a0a53c8b 2221 net->unx.sysctl_max_dgram_qlen = 10;
1597fbc0
PE
2222 if (unix_sysctl_register(net))
2223 goto out;
d392e497 2224
097e66c5 2225#ifdef CONFIG_PROC_FS
1597fbc0
PE
2226 if (!proc_net_fops_create(net, "unix", 0, &unix_seq_fops)) {
2227 unix_sysctl_unregister(net);
097e66c5 2228 goto out;
1597fbc0 2229 }
097e66c5
DL
2230#endif
2231 error = 0;
2232out:
48dcc33e 2233 return error;
097e66c5
DL
2234}
2235
2236static void unix_net_exit(struct net *net)
2237{
1597fbc0 2238 unix_sysctl_unregister(net);
097e66c5
DL
2239 proc_net_remove(net, "unix");
2240}
2241
2242static struct pernet_operations unix_net_ops = {
2243 .init = unix_net_init,
2244 .exit = unix_net_exit,
2245};
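/*
 * Editor's illustrative sketch, not part of af_unix.c: the per-namespace
 * default that unix_net_init() sets is exposed as the sysctl
 * net.unix.max_dgram_qlen.  read_max_dgram_qlen() is a hypothetical helper
 * that reads it through procfs.
 */
#if 0	/* user-space example only */
#include <stdio.h>

static int read_max_dgram_qlen(void)
{
	int qlen = -1;
	FILE *f = fopen("/proc/sys/net/unix/max_dgram_qlen", "r");

	if (f) {
		if (fscanf(f, "%d", &qlen) != 1)
			qlen = -1;
		fclose(f);
	}
	return qlen;		/* 10 by default, per unix_net_init() */
}
#endif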
2246
1da177e4
LT
2247static int __init af_unix_init(void)
2248{
2249 int rc = -1;
2250 struct sk_buff *dummy_skb;
2251
ef047f5e 2252 BUILD_BUG_ON(sizeof(struct unix_skb_parms) > sizeof(dummy_skb->cb));
1da177e4
LT
2253
2254 rc = proto_register(&unix_proto, 1);
ac7bfa62
YH
2255 if (rc != 0) {
2256 printk(KERN_CRIT "%s: Cannot create unix_sock SLAB cache!\n",
0dc47877 2257 __func__);
1da177e4
LT
2258 goto out;
2259 }
2260
2261 sock_register(&unix_family_ops);
097e66c5 2262 register_pernet_subsys(&unix_net_ops);
1da177e4
LT
2263out:
2264 return rc;
2265}
2266
2267static void __exit af_unix_exit(void)
2268{
2269 sock_unregister(PF_UNIX);
1da177e4 2270 proto_unregister(&unix_proto);
097e66c5 2271 unregister_pernet_subsys(&unix_net_ops);
1da177e4
LT
2272}
2273
3d366960
DW
2274/* Earlier than device_initcall() so that other drivers invoking
2275 request_module() don't end up in a loop when modprobe tries
2276 to use a UNIX socket. But later than subsys_initcall() because
2277 we depend on stuff initialised there */
2278fs_initcall(af_unix_init);
1da177e4
LT
2279module_exit(af_unix_exit);
2280
2281MODULE_LICENSE("GPL");
2282MODULE_ALIAS_NETPROTO(PF_UNIX);