1/*
2 * NETLINK Kernel-user communication protocol.
3 *
4 * Authors: Alan Cox <alan@redhat.com>
5 * Alexey Kuznetsov <kuznet@ms2.inr.ac.ru>
6 *
7 * This program is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU General Public License
9 * as published by the Free Software Foundation; either version
10 * 2 of the License, or (at your option) any later version.
11 *
12 * Tue Jun 26 14:36:48 MEST 2001 Herbert "herp" Rosmanith
13 * added netlink_proto_exit
14 * Tue Jan 22 18:32:44 BRST 2002 Arnaldo C. de Melo <acme@conectiva.com.br>
15 * use nlk_sk, as sk->protinfo is on a diet 8)
16 * Fri Jul 22 19:51:12 MEST 2005 Harald Welte <laforge@gnumonks.org>
17 * - inc module use count of module that owns
18 * the kernel socket in case userspace opens
19 * socket of same protocol
20 * - remove all module support, since netlink is
21 * mandatory if CONFIG_NET=y these days
22 */
23
24#include <linux/module.h>
25
26#include <linux/capability.h>
27#include <linux/kernel.h>
28#include <linux/init.h>
29#include <linux/signal.h>
30#include <linux/sched.h>
31#include <linux/errno.h>
32#include <linux/string.h>
33#include <linux/stat.h>
34#include <linux/socket.h>
35#include <linux/un.h>
36#include <linux/fcntl.h>
37#include <linux/termios.h>
38#include <linux/sockios.h>
39#include <linux/net.h>
40#include <linux/fs.h>
41#include <linux/slab.h>
42#include <asm/uaccess.h>
43#include <linux/skbuff.h>
44#include <linux/netdevice.h>
45#include <linux/rtnetlink.h>
46#include <linux/proc_fs.h>
47#include <linux/seq_file.h>
48#include <linux/smp_lock.h>
49#include <linux/notifier.h>
50#include <linux/security.h>
51#include <linux/jhash.h>
52#include <linux/jiffies.h>
53#include <linux/random.h>
54#include <linux/bitops.h>
55#include <linux/mm.h>
56#include <linux/types.h>
57#include <linux/audit.h>
58#include <linux/selinux.h>
59#include <linux/mutex.h>
60
61#include <net/sock.h>
62#include <net/scm.h>
63#include <net/netlink.h>
64
65#define NLGRPSZ(x) (ALIGN(x, sizeof(unsigned long) * 8) / 8)
66
67struct netlink_sock {
68 /* struct sock has to be the first member of netlink_sock */
69 struct sock sk;
70 u32 pid;
71 u32 dst_pid;
72 u32 dst_group;
73 u32 flags;
74 u32 subscriptions;
75 u32 ngroups;
76 unsigned long *groups;
77 unsigned long state;
78 wait_queue_head_t wait;
79 struct netlink_callback *cb;
80 struct mutex *cb_mutex;
81 struct mutex cb_def_mutex;
82 void (*data_ready)(struct sock *sk, int bytes);
83 struct module *module;
84};
85
86#define NETLINK_KERNEL_SOCKET 0x1
87#define NETLINK_RECV_PKTINFO 0x2
88
89static inline struct netlink_sock *nlk_sk(struct sock *sk)
90{
91 return (struct netlink_sock *)sk;
92}
93
94struct nl_pid_hash {
95 struct hlist_head *table;
96 unsigned long rehash_time;
97
98 unsigned int mask;
99 unsigned int shift;
100
101 unsigned int entries;
102 unsigned int max_shift;
103
104 u32 rnd;
105};
106
107struct netlink_table {
108 struct nl_pid_hash hash;
109 struct hlist_head mc_list;
110 unsigned long *listeners;
111 unsigned int nl_nonroot;
112 unsigned int groups;
113 struct mutex *cb_mutex;
114 struct module *module;
115 int registered;
116};
117
118static struct netlink_table *nl_table;
119
120static DECLARE_WAIT_QUEUE_HEAD(nl_table_wait);
121
122static int netlink_dump(struct sock *sk);
123static void netlink_destroy_callback(struct netlink_callback *cb);
124static void netlink_queue_skip(struct nlmsghdr *nlh, struct sk_buff *skb);
125
126static DEFINE_RWLOCK(nl_table_lock);
127static atomic_t nl_table_users = ATOMIC_INIT(0);
128
129static ATOMIC_NOTIFIER_HEAD(netlink_chain);
130
131static u32 netlink_group_mask(u32 group)
132{
133 return group ? 1 << (group - 1) : 0;
134}
135
136static struct hlist_head *nl_pid_hashfn(struct nl_pid_hash *hash, u32 pid)
137{
138 return &hash->table[jhash_1word(pid, hash->rnd) & hash->mask];
139}
140
141static void netlink_sock_destruct(struct sock *sk)
142{
143 struct netlink_sock *nlk = nlk_sk(sk);
144
145 BUG_ON(mutex_is_locked(nlk_sk(sk)->cb_mutex));
146 if (nlk->cb) {
147 if (nlk->cb->done)
148 nlk->cb->done(nlk->cb);
149 netlink_destroy_callback(nlk->cb);
150 }
151
152 skb_queue_purge(&sk->sk_receive_queue);
153
154 if (!sock_flag(sk, SOCK_DEAD)) {
155 printk("Freeing alive netlink socket %p\n", sk);
156 return;
157 }
158 BUG_TRAP(!atomic_read(&sk->sk_rmem_alloc));
159 BUG_TRAP(!atomic_read(&sk->sk_wmem_alloc));
160 BUG_TRAP(!nlk_sk(sk)->groups);
161}
162
163/* This lock without WQ_FLAG_EXCLUSIVE is good on UP and it is _very_ bad on SMP.
164 * Look, when several writers sleep and reader wakes them up, all but one
165 * immediately hit write lock and grab all the cpus. Exclusive sleep solves
166 * this, _but_ remember, it adds useless work on UP machines.
167 */
168
169static void netlink_table_grab(void)
170{
171 write_lock_irq(&nl_table_lock);
172
173 if (atomic_read(&nl_table_users)) {
174 DECLARE_WAITQUEUE(wait, current);
175
176 add_wait_queue_exclusive(&nl_table_wait, &wait);
177 for(;;) {
178 set_current_state(TASK_UNINTERRUPTIBLE);
179 if (atomic_read(&nl_table_users) == 0)
180 break;
181 write_unlock_irq(&nl_table_lock);
182 schedule();
183 write_lock_irq(&nl_table_lock);
184 }
185
186 __set_current_state(TASK_RUNNING);
187 remove_wait_queue(&nl_table_wait, &wait);
188 }
189}
190
191static __inline__ void netlink_table_ungrab(void)
192{
193 write_unlock_irq(&nl_table_lock);
194 wake_up(&nl_table_wait);
195}
196
197static __inline__ void
198netlink_lock_table(void)
199{
200 /* read_lock() synchronizes us to netlink_table_grab */
201
202 read_lock(&nl_table_lock);
203 atomic_inc(&nl_table_users);
204 read_unlock(&nl_table_lock);
205}
206
207static __inline__ void
208netlink_unlock_table(void)
209{
210 if (atomic_dec_and_test(&nl_table_users))
211 wake_up(&nl_table_wait);
212}
213
214static __inline__ struct sock *netlink_lookup(int protocol, u32 pid)
215{
216 struct nl_pid_hash *hash = &nl_table[protocol].hash;
217 struct hlist_head *head;
218 struct sock *sk;
219 struct hlist_node *node;
220
221 read_lock(&nl_table_lock);
222 head = nl_pid_hashfn(hash, pid);
223 sk_for_each(sk, node, head) {
224 if (nlk_sk(sk)->pid == pid) {
225 sock_hold(sk);
226 goto found;
227 }
228 }
229 sk = NULL;
230found:
231 read_unlock(&nl_table_lock);
232 return sk;
233}
234
235static inline struct hlist_head *nl_pid_hash_alloc(size_t size)
236{
237 if (size <= PAGE_SIZE)
238 return kmalloc(size, GFP_ATOMIC);
239 else
240 return (struct hlist_head *)
241 __get_free_pages(GFP_ATOMIC, get_order(size));
242}
243
244static inline void nl_pid_hash_free(struct hlist_head *table, size_t size)
245{
246 if (size <= PAGE_SIZE)
247 kfree(table);
248 else
249 free_pages((unsigned long)table, get_order(size));
250}
251
252static int nl_pid_hash_rehash(struct nl_pid_hash *hash, int grow)
253{
254 unsigned int omask, mask, shift;
255 size_t osize, size;
256 struct hlist_head *otable, *table;
257 int i;
258
259 omask = mask = hash->mask;
260 osize = size = (mask + 1) * sizeof(*table);
261 shift = hash->shift;
262
263 if (grow) {
264 if (++shift > hash->max_shift)
265 return 0;
266 mask = mask * 2 + 1;
267 size *= 2;
268 }
269
270 table = nl_pid_hash_alloc(size);
271 if (!table)
272 return 0;
273
274 memset(table, 0, size);
275 otable = hash->table;
276 hash->table = table;
277 hash->mask = mask;
278 hash->shift = shift;
279 get_random_bytes(&hash->rnd, sizeof(hash->rnd));
280
281 for (i = 0; i <= omask; i++) {
282 struct sock *sk;
283 struct hlist_node *node, *tmp;
284
285 sk_for_each_safe(sk, node, tmp, &otable[i])
286 __sk_add_node(sk, nl_pid_hashfn(hash, nlk_sk(sk)->pid));
287 }
288
289 nl_pid_hash_free(otable, osize);
290 hash->rehash_time = jiffies + 10 * 60 * HZ;
291 return 1;
292}
293
294static inline int nl_pid_hash_dilute(struct nl_pid_hash *hash, int len)
295{
296 int avg = hash->entries >> hash->shift;
297
298 if (unlikely(avg > 1) && nl_pid_hash_rehash(hash, 1))
299 return 1;
300
301 if (unlikely(len > avg) && time_after(jiffies, hash->rehash_time)) {
302 nl_pid_hash_rehash(hash, 0);
303 return 1;
304 }
305
306 return 0;
307}
308
309static const struct proto_ops netlink_ops;
310
311static void
312netlink_update_listeners(struct sock *sk)
313{
314 struct netlink_table *tbl = &nl_table[sk->sk_protocol];
315 struct hlist_node *node;
316 unsigned long mask;
317 unsigned int i;
318
319 for (i = 0; i < NLGRPSZ(tbl->groups)/sizeof(unsigned long); i++) {
320 mask = 0;
321 sk_for_each_bound(sk, node, &tbl->mc_list)
322 mask |= nlk_sk(sk)->groups[i];
323 tbl->listeners[i] = mask;
324 }
325 /* this function is only called with the netlink table "grabbed", which
326 * makes sure updates are visible before bind or setsockopt return. */
327}
328
329static int netlink_insert(struct sock *sk, u32 pid)
330{
331 struct nl_pid_hash *hash = &nl_table[sk->sk_protocol].hash;
332 struct hlist_head *head;
333 int err = -EADDRINUSE;
334 struct sock *osk;
335 struct hlist_node *node;
336 int len;
337
338 netlink_table_grab();
339 head = nl_pid_hashfn(hash, pid);
340 len = 0;
341 sk_for_each(osk, node, head) {
342 if (nlk_sk(osk)->pid == pid)
343 break;
344 len++;
345 }
346 if (node)
347 goto err;
348
349 err = -EBUSY;
350 if (nlk_sk(sk)->pid)
351 goto err;
352
353 err = -ENOMEM;
354 if (BITS_PER_LONG > 32 && unlikely(hash->entries >= UINT_MAX))
355 goto err;
356
357 if (len && nl_pid_hash_dilute(hash, len))
358 head = nl_pid_hashfn(hash, pid);
359 hash->entries++;
360 nlk_sk(sk)->pid = pid;
361 sk_add_node(sk, head);
362 err = 0;
363
364err:
365 netlink_table_ungrab();
366 return err;
367}
368
369static void netlink_remove(struct sock *sk)
370{
371 netlink_table_grab();
372 if (sk_del_node_init(sk))
373 nl_table[sk->sk_protocol].hash.entries--;
374 if (nlk_sk(sk)->subscriptions)
375 __sk_del_bind_node(sk);
376 netlink_table_ungrab();
377}
378
379static struct proto netlink_proto = {
380 .name = "NETLINK",
381 .owner = THIS_MODULE,
382 .obj_size = sizeof(struct netlink_sock),
383};
384
385static int __netlink_create(struct socket *sock, struct mutex *cb_mutex,
386 int protocol)
387{
388 struct sock *sk;
389 struct netlink_sock *nlk;
390
391 sock->ops = &netlink_ops;
392
393 sk = sk_alloc(PF_NETLINK, GFP_KERNEL, &netlink_proto, 1);
394 if (!sk)
395 return -ENOMEM;
396
397 sock_init_data(sock, sk);
398
399 nlk = nlk_sk(sk);
400 if (cb_mutex)
401 nlk->cb_mutex = cb_mutex;
402 else {
403 nlk->cb_mutex = &nlk->cb_def_mutex;
404 mutex_init(nlk->cb_mutex);
405 }
406 init_waitqueue_head(&nlk->wait);
407
408 sk->sk_destruct = netlink_sock_destruct;
409 sk->sk_protocol = protocol;
410 return 0;
411}
412
413static int netlink_create(struct socket *sock, int protocol)
414{
415 struct module *module = NULL;
416 struct mutex *cb_mutex;
417 struct netlink_sock *nlk;
418 int err = 0;
419
420 sock->state = SS_UNCONNECTED;
421
422 if (sock->type != SOCK_RAW && sock->type != SOCK_DGRAM)
423 return -ESOCKTNOSUPPORT;
424
425 if (protocol<0 || protocol >= MAX_LINKS)
426 return -EPROTONOSUPPORT;
427
428 netlink_lock_table();
429#ifdef CONFIG_KMOD
430 if (!nl_table[protocol].registered) {
431 netlink_unlock_table();
432 request_module("net-pf-%d-proto-%d", PF_NETLINK, protocol);
433 netlink_lock_table();
434 }
435#endif
436 if (nl_table[protocol].registered &&
437 try_module_get(nl_table[protocol].module))
438 module = nl_table[protocol].module;
439 cb_mutex = nl_table[protocol].cb_mutex;
440 netlink_unlock_table();
441
442 if ((err = __netlink_create(sock, cb_mutex, protocol)) < 0)
443 goto out_module;
444
445 nlk = nlk_sk(sock->sk);
446 nlk->module = module;
447out:
448 return err;
449
450out_module:
451 module_put(module);
452 goto out;
453}
454
455static int netlink_release(struct socket *sock)
456{
457 struct sock *sk = sock->sk;
458 struct netlink_sock *nlk;
459
460 if (!sk)
461 return 0;
462
463 netlink_remove(sk);
464 sock_orphan(sk);
465 nlk = nlk_sk(sk);
466
467 /*
468 * OK. Socket is unlinked, any packets that arrive now
469 * will be purged.
470 */
471
472 sock->sk = NULL;
473 wake_up_interruptible_all(&nlk->wait);
474
475 skb_queue_purge(&sk->sk_write_queue);
476
477 if (nlk->pid && !nlk->subscriptions) {
478 struct netlink_notify n = {
479 .protocol = sk->sk_protocol,
480 .pid = nlk->pid,
481 };
482 atomic_notifier_call_chain(&netlink_chain,
483 NETLINK_URELEASE, &n);
484 }
485
486 module_put(nlk->module);
487
488 netlink_table_grab();
489 if (nlk->flags & NETLINK_KERNEL_SOCKET) {
490 kfree(nl_table[sk->sk_protocol].listeners);
491 nl_table[sk->sk_protocol].module = NULL;
492 nl_table[sk->sk_protocol].registered = 0;
493 } else if (nlk->subscriptions)
494 netlink_update_listeners(sk);
495 netlink_table_ungrab();
496
497 kfree(nlk->groups);
498 nlk->groups = NULL;
499
500 sock_put(sk);
501 return 0;
502}
503
504static int netlink_autobind(struct socket *sock)
505{
506 struct sock *sk = sock->sk;
507 struct nl_pid_hash *hash = &nl_table[sk->sk_protocol].hash;
508 struct hlist_head *head;
509 struct sock *osk;
510 struct hlist_node *node;
511 s32 pid = current->tgid;
512 int err;
513 static s32 rover = -4097;
514
515retry:
516 cond_resched();
517 netlink_table_grab();
518 head = nl_pid_hashfn(hash, pid);
519 sk_for_each(osk, node, head) {
520 if (nlk_sk(osk)->pid == pid) {
521 /* Bind collision, search negative pid values. */
522 pid = rover--;
523 if (rover > -4097)
524 rover = -4097;
525 netlink_table_ungrab();
526 goto retry;
527 }
528 }
529 netlink_table_ungrab();
530
531 err = netlink_insert(sk, pid);
532 if (err == -EADDRINUSE)
533 goto retry;
534
535 /* If 2 threads race to autobind, that is fine. */
536 if (err == -EBUSY)
537 err = 0;
538
539 return err;
540}
541
542static inline int netlink_capable(struct socket *sock, unsigned int flag)
543{
544 return (nl_table[sock->sk->sk_protocol].nl_nonroot & flag) ||
545 capable(CAP_NET_ADMIN);
546}
547
548static void
549netlink_update_subscriptions(struct sock *sk, unsigned int subscriptions)
550{
551 struct netlink_sock *nlk = nlk_sk(sk);
552
553 if (nlk->subscriptions && !subscriptions)
554 __sk_del_bind_node(sk);
555 else if (!nlk->subscriptions && subscriptions)
556 sk_add_bind_node(sk, &nl_table[sk->sk_protocol].mc_list);
557 nlk->subscriptions = subscriptions;
558}
559
560static int netlink_alloc_groups(struct sock *sk)
561{
562 struct netlink_sock *nlk = nlk_sk(sk);
563 unsigned int groups;
564 int err = 0;
565
566 netlink_lock_table();
567 groups = nl_table[sk->sk_protocol].groups;
568 if (!nl_table[sk->sk_protocol].registered)
569 err = -ENOENT;
570 netlink_unlock_table();
571
572 if (err)
573 return err;
574
575 nlk->groups = kzalloc(NLGRPSZ(groups), GFP_KERNEL);
576 if (nlk->groups == NULL)
577 return -ENOMEM;
578 nlk->ngroups = groups;
579 return 0;
580}
581
582static int netlink_bind(struct socket *sock, struct sockaddr *addr, int addr_len)
583{
584 struct sock *sk = sock->sk;
585 struct netlink_sock *nlk = nlk_sk(sk);
586 struct sockaddr_nl *nladdr = (struct sockaddr_nl *)addr;
587 int err;
588
589 if (nladdr->nl_family != AF_NETLINK)
590 return -EINVAL;
591
592 /* Only superuser is allowed to listen to multicasts */
593 if (nladdr->nl_groups) {
594 if (!netlink_capable(sock, NL_NONROOT_RECV))
595 return -EPERM;
596 if (nlk->groups == NULL) {
597 err = netlink_alloc_groups(sk);
598 if (err)
599 return err;
600 }
601 }
602
603 if (nlk->pid) {
604 if (nladdr->nl_pid != nlk->pid)
605 return -EINVAL;
606 } else {
607 err = nladdr->nl_pid ?
608 netlink_insert(sk, nladdr->nl_pid) :
609 netlink_autobind(sock);
610 if (err)
611 return err;
612 }
613
614 if (!nladdr->nl_groups && (nlk->groups == NULL || !(u32)nlk->groups[0]))
615 return 0;
616
617 netlink_table_grab();
618 netlink_update_subscriptions(sk, nlk->subscriptions +
619 hweight32(nladdr->nl_groups) -
620 hweight32(nlk->groups[0]));
621 nlk->groups[0] = (nlk->groups[0] & ~0xffffffffUL) | nladdr->nl_groups;
622 netlink_update_listeners(sk);
623 netlink_table_ungrab();
624
625 return 0;
626}
627
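/*
 * Example (userspace sketch, not part of this file): the bind() path above is
 * what a process exercises when it fills in sockaddr_nl, e.g. to listen to
 * RTMGRP_LINK events on NETLINK_ROUTE.  Joining groups this way needs
 * CAP_NET_ADMIN unless the protocol was registered with NL_NONROOT_RECV.
 *
 *	int fd = socket(AF_NETLINK, SOCK_RAW, NETLINK_ROUTE);
 *	struct sockaddr_nl snl;
 *
 *	memset(&snl, 0, sizeof(snl));
 *	snl.nl_family = AF_NETLINK;
 *	snl.nl_pid = getpid();		(nl_pid == 0 would use netlink_autobind)
 *	snl.nl_groups = RTMGRP_LINK;	(ends up in nlk->groups[0] above)
 *	bind(fd, (struct sockaddr *)&snl, sizeof(snl));
 */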
628static int netlink_connect(struct socket *sock, struct sockaddr *addr,
629 int alen, int flags)
630{
631 int err = 0;
632 struct sock *sk = sock->sk;
633 struct netlink_sock *nlk = nlk_sk(sk);
634 struct sockaddr_nl *nladdr=(struct sockaddr_nl*)addr;
635
636 if (addr->sa_family == AF_UNSPEC) {
637 sk->sk_state = NETLINK_UNCONNECTED;
638 nlk->dst_pid = 0;
639 nlk->dst_group = 0;
640 return 0;
641 }
642 if (addr->sa_family != AF_NETLINK)
643 return -EINVAL;
644
645 /* Only superuser is allowed to send multicasts */
646 if (nladdr->nl_groups && !netlink_capable(sock, NL_NONROOT_SEND))
647 return -EPERM;
648
649 if (!nlk->pid)
650 err = netlink_autobind(sock);
651
652 if (err == 0) {
653 sk->sk_state = NETLINK_CONNECTED;
654 nlk->dst_pid = nladdr->nl_pid;
655 nlk->dst_group = ffs(nladdr->nl_groups);
656 }
657
658 return err;
659}
660
661static int netlink_getname(struct socket *sock, struct sockaddr *addr, int *addr_len, int peer)
662{
663 struct sock *sk = sock->sk;
664 struct netlink_sock *nlk = nlk_sk(sk);
665 struct sockaddr_nl *nladdr=(struct sockaddr_nl *)addr;
666
667 nladdr->nl_family = AF_NETLINK;
668 nladdr->nl_pad = 0;
669 *addr_len = sizeof(*nladdr);
670
671 if (peer) {
672 nladdr->nl_pid = nlk->dst_pid;
673 nladdr->nl_groups = netlink_group_mask(nlk->dst_group);
674 } else {
675 nladdr->nl_pid = nlk->pid;
676 nladdr->nl_groups = nlk->groups ? nlk->groups[0] : 0;
677 }
678 return 0;
679}
680
681static void netlink_overrun(struct sock *sk)
682{
683 if (!test_and_set_bit(0, &nlk_sk(sk)->state)) {
684 sk->sk_err = ENOBUFS;
685 sk->sk_error_report(sk);
686 }
687}
688
689static struct sock *netlink_getsockbypid(struct sock *ssk, u32 pid)
690{
691 int protocol = ssk->sk_protocol;
692 struct sock *sock;
693 struct netlink_sock *nlk;
694
695 sock = netlink_lookup(protocol, pid);
696 if (!sock)
697 return ERR_PTR(-ECONNREFUSED);
698
699 /* Don't bother queuing skb if kernel socket has no input function */
700 nlk = nlk_sk(sock);
701 if ((nlk->pid == 0 && !nlk->data_ready) ||
702 (sock->sk_state == NETLINK_CONNECTED &&
703 nlk->dst_pid != nlk_sk(ssk)->pid)) {
704 sock_put(sock);
705 return ERR_PTR(-ECONNREFUSED);
706 }
707 return sock;
708}
709
710struct sock *netlink_getsockbyfilp(struct file *filp)
711{
712 struct inode *inode = filp->f_path.dentry->d_inode;
713 struct sock *sock;
714
715 if (!S_ISSOCK(inode->i_mode))
716 return ERR_PTR(-ENOTSOCK);
717
718 sock = SOCKET_I(inode)->sk;
719 if (sock->sk_family != AF_NETLINK)
720 return ERR_PTR(-EINVAL);
721
722 sock_hold(sock);
723 return sock;
724}
725
726/*
727 * Attach a skb to a netlink socket.
728 * The caller must hold a reference to the destination socket. On error, the
729 * reference is dropped. The skb is not sent to the destination; only
730 * error checks are performed and memory in the queue is reserved.
731 * Return values:
732 * < 0: error. skb freed, reference to sock dropped.
733 * 0: continue
734 * 1: repeat lookup - reference dropped while waiting for socket memory.
735 */
736int netlink_attachskb(struct sock *sk, struct sk_buff *skb, int nonblock,
737 long timeo, struct sock *ssk)
738{
739 struct netlink_sock *nlk;
740
741 nlk = nlk_sk(sk);
742
743 if (atomic_read(&sk->sk_rmem_alloc) > sk->sk_rcvbuf ||
744 test_bit(0, &nlk->state)) {
745 DECLARE_WAITQUEUE(wait, current);
746 if (!timeo) {
747 if (!ssk || nlk_sk(ssk)->pid == 0)
748 netlink_overrun(sk);
749 sock_put(sk);
750 kfree_skb(skb);
751 return -EAGAIN;
752 }
753
754 __set_current_state(TASK_INTERRUPTIBLE);
755 add_wait_queue(&nlk->wait, &wait);
756
757 if ((atomic_read(&sk->sk_rmem_alloc) > sk->sk_rcvbuf ||
758 test_bit(0, &nlk->state)) &&
759 !sock_flag(sk, SOCK_DEAD))
760 timeo = schedule_timeout(timeo);
761
762 __set_current_state(TASK_RUNNING);
763 remove_wait_queue(&nlk->wait, &wait);
764 sock_put(sk);
765
766 if (signal_pending(current)) {
767 kfree_skb(skb);
768 return sock_intr_errno(timeo);
769 }
770 return 1;
771 }
772 skb_set_owner_r(skb, sk);
773 return 0;
774}
775
776int netlink_sendskb(struct sock *sk, struct sk_buff *skb, int protocol)
777{
778 int len = skb->len;
779
780 skb_queue_tail(&sk->sk_receive_queue, skb);
781 sk->sk_data_ready(sk, len);
782 sock_put(sk);
783 return len;
784}
785
786void netlink_detachskb(struct sock *sk, struct sk_buff *skb)
787{
788 kfree_skb(skb);
789 sock_put(sk);
790}
791
792static inline struct sk_buff *netlink_trim(struct sk_buff *skb,
793 gfp_t allocation)
794{
795 int delta;
796
797 skb_orphan(skb);
798
799 delta = skb->end - skb->tail;
800 if (delta * 2 < skb->truesize)
801 return skb;
802
803 if (skb_shared(skb)) {
804 struct sk_buff *nskb = skb_clone(skb, allocation);
805 if (!nskb)
806 return skb;
807 kfree_skb(skb);
808 skb = nskb;
809 }
810
811 if (!pskb_expand_head(skb, 0, -delta, allocation))
812 skb->truesize -= delta;
813
814 return skb;
815}
816
817int netlink_unicast(struct sock *ssk, struct sk_buff *skb, u32 pid, int nonblock)
818{
819 struct sock *sk;
820 int err;
821 long timeo;
822
823 skb = netlink_trim(skb, gfp_any());
824
825 timeo = sock_sndtimeo(ssk, nonblock);
826retry:
827 sk = netlink_getsockbypid(ssk, pid);
828 if (IS_ERR(sk)) {
829 kfree_skb(skb);
830 return PTR_ERR(sk);
831 }
832 err = netlink_attachskb(sk, skb, nonblock, timeo, ssk);
833 if (err == 1)
834 goto retry;
835 if (err)
836 return err;
837
838 return netlink_sendskb(sk, skb, ssk->sk_protocol);
839}
840
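/*
 * Example (kernel-side sketch): a typical netlink_unicast() caller builds the
 * message with the nlmsg helpers from <net/netlink.h>; my_sk, dst_pid and
 * MY_MSG_TYPE are illustrative only.  On failure the skb has already been
 * freed, on success the queued length is returned.
 *
 *	struct sk_buff *skb;
 *	struct nlmsghdr *nlh;
 *	int err;
 *
 *	skb = nlmsg_new(sizeof(u32), GFP_KERNEL);
 *	if (skb == NULL)
 *		return -ENOBUFS;
 *	nlh = nlmsg_put(skb, 0, 0, MY_MSG_TYPE, sizeof(u32), 0);
 *	if (nlh == NULL) {
 *		kfree_skb(skb);
 *		return -EMSGSIZE;
 *	}
 *	*(u32 *)nlmsg_data(nlh) = 42;
 *	err = netlink_unicast(my_sk, skb, dst_pid, MSG_DONTWAIT);
 *	return err < 0 ? err : 0;
 */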
841int netlink_has_listeners(struct sock *sk, unsigned int group)
842{
843 int res = 0;
844
845 BUG_ON(!(nlk_sk(sk)->flags & NETLINK_KERNEL_SOCKET));
846 if (group - 1 < nl_table[sk->sk_protocol].groups)
847 res = test_bit(group - 1, nl_table[sk->sk_protocol].listeners);
848 return res;
849}
850EXPORT_SYMBOL_GPL(netlink_has_listeners);
851
852static __inline__ int netlink_broadcast_deliver(struct sock *sk, struct sk_buff *skb)
853{
854 struct netlink_sock *nlk = nlk_sk(sk);
855
856 if (atomic_read(&sk->sk_rmem_alloc) <= sk->sk_rcvbuf &&
857 !test_bit(0, &nlk->state)) {
858 skb_set_owner_r(skb, sk);
859 skb_queue_tail(&sk->sk_receive_queue, skb);
860 sk->sk_data_ready(sk, skb->len);
861 return atomic_read(&sk->sk_rmem_alloc) > sk->sk_rcvbuf;
862 }
863 return -1;
864}
865
866struct netlink_broadcast_data {
867 struct sock *exclude_sk;
868 u32 pid;
869 u32 group;
870 int failure;
871 int congested;
872 int delivered;
873 gfp_t allocation;
874 struct sk_buff *skb, *skb2;
875};
876
877static inline int do_one_broadcast(struct sock *sk,
878 struct netlink_broadcast_data *p)
879{
880 struct netlink_sock *nlk = nlk_sk(sk);
881 int val;
882
883 if (p->exclude_sk == sk)
884 goto out;
885
886 if (nlk->pid == p->pid || p->group - 1 >= nlk->ngroups ||
887 !test_bit(p->group - 1, nlk->groups))
888 goto out;
889
890 if (p->failure) {
891 netlink_overrun(sk);
892 goto out;
893 }
894
895 sock_hold(sk);
896 if (p->skb2 == NULL) {
897 if (skb_shared(p->skb)) {
898 p->skb2 = skb_clone(p->skb, p->allocation);
899 } else {
900 p->skb2 = skb_get(p->skb);
901 /*
902 * skb ownership may have been set when
903 * delivered to a previous socket.
904 */
905 skb_orphan(p->skb2);
906 }
907 }
908 if (p->skb2 == NULL) {
909 netlink_overrun(sk);
910 /* Clone failed. Notify ALL listeners. */
911 p->failure = 1;
912 } else if ((val = netlink_broadcast_deliver(sk, p->skb2)) < 0) {
913 netlink_overrun(sk);
914 } else {
915 p->congested |= val;
916 p->delivered = 1;
917 p->skb2 = NULL;
918 }
919 sock_put(sk);
920
921out:
922 return 0;
923}
924
925int netlink_broadcast(struct sock *ssk, struct sk_buff *skb, u32 pid,
926 u32 group, gfp_t allocation)
927{
928 struct netlink_broadcast_data info;
929 struct hlist_node *node;
930 struct sock *sk;
931
932 skb = netlink_trim(skb, allocation);
933
934 info.exclude_sk = ssk;
935 info.pid = pid;
936 info.group = group;
937 info.failure = 0;
938 info.congested = 0;
939 info.delivered = 0;
940 info.allocation = allocation;
941 info.skb = skb;
942 info.skb2 = NULL;
943
944 /* While we sleep in clone, do not allow to change socket list */
945
946 netlink_lock_table();
947
948 sk_for_each_bound(sk, node, &nl_table[ssk->sk_protocol].mc_list)
949 do_one_broadcast(sk, &info);
950
951 kfree_skb(skb);
952
953 netlink_unlock_table();
954
955 if (info.skb2)
956 kfree_skb(info.skb2);
957
958 if (info.delivered) {
959 if (info.congested && (allocation & __GFP_WAIT))
960 yield();
961 return 0;
962 }
963 if (info.failure)
964 return -ENOBUFS;
965 return -ESRCH;
966}
967
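/*
 * Example (kernel-side sketch): multicast senders usually check for
 * subscribers before building the skb and then let netlink_broadcast()
 * clone it once per receiver; my_sk (the kernel socket), MY_GROUP and
 * build_notification() are illustrative.
 *
 *	if (!netlink_has_listeners(my_sk, MY_GROUP))
 *		return 0;
 *	skb = build_notification();	(as in the unicast example above)
 *	return netlink_broadcast(my_sk, skb, 0, MY_GROUP, GFP_KERNEL);
 */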
968struct netlink_set_err_data {
969 struct sock *exclude_sk;
970 u32 pid;
971 u32 group;
972 int code;
973};
974
975static inline int do_one_set_err(struct sock *sk,
976 struct netlink_set_err_data *p)
977{
978 struct netlink_sock *nlk = nlk_sk(sk);
979
980 if (sk == p->exclude_sk)
981 goto out;
982
983 if (nlk->pid == p->pid || p->group - 1 >= nlk->ngroups ||
984 !test_bit(p->group - 1, nlk->groups))
985 goto out;
986
987 sk->sk_err = p->code;
988 sk->sk_error_report(sk);
989out:
990 return 0;
991}
992
993void netlink_set_err(struct sock *ssk, u32 pid, u32 group, int code)
994{
995 struct netlink_set_err_data info;
996 struct hlist_node *node;
997 struct sock *sk;
998
999 info.exclude_sk = ssk;
1000 info.pid = pid;
1001 info.group = group;
1002 info.code = code;
1003
1004 read_lock(&nl_table_lock);
1005
1006 sk_for_each_bound(sk, node, &nl_table[ssk->sk_protocol].mc_list)
1007 do_one_set_err(sk, &info);
1008
1009 read_unlock(&nl_table_lock);
1010}
1011
1012static int netlink_setsockopt(struct socket *sock, int level, int optname,
1013 char __user *optval, int optlen)
1014{
1015 struct sock *sk = sock->sk;
1016 struct netlink_sock *nlk = nlk_sk(sk);
1017 int val = 0, err;
1018
1019 if (level != SOL_NETLINK)
1020 return -ENOPROTOOPT;
1021
1022 if (optlen >= sizeof(int) &&
1023 get_user(val, (int __user *)optval))
1024 return -EFAULT;
1025
1026 switch (optname) {
1027 case NETLINK_PKTINFO:
1028 if (val)
1029 nlk->flags |= NETLINK_RECV_PKTINFO;
1030 else
1031 nlk->flags &= ~NETLINK_RECV_PKTINFO;
1032 err = 0;
1033 break;
1034 case NETLINK_ADD_MEMBERSHIP:
1035 case NETLINK_DROP_MEMBERSHIP: {
1036 unsigned int subscriptions;
1037 int old, new = optname == NETLINK_ADD_MEMBERSHIP ? 1 : 0;
1038
1039 if (!netlink_capable(sock, NL_NONROOT_RECV))
1040 return -EPERM;
1041 if (nlk->groups == NULL) {
1042 err = netlink_alloc_groups(sk);
1043 if (err)
1044 return err;
1045 }
1046 if (!val || val - 1 >= nlk->ngroups)
1047 return -EINVAL;
1048 netlink_table_grab();
1049 old = test_bit(val - 1, nlk->groups);
1050 subscriptions = nlk->subscriptions - old + new;
1051 if (new)
1052 __set_bit(val - 1, nlk->groups);
1053 else
1054 __clear_bit(val - 1, nlk->groups);
1055 netlink_update_subscriptions(sk, subscriptions);
1056 netlink_update_listeners(sk);
1057 netlink_table_ungrab();
1058 err = 0;
1059 break;
1060 }
1061 default:
1062 err = -ENOPROTOOPT;
1063 }
1064 return err;
1065}
1066
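/*
 * Example (userspace sketch): the two options handled above map to
 *
 *	int on = 1;
 *	int grp = MY_GRP;	(a 1-based group index, not a bitmask)
 *
 *	setsockopt(fd, SOL_NETLINK, NETLINK_PKTINFO, &on, sizeof(on));
 *	setsockopt(fd, SOL_NETLINK, NETLINK_ADD_MEMBERSHIP, &grp, sizeof(grp));
 *
 * NETLINK_ADD_MEMBERSHIP takes the group number itself, which is how groups
 * beyond the 32 bits of sockaddr_nl.nl_groups can be joined.
 */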
1067static int netlink_getsockopt(struct socket *sock, int level, int optname,
1068 char __user *optval, int __user *optlen)
1069{
1070 struct sock *sk = sock->sk;
1071 struct netlink_sock *nlk = nlk_sk(sk);
1072 int len, val, err;
1073
1074 if (level != SOL_NETLINK)
1075 return -ENOPROTOOPT;
1076
1077 if (get_user(len, optlen))
1078 return -EFAULT;
1079 if (len < 0)
1080 return -EINVAL;
1081
1082 switch (optname) {
1083 case NETLINK_PKTINFO:
1084 if (len < sizeof(int))
1085 return -EINVAL;
1086 len = sizeof(int);
1087 val = nlk->flags & NETLINK_RECV_PKTINFO ? 1 : 0;
1088 if (put_user(len, optlen) ||
1089 put_user(val, optval))
1090 return -EFAULT;
1091 err = 0;
1092 break;
1093 default:
1094 err = -ENOPROTOOPT;
1095 }
1096 return err;
1097}
1098
1099static void netlink_cmsg_recv_pktinfo(struct msghdr *msg, struct sk_buff *skb)
1100{
1101 struct nl_pktinfo info;
1102
1103 info.group = NETLINK_CB(skb).dst_group;
1104 put_cmsg(msg, SOL_NETLINK, NETLINK_PKTINFO, sizeof(info), &info);
1105}
1106
1107static inline void netlink_rcv_wake(struct sock *sk)
1108{
1109 struct netlink_sock *nlk = nlk_sk(sk);
1110
1111 if (skb_queue_empty(&sk->sk_receive_queue))
1112 clear_bit(0, &nlk->state);
1113 if (!test_bit(0, &nlk->state))
1114 wake_up_interruptible(&nlk->wait);
1115}
1116
1117static int netlink_sendmsg(struct kiocb *kiocb, struct socket *sock,
1118 struct msghdr *msg, size_t len)
1119{
1120 struct sock_iocb *siocb = kiocb_to_siocb(kiocb);
1121 struct sock *sk = sock->sk;
1122 struct netlink_sock *nlk = nlk_sk(sk);
1123 struct sockaddr_nl *addr=msg->msg_name;
1124 u32 dst_pid;
1125 u32 dst_group;
1126 struct sk_buff *skb;
1127 int err;
1128 struct scm_cookie scm;
1129
1130 if (msg->msg_flags&MSG_OOB)
1131 return -EOPNOTSUPP;
1132
1133 if (NULL == siocb->scm)
1134 siocb->scm = &scm;
1135 err = scm_send(sock, msg, siocb->scm);
1136 if (err < 0)
1137 return err;
1138
1139 if (msg->msg_namelen) {
1140 if (addr->nl_family != AF_NETLINK)
1141 return -EINVAL;
1142 dst_pid = addr->nl_pid;
1143 dst_group = ffs(addr->nl_groups);
1144 if (dst_group && !netlink_capable(sock, NL_NONROOT_SEND))
1145 return -EPERM;
1146 } else {
1147 dst_pid = nlk->dst_pid;
1148 dst_group = nlk->dst_group;
1149 }
1150
1151 if (!nlk->pid) {
1152 err = netlink_autobind(sock);
1153 if (err)
1154 goto out;
1155 }
1156
1157 err = -EMSGSIZE;
1158 if (len > sk->sk_sndbuf - 32)
1159 goto out;
1160 err = -ENOBUFS;
1161 skb = alloc_skb(len, GFP_KERNEL);
1162 if (skb==NULL)
1163 goto out;
1164
1165 NETLINK_CB(skb).pid = nlk->pid;
1166 NETLINK_CB(skb).dst_group = dst_group;
1167 NETLINK_CB(skb).loginuid = audit_get_loginuid(current->audit_context);
1168 selinux_get_task_sid(current, &(NETLINK_CB(skb).sid));
1169 memcpy(NETLINK_CREDS(skb), &siocb->scm->creds, sizeof(struct ucred));
1170
1171 /* What can I do? Netlink is asynchronous, so we
1172 have to save the current capabilities and check
1173 them when this message is delivered to the
1174 corresponding kernel module. --ANK (980802)
1175 */
1176
1177 err = -EFAULT;
1178 if (memcpy_fromiovec(skb_put(skb,len), msg->msg_iov, len)) {
1179 kfree_skb(skb);
1180 goto out;
1181 }
1182
1183 err = security_netlink_send(sk, skb);
1184 if (err) {
1185 kfree_skb(skb);
1186 goto out;
1187 }
1188
1189 if (dst_group) {
1190 atomic_inc(&skb->users);
1191 netlink_broadcast(sk, skb, dst_pid, dst_group, GFP_KERNEL);
1192 }
1193 err = netlink_unicast(sk, skb, dst_pid, msg->msg_flags&MSG_DONTWAIT);
1194
1195out:
1196 return err;
1197}
1198
1199static int netlink_recvmsg(struct kiocb *kiocb, struct socket *sock,
1200 struct msghdr *msg, size_t len,
1201 int flags)
1202{
1203 struct sock_iocb *siocb = kiocb_to_siocb(kiocb);
1204 struct scm_cookie scm;
1205 struct sock *sk = sock->sk;
1206 struct netlink_sock *nlk = nlk_sk(sk);
1207 int noblock = flags&MSG_DONTWAIT;
1208 size_t copied;
1209 struct sk_buff *skb;
1210 int err;
1211
1212 if (flags&MSG_OOB)
1213 return -EOPNOTSUPP;
1214
1215 copied = 0;
1216
1217 skb = skb_recv_datagram(sk,flags,noblock,&err);
1218 if (skb==NULL)
1219 goto out;
1220
1221 msg->msg_namelen = 0;
1222
1223 copied = skb->len;
1224 if (len < copied) {
1225 msg->msg_flags |= MSG_TRUNC;
1226 copied = len;
1227 }
1228
1229 skb_reset_transport_header(skb);
1230 err = skb_copy_datagram_iovec(skb, 0, msg->msg_iov, copied);
1231
1232 if (msg->msg_name) {
1233 struct sockaddr_nl *addr = (struct sockaddr_nl*)msg->msg_name;
1234 addr->nl_family = AF_NETLINK;
1235 addr->nl_pad = 0;
1236 addr->nl_pid = NETLINK_CB(skb).pid;
1237 addr->nl_groups = netlink_group_mask(NETLINK_CB(skb).dst_group);
1238 msg->msg_namelen = sizeof(*addr);
1239 }
1240
1241 if (nlk->flags & NETLINK_RECV_PKTINFO)
1242 netlink_cmsg_recv_pktinfo(msg, skb);
1243
1244 if (NULL == siocb->scm) {
1245 memset(&scm, 0, sizeof(scm));
1246 siocb->scm = &scm;
1247 }
1248 siocb->scm->creds = *NETLINK_CREDS(skb);
1249 skb_free_datagram(sk, skb);
1250
1251 if (nlk->cb && atomic_read(&sk->sk_rmem_alloc) <= sk->sk_rcvbuf / 2)
1252 netlink_dump(sk);
1253
1254 scm_recv(sock, msg, siocb->scm, flags);
1255
1256 if (flags & MSG_TRUNC)
1257 copied = skb->len;
1258
1259out:
1260 netlink_rcv_wake(sk);
1261 return err ? : copied;
1262}
1263
1264static void netlink_data_ready(struct sock *sk, int len)
1265{
1266 struct netlink_sock *nlk = nlk_sk(sk);
1267
1268 if (nlk->data_ready)
1269 nlk->data_ready(sk, len);
1270 netlink_rcv_wake(sk);
1271}
1272
1273/*
1274 * We export these functions to other modules. They provide a
1275 * complete set of kernel non-blocking support for message
1276 * queueing.
1277 */
1278
1279struct sock *
1280netlink_kernel_create(int unit, unsigned int groups,
1281 void (*input)(struct sock *sk, int len),
1282 struct mutex *cb_mutex, struct module *module)
1283{
1284 struct socket *sock;
1285 struct sock *sk;
1286 struct netlink_sock *nlk;
1287 unsigned long *listeners = NULL;
1288
1289 BUG_ON(!nl_table);
1290
1291 if (unit<0 || unit>=MAX_LINKS)
1292 return NULL;
1293
1294 if (sock_create_lite(PF_NETLINK, SOCK_DGRAM, unit, &sock))
1295 return NULL;
1296
1297 if (__netlink_create(sock, cb_mutex, unit) < 0)
1298 goto out_sock_release;
1299
1300 if (groups < 32)
1301 groups = 32;
1302
1303 listeners = kzalloc(NLGRPSZ(groups), GFP_KERNEL);
1304 if (!listeners)
1305 goto out_sock_release;
1306
1307 sk = sock->sk;
1308 sk->sk_data_ready = netlink_data_ready;
1309 if (input)
1310 nlk_sk(sk)->data_ready = input;
1311
1312 if (netlink_insert(sk, 0))
1313 goto out_sock_release;
1314
1315 nlk = nlk_sk(sk);
1316 nlk->flags |= NETLINK_KERNEL_SOCKET;
1317
1318 netlink_table_grab();
1319 nl_table[unit].groups = groups;
1320 nl_table[unit].listeners = listeners;
1321 nl_table[unit].cb_mutex = cb_mutex;
1322 nl_table[unit].module = module;
1323 nl_table[unit].registered = 1;
1324 netlink_table_ungrab();
1325
1326 return sk;
1327
1328out_sock_release:
1329 kfree(listeners);
1330 sock_release(sock);
1331 return NULL;
1332}
1333
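/*
 * Example (kernel-side sketch): a protocol registers its kernel socket from
 * its init path roughly as below; MY_NETLINK_UNIT, MY_GRP_MAX and
 * my_nl_input() are illustrative (compare rtnetlink_init() for a real user).
 * The input callback typically drains sk->sk_receive_queue, for instance via
 * netlink_run_queue() further down in this file.
 *
 *	static struct sock *my_nl_sk;
 *
 *	my_nl_sk = netlink_kernel_create(MY_NETLINK_UNIT, MY_GRP_MAX,
 *					 my_nl_input, NULL, THIS_MODULE);
 *	if (my_nl_sk == NULL)
 *		return -ENOMEM;
 */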
1334void netlink_set_nonroot(int protocol, unsigned int flags)
1335{
1336 if ((unsigned int)protocol < MAX_LINKS)
1337 nl_table[protocol].nl_nonroot = flags;
1338}
1339
1340static void netlink_destroy_callback(struct netlink_callback *cb)
1341{
1342 if (cb->skb)
1343 kfree_skb(cb->skb);
1344 kfree(cb);
1345}
1346
1347/*
1348 * It looks a bit ugly.
1349 * It would be better to create kernel thread.
1350 */
1351
1352static int netlink_dump(struct sock *sk)
1353{
1354 struct netlink_sock *nlk = nlk_sk(sk);
1355 struct netlink_callback *cb;
1356 struct sk_buff *skb;
1357 struct nlmsghdr *nlh;
1358 int len, err = -ENOBUFS;
1359
1360 skb = sock_rmalloc(sk, NLMSG_GOODSIZE, 0, GFP_KERNEL);
1361 if (!skb)
1362 goto errout;
1363
1364 mutex_lock(nlk->cb_mutex);
1365
1366 cb = nlk->cb;
1367 if (cb == NULL) {
1368 err = -EINVAL;
1369 goto errout_skb;
1370 }
1371
1372 len = cb->dump(skb, cb);
1373
1374 if (len > 0) {
1375 mutex_unlock(nlk->cb_mutex);
1376 skb_queue_tail(&sk->sk_receive_queue, skb);
1377 sk->sk_data_ready(sk, len);
1378 return 0;
1379 }
1380
1381 nlh = nlmsg_put_answer(skb, cb, NLMSG_DONE, sizeof(len), NLM_F_MULTI);
1382 if (!nlh)
1383 goto errout_skb;
1384
1385 memcpy(nlmsg_data(nlh), &len, sizeof(len));
1386
1387 skb_queue_tail(&sk->sk_receive_queue, skb);
1388 sk->sk_data_ready(sk, skb->len);
1389
1390 if (cb->done)
1391 cb->done(cb);
1392 nlk->cb = NULL;
1393 mutex_unlock(nlk->cb_mutex);
1394
1395 netlink_destroy_callback(cb);
1396 return 0;
1397
1398errout_skb:
1399 mutex_unlock(nlk->cb_mutex);
1400 kfree_skb(skb);
1401errout:
1402 return err;
1403}
1404
1405int netlink_dump_start(struct sock *ssk, struct sk_buff *skb,
1406 struct nlmsghdr *nlh,
1407 int (*dump)(struct sk_buff *skb, struct netlink_callback*),
1408 int (*done)(struct netlink_callback*))
1409{
1410 struct netlink_callback *cb;
1411 struct sock *sk;
1412 struct netlink_sock *nlk;
1413
1414 cb = kzalloc(sizeof(*cb), GFP_KERNEL);
1415 if (cb == NULL)
1416 return -ENOBUFS;
1417
1418 cb->dump = dump;
1419 cb->done = done;
1420 cb->nlh = nlh;
1421 atomic_inc(&skb->users);
1422 cb->skb = skb;
1423
1424 sk = netlink_lookup(ssk->sk_protocol, NETLINK_CB(skb).pid);
1425 if (sk == NULL) {
1426 netlink_destroy_callback(cb);
1427 return -ECONNREFUSED;
1428 }
1429 nlk = nlk_sk(sk);
1430 /* A dump is in progress... */
1431 mutex_lock(nlk->cb_mutex);
1432 if (nlk->cb) {
1433 mutex_unlock(nlk->cb_mutex);
1434 netlink_destroy_callback(cb);
1435 sock_put(sk);
1436 return -EBUSY;
1437 }
1438 nlk->cb = cb;
1439 mutex_unlock(nlk->cb_mutex);
1440
1441 netlink_dump(sk);
1442 sock_put(sk);
1443
1444 /* We successfully started a dump, by returning -EINTR we
1446 * signal the queue management to interrupt processing of
1446 * any netlink messages so userspace gets a chance to read
1447 * the results. */
1448 return -EINTR;
1449}
1450
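/*
 * Example (kernel-side sketch): a request handler that answers with a
 * multipart dump simply returns the netlink_dump_start() result, so the
 * -EINTR convention above propagates to netlink_rcv_skb()/netlink_run_queue().
 * my_dump(), my_doit() and my_nl_sk are illustrative.
 *
 *	static int my_dump(struct sk_buff *skb, struct netlink_callback *cb)
 *	{
 *		(append entries to skb, remember the position in cb->args[],
 *		 return skb->len while there is more, 0 once finished)
 *	}
 *
 *	static int my_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
 *	{
 *		if (nlh->nlmsg_flags & NLM_F_DUMP)
 *			return netlink_dump_start(my_nl_sk, skb, nlh,
 *						  my_dump, NULL);
 *		return my_doit(skb, nlh);	(non-dump requests)
 *	}
 */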
1451void netlink_ack(struct sk_buff *in_skb, struct nlmsghdr *nlh, int err)
1452{
1453 struct sk_buff *skb;
1454 struct nlmsghdr *rep;
1455 struct nlmsgerr *errmsg;
1456 size_t payload = sizeof(*errmsg);
1457
1458 /* error messages get the original request appened */
1459 if (err)
1460 payload += nlmsg_len(nlh);
1461
1462 skb = nlmsg_new(payload, GFP_KERNEL);
1463 if (!skb) {
1464 struct sock *sk;
1465
1466 sk = netlink_lookup(in_skb->sk->sk_protocol,
1467 NETLINK_CB(in_skb).pid);
1468 if (sk) {
1469 sk->sk_err = ENOBUFS;
1470 sk->sk_error_report(sk);
1471 sock_put(sk);
1472 }
1473 return;
1474 }
1475
1476 rep = __nlmsg_put(skb, NETLINK_CB(in_skb).pid, nlh->nlmsg_seq,
1477 NLMSG_ERROR, sizeof(struct nlmsgerr), 0);
1478 errmsg = nlmsg_data(rep);
1479 errmsg->error = err;
1480 memcpy(&errmsg->msg, nlh, err ? nlh->nlmsg_len : sizeof(*nlh));
1481 netlink_unicast(in_skb->sk, skb, NETLINK_CB(in_skb).pid, MSG_DONTWAIT);
1482}
1483
1484static int netlink_rcv_skb(struct sk_buff *skb, int (*cb)(struct sk_buff *,
1485 struct nlmsghdr *))
1486{
1487 struct nlmsghdr *nlh;
1488 int err;
1489
1490 while (skb->len >= nlmsg_total_size(0)) {
1491 nlh = nlmsg_hdr(skb);
1492 err = 0;
1493
1494 if (nlh->nlmsg_len < NLMSG_HDRLEN || skb->len < nlh->nlmsg_len)
1495 return 0;
1496
1497 /* Only requests are handled by the kernel */
1498 if (!(nlh->nlmsg_flags & NLM_F_REQUEST))
1499 goto skip;
1500
1501 /* Skip control messages */
1502 if (nlh->nlmsg_type < NLMSG_MIN_TYPE)
1503 goto skip;
1504
1505 err = cb(skb, nlh);
1506 if (err == -EINTR) {
1507 /* Not an error, but we interrupt processing */
1508 netlink_queue_skip(nlh, skb);
1509 return err;
1510 }
1511skip:
1512 if (nlh->nlmsg_flags & NLM_F_ACK || err)
1513 netlink_ack(skb, nlh, err);
1514
1515 netlink_queue_skip(nlh, skb);
1516 }
1517
1518 return 0;
1519}
1520
1521/**
1522 * netlink_run_queue - Process netlink receive queue.
1523 * @sk: Netlink socket containing the queue
1524 * @qlen: Place to store queue length upon entry
1525 * @cb: Callback function invoked for each netlink message found
1526 *
1527 * Processes as much as there was in the queue upon entry and invokes
1528 * a callback function for each netlink message found. The callback
1529 * function may refuse a message by returning a negative error code
1530 * but setting the error pointer to 0 in which case this function
1531 * returns with a qlen != 0.
1532 *
1533 * qlen must be initialized to 0 before the initial entry, afterwards
1534 * the function may be called repeatedly until qlen reaches 0.
1535 *
1536 * The callback function may return -EINTR to signal that processing
1537 * of netlink messages shall be interrupted. In this case the message
1538 * currently being processed will NOT be requeued onto the receive
1539 * queue.
1540 */
1541void netlink_run_queue(struct sock *sk, unsigned int *qlen,
1542 int (*cb)(struct sk_buff *, struct nlmsghdr *))
1543{
1544 struct sk_buff *skb;
1545
1546 if (!*qlen || *qlen > skb_queue_len(&sk->sk_receive_queue))
1547 *qlen = skb_queue_len(&sk->sk_receive_queue);
1548
1549 for (; *qlen; (*qlen)--) {
1550 skb = skb_dequeue(&sk->sk_receive_queue);
1551 if (netlink_rcv_skb(skb, cb)) {
1552 if (skb->len)
1553 skb_queue_head(&sk->sk_receive_queue, skb);
1554 else {
1555 kfree_skb(skb);
1556 (*qlen)--;
1557 }
1558 break;
1559 }
1560
1561 kfree_skb(skb);
1562 }
1563}
1564
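/*
 * Example (kernel-side sketch): the usual caller is a protocol's input
 * callback, looping until qlen drops to zero exactly as described above
 * (the rtnetlink receive path follows this pattern); my_rcv_msg() is the
 * per-message callback that netlink_rcv_skb() invokes.
 *
 *	static void my_nl_input(struct sock *sk, int len)
 *	{
 *		unsigned int qlen = 0;
 *
 *		do {
 *			netlink_run_queue(sk, &qlen, &my_rcv_msg);
 *		} while (qlen);
 *	}
 */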
1565/**
1566 * netlink_queue_skip - Skip netlink message while processing queue.
1567 * @nlh: Netlink message to be skipped
1568 * @skb: Socket buffer containing the netlink messages.
1569 *
1570 * Pulls the given netlink message off the socket buffer so the next
1571 * call to netlink_queue_run() will not reconsider the message.
1572 */
1573static void netlink_queue_skip(struct nlmsghdr *nlh, struct sk_buff *skb)
1574{
1575 int msglen = NLMSG_ALIGN(nlh->nlmsg_len);
1576
1577 if (msglen > skb->len)
1578 msglen = skb->len;
1579
1580 skb_pull(skb, msglen);
1581}
1582
1583/**
1584 * nlmsg_notify - send a notification netlink message
1585 * @sk: netlink socket to use
1586 * @skb: notification message
1587 * @pid: destination netlink pid for reports or 0
1588 * @group: destination multicast group or 0
1589 * @report: 1 to report back, 0 to disable
1590 * @flags: allocation flags
1591 */
1592int nlmsg_notify(struct sock *sk, struct sk_buff *skb, u32 pid,
1593 unsigned int group, int report, gfp_t flags)
1594{
1595 int err = 0;
1596
1597 if (group) {
1598 int exclude_pid = 0;
1599
1600 if (report) {
1601 atomic_inc(&skb->users);
1602 exclude_pid = pid;
1603 }
1604
1605 /* errors reported via destination sk->sk_err */
1606 nlmsg_multicast(sk, skb, exclude_pid, group, flags);
1607 }
1608
1609 if (report)
1610 err = nlmsg_unicast(sk, skb, pid);
1611
1612 return err;
1613}
1614
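/*
 * Example (kernel-side sketch): rtnetlink-style notifiers call this with the
 * requesting pid and the event group; my_nl_sk, MY_GROUP and request_skb are
 * illustrative.  A non-zero report (NLM_F_ECHO was set on the request) makes
 * the message bounce back to the requester in addition to the multicast.
 *
 *	err = nlmsg_notify(my_nl_sk, skb, NETLINK_CB(request_skb).pid,
 *			   MY_GROUP, nlh->nlmsg_flags & NLM_F_ECHO,
 *			   GFP_KERNEL);
 */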
1615#ifdef CONFIG_PROC_FS
1616struct nl_seq_iter {
1617 int link;
1618 int hash_idx;
1619};
1620
1621static struct sock *netlink_seq_socket_idx(struct seq_file *seq, loff_t pos)
1622{
1623 struct nl_seq_iter *iter = seq->private;
1624 int i, j;
1625 struct sock *s;
1626 struct hlist_node *node;
1627 loff_t off = 0;
1628
1629 for (i=0; i<MAX_LINKS; i++) {
1630 struct nl_pid_hash *hash = &nl_table[i].hash;
1631
1632 for (j = 0; j <= hash->mask; j++) {
1633 sk_for_each(s, node, &hash->table[j]) {
1634 if (off == pos) {
1635 iter->link = i;
1636 iter->hash_idx = j;
1637 return s;
1638 }
1639 ++off;
1640 }
1641 }
1642 }
1643 return NULL;
1644}
1645
1646static void *netlink_seq_start(struct seq_file *seq, loff_t *pos)
1647{
1648 read_lock(&nl_table_lock);
1649 return *pos ? netlink_seq_socket_idx(seq, *pos - 1) : SEQ_START_TOKEN;
1650}
1651
1652static void *netlink_seq_next(struct seq_file *seq, void *v, loff_t *pos)
1653{
1654 struct sock *s;
1655 struct nl_seq_iter *iter;
1656 int i, j;
1657
1658 ++*pos;
1659
1660 if (v == SEQ_START_TOKEN)
1661 return netlink_seq_socket_idx(seq, 0);
1662
1663 s = sk_next(v);
1664 if (s)
1665 return s;
1666
1667 iter = seq->private;
1668 i = iter->link;
1669 j = iter->hash_idx + 1;
1670
1671 do {
1672 struct nl_pid_hash *hash = &nl_table[i].hash;
1673
1674 for (; j <= hash->mask; j++) {
1675 s = sk_head(&hash->table[j]);
1676 if (s) {
1677 iter->link = i;
1678 iter->hash_idx = j;
1679 return s;
1680 }
1681 }
1682
1683 j = 0;
1684 } while (++i < MAX_LINKS);
1685
1686 return NULL;
1687}
1688
1689static void netlink_seq_stop(struct seq_file *seq, void *v)
1690{
1691 read_unlock(&nl_table_lock);
1692}
1693
1694
1695static int netlink_seq_show(struct seq_file *seq, void *v)
1696{
1697 if (v == SEQ_START_TOKEN)
1698 seq_puts(seq,
1699 "sk Eth Pid Groups "
1700 "Rmem Wmem Dump Locks\n");
1701 else {
1702 struct sock *s = v;
1703 struct netlink_sock *nlk = nlk_sk(s);
1704
1705 seq_printf(seq, "%p %-3d %-6d %08x %-8d %-8d %p %d\n",
1706 s,
1707 s->sk_protocol,
1708 nlk->pid,
1709 nlk->groups ? (u32)nlk->groups[0] : 0,
1710 atomic_read(&s->sk_rmem_alloc),
1711 atomic_read(&s->sk_wmem_alloc),
1712 nlk->cb,
1713 atomic_read(&s->sk_refcnt)
1714 );
1715
1716 }
1717 return 0;
1718}
1719
1720static struct seq_operations netlink_seq_ops = {
1721 .start = netlink_seq_start,
1722 .next = netlink_seq_next,
1723 .stop = netlink_seq_stop,
1724 .show = netlink_seq_show,
1725};
1726
1727
1728static int netlink_seq_open(struct inode *inode, struct file *file)
1729{
1730 struct seq_file *seq;
1731 struct nl_seq_iter *iter;
1732 int err;
1733
1734 iter = kzalloc(sizeof(*iter), GFP_KERNEL);
1735 if (!iter)
1736 return -ENOMEM;
1737
1738 err = seq_open(file, &netlink_seq_ops);
1739 if (err) {
1740 kfree(iter);
1741 return err;
1742 }
1743
1744 seq = file->private_data;
1745 seq->private = iter;
1746 return 0;
1747}
1748
1749static const struct file_operations netlink_seq_fops = {
1750 .owner = THIS_MODULE,
1751 .open = netlink_seq_open,
1752 .read = seq_read,
1753 .llseek = seq_lseek,
1754 .release = seq_release_private,
1755};
1756
1757#endif
1758
1759int netlink_register_notifier(struct notifier_block *nb)
1760{
1761 return atomic_notifier_chain_register(&netlink_chain, nb);
1762}
1763
1764int netlink_unregister_notifier(struct notifier_block *nb)
1765{
1766 return atomic_notifier_chain_unregister(&netlink_chain, nb);
1767}
1768
1769static const struct proto_ops netlink_ops = {
1770 .family = PF_NETLINK,
1771 .owner = THIS_MODULE,
1772 .release = netlink_release,
1773 .bind = netlink_bind,
1774 .connect = netlink_connect,
1775 .socketpair = sock_no_socketpair,
1776 .accept = sock_no_accept,
1777 .getname = netlink_getname,
1778 .poll = datagram_poll,
1779 .ioctl = sock_no_ioctl,
1780 .listen = sock_no_listen,
1781 .shutdown = sock_no_shutdown,
1782 .setsockopt = netlink_setsockopt,
1783 .getsockopt = netlink_getsockopt,
1784 .sendmsg = netlink_sendmsg,
1785 .recvmsg = netlink_recvmsg,
1786 .mmap = sock_no_mmap,
1787 .sendpage = sock_no_sendpage,
1788};
1789
1790static struct net_proto_family netlink_family_ops = {
1791 .family = PF_NETLINK,
1792 .create = netlink_create,
1793 .owner = THIS_MODULE, /* for consistency 8) */
1794};
1795
1796static int __init netlink_proto_init(void)
1797{
1798 struct sk_buff *dummy_skb;
1799 int i;
1800 unsigned long max;
1801 unsigned int order;
1802 int err = proto_register(&netlink_proto, 0);
1803
1804 if (err != 0)
1805 goto out;
1806
1807 BUILD_BUG_ON(sizeof(struct netlink_skb_parms) > sizeof(dummy_skb->cb));
1808
1809 nl_table = kcalloc(MAX_LINKS, sizeof(*nl_table), GFP_KERNEL);
1810 if (!nl_table)
1811 goto panic;
1812
1813 if (num_physpages >= (128 * 1024))
1814 max = num_physpages >> (21 - PAGE_SHIFT);
1815 else
1816 max = num_physpages >> (23 - PAGE_SHIFT);
1817
1818 order = get_bitmask_order(max) - 1 + PAGE_SHIFT;
1819 max = (1UL << order) / sizeof(struct hlist_head);
1820 order = get_bitmask_order(max > UINT_MAX ? UINT_MAX : max) - 1;
1821
1822 for (i = 0; i < MAX_LINKS; i++) {
1823 struct nl_pid_hash *hash = &nl_table[i].hash;
1824
1825 hash->table = nl_pid_hash_alloc(1 * sizeof(*hash->table));
1826 if (!hash->table) {
1827 while (i-- > 0)
1828 nl_pid_hash_free(nl_table[i].hash.table,
1829 1 * sizeof(*hash->table));
1830 kfree(nl_table);
1831 goto panic;
1832 }
1833 memset(hash->table, 0, 1 * sizeof(*hash->table));
1834 hash->max_shift = order;
1835 hash->shift = 0;
1836 hash->mask = 0;
1837 hash->rehash_time = jiffies;
1838 }
1839
1840 sock_register(&netlink_family_ops);
1841#ifdef CONFIG_PROC_FS
1842 proc_net_fops_create("netlink", 0, &netlink_seq_fops);
1843#endif
1844 /* The netlink device handler may be needed early. */
1845 rtnetlink_init();
1846out:
1847 return err;
1848panic:
1849 panic("netlink_init: Cannot allocate nl_table\n");
1850}
1851
1852core_initcall(netlink_proto_init);
1853
1854EXPORT_SYMBOL(netlink_ack);
1855EXPORT_SYMBOL(netlink_run_queue);
1856EXPORT_SYMBOL(netlink_broadcast);
1857EXPORT_SYMBOL(netlink_dump_start);
1858EXPORT_SYMBOL(netlink_kernel_create);
1859EXPORT_SYMBOL(netlink_register_notifier);
1860EXPORT_SYMBOL(netlink_set_nonroot);
1861EXPORT_SYMBOL(netlink_unicast);
1862EXPORT_SYMBOL(netlink_unregister_notifier);
1863EXPORT_SYMBOL(nlmsg_notify);