]> bbs.cooldavid.org Git - net-next-2.6.git/blame - net/ipv6/ip6mr.c
ipv6: ip6mr: move unres_queue and timer to per-namespace data
[net-next-2.6.git] / net / ipv6 / ip6mr.c
CommitLineData
7bc570c8
YH
1/*
2 * Linux IPv6 multicast routing support for BSD pim6sd
3 * Based on net/ipv4/ipmr.c.
4 *
5 * (c) 2004 Mickael Hoerdt, <hoerdt@clarinet.u-strasbg.fr>
6 * LSIIT Laboratory, Strasbourg, France
7 * (c) 2004 Jean-Philippe Andriot, <jean-philippe.andriot@6WIND.com>
8 * 6WIND, Paris, France
9 * Copyright (C)2007,2008 USAGI/WIDE Project
10 * YOSHIFUJI Hideaki <yoshfuji@linux-ipv6.org>
11 *
12 * This program is free software; you can redistribute it and/or
13 * modify it under the terms of the GNU General Public License
14 * as published by the Free Software Foundation; either version
15 * 2 of the License, or (at your option) any later version.
16 *
17 */
18
19#include <asm/system.h>
20#include <asm/uaccess.h>
21#include <linux/types.h>
22#include <linux/sched.h>
23#include <linux/errno.h>
24#include <linux/timer.h>
25#include <linux/mm.h>
26#include <linux/kernel.h>
27#include <linux/fcntl.h>
28#include <linux/stat.h>
29#include <linux/socket.h>
7bc570c8
YH
30#include <linux/inet.h>
31#include <linux/netdevice.h>
32#include <linux/inetdevice.h>
7bc570c8
YH
33#include <linux/proc_fs.h>
34#include <linux/seq_file.h>
7bc570c8 35#include <linux/init.h>
5a0e3ad6 36#include <linux/slab.h>
7bc570c8
YH
37#include <net/protocol.h>
38#include <linux/skbuff.h>
39#include <net/sock.h>
7bc570c8 40#include <net/raw.h>
7bc570c8
YH
41#include <linux/notifier.h>
42#include <linux/if_arp.h>
7bc570c8
YH
43#include <net/checksum.h>
44#include <net/netlink.h>
45
46#include <net/ipv6.h>
47#include <net/ip6_route.h>
48#include <linux/mroute6.h>
14fb64e1 49#include <linux/pim.h>
7bc570c8
YH
50#include <net/addrconf.h>
51#include <linux/netfilter_ipv6.h>
5d6e430d 52#include <net/ip6_checksum.h>
7bc570c8 53
7bc570c8
YH
54/* Big lock, protecting vif table, mrt cache and mroute socket state.
55 Note that the changes are semaphored via rtnl_lock.
56 */
57
58static DEFINE_RWLOCK(mrt_lock);
59
60/*
61 * Multicast router control variables
62 */
63
4e16880c 64#define MIF_EXISTS(_net, _idx) ((_net)->ipv6.vif6_table[_idx].dev != NULL)
7bc570c8 65
7bc570c8
YH
66/* Special spinlock for queue of unresolved entries */
67static DEFINE_SPINLOCK(mfc_unres_lock);
68
69/* We return to original Alan's scheme. Hash table of resolved
70 entries is changed only in process context and protected
71 with weak lock mrt_lock. Queue of unresolved entries is protected
72 with strong spinlock mfc_unres_lock.
73
74 In this case data path is free of exclusive locks at all.
75 */
76
77static struct kmem_cache *mrt_cachep __read_mostly;
78
79static int ip6_mr_forward(struct sk_buff *skb, struct mfc6_cache *cache);
8229efda
BT
80static int ip6mr_cache_report(struct net *net, struct sk_buff *pkt,
81 mifi_t mifi, int assert);
7bc570c8 82static int ip6mr_fill_mroute(struct sk_buff *skb, struct mfc6_cache *c, struct rtmsg *rtm);
8229efda 83static void mroute_clean_tables(struct net *net);
7bc570c8 84
7bc570c8
YH
85
#ifdef CONFIG_PROC_FS

/*
 * Iterator state for /proc/net/ip6_mr_cache.  The walk covers the
 * per-netns resolved hash table first, then the per-netns queue of
 * unresolved entries.
 */
struct ipmr_mfc_iter {
	struct seq_net_private p;	/* netns handle for seq_file_net() */
	struct mfc6_cache **cache;	/* table currently being walked */
	int ct;				/* current hash line in mfc6_cache_array */
};


/*
 * Return the cache entry at logical position @pos, counting resolved
 * entries before unresolved ones.
 *
 * Locking: returns with mrt_lock read-held when the entry came from the
 * resolved table, or with mfc_unres_lock held when it came from the
 * unresolved queue; ipmr_mfc_seq_next()/ipmr_mfc_seq_stop() drop the
 * right lock by inspecting it->cache.
 */
static struct mfc6_cache *ipmr_mfc_seq_idx(struct net *net,
					   struct ipmr_mfc_iter *it, loff_t pos)
{
	struct mfc6_cache *mfc;

	it->cache = net->ipv6.mfc6_cache_array;
	read_lock(&mrt_lock);
	for (it->ct = 0; it->ct < MFC6_LINES; it->ct++)
		for (mfc = net->ipv6.mfc6_cache_array[it->ct];
		     mfc; mfc = mfc->next)
			if (pos-- == 0)
				return mfc;
	read_unlock(&mrt_lock);

	it->cache = &net->ipv6.mfc6_unres_queue;
	spin_lock_bh(&mfc_unres_lock);
	for (mfc = net->ipv6.mfc6_unres_queue; mfc; mfc = mfc->next)
		if (pos-- == 0)
			return mfc;
	spin_unlock_bh(&mfc_unres_lock);

	it->cache = NULL;
	return NULL;
}
119
120
121
122
/*
 *	The /proc interfaces to multicast routing /proc/ip6_mr_cache /proc/ip6_mr_vif
 */

/* Iterator state for /proc/net/ip6_mr_vif. */
struct ipmr_vif_iter {
	struct seq_net_private p;	/* netns handle for seq_file_net() */
	int ct;				/* current index into vif6_table */
};

/* Return the @pos'th in-use MIF of @net, or NULL when out of range. */
static struct mif_device *ip6mr_vif_seq_idx(struct net *net,
					    struct ipmr_vif_iter *iter,
					    loff_t pos)
{
	for (iter->ct = 0; iter->ct < net->ipv6.maxvif; ++iter->ct) {
		if (!MIF_EXISTS(net, iter->ct))
			continue;
		if (pos-- == 0)
			return &net->ipv6.vif6_table[iter->ct];
	}
	return NULL;
}

static void *ip6mr_vif_seq_start(struct seq_file *seq, loff_t *pos)
	__acquires(mrt_lock)
{
	struct net *net = seq_file_net(seq);

	/* mrt_lock is held for the whole dump; released in ..._seq_stop(). */
	read_lock(&mrt_lock);
	return *pos ? ip6mr_vif_seq_idx(net, seq->private, *pos - 1)
		: SEQ_START_TOKEN;
}

static void *ip6mr_vif_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	struct ipmr_vif_iter *iter = seq->private;
	struct net *net = seq_file_net(seq);

	++*pos;
	if (v == SEQ_START_TOKEN)
		return ip6mr_vif_seq_idx(net, iter, 0);

	/* Advance to the next slot that actually has a device. */
	while (++iter->ct < net->ipv6.maxvif) {
		if (!MIF_EXISTS(net, iter->ct))
			continue;
		return &net->ipv6.vif6_table[iter->ct];
	}
	return NULL;
}

static void ip6mr_vif_seq_stop(struct seq_file *seq, void *v)
	__releases(mrt_lock)
{
	read_unlock(&mrt_lock);
}

/* Emit one row (or the header line) of /proc/net/ip6_mr_vif. */
static int ip6mr_vif_seq_show(struct seq_file *seq, void *v)
{
	struct net *net = seq_file_net(seq);

	if (v == SEQ_START_TOKEN) {
		seq_puts(seq,
			 "Interface      BytesIn  PktsIn  BytesOut PktsOut Flags\n");
	} else {
		const struct mif_device *vif = v;
		const char *name = vif->dev ? vif->dev->name : "none";

		/* The MIF index is recovered from the entry's offset
		 * inside vif6_table (%td: pointer difference). */
		seq_printf(seq,
			   "%2td %-10s %8ld %7ld %8ld %7ld %05X\n",
			   vif - net->ipv6.vif6_table,
			   name, vif->bytes_in, vif->pkt_in,
			   vif->bytes_out, vif->pkt_out,
			   vif->flags);
	}
	return 0;
}

static const struct seq_operations ip6mr_vif_seq_ops = {
	.start = ip6mr_vif_seq_start,
	.next = ip6mr_vif_seq_next,
	.stop = ip6mr_vif_seq_stop,
	.show = ip6mr_vif_seq_show,
};

static int ip6mr_vif_open(struct inode *inode, struct file *file)
{
	/* seq_open_net() allocates the iterator and ties it to the netns. */
	return seq_open_net(inode, file, &ip6mr_vif_seq_ops,
			    sizeof(struct ipmr_vif_iter));
}

static const struct file_operations ip6mr_vif_fops = {
	.owner	 = THIS_MODULE,
	.open	 = ip6mr_vif_open,
	.read	 = seq_read,
	.llseek	 = seq_lseek,
	.release = seq_release_net,
};
219
static void *ipmr_mfc_seq_start(struct seq_file *seq, loff_t *pos)
{
	struct net *net = seq_file_net(seq);

	/* Locks are taken inside ipmr_mfc_seq_idx(), depending on which
	 * table the first visible entry comes from. */
	return *pos ? ipmr_mfc_seq_idx(net, seq->private, *pos - 1)
		: SEQ_START_TOKEN;
}
227
228static void *ipmr_mfc_seq_next(struct seq_file *seq, void *v, loff_t *pos)
229{
230 struct mfc6_cache *mfc = v;
231 struct ipmr_mfc_iter *it = seq->private;
8b90fc7e 232 struct net *net = seq_file_net(seq);
7bc570c8
YH
233
234 ++*pos;
235
236 if (v == SEQ_START_TOKEN)
8b90fc7e 237 return ipmr_mfc_seq_idx(net, seq->private, 0);
7bc570c8
YH
238
239 if (mfc->next)
240 return mfc->next;
241
c476efbc 242 if (it->cache == &net->ipv6.mfc6_unres_queue)
7bc570c8
YH
243 goto end_of_list;
244
8b90fc7e 245 BUG_ON(it->cache != net->ipv6.mfc6_cache_array);
7bc570c8 246
4a6258a0 247 while (++it->ct < MFC6_LINES) {
8b90fc7e 248 mfc = net->ipv6.mfc6_cache_array[it->ct];
7bc570c8
YH
249 if (mfc)
250 return mfc;
251 }
252
253 /* exhausted cache_array, show unresolved */
254 read_unlock(&mrt_lock);
c476efbc 255 it->cache = &net->ipv6.mfc6_unres_queue;
7bc570c8
YH
256 it->ct = 0;
257
258 spin_lock_bh(&mfc_unres_lock);
c476efbc 259 mfc = net->ipv6.mfc6_unres_queue;
7bc570c8
YH
260 if (mfc)
261 return mfc;
262
263 end_of_list:
264 spin_unlock_bh(&mfc_unres_lock);
265 it->cache = NULL;
266
267 return NULL;
268}
269
/* Drop whichever lock ipmr_mfc_seq_idx()/..._seq_next() left held. */
static void ipmr_mfc_seq_stop(struct seq_file *seq, void *v)
{
	struct ipmr_mfc_iter *it = seq->private;
	struct net *net = seq_file_net(seq);

	if (it->cache == &net->ipv6.mfc6_unres_queue)
		spin_unlock_bh(&mfc_unres_lock);
	else if (it->cache == net->ipv6.mfc6_cache_array)
		read_unlock(&mrt_lock);
}
280
/* Emit one row (or the header line) of /proc/net/ip6_mr_cache. */
static int ipmr_mfc_seq_show(struct seq_file *seq, void *v)
{
	int n;
	struct net *net = seq_file_net(seq);

	if (v == SEQ_START_TOKEN) {
		seq_puts(seq,
			 "Group                            "
			 "Origin                           "
			 "Iif      Pkts  Bytes     Wrong  Oifs\n");
	} else {
		const struct mfc6_cache *mfc = v;
		const struct ipmr_mfc_iter *it = seq->private;

		seq_printf(seq, "%pI6 %pI6 %-3hd",
			   &mfc->mf6c_mcastgrp, &mfc->mf6c_origin,
			   mfc->mf6c_parent);

		if (it->cache != &net->ipv6.mfc6_unres_queue) {
			seq_printf(seq, " %8lu %8lu %8lu",
				   mfc->mfc_un.res.pkt,
				   mfc->mfc_un.res.bytes,
				   mfc->mfc_un.res.wrong_if);
			/* One "mif:ttl" pair per forwarding interface. */
			for (n = mfc->mfc_un.res.minvif;
			     n < mfc->mfc_un.res.maxvif; n++) {
				if (MIF_EXISTS(net, n) &&
				    mfc->mfc_un.res.ttls[n] < 255)
					seq_printf(seq,
						   " %2d:%-3d",
						   n, mfc->mfc_un.res.ttls[n]);
			}
		} else {
			/* unresolved mfc_caches don't contain
			 * pkt, bytes and wrong_if values
			 */
			seq_printf(seq, " %8lu %8lu %8lu", 0ul, 0ul, 0ul);
		}
		seq_putc(seq, '\n');
	}
	return 0;
}
322
static const struct seq_operations ipmr_mfc_seq_ops = {
	.start = ipmr_mfc_seq_start,
	.next = ipmr_mfc_seq_next,
	.stop = ipmr_mfc_seq_stop,
	.show = ipmr_mfc_seq_show,
};

static int ipmr_mfc_open(struct inode *inode, struct file *file)
{
	/* seq_open_net() allocates the iterator and ties it to the netns. */
	return seq_open_net(inode, file, &ipmr_mfc_seq_ops,
			    sizeof(struct ipmr_mfc_iter));
}

static const struct file_operations ip6mr_mfc_fops = {
	.owner	 = THIS_MODULE,
	.open	 = ipmr_mfc_open,
	.read	 = seq_read,
	.llseek	 = seq_lseek,
	.release = seq_release_net,
};
#endif
344
#ifdef CONFIG_IPV6_PIMSM_V2

/*
 * Receive a PIM register packet: validate the register header and its
 * checksum, then re-inject the encapsulated IPv6 multicast packet into
 * the stack as if it arrived on the per-netns "pim6reg" device.
 * Always consumes @skb; returns 0 in all cases.
 */
static int pim6_rcv(struct sk_buff *skb)
{
	struct pimreghdr *pim;
	struct ipv6hdr *encap;
	struct net_device *reg_dev = NULL;
	struct net *net = dev_net(skb->dev);
	int reg_vif_num = net->ipv6.mroute_reg_vif_num;

	if (!pskb_may_pull(skb, sizeof(*pim) + sizeof(*encap)))
		goto drop;

	/* Accept only PIMv2 REGISTER (not NULL-register); the checksum
	 * must verify over the header alone or over the whole packet. */
	pim = (struct pimreghdr *)skb_transport_header(skb);
	if (pim->type != ((PIM_VERSION << 4) | PIM_REGISTER) ||
	    (pim->flags & PIM_NULL_REGISTER) ||
	    (csum_ipv6_magic(&ipv6_hdr(skb)->saddr, &ipv6_hdr(skb)->daddr,
			     sizeof(*pim), IPPROTO_PIM,
			     csum_partial((void *)pim, sizeof(*pim), 0)) &&
	     csum_fold(skb_checksum(skb, 0, skb->len, 0))))
		goto drop;

	/* check if the inner packet is destined to mcast group */
	encap = (struct ipv6hdr *)(skb_transport_header(skb) +
				   sizeof(*pim));

	if (!ipv6_addr_is_multicast(&encap->daddr) ||
	    encap->payload_len == 0 ||
	    ntohs(encap->payload_len) + sizeof(*pim) > skb->len)
		goto drop;

	/* Resolve the register device under mrt_lock and pin it. */
	read_lock(&mrt_lock);
	if (reg_vif_num >= 0)
		reg_dev = net->ipv6.vif6_table[reg_vif_num].dev;
	if (reg_dev)
		dev_hold(reg_dev);
	read_unlock(&mrt_lock);

	if (reg_dev == NULL)
		goto drop;

	/* Strip the outer headers and hand the inner packet back in. */
	skb->mac_header = skb->network_header;
	skb_pull(skb, (u8 *)encap - skb->data);
	skb_reset_network_header(skb);
	skb->dev = reg_dev;
	skb->protocol = htons(ETH_P_IPV6);
	skb->ip_summed = 0;
	skb->pkt_type = PACKET_HOST;
	skb_dst_drop(skb);
	reg_dev->stats.rx_bytes += skb->len;
	reg_dev->stats.rx_packets++;
	nf_reset(skb);
	netif_rx(skb);
	dev_put(reg_dev);
	return 0;
 drop:
	kfree_skb(skb);
	return 0;
}

static const struct inet6_protocol pim6_protocol = {
	.handler	=	pim6_rcv,
};
408
/* Service routines creating virtual interfaces: PIMREG */

/*
 * "Transmit" on the register device: nothing goes on the wire; the
 * whole packet is reported to the user-space daemon as
 * MRT6MSG_WHOLEPKT, then freed.
 */
static netdev_tx_t reg_vif_xmit(struct sk_buff *skb,
				struct net_device *dev)
{
	struct net *net = dev_net(dev);

	read_lock(&mrt_lock);
	dev->stats.tx_bytes += skb->len;
	dev->stats.tx_packets++;
	ip6mr_cache_report(net, skb, net->ipv6.mroute_reg_vif_num,
			   MRT6MSG_WHOLEPKT);
	read_unlock(&mrt_lock);
	kfree_skb(skb);
	return NETDEV_TX_OK;
}

static const struct net_device_ops reg_vif_netdev_ops = {
	.ndo_start_xmit	= reg_vif_xmit,
};
429
14fb64e1
YH
430static void reg_vif_setup(struct net_device *dev)
431{
432 dev->type = ARPHRD_PIMREG;
433 dev->mtu = 1500 - sizeof(struct ipv6hdr) - 8;
434 dev->flags = IFF_NOARP;
007c3838 435 dev->netdev_ops = &reg_vif_netdev_ops;
14fb64e1 436 dev->destructor = free_netdev;
403dbb97 437 dev->features |= NETIF_F_NETNS_LOCAL;
14fb64e1
YH
438}
439
/*
 * Create, register and bring up the per-netns PIM register device.
 * Returns the device with an extra reference held, or NULL on failure.
 * Runs under RTNL: the failure path drops and re-takes rtnl_lock so the
 * pending registration can complete before unregistering.
 */
static struct net_device *ip6mr_reg_vif(struct net *net)
{
	struct net_device *dev;

	dev = alloc_netdev(0, "pim6reg", reg_vif_setup);
	if (dev == NULL)
		return NULL;

	dev_net_set(dev, net);

	if (register_netdevice(dev)) {
		free_netdev(dev);
		return NULL;
	}
	dev->iflink = 0;

	if (dev_open(dev))
		goto failure;

	dev_hold(dev);
	return dev;

failure:
	/* allow the register to be completed before unregistering. */
	rtnl_unlock();
	rtnl_lock();

	unregister_netdevice(dev);
	return NULL;
}
#endif
471
7bc570c8
YH
/*
 *	Delete a VIF entry
 */

/*
 * Remove MIF @vifi from @net's vif6_table.  @head collects devices for
 * a batched unregister_netdevice_many() by the caller (register vifs).
 * Returns 0, or -EADDRNOTAVAIL when the slot is out of range or empty.
 */
static int mif6_delete(struct net *net, int vifi, struct list_head *head)
{
	struct mif_device *v;
	struct net_device *dev;
	struct inet6_dev *in6_dev;
	if (vifi < 0 || vifi >= net->ipv6.maxvif)
		return -EADDRNOTAVAIL;

	v = &net->ipv6.vif6_table[vifi];

	/* Detach the device under the writer lock so the data path
	 * never sees a half-deleted entry. */
	write_lock_bh(&mrt_lock);
	dev = v->dev;
	v->dev = NULL;

	if (!dev) {
		write_unlock_bh(&mrt_lock);
		return -EADDRNOTAVAIL;
	}

#ifdef CONFIG_IPV6_PIMSM_V2
	/* Deleting the register vif invalidates the cached index. */
	if (vifi == net->ipv6.mroute_reg_vif_num)
		net->ipv6.mroute_reg_vif_num = -1;
#endif

	/* Shrink maxvif when the highest in-use slot was removed. */
	if (vifi + 1 == net->ipv6.maxvif) {
		int tmp;
		for (tmp = vifi - 1; tmp >= 0; tmp--) {
			if (MIF_EXISTS(net, tmp))
				break;
		}
		net->ipv6.maxvif = tmp + 1;
	}

	write_unlock_bh(&mrt_lock);

	dev_set_allmulti(dev, -1);

	in6_dev = __in6_dev_get(dev);
	if (in6_dev)
		in6_dev->cnf.mc_forwarding--;

	if (v->flags & MIFF_REGISTER)
		unregister_netdevice_queue(dev, head);

	dev_put(dev);
	return 0;
}
523
58701ad4
BT
/* Free a cache entry and drop the netns reference it holds. */
static inline void ip6mr_cache_free(struct mfc6_cache *c)
{
	release_net(mfc6_net(c));
	kmem_cache_free(mrt_cachep, c);
}
529
7bc570c8
YH
/* Destroy an unresolved cache entry, killing queued skbs
   and reporting error to netlink readers.
 */

static void ip6mr_destroy_unres(struct mfc6_cache *c)
{
	struct sk_buff *skb;
	struct net *net = mfc6_net(c);

	atomic_dec(&net->ipv6.cache_resolve_queue_len);

	while((skb = skb_dequeue(&c->mfc_un.unres.unresolved)) != NULL) {
		if (ipv6_hdr(skb)->version == 0) {
			/* Version 0 marks a queued netlink request
			 * (same convention as ip6mr_cache_resolve());
			 * answer it with -ETIMEDOUT. */
			struct nlmsghdr *nlh = (struct nlmsghdr *)skb_pull(skb, sizeof(struct ipv6hdr));
			nlh->nlmsg_type = NLMSG_ERROR;
			nlh->nlmsg_len = NLMSG_LENGTH(sizeof(struct nlmsgerr));
			skb_trim(skb, nlh->nlmsg_len);
			((struct nlmsgerr *)NLMSG_DATA(nlh))->error = -ETIMEDOUT;
			rtnl_unicast(skb, net, NETLINK_CB(skb).pid);
		} else
			kfree_skb(skb);
	}

	ip6mr_cache_free(c);
}
555
556
/* Timer process for all the unresolved queue. */

/*
 * Destroy expired entries on @net's unresolved queue and re-arm the
 * per-netns timer for the earliest remaining expiry.
 * Called with mfc_unres_lock held (see both callers).
 */
static void ipmr_do_expire_process(struct net *net)
{
	unsigned long now = jiffies;
	unsigned long expires = 10 * HZ;	/* upper bound for re-arm */
	struct mfc6_cache *c, **cp;

	cp = &net->ipv6.mfc6_unres_queue;

	while ((c = *cp) != NULL) {
		if (time_after(c->mfc_un.unres.expires, now)) {
			/* not yet... */
			unsigned long interval = c->mfc_un.unres.expires - now;
			if (interval < expires)
				expires = interval;
			cp = &c->next;
			continue;
		}

		/* Unlink the expired entry and destroy it. */
		*cp = c->next;
		ip6mr_destroy_unres(c);
	}

	if (net->ipv6.mfc6_unres_queue != NULL)
		mod_timer(&net->ipv6.ipmr_expire_timer, jiffies + expires);
}
584
/*
 * Per-netns expiry timer callback; @arg is the struct net pointer that
 * ip6mr_net_init() installed via setup_timer().
 */
static void ipmr_expire_process(unsigned long arg)
{
	struct net *net = (struct net *)arg;

	/* Timer context cannot sleep on the lock: retry in one jiffy. */
	if (!spin_trylock(&mfc_unres_lock)) {
		mod_timer(&net->ipv6.ipmr_expire_timer, jiffies + 1);
		return;
	}

	if (net->ipv6.mfc6_unres_queue != NULL)
		ipmr_do_expire_process(net);

	spin_unlock(&mfc_unres_lock);
}
599
600/* Fill oifs list. It is called under write locked mrt_lock. */
601
602static void ip6mr_update_thresholds(struct mfc6_cache *cache, unsigned char *ttls)
603{
604 int vifi;
8229efda 605 struct net *net = mfc6_net(cache);
7bc570c8 606
6ac7eb08 607 cache->mfc_un.res.minvif = MAXMIFS;
7bc570c8 608 cache->mfc_un.res.maxvif = 0;
6ac7eb08 609 memset(cache->mfc_un.res.ttls, 255, MAXMIFS);
7bc570c8 610
8229efda
BT
611 for (vifi = 0; vifi < net->ipv6.maxvif; vifi++) {
612 if (MIF_EXISTS(net, vifi) &&
4e16880c 613 ttls[vifi] && ttls[vifi] < 255) {
7bc570c8
YH
614 cache->mfc_un.res.ttls[vifi] = ttls[vifi];
615 if (cache->mfc_un.res.minvif > vifi)
616 cache->mfc_un.res.minvif = vifi;
617 if (cache->mfc_un.res.maxvif <= vifi)
618 cache->mfc_un.res.maxvif = vifi + 1;
619 }
620 }
621}
622
/*
 * Install the MIF described by @vifc into @net's vif6_table.
 * @mrtsock: nonzero when the request came via the mroute socket;
 * otherwise the entry is flagged VIFF_STATIC.
 * NOTE(review): appears to run under RTNL (ip6mr_reg_vif() juggles
 * rtnl_lock) -- confirm against the setsockopt caller.
 */
static int mif6_add(struct net *net, struct mif6ctl *vifc, int mrtsock)
{
	int vifi = vifc->mif6c_mifi;
	struct mif_device *v = &net->ipv6.vif6_table[vifi];
	struct net_device *dev;
	struct inet6_dev *in6_dev;
	int err;

	/* Is vif busy ? */
	if (MIF_EXISTS(net, vifi))
		return -EADDRINUSE;

	switch (vifc->mif6c_flags) {
#ifdef CONFIG_IPV6_PIMSM_V2
	case MIFF_REGISTER:
		/*
		 * Special Purpose VIF in PIM
		 * All the packets will be sent to the daemon
		 */
		if (net->ipv6.mroute_reg_vif_num >= 0)
			return -EADDRINUSE;
		dev = ip6mr_reg_vif(net);
		if (!dev)
			return -ENOBUFS;
		err = dev_set_allmulti(dev, 1);
		if (err) {
			unregister_netdevice(dev);
			dev_put(dev);
			return err;
		}
		break;
#endif
	case 0:
		dev = dev_get_by_index(net, vifc->mif6c_pifi);
		if (!dev)
			return -EADDRNOTAVAIL;
		err = dev_set_allmulti(dev, 1);
		if (err) {
			dev_put(dev);
			return err;
		}
		break;
	default:
		return -EINVAL;
	}

	in6_dev = __in6_dev_get(dev);
	if (in6_dev)
		in6_dev->cnf.mc_forwarding++;

	/*
	 *	Fill in the VIF structures
	 */
	v->rate_limit = vifc->vifc_rate_limit;
	v->flags = vifc->mif6c_flags;
	if (!mrtsock)
		v->flags |= VIFF_STATIC;
	v->threshold = vifc->vifc_threshold;
	v->bytes_in = 0;
	v->bytes_out = 0;
	v->pkt_in = 0;
	v->pkt_out = 0;
	v->link = dev->ifindex;
	if (v->flags & MIFF_REGISTER)
		v->link = dev->iflink;

	/* And finish update writing critical data */
	write_lock_bh(&mrt_lock);
	v->dev = dev;	/* publishing dev makes the MIF visible (MIF_EXISTS) */
#ifdef CONFIG_IPV6_PIMSM_V2
	if (v->flags & MIFF_REGISTER)
		net->ipv6.mroute_reg_vif_num = vifi;
#endif
	if (vifi + 1 > net->ipv6.maxvif)
		net->ipv6.maxvif = vifi + 1;
	write_unlock_bh(&mrt_lock);
	return 0;
}
701
8229efda
BT
702static struct mfc6_cache *ip6mr_cache_find(struct net *net,
703 struct in6_addr *origin,
704 struct in6_addr *mcastgrp)
7bc570c8
YH
705{
706 int line = MFC6_HASH(mcastgrp, origin);
707 struct mfc6_cache *c;
708
8229efda 709 for (c = net->ipv6.mfc6_cache_array[line]; c; c = c->next) {
7bc570c8
YH
710 if (ipv6_addr_equal(&c->mf6c_origin, origin) &&
711 ipv6_addr_equal(&c->mf6c_mcastgrp, mcastgrp))
712 break;
713 }
714 return c;
715}
716
/*
 *	Allocate a multicast cache entry
 */
static struct mfc6_cache *ip6mr_cache_alloc(struct net *net)
{
	/* GFP_KERNEL: this variant is used from process context. */
	struct mfc6_cache *c = kmem_cache_zalloc(mrt_cachep, GFP_KERNEL);
	if (c == NULL)
		return NULL;
	c->mfc_un.res.minvif = MAXMIFS;	/* empty forwarding range */
	mfc6_net_set(c, net);
	return c;
}

/* Atomic variant for unresolved entries created on the packet path. */
static struct mfc6_cache *ip6mr_cache_alloc_unres(struct net *net)
{
	struct mfc6_cache *c = kmem_cache_zalloc(mrt_cachep, GFP_ATOMIC);
	if (c == NULL)
		return NULL;
	skb_queue_head_init(&c->mfc_un.unres.unresolved);
	c->mfc_un.unres.expires = jiffies + 10 * HZ;	/* resolution deadline */
	mfc6_net_set(c, net);
	return c;
}
740
/*
 *	A cache entry has gone into a resolved state from queued
 */

/*
 * Replay the skbs queued on unresolved entry @uc through resolved
 * entry @c: queued netlink requests (marked by IP version 0) are
 * answered; real packets are forwarded.
 */
static void ip6mr_cache_resolve(struct mfc6_cache *uc, struct mfc6_cache *c)
{
	struct sk_buff *skb;

	/*
	 *	Play the pending entries through our router
	 */

	while((skb = __skb_dequeue(&uc->mfc_un.unres.unresolved))) {
		if (ipv6_hdr(skb)->version == 0) {
			int err;
			struct nlmsghdr *nlh = (struct nlmsghdr *)skb_pull(skb, sizeof(struct ipv6hdr));

			if (ip6mr_fill_mroute(skb, c, NLMSG_DATA(nlh)) > 0) {
				nlh->nlmsg_len = skb_tail_pointer(skb) - (u8 *)nlh;
			} else {
				/* Route did not fit: report EMSGSIZE. */
				nlh->nlmsg_type = NLMSG_ERROR;
				nlh->nlmsg_len = NLMSG_LENGTH(sizeof(struct nlmsgerr));
				skb_trim(skb, nlh->nlmsg_len);
				((struct nlmsgerr *)NLMSG_DATA(nlh))->error = -EMSGSIZE;
			}
			err = rtnl_unicast(skb, mfc6_net(uc), NETLINK_CB(skb).pid);
		} else
			ip6_mr_forward(skb, c);
	}
}
771
/*
 *	Bounce a cache query up to pim6sd. We could use netlink for this but pim6sd
 *	expects the following bizarre scheme.
 *
 *	Called under mrt_lock.
 */

/*
 * Build an MRT6MSG of type @assert about @pkt (arrived on MIF @mifi)
 * and queue it on the netns mroute6 socket.  @pkt itself is NOT
 * consumed.  Returns 0 or a negative errno.
 */
static int ip6mr_cache_report(struct net *net, struct sk_buff *pkt, mifi_t mifi,
			      int assert)
{
	struct sk_buff *skb;
	struct mrt6msg *msg;
	int ret;

#ifdef CONFIG_IPV6_PIMSM_V2
	if (assert == MRT6MSG_WHOLEPKT)
		/* WHOLEPKT carries the full packet: copy it with enough
		 * headroom for the mrt6msg header in front. */
		skb = skb_realloc_headroom(pkt, -skb_network_offset(pkt)
						+sizeof(*msg));
	else
#endif
		skb = alloc_skb(sizeof(struct ipv6hdr) + sizeof(*msg), GFP_ATOMIC);

	if (!skb)
		return -ENOBUFS;

	/* I suppose that internal messages
	 * do not require checksums */

	skb->ip_summed = CHECKSUM_UNNECESSARY;

#ifdef CONFIG_IPV6_PIMSM_V2
	if (assert == MRT6MSG_WHOLEPKT) {
		/* Ugly, but we have no choice with this interface.
		   Duplicate old header, fix length etc.
		   And all this only to mangle msg->im6_msgtype and
		   to set msg->im6_mbz to "mbz" :-)
		 */
		skb_push(skb, -skb_network_offset(pkt));

		skb_push(skb, sizeof(*msg));
		skb_reset_transport_header(skb);
		msg = (struct mrt6msg *)skb_transport_header(skb);
		msg->im6_mbz = 0;
		msg->im6_msgtype = MRT6MSG_WHOLEPKT;
		msg->im6_mif = net->ipv6.mroute_reg_vif_num;
		msg->im6_pad = 0;
		ipv6_addr_copy(&msg->im6_src, &ipv6_hdr(pkt)->saddr);
		ipv6_addr_copy(&msg->im6_dst, &ipv6_hdr(pkt)->daddr);

		skb->ip_summed = CHECKSUM_UNNECESSARY;
	} else
#endif
	{
	/*
	 *	Copy the IP header
	 */

	skb_put(skb, sizeof(struct ipv6hdr));
	skb_reset_network_header(skb);
	skb_copy_to_linear_data(skb, ipv6_hdr(pkt), sizeof(struct ipv6hdr));

	/*
	 *	Add our header
	 */
	skb_put(skb, sizeof(*msg));
	skb_reset_transport_header(skb);
	msg = (struct mrt6msg *)skb_transport_header(skb);

	msg->im6_mbz = 0;
	msg->im6_msgtype = assert;
	msg->im6_mif = mifi;
	msg->im6_pad = 0;
	ipv6_addr_copy(&msg->im6_src, &ipv6_hdr(pkt)->saddr);
	ipv6_addr_copy(&msg->im6_dst, &ipv6_hdr(pkt)->daddr);

	skb_dst_set(skb, dst_clone(skb_dst(pkt)));
	skb->ip_summed = CHECKSUM_UNNECESSARY;
	}

	/* No daemon listening: nothing to report to. */
	if (net->ipv6.mroute6_sk == NULL) {
		kfree_skb(skb);
		return -EINVAL;
	}

	/*
	 *	Deliver to user space multicast routing algorithms
	 */
	ret = sock_queue_rcv_skb(net->ipv6.mroute6_sk, skb);
	if (ret < 0) {
		if (net_ratelimit())
			printk(KERN_WARNING "mroute6: pending queue full, dropping entries.\n");
		kfree_skb(skb);
	}

	return ret;
}
868
/*
 *	Queue a packet for resolution. It gets locked cache entry!
 */

/*
 * No route yet for (src, grp): park @skb on the matching unresolved
 * entry (creating one and notifying pim6sd on first sight) until the
 * daemon installs a route or the entry times out.  Consumes @skb on
 * error paths and when the per-entry queue is full.
 */
static int
ip6mr_cache_unresolved(struct net *net, mifi_t mifi, struct sk_buff *skb)
{
	int err;
	struct mfc6_cache *c;

	spin_lock_bh(&mfc_unres_lock);
	for (c = net->ipv6.mfc6_unres_queue; c; c = c->next) {
		if (ipv6_addr_equal(&c->mf6c_mcastgrp, &ipv6_hdr(skb)->daddr) &&
		    ipv6_addr_equal(&c->mf6c_origin, &ipv6_hdr(skb)->saddr))
			break;
	}

	if (c == NULL) {
		/*
		 *	Create a new entry if allowable
		 */

		/* Hard cap of 10 pending resolutions per netns. */
		if (atomic_read(&net->ipv6.cache_resolve_queue_len) >= 10 ||
		    (c = ip6mr_cache_alloc_unres(net)) == NULL) {
			spin_unlock_bh(&mfc_unres_lock);

			kfree_skb(skb);
			return -ENOBUFS;
		}

		/*
		 *	Fill in the new cache entry
		 */
		c->mf6c_parent = -1;
		c->mf6c_origin = ipv6_hdr(skb)->saddr;
		c->mf6c_mcastgrp = ipv6_hdr(skb)->daddr;

		/*
		 *	Reflect first query at pim6sd
		 */
		err = ip6mr_cache_report(net, skb, mifi, MRT6MSG_NOCACHE);
		if (err < 0) {
			/* If the report failed throw the cache entry
			   out - Brad Parker
			 */
			spin_unlock_bh(&mfc_unres_lock);

			ip6mr_cache_free(c);
			kfree_skb(skb);
			return err;
		}

		atomic_inc(&net->ipv6.cache_resolve_queue_len);
		c->next = net->ipv6.mfc6_unres_queue;
		net->ipv6.mfc6_unres_queue = c;

		/* (Re-)arm the per-netns expiry timer. */
		ipmr_do_expire_process(net);
	}

	/*
	 *	See if we can append the packet
	 */
	if (c->mfc_un.unres.unresolved.qlen > 3) {
		/* At most 4 packets parked per unresolved entry. */
		kfree_skb(skb);
		err = -ENOBUFS;
	} else {
		skb_queue_tail(&c->mfc_un.unres.unresolved, skb);
		err = 0;
	}

	spin_unlock_bh(&mfc_unres_lock);
	return err;
}
942
/*
 *	MFC6 cache manipulation by user space
 */

/* Remove the (origin, group) route named in @mfc from the resolved table. */
static int ip6mr_mfc_delete(struct net *net, struct mf6cctl *mfc)
{
	int line;
	struct mfc6_cache *c, **cp;

	line = MFC6_HASH(&mfc->mf6cc_mcastgrp.sin6_addr, &mfc->mf6cc_origin.sin6_addr);

	for (cp = &net->ipv6.mfc6_cache_array[line];
	     (c = *cp) != NULL; cp = &c->next) {
		if (ipv6_addr_equal(&c->mf6c_origin, &mfc->mf6cc_origin.sin6_addr) &&
		    ipv6_addr_equal(&c->mf6c_mcastgrp, &mfc->mf6cc_mcastgrp.sin6_addr)) {
			/* Unlink under the writer lock; free outside it. */
			write_lock_bh(&mrt_lock);
			*cp = c->next;
			write_unlock_bh(&mrt_lock);

			ip6mr_cache_free(c);
			return 0;
		}
	}
	return -ENOENT;
}
968
/*
 * Netdevice notifier: when a device unregisters, delete every MIF in
 * its netns that is bound to it, batching any resulting device
 * unregisters via unregister_netdevice_many().
 */
static int ip6mr_device_event(struct notifier_block *this,
			      unsigned long event, void *ptr)
{
	struct net_device *dev = ptr;
	struct net *net = dev_net(dev);
	struct mif_device *v;
	int ct;
	LIST_HEAD(list);

	if (event != NETDEV_UNREGISTER)
		return NOTIFY_DONE;

	v = &net->ipv6.vif6_table[0];
	for (ct = 0; ct < net->ipv6.maxvif; ct++, v++) {
		if (v->dev == dev)
			mif6_delete(net, ct, &list);
	}
	unregister_netdevice_many(&list);

	return NOTIFY_DONE;
}

static struct notifier_block ip6_mr_notifier = {
	.notifier_call = ip6mr_device_event
};
994
995/*
996 * Setup for IP multicast routing
997 */
998
4e16880c
BT
/*
 * Per-netns setup: allocate the MIF and MFC tables, initialise the
 * unresolved-queue expiry timer, and create the /proc entries.
 * Unwinds fully on any failure.
 */
static int __net_init ip6mr_net_init(struct net *net)
{
	int err = 0;
	net->ipv6.vif6_table = kcalloc(MAXMIFS, sizeof(struct mif_device),
				       GFP_KERNEL);
	if (!net->ipv6.vif6_table) {
		err = -ENOMEM;
		goto fail;
	}

	/* Forwarding cache */
	net->ipv6.mfc6_cache_array = kcalloc(MFC6_LINES,
					     sizeof(struct mfc6_cache *),
					     GFP_KERNEL);
	if (!net->ipv6.mfc6_cache_array) {
		err = -ENOMEM;
		goto fail_mfc6_cache;
	}

	/* The timer callback receives this netns as its argument. */
	setup_timer(&net->ipv6.ipmr_expire_timer, ipmr_expire_process,
		    (unsigned long)net);

#ifdef CONFIG_IPV6_PIMSM_V2
	net->ipv6.mroute_reg_vif_num = -1;	/* no register vif yet */
#endif

#ifdef CONFIG_PROC_FS
	err = -ENOMEM;
	if (!proc_net_fops_create(net, "ip6_mr_vif", 0, &ip6mr_vif_fops))
		goto proc_vif_fail;
	if (!proc_net_fops_create(net, "ip6_mr_cache", 0, &ip6mr_mfc_fops))
		goto proc_cache_fail;
#endif
	return 0;

#ifdef CONFIG_PROC_FS
proc_cache_fail:
	proc_net_remove(net, "ip6_mr_vif");
proc_vif_fail:
	kfree(net->ipv6.mfc6_cache_array);
#endif
fail_mfc6_cache:
	kfree(net->ipv6.vif6_table);
fail:
	return err;
}
1045
/* Per-netns teardown: mirror image of ip6mr_net_init(). */
static void __net_exit ip6mr_net_exit(struct net *net)
{
#ifdef CONFIG_PROC_FS
	proc_net_remove(net, "ip6_mr_cache");
	proc_net_remove(net, "ip6_mr_vif");
#endif
	/* Stop the expiry timer before tearing the tables down. */
	del_timer(&net->ipv6.ipmr_expire_timer);
	mroute_clean_tables(net);
	kfree(net->ipv6.mfc6_cache_array);
	kfree(net->ipv6.vif6_table);
}

static struct pernet_operations ip6mr_net_ops = {
	.init = ip6mr_net_init,
	.exit = ip6mr_net_exit,
};
1062
/*
 * Subsystem init: create the MFC slab cache, register the per-netns
 * state, the netdevice notifier and (optionally) the PIM protocol
 * handler.  Unwinds in reverse order on failure.
 */
int __init ip6_mr_init(void)
{
	int err;

	mrt_cachep = kmem_cache_create("ip6_mrt_cache",
				       sizeof(struct mfc6_cache),
				       0, SLAB_HWCACHE_ALIGN,
				       NULL);
	if (!mrt_cachep)
		return -ENOMEM;

	err = register_pernet_subsys(&ip6mr_net_ops);
	if (err)
		goto reg_pernet_fail;

	err = register_netdevice_notifier(&ip6_mr_notifier);
	if (err)
		goto reg_notif_fail;
#ifdef CONFIG_IPV6_PIMSM_V2
	if (inet6_add_protocol(&pim6_protocol, IPPROTO_PIM) < 0) {
		printk(KERN_ERR "ip6_mr_init: can't add PIM protocol\n");
		err = -EAGAIN;
		goto add_proto_fail;
	}
#endif
	return 0;
#ifdef CONFIG_IPV6_PIMSM_V2
add_proto_fail:
	unregister_netdevice_notifier(&ip6_mr_notifier);
#endif
reg_notif_fail:
	unregister_pernet_subsys(&ip6mr_net_ops);
reg_pernet_fail:
	kmem_cache_destroy(mrt_cachep);
	return err;
}
1099
623d1a1a
WC
1100void ip6_mr_cleanup(void)
1101{
623d1a1a 1102 unregister_netdevice_notifier(&ip6_mr_notifier);
4e16880c 1103 unregister_pernet_subsys(&ip6mr_net_ops);
623d1a1a
WC
1104 kmem_cache_destroy(mrt_cachep);
1105}
7bc570c8 1106
/*
 * Add or update a multicast forwarding cache (MFC) entry for @net.
 *
 * @mfc:	user-supplied (origin, group, parent mif, oif set).
 * @mrtsock:	non-zero when the request comes from the mroute6 control
 *		socket; otherwise the entry is marked MFC_STATIC so that
 *		mroute_clean_tables() will not flush it.
 *
 * If an entry for (origin, group) already exists it is updated in
 * place; otherwise a new one is allocated and linked into the hash
 * chain.  Any matching entry waiting on the unresolved queue is then
 * spliced out, its queued packets are released via
 * ip6mr_cache_resolve(), and the expiry timer is stopped when the
 * queue drains.
 *
 * Returns 0 on success, -ENFILE for an out-of-range parent mif,
 * -EINVAL for a non-multicast group, -ENOMEM on allocation failure.
 * Caller holds the RTNL (see ip6_mroute_setsockopt()).
 */
static int ip6mr_mfc_add(struct net *net, struct mf6cctl *mfc, int mrtsock)
{
	int line;
	struct mfc6_cache *uc, *c, **cp;
	unsigned char ttls[MAXMIFS];
	int i;

	if (mfc->mf6cc_parent >= MAXMIFS)
		return -ENFILE;

	/* TTL threshold per mif: 1 for mifs in the oif set, 255
	 * (never forward) for the rest. */
	memset(ttls, 255, MAXMIFS);
	for (i = 0; i < MAXMIFS; i++) {
		if (IF_ISSET(i, &mfc->mf6cc_ifset))
			ttls[i] = 1;

	}

	line = MFC6_HASH(&mfc->mf6cc_mcastgrp.sin6_addr, &mfc->mf6cc_origin.sin6_addr);

	/* Look for an existing (origin, group) entry on the chain. */
	for (cp = &net->ipv6.mfc6_cache_array[line];
	     (c = *cp) != NULL; cp = &c->next) {
		if (ipv6_addr_equal(&c->mf6c_origin, &mfc->mf6cc_origin.sin6_addr) &&
		    ipv6_addr_equal(&c->mf6c_mcastgrp, &mfc->mf6cc_mcastgrp.sin6_addr))
			break;
	}

	if (c != NULL) {
		/* Update in place under the writer side of mrt_lock. */
		write_lock_bh(&mrt_lock);
		c->mf6c_parent = mfc->mf6cc_parent;
		ip6mr_update_thresholds(c, ttls);
		if (!mrtsock)
			c->mfc_flags |= MFC_STATIC;
		write_unlock_bh(&mrt_lock);
		return 0;
	}

	if (!ipv6_addr_is_multicast(&mfc->mf6cc_mcastgrp.sin6_addr))
		return -EINVAL;

	c = ip6mr_cache_alloc(net);
	if (c == NULL)
		return -ENOMEM;

	/* Fill the new entry before publishing it. */
	c->mf6c_origin = mfc->mf6cc_origin.sin6_addr;
	c->mf6c_mcastgrp = mfc->mf6cc_mcastgrp.sin6_addr;
	c->mf6c_parent = mfc->mf6cc_parent;
	ip6mr_update_thresholds(c, ttls);
	if (!mrtsock)
		c->mfc_flags |= MFC_STATIC;

	write_lock_bh(&mrt_lock);
	c->next = net->ipv6.mfc6_cache_array[line];
	net->ipv6.mfc6_cache_array[line] = c;
	write_unlock_bh(&mrt_lock);

	/*
	 *	Check to see if we resolved a queued list. If so we
	 *	need to send on the frames and tidy up.
	 */
	spin_lock_bh(&mfc_unres_lock);
	for (cp = &net->ipv6.mfc6_unres_queue; (uc = *cp) != NULL;
	     cp = &uc->next) {
		if (ipv6_addr_equal(&uc->mf6c_origin, &c->mf6c_origin) &&
		    ipv6_addr_equal(&uc->mf6c_mcastgrp, &c->mf6c_mcastgrp)) {
			*cp = uc->next;
			atomic_dec(&net->ipv6.cache_resolve_queue_len);
			break;
		}
	}
	/* Nothing left waiting for resolution: stop the expiry timer. */
	if (net->ipv6.mfc6_unres_queue == NULL)
		del_timer(&net->ipv6.ipmr_expire_timer);
	spin_unlock_bh(&mfc_unres_lock);

	/* Flush the packets queued on the unresolved entry, then free it.
	 * Done outside mfc_unres_lock. */
	if (uc) {
		ip6mr_cache_resolve(uc, c);
		ip6mr_cache_free(uc);
	}
	return 0;
}
1186
/*
 *	Close the multicast socket, and clear the vif tables etc
 */

/*
 * Flush all non-static state of @net's multicast routing tables:
 * delete every mif without VIFF_STATIC (batching the device
 * unregisters), unlink and free every resolved cache entry without
 * MFC_STATIC, and destroy everything left on the unresolved queue.
 *
 * Called from ip6mr_sk_done() and ip6mr_net_exit(); callers hold the
 * RTNL or are in namespace teardown.
 */
static void mroute_clean_tables(struct net *net)
{
	int i;
	LIST_HEAD(list);

	/*
	 *	Shut down all active vif entries
	 */
	for (i = 0; i < net->ipv6.maxvif; i++) {
		if (!(net->ipv6.vif6_table[i].flags & VIFF_STATIC))
			mif6_delete(net, i, &list);
	}
	/* Unregister all collected devices in one batch. */
	unregister_netdevice_many(&list);

	/*
	 *	Wipe the cache
	 */
	for (i = 0; i < MFC6_LINES; i++) {
		struct mfc6_cache *c, **cp;

		cp = &net->ipv6.mfc6_cache_array[i];
		while ((c = *cp) != NULL) {
			if (c->mfc_flags & MFC_STATIC) {
				cp = &c->next;
				continue;
			}
			/* Unlink under mrt_lock so readers never see a
			 * half-removed entry; free outside the lock. */
			write_lock_bh(&mrt_lock);
			*cp = c->next;
			write_unlock_bh(&mrt_lock);

			ip6mr_cache_free(c);
		}
	}

	/* Drain the unresolved queue, if anything is still pending. */
	if (atomic_read(&net->ipv6.cache_resolve_queue_len) != 0) {
		struct mfc6_cache *c, **cp;

		spin_lock_bh(&mfc_unres_lock);
		cp = &net->ipv6.mfc6_unres_queue;
		while ((c = *cp) != NULL) {
			*cp = c->next;
			ip6mr_destroy_unres(c);
		}
		spin_unlock_bh(&mfc_unres_lock);
	}
}
1237
/*
 * Claim the multicast-routing control socket role for @sk's namespace
 * (MRT6_INIT).  On success @sk becomes net->ipv6.mroute6_sk and the
 * all-devices mc_forwarding count is incremented.
 *
 * Returns 0 on success or -EADDRINUSE when another socket already
 * holds the role.  The update is made under the RTNL plus the writer
 * side of mrt_lock.
 */
static int ip6mr_sk_init(struct sock *sk)
{
	int err = 0;
	struct net *net = sock_net(sk);

	rtnl_lock();
	write_lock_bh(&mrt_lock);
	if (likely(net->ipv6.mroute6_sk == NULL)) {
		net->ipv6.mroute6_sk = sk;
		net->ipv6.devconf_all->mc_forwarding++;
	}
	else
		err = -EADDRINUSE;
	write_unlock_bh(&mrt_lock);

	rtnl_unlock();

	return err;
}
1257
/*
 * Release the control-socket role held by @sk (MRT6_DONE or socket
 * close): clear net->ipv6.mroute6_sk, decrement the all-devices
 * mc_forwarding count, and flush all non-static routing state via
 * mroute_clean_tables().
 *
 * Returns 0 on success or -EACCES if @sk is not the registered
 * mroute6 socket of its namespace.
 */
int ip6mr_sk_done(struct sock *sk)
{
	int err = 0;
	struct net *net = sock_net(sk);

	rtnl_lock();
	if (sk == net->ipv6.mroute6_sk) {
		write_lock_bh(&mrt_lock);
		net->ipv6.mroute6_sk = NULL;
		net->ipv6.devconf_all->mc_forwarding--;
		write_unlock_bh(&mrt_lock);

		/* Tables are cleaned under the RTNL, after the socket
		 * pointer is gone so no new config can race in. */
		mroute_clean_tables(net);
	} else
		err = -EACCES;
	rtnl_unlock();

	return err;
}
1277
/*
 *	Socket options and virtual interface manipulation. The whole
 *	virtual interface system is a complete heap, but unfortunately
 *	that's how BSD mrouted happens to think. Maybe one day with a proper
 *	MOSPF/PIM router set up we can clean this up.
 */

/*
 * setsockopt() handler for the MRT6_* options.
 *
 * Everything except MRT6_INIT requires the caller to either be the
 * registered mroute6 control socket or have CAP_NET_ADMIN.  Options
 * that mutate the mif/MFC tables take the RTNL around the actual
 * operation.  Returns 0 or a negative errno.
 */
int ip6_mroute_setsockopt(struct sock *sk, int optname, char __user *optval, unsigned int optlen)
{
	int ret;
	struct mif6ctl vif;
	struct mf6cctl mfc;
	mifi_t mifi;
	struct net *net = sock_net(sk);

	if (optname != MRT6_INIT) {
		if (sk != net->ipv6.mroute6_sk && !capable(CAP_NET_ADMIN))
			return -EACCES;
	}

	switch (optname) {
	case MRT6_INIT:
		/* Only a raw ICMPv6 socket may become the control socket. */
		if (sk->sk_type != SOCK_RAW ||
		    inet_sk(sk)->inet_num != IPPROTO_ICMPV6)
			return -EOPNOTSUPP;
		if (optlen < sizeof(int))
			return -EINVAL;

		return ip6mr_sk_init(sk);

	case MRT6_DONE:
		return ip6mr_sk_done(sk);

	case MRT6_ADD_MIF:
		if (optlen < sizeof(vif))
			return -EINVAL;
		if (copy_from_user(&vif, optval, sizeof(vif)))
			return -EFAULT;
		if (vif.mif6c_mifi >= MAXMIFS)
			return -ENFILE;
		rtnl_lock();
		ret = mif6_add(net, &vif, sk == net->ipv6.mroute6_sk);
		rtnl_unlock();
		return ret;

	case MRT6_DEL_MIF:
		if (optlen < sizeof(mifi_t))
			return -EINVAL;
		if (copy_from_user(&mifi, optval, sizeof(mifi_t)))
			return -EFAULT;
		rtnl_lock();
		ret = mif6_delete(net, mifi, NULL);
		rtnl_unlock();
		return ret;

	/*
	 *	Manipulate the forwarding caches. These live
	 *	in a sort of kernel/user symbiosis.
	 */
	case MRT6_ADD_MFC:
	case MRT6_DEL_MFC:
		if (optlen < sizeof(mfc))
			return -EINVAL;
		if (copy_from_user(&mfc, optval, sizeof(mfc)))
			return -EFAULT;
		rtnl_lock();
		if (optname == MRT6_DEL_MFC)
			ret = ip6mr_mfc_delete(net, &mfc);
		else
			ret = ip6mr_mfc_add(net, &mfc,
					    sk == net->ipv6.mroute6_sk);
		rtnl_unlock();
		return ret;

	/*
	 *	Control PIM assert (to activate pim will activate assert)
	 */
	case MRT6_ASSERT:
	{
		int v;
		if (get_user(v, (int __user *)optval))
			return -EFAULT;
		net->ipv6.mroute_do_assert = !!v;
		return 0;
	}

#ifdef CONFIG_IPV6_PIMSM_V2
	case MRT6_PIM:
	{
		int v;
		if (get_user(v, (int __user *)optval))
			return -EFAULT;
		v = !!v;
		rtnl_lock();
		ret = 0;
		/* Enabling/disabling PIM implies the same for assert. */
		if (v != net->ipv6.mroute_do_pim) {
			net->ipv6.mroute_do_pim = v;
			net->ipv6.mroute_do_assert = v;
		}
		rtnl_unlock();
		return ret;
	}

#endif
	/*
	 *	Spurious command, or MRT6_VERSION which you cannot
	 *	set.
	 */
	default:
		return -ENOPROTOOPT;
	}
}
1390
1391/*
1392 * Getsock opt support for the multicast routing system.
1393 */
1394
1395int ip6_mroute_getsockopt(struct sock *sk, int optname, char __user *optval,
1396 int __user *optlen)
1397{
1398 int olr;
1399 int val;
8229efda 1400 struct net *net = sock_net(sk);
7bc570c8
YH
1401
1402 switch (optname) {
1403 case MRT6_VERSION:
1404 val = 0x0305;
1405 break;
14fb64e1
YH
1406#ifdef CONFIG_IPV6_PIMSM_V2
1407 case MRT6_PIM:
8229efda 1408 val = net->ipv6.mroute_do_pim;
14fb64e1
YH
1409 break;
1410#endif
1411 case MRT6_ASSERT:
8229efda 1412 val = net->ipv6.mroute_do_assert;
14fb64e1 1413 break;
7bc570c8
YH
1414 default:
1415 return -ENOPROTOOPT;
1416 }
1417
1418 if (get_user(olr, optlen))
1419 return -EFAULT;
1420
1421 olr = min_t(int, olr, sizeof(int));
1422 if (olr < 0)
1423 return -EINVAL;
1424
1425 if (put_user(olr, optlen))
1426 return -EFAULT;
1427 if (copy_to_user(optval, &val, olr))
1428 return -EFAULT;
1429 return 0;
1430}
1431
/*
 *	The IP multicast ioctl support routines.
 */

/*
 * ioctl() handler for SIOCGETMIFCNT_IN6 and SIOCGETSGCNT_IN6: copy
 * per-mif or per-(source, group) packet/byte counters to userspace.
 *
 * Returns 0 on success, -EFAULT on copy errors, -EINVAL for an
 * out-of-range mif index, -EADDRNOTAVAIL when the mif or cache entry
 * does not exist, and -ENOIOCTLCMD for anything else.
 */
int ip6mr_ioctl(struct sock *sk, int cmd, void __user *arg)
{
	struct sioc_sg_req6 sr;
	struct sioc_mif_req6 vr;
	struct mif_device *vif;
	struct mfc6_cache *c;
	struct net *net = sock_net(sk);

	switch (cmd) {
	case SIOCGETMIFCNT_IN6:
		if (copy_from_user(&vr, arg, sizeof(vr)))
			return -EFAULT;
		if (vr.mifi >= net->ipv6.maxvif)
			return -EINVAL;
		/* Counters are read under the reader side of mrt_lock;
		 * the lock is dropped before copying to userspace. */
		read_lock(&mrt_lock);
		vif = &net->ipv6.vif6_table[vr.mifi];
		if (MIF_EXISTS(net, vr.mifi)) {
			vr.icount = vif->pkt_in;
			vr.ocount = vif->pkt_out;
			vr.ibytes = vif->bytes_in;
			vr.obytes = vif->bytes_out;
			read_unlock(&mrt_lock);

			if (copy_to_user(arg, &vr, sizeof(vr)))
				return -EFAULT;
			return 0;
		}
		read_unlock(&mrt_lock);
		return -EADDRNOTAVAIL;
	case SIOCGETSGCNT_IN6:
		if (copy_from_user(&sr, arg, sizeof(sr)))
			return -EFAULT;

		read_lock(&mrt_lock);
		c = ip6mr_cache_find(net, &sr.src.sin6_addr, &sr.grp.sin6_addr);
		if (c) {
			sr.pktcnt = c->mfc_un.res.pkt;
			sr.bytecnt = c->mfc_un.res.bytes;
			sr.wrong_if = c->mfc_un.res.wrong_if;
			read_unlock(&mrt_lock);

			if (copy_to_user(arg, &sr, sizeof(sr)))
				return -EFAULT;
			return 0;
		}
		read_unlock(&mrt_lock);
		return -EADDRNOTAVAIL;
	default:
		return -ENOIOCTLCMD;
	}
}
1487
1488
/*
 * Completion callback for the NF_INET_FORWARD hook taken in
 * ip6mr_forward2(): bump the forwarded-datagram counter and hand the
 * skb to dst_output().
 */
static inline int ip6mr_forward2_finish(struct sk_buff *skb)
{
	IP6_INC_STATS_BH(dev_net(skb_dst(skb)->dev), ip6_dst_idev(skb_dst(skb)),
			 IPSTATS_MIB_OUTFORWDATAGRAMS);
	return dst_output(skb);
}
1495
/*
 *	Processing handlers for ip6mr_forward
 */

/*
 * Transmit one copy of @skb out of mif @vifi for cache entry @c.
 *
 * Register mifs (PIM) do not transmit: the packet is reported whole to
 * the daemon via ip6mr_cache_report() and freed.  Otherwise a route is
 * looked up for the destination on the mif's link, the hop limit is
 * decremented and the packet goes through the NF_INET_FORWARD hook to
 * ip6mr_forward2_finish().
 *
 * Consumes @skb on every path; always returns 0.
 */
static int ip6mr_forward2(struct sk_buff *skb, struct mfc6_cache *c, int vifi)
{
	struct ipv6hdr *ipv6h;
	struct net *net = mfc6_net(c);
	struct mif_device *vif = &net->ipv6.vif6_table[vifi];
	struct net_device *dev;
	struct dst_entry *dst;
	struct flowi fl;

	if (vif->dev == NULL)
		goto out_free;

#ifdef CONFIG_IPV6_PIMSM_V2
	if (vif->flags & MIFF_REGISTER) {
		vif->pkt_out++;
		vif->bytes_out += skb->len;
		vif->dev->stats.tx_bytes += skb->len;
		vif->dev->stats.tx_packets++;
		ip6mr_cache_report(net, skb, vifi, MRT6MSG_WHOLEPKT);
		goto out_free;
	}
#endif

	ipv6h = ipv6_hdr(skb);

	fl = (struct flowi) {
		.oif = vif->link,
		.nl_u = { .ip6_u =
			  { .daddr = ipv6h->daddr, }
		}
	};

	dst = ip6_route_output(net, NULL, &fl);
	if (!dst)
		goto out_free;

	/* Replace the input route with the output route just found. */
	skb_dst_drop(skb);
	skb_dst_set(skb, dst);

	/*
	 * RFC1584 teaches, that DVMRP/PIM router must deliver packets locally
	 * not only before forwarding, but after forwarding on all output
	 * interfaces. It is clear, if mrouter runs a multicasting
	 * program, it should receive packets not depending to what interface
	 * program is joined.
	 * If we will not make it, the program will have to join on all
	 * interfaces. On the other hand, multihoming host (or router, but
	 * not mrouter) cannot join to more than one interface - it will
	 * result in receiving multiple packets.
	 */
	dev = vif->dev;
	skb->dev = dev;
	vif->pkt_out++;
	vif->bytes_out += skb->len;

	/* We are about to write */
	/* XXX: extension headers? */
	if (skb_cow(skb, sizeof(*ipv6h) + LL_RESERVED_SPACE(dev)))
		goto out_free;

	ipv6h = ipv6_hdr(skb);
	ipv6h->hop_limit--;

	IP6CB(skb)->flags |= IP6SKB_FORWARDED;

	return NF_HOOK(NFPROTO_IPV6, NF_INET_FORWARD, skb, skb->dev, dev,
		       ip6mr_forward2_finish);

out_free:
	kfree_skb(skb);
	return 0;
}
1572
1573static int ip6mr_find_vif(struct net_device *dev)
1574{
8229efda 1575 struct net *net = dev_net(dev);
7bc570c8 1576 int ct;
8229efda
BT
1577 for (ct = net->ipv6.maxvif - 1; ct >= 0; ct--) {
1578 if (net->ipv6.vif6_table[ct].dev == dev)
7bc570c8
YH
1579 break;
1580 }
1581 return ct;
1582}
1583
/*
 * Forward @skb according to resolved cache entry @cache.
 *
 * If the packet arrived on a device other than the entry's parent mif,
 * it is dropped and — when assert mode is on and rate limiting allows —
 * a MRT6MSG_WRONGMIF report is sent to the daemon.  Otherwise one copy
 * is sent on every output mif whose TTL threshold the packet's hop
 * limit exceeds (clones for all but the last, the original for the
 * last).
 *
 * Consumes @skb; always returns 0.  Called under mrt_lock from
 * ip6_mr_input().
 */
static int ip6_mr_forward(struct sk_buff *skb, struct mfc6_cache *cache)
{
	int psend = -1;
	int vif, ct;
	struct net *net = mfc6_net(cache);

	vif = cache->mf6c_parent;
	cache->mfc_un.res.pkt++;
	cache->mfc_un.res.bytes += skb->len;

	/*
	 * Wrong interface: drop packet and (maybe) send PIM assert.
	 */
	if (net->ipv6.vif6_table[vif].dev != skb->dev) {
		int true_vifi;

		cache->mfc_un.res.wrong_if++;
		true_vifi = ip6mr_find_vif(skb->dev);

		if (true_vifi >= 0 && net->ipv6.mroute_do_assert &&
		    /* pimsm uses asserts, when switching from RPT to SPT,
		       so that we cannot check that packet arrived on an oif.
		       It is bad, but otherwise we would need to move pretty
		       large chunk of pimd to kernel. Ough... --ANK
		     */
		    (net->ipv6.mroute_do_pim ||
		     cache->mfc_un.res.ttls[true_vifi] < 255) &&
		    time_after(jiffies,
			       cache->mfc_un.res.last_assert + MFC_ASSERT_THRESH)) {
			cache->mfc_un.res.last_assert = jiffies;
			ip6mr_cache_report(net, skb, true_vifi, MRT6MSG_WRONGMIF);
		}
		goto dont_forward;
	}

	net->ipv6.vif6_table[vif].pkt_in++;
	net->ipv6.vif6_table[vif].bytes_in += skb->len;

	/*
	 *	Forward the frame
	 */
	/* psend trails the loop by one step so the final transmission
	 * can use the original skb instead of a clone. */
	for (ct = cache->mfc_un.res.maxvif - 1; ct >= cache->mfc_un.res.minvif; ct--) {
		if (ipv6_hdr(skb)->hop_limit > cache->mfc_un.res.ttls[ct]) {
			if (psend != -1) {
				struct sk_buff *skb2 = skb_clone(skb, GFP_ATOMIC);
				if (skb2)
					ip6mr_forward2(skb2, cache, psend);
			}
			psend = ct;
		}
	}
	if (psend != -1) {
		ip6mr_forward2(skb, cache, psend);
		return 0;
	}

dont_forward:
	kfree_skb(skb);
	return 0;
}
1644
1645
/*
 *	Multicast packets for forwarding arrive here
 */

/*
 * Entry point for IPv6 multicast packets that may need forwarding.
 *
 * Looks up the (source, group) pair in the MFC under the reader side
 * of mrt_lock.  On a hit the packet is forwarded via ip6_mr_forward();
 * on a miss it is handed to ip6mr_cache_unresolved() when the arrival
 * device is a known mif, otherwise dropped with -ENODEV.
 */
int ip6_mr_input(struct sk_buff *skb)
{
	struct mfc6_cache *cache;
	struct net *net = dev_net(skb->dev);

	read_lock(&mrt_lock);
	cache = ip6mr_cache_find(net,
				 &ipv6_hdr(skb)->saddr, &ipv6_hdr(skb)->daddr);

	/*
	 *	No usable cache entry
	 */
	if (cache == NULL) {
		int vif;

		vif = ip6mr_find_vif(skb->dev);
		if (vif >= 0) {
			int err = ip6mr_cache_unresolved(net, vif, skb);
			read_unlock(&mrt_lock);

			return err;
		}
		read_unlock(&mrt_lock);
		kfree_skb(skb);
		return -ENODEV;
	}

	ip6_mr_forward(skb, cache);

	read_unlock(&mrt_lock);

	return 0;
}
1683
1684
1685static int
1686ip6mr_fill_mroute(struct sk_buff *skb, struct mfc6_cache *c, struct rtmsg *rtm)
1687{
1688 int ct;
1689 struct rtnexthop *nhp;
8229efda 1690 struct net *net = mfc6_net(c);
549e028d 1691 u8 *b = skb_tail_pointer(skb);
7bc570c8
YH
1692 struct rtattr *mp_head;
1693
7438189b
ND
1694 /* If cache is unresolved, don't try to parse IIF and OIF */
1695 if (c->mf6c_parent > MAXMIFS)
1696 return -ENOENT;
1697
1698 if (MIF_EXISTS(net, c->mf6c_parent))
1699 RTA_PUT(skb, RTA_IIF, 4, &net->ipv6.vif6_table[c->mf6c_parent].dev->ifindex);
7bc570c8
YH
1700
1701 mp_head = (struct rtattr *)skb_put(skb, RTA_LENGTH(0));
1702
1703 for (ct = c->mfc_un.res.minvif; ct < c->mfc_un.res.maxvif; ct++) {
7438189b 1704 if (MIF_EXISTS(net, ct) && c->mfc_un.res.ttls[ct] < 255) {
7bc570c8
YH
1705 if (skb_tailroom(skb) < RTA_ALIGN(RTA_ALIGN(sizeof(*nhp)) + 4))
1706 goto rtattr_failure;
1707 nhp = (struct rtnexthop *)skb_put(skb, RTA_ALIGN(sizeof(*nhp)));
1708 nhp->rtnh_flags = 0;
1709 nhp->rtnh_hops = c->mfc_un.res.ttls[ct];
8229efda 1710 nhp->rtnh_ifindex = net->ipv6.vif6_table[ct].dev->ifindex;
7bc570c8
YH
1711 nhp->rtnh_len = sizeof(*nhp);
1712 }
1713 }
1714 mp_head->rta_type = RTA_MULTIPATH;
549e028d 1715 mp_head->rta_len = skb_tail_pointer(skb) - (u8 *)mp_head;
7bc570c8
YH
1716 rtm->rtm_type = RTN_MULTICAST;
1717 return 1;
1718
1719rtattr_failure:
1720 nlmsg_trim(skb, b);
1721 return -EMSGSIZE;
1722}
1723
/*
 * RTM_GETROUTE support: describe the multicast route for the
 * (src, dst) pair of @skb's attached rt6_info in @skb/@rtm.
 *
 * On a cache miss with @nowait set, returns -EAGAIN.  Otherwise a
 * minimal IPv6 header carrying only the addresses is synthesized in a
 * fresh skb and queued via ip6mr_cache_unresolved(), so the daemon can
 * resolve the route; its return value is passed through.  On a hit the
 * reply attributes are filled by ip6mr_fill_mroute().
 */
int ip6mr_get_route(struct net *net,
		    struct sk_buff *skb, struct rtmsg *rtm, int nowait)
{
	int err;
	struct mfc6_cache *cache;
	struct rt6_info *rt = (struct rt6_info *)skb_dst(skb);

	read_lock(&mrt_lock);
	cache = ip6mr_cache_find(net, &rt->rt6i_src.addr, &rt->rt6i_dst.addr);

	if (!cache) {
		struct sk_buff *skb2;
		struct ipv6hdr *iph;
		struct net_device *dev;
		int vif;

		if (nowait) {
			read_unlock(&mrt_lock);
			return -EAGAIN;
		}

		dev = skb->dev;
		if (dev == NULL || (vif = ip6mr_find_vif(dev)) < 0) {
			read_unlock(&mrt_lock);
			return -ENODEV;
		}

		/* really correct? */
		skb2 = alloc_skb(sizeof(struct ipv6hdr), GFP_ATOMIC);
		if (!skb2) {
			read_unlock(&mrt_lock);
			return -ENOMEM;
		}

		skb_reset_transport_header(skb2);

		skb_put(skb2, sizeof(struct ipv6hdr));
		skb_reset_network_header(skb2);

		/* Build a skeleton header: only saddr/daddr matter to
		 * the resolver; everything else is zeroed/none. */
		iph = ipv6_hdr(skb2);
		iph->version = 0;
		iph->priority = 0;
		iph->flow_lbl[0] = 0;
		iph->flow_lbl[1] = 0;
		iph->flow_lbl[2] = 0;
		iph->payload_len = 0;
		iph->nexthdr = IPPROTO_NONE;
		iph->hop_limit = 0;
		ipv6_addr_copy(&iph->saddr, &rt->rt6i_src.addr);
		ipv6_addr_copy(&iph->daddr, &rt->rt6i_dst.addr);

		err = ip6mr_cache_unresolved(net, vif, skb2);
		read_unlock(&mrt_lock);

		return err;
	}

	if (!nowait && (rtm->rtm_flags&RTM_F_NOTIFY))
		cache->mfc_flags |= MFC_NOTIFY;

	err = ip6mr_fill_mroute(skb, cache, rtm);
	read_unlock(&mrt_lock);
	return err;
}
1788