1 #include <linux/etherdevice.h>
2 #include <linux/if_macvlan.h>
3 #include <linux/interrupt.h>
4 #include <linux/nsproxy.h>
5 #include <linux/compat.h>
6 #include <linux/if_tun.h>
7 #include <linux/module.h>
8 #include <linux/skbuff.h>
9 #include <linux/cache.h>
10 #include <linux/sched.h>
11 #include <linux/types.h>
12 #include <linux/init.h>
13 #include <linux/wait.h>
14 #include <linux/cdev.h>
17 #include <net/net_namespace.h>
18 #include <net/rtnetlink.h>
/*
 * A macvtap queue is the central object of this driver, it connects
 * an open character device to a macvlan interface. There can be
 * multiple queues on one interface, which map back to queues
 * implemented in hardware on the underlying device.
 *
 * macvtap_proto is used to allocate queues through the sock allocation
 * mechanism.
 *
 * TODO: multiqueue support is currently not implemented, even though
 * macvtap is basically prepared for that. We will need to add this
 * here as well as in virtio-net and qemu to get line rate on 10gbit
 * adapters from a guest.
 */
struct macvtap_queue {
	/* NOTE(review): sampled excerpt — the embedded struct sock / struct
	 * socket members (q->sk, q->sock, used throughout this file) are not
	 * visible in this chunk; confirm against the full source. */
	struct macvlan_dev *vlan;	/* owning device; read under RCU or macvtap_lock */

static struct proto macvtap_proto = {
	/* NOTE(review): .name and .owner initializers elided in this excerpt.
	 * obj_size makes sk_alloc() hand back a whole macvtap_queue. */
	.obj_size = sizeof (struct macvtap_queue),
/*
 * Minor number matches netdev->ifindex, so need a potentially
 * large value. This also makes it possible to split the
 * tap functionality out again in the future by offering it
 * from other drivers besides macvtap. As long as every device
 * only has one tap, the interface numbers assure that the
 * device nodes are unique.
 */
static unsigned int macvtap_major;	/* dynamically allocated dev_t base */
#define MACVTAP_NUM_DEVS 65536		/* one minor per possible ifindex */
static struct class *macvtap_class;	/* backs /sys/class/macvtap */
static struct cdev macvtap_cdev;	/* single cdev shared by all tap nodes */
/*
 * The macvtap_queue and the macvlan_dev are loosely coupled, the
 * pointers from one to the other can only be read while rcu_read_lock
 * or macvtap_lock is held.
 *
 * Both the file and the macvlan_dev hold a reference on the macvtap_queue
 * through sock_hold(&q->sk). When the macvlan_dev goes away first,
 * q->vlan becomes inaccessible. When the files gets closed,
 * macvtap_get_queue() fails.
 *
 * There may still be references to the struct sock inside of the
 * queue from outbound SKBs, but these never reference back to the
 * file or the dev. The data structure is freed through __sk_free
 * when both our references and any pending SKBs are gone.
 */
static DEFINE_SPINLOCK(macvtap_lock);	/* serializes queue attach/detach */
/*
 * Choose the next free queue, for now there is only one.
 * Attaches queue @q to device @dev and to @file->private_data.
 */
static int macvtap_set_queue(struct net_device *dev, struct file *file,
			     struct macvtap_queue *q)
	/* NOTE(review): sampled excerpt — opening brace, the busy-error
	 * path after the vlan->tap check, sock_hold() and the return
	 * statement are elided from this view. */
	struct macvlan_dev *vlan = netdev_priv(dev);

	spin_lock(&macvtap_lock);
	/* single-queue driver: refuse if a queue is already attached */
	if (rcu_dereference(vlan->tap))
	/* (elided error path) */
	/* publish the cross-links under the lock */
	rcu_assign_pointer(q->vlan, vlan);
	rcu_assign_pointer(vlan->tap, q);
	/* the file holds its reference to the queue via private_data */
	file->private_data = q;
	spin_unlock(&macvtap_lock);
/*
 * The file owning the queue got closed, give up both
 * the reference that the files holds as well as the
 * one from the macvlan_dev if that still exists.
 *
 * Using the spinlock makes sure that we don't get
 * to the queue again after destroying it.
 */
static void macvtap_put_queue(struct macvtap_queue *q)
	/* NOTE(review): sampled excerpt — the NULL check on vlan and the
	 * sock_put() reference drops are elided from this view. */
	struct macvlan_dev *vlan;

	spin_lock(&macvtap_lock);
	vlan = rcu_dereference(q->vlan);
	/* sever both directions so lookups fail from here on */
	rcu_assign_pointer(vlan->tap, NULL);
	rcu_assign_pointer(q->vlan, NULL);
	spin_unlock(&macvtap_lock);
/*
 * Since we only support one queue, just dereference the pointer.
 * Caller must hold rcu_read_lock (see locking comment above).
 */
static struct macvtap_queue *macvtap_get_queue(struct net_device *dev,
	/* NOTE(review): the skb parameter is elided in this excerpt */
	struct macvlan_dev *vlan = netdev_priv(dev);

	return rcu_dereference(vlan->tap);
/*
 * The net_device is going away, give up the reference
 * that it holds on the queue (all the queues one day)
 * and safely set the pointer from the queues to NULL.
 */
static void macvtap_del_queues(struct net_device *dev)
	/* NOTE(review): sampled excerpt — the "no queue attached" early
	 * return between the first spin_unlock and the pointer clearing,
	 * plus the final sock_put(), are elided from this view. */
	struct macvlan_dev *vlan = netdev_priv(dev);
	struct macvtap_queue *q;

	spin_lock(&macvtap_lock);
	q = rcu_dereference(vlan->tap);
	/* early-exit path: nothing attached */
	spin_unlock(&macvtap_lock);

	/* normal path: clear both cross-links under the lock */
	rcu_assign_pointer(vlan->tap, NULL);
	rcu_assign_pointer(q->vlan, NULL);
	spin_unlock(&macvtap_lock);
/*
 * Forward happens for data that gets sent from one macvlan
 * endpoint to another one in bridge mode. We just take
 * the skb and put it into the receive queue.
 */
static int macvtap_forward(struct net_device *dev, struct sk_buff *skb)
	/* NOTE(review): sampled excerpt — the NULL check on q (drop the
	 * frame when no reader is attached) and the return values are
	 * elided from this view. */
	struct macvtap_queue *q = macvtap_get_queue(dev, skb);

	/* hand the frame to the character-device reader and wake it */
	skb_queue_tail(&q->sk.sk_receive_queue, skb);
	wake_up(q->sk.sk_sleep);
/*
 * Receive is for data from the external interface (lowerdev),
 * in case of macvtap, we can treat that the same way as
 * forward, which macvlan cannot.
 */
static int macvtap_receive(struct sk_buff *skb)
	/* re-expose the link-layer header before queueing to userspace —
	 * presumably undoing a pull done by the lower rx path; confirm
	 * against macvlan's receive handling */
	skb_push(skb, ETH_HLEN);
	return macvtap_forward(skb->dev, skb);
/*
 * rtnl_link_ops->newlink: create the macvlan side first, then the
 * per-device character device node ("tapN", N = ifindex).
 */
static int macvtap_newlink(struct net *src_net,
			   struct net_device *dev,
			   struct nlattr *data[])
	/* NOTE(review): sampled excerpt — the 'struct nlattr *tb[]'
	 * parameter (used below), the declarations of err/devt, the
	 * error check after macvlan_common_newlink() and the closing
	 * return paths are elided from this view. */
	struct device *classdev;

	/* wire our rx/forward hooks into the macvlan data path */
	err = macvlan_common_newlink(src_net, dev, tb, data,
				     macvtap_receive, macvtap_forward);

	devt = MKDEV(MAJOR(macvtap_major), dev->ifindex);
	classdev = device_create(macvtap_class, &dev->dev, devt,
				 dev, "tap%d", dev->ifindex);
	if (IS_ERR(classdev)) {
		err = PTR_ERR(classdev);
		/* unwind the macvlan attach on chardev failure */
		macvtap_del_queues(dev);
/*
 * rtnl_link_ops->dellink: tear down in reverse creation order —
 * device node, then queue references, then the macvlan itself.
 */
static void macvtap_dellink(struct net_device *dev,
			    struct list_head *head)
	device_destroy(macvtap_class,
		       MKDEV(MAJOR(macvtap_major), dev->ifindex));
	macvtap_del_queues(dev);
	macvlan_dellink(dev, head);
static struct rtnl_link_ops macvtap_link_ops __read_mostly = {
	/* NOTE(review): .kind/.setup/.priv_size initializers elided in
	 * this excerpt */
	.newlink	= macvtap_newlink,
	.dellink	= macvtap_dellink,
/*
 * Socket write-space callback: wake sleeping poll()ers once transmit
 * room frees up, but only if a reader armed SOCK_ASYNC_NOSPACE.
 */
static void macvtap_sock_write_space(struct sock *sk)
	if (!sock_writeable(sk) ||
	    !test_and_clear_bit(SOCK_ASYNC_NOSPACE, &sk->sk_socket->flags))
	/* (elided early return) */
	if (sk->sk_sleep && waitqueue_active(sk->sk_sleep))
		wake_up_interruptible_sync(sk->sk_sleep);
/*
 * chardev open: map the minor number back to the macvlan net_device
 * (minor == ifindex) and allocate a queue bound to this file.
 */
static int macvtap_open(struct inode *inode, struct file *file)
	/* NOTE(review): sampled excerpt — the NULL check on dev, the err
	 * declaration, the sk_alloc proto argument/NULL check and the
	 * error/cleanup exits are elided from this view. */
	struct net *net = current->nsproxy->net_ns;
	struct net_device *dev = dev_get_by_index(net, iminor(inode));
	struct macvtap_queue *q;

	/* check if this is a macvtap device */
	if (dev->rtnl_link_ops != &macvtap_link_ops)

	/* the queue is embedded in the sock; obj_size covers it */
	q = (struct macvtap_queue *)sk_alloc(net, AF_UNSPEC, GFP_KERNEL,

	init_waitqueue_head(&q->sock.wait);
	q->sock.type = SOCK_RAW;
	q->sock.state = SS_CONNECTED;
	sock_init_data(&q->sock, &q->sk);
	/* hook poll wakeups into the sock's flow control */
	q->sk.sk_write_space = macvtap_sock_write_space;

	err = macvtap_set_queue(dev, file, q);
/* chardev close: drop the file's reference on the queue */
static int macvtap_release(struct inode *inode, struct file *file)
	struct macvtap_queue *q = file->private_data;
	macvtap_put_queue(q);
/*
 * poll(): readable when frames are queued for userspace, writable
 * per the socket's send-buffer accounting.
 */
static unsigned int macvtap_poll(struct file *file, poll_table * wait)
	/* NOTE(review): sampled excerpt — the NULL check on q (POLLERR
	 * when no queue) and the return of mask are elided. */
	struct macvtap_queue *q = file->private_data;
	unsigned int mask = POLLERR;

	poll_wait(file, &q->sock.wait, wait);

	if (!skb_queue_empty(&q->sk.sk_receive_queue))
		mask |= POLLIN | POLLRDNORM;

	/* standard SOCK_ASYNC_NOSPACE dance: re-test writeability after
	 * arming the flag to close the race with sk_write_space */
	if (sock_writeable(&q->sk) ||
	    (!test_and_set_bit(SOCK_ASYNC_NOSPACE, &q->sock.flags) &&
	     sock_writeable(&q->sk)))
		mask |= POLLOUT | POLLWRNORM;
/* Get packet from user space buffer */
static ssize_t macvtap_get_user(struct macvtap_queue *q,
				const struct iovec *iv, size_t count,
	/* NOTE(review): sampled excerpt — the 'int noblock' parameter,
	 * skb/len/err declarations, error returns and the rcu_read_lock_bh()
	 * calls pairing the unlocks below are elided from this view. */
	struct macvlan_dev *vlan;

	/* a frame must at least carry a full ethernet header */
	if (unlikely(len < ETH_HLEN))

	skb = sock_alloc_send_skb(&q->sk, NET_IP_ALIGN + len, noblock, &err);

	/* keep the IP header aligned after the 14-byte ethernet header */
	skb_reserve(skb, NET_IP_ALIGN);

	err = skb_copy_datagram_from_iovec(skb, 0, iv, 0, len);

	skb_set_network_header(skb, ETH_HLEN);

	/* inject the frame into the macvlan transmit path */
	vlan = rcu_dereference(q->vlan);
	macvlan_start_xmit(skb, vlan->dev);
	rcu_read_unlock_bh();

	/* error path: account one dropped rx packet */
	vlan = rcu_dereference(q->vlan);
	/* NOTE(review): latent bug — this re-reads q->vlan directly instead
	 * of using the rcu_dereference()d (and presumably NULL-checked)
	 * local 'vlan' obtained on the line above; fix to use 'vlan'. */
	macvlan_count_rx(q->vlan, 0, false, false);
	rcu_read_unlock_bh();
/*
 * aio write entry point: flatten the iovec length and delegate to
 * macvtap_get_user(); honors O_NONBLOCK for the skb allocation.
 */
static ssize_t macvtap_aio_write(struct kiocb *iocb, const struct iovec *iv,
				 unsigned long count, loff_t pos)
	struct file *file = iocb->ki_filp;
	/* NOTE(review): the -ENOLINK initializer is a dead store — it is
	 * unconditionally overwritten below (at least in this excerpt) */
	ssize_t result = -ENOLINK;
	struct macvtap_queue *q = file->private_data;

	result = macvtap_get_user(q, iv, iov_length(iv, count),
				  file->f_flags & O_NONBLOCK);
376 /* Put packet to the user space buffer */
377 static ssize_t macvtap_put_user(struct macvtap_queue *q,
378 const struct sk_buff *skb,
379 const struct iovec *iv, int len)
381 struct macvlan_dev *vlan;
384 len = min_t(int, skb->len, len);
386 ret = skb_copy_datagram_const_iovec(skb, 0, iv, 0, len);
389 vlan = rcu_dereference(q->vlan);
390 macvlan_count_rx(vlan, len, ret == 0, 0);
391 rcu_read_unlock_bh();
393 return ret ? ret : len;
/*
 * aio read entry point: block (unless O_NONBLOCK) until a frame is
 * queued, then copy it out via macvtap_put_user().
 */
static ssize_t macvtap_aio_read(struct kiocb *iocb, const struct iovec *iv,
				unsigned long count, loff_t pos)
	/* NOTE(review): sampled excerpt — the skb declaration, the retry
	 * loop around the dequeue, the -EAGAIN/-ERESTARTSYS branches, the
	 * schedule() call and the final return are elided from this view. */
	struct file *file = iocb->ki_filp;
	struct macvtap_queue *q = file->private_data;

	DECLARE_WAITQUEUE(wait, current);

	ssize_t len, ret = 0;

	len = iov_length(iv, count);

	add_wait_queue(q->sk.sk_sleep, &wait);

	/* NOTE(review): direct assignment to current->state; kernel style
	 * prefers set_current_state(TASK_INTERRUPTIBLE), which includes
	 * the required memory barrier */
	current->state = TASK_INTERRUPTIBLE;

	/* Read frames from the queue */
	skb = skb_dequeue(&q->sk.sk_receive_queue);

	if (file->f_flags & O_NONBLOCK) {

	if (signal_pending(current)) {

	/* Nothing to read, let's sleep */

	ret = macvtap_put_user(q, skb, iv, len);

	current->state = TASK_RUNNING;
	remove_wait_queue(q->sk.sk_sleep, &wait);
/*
 * provide compatibility with generic tun/tap interface
 */
static long macvtap_ioctl(struct file *file, unsigned int cmd,
	/* NOTE(review): sampled excerpt — the 'unsigned long arg'
	 * parameter, the declarations of u/ret, the switch(cmd) framing
	 * with its TUNSETIFF/TUNGETIFF/TUNGETFEATURES/TUNSETOFFLOAD case
	 * labels, and the return statements are elided from this view. */
	struct macvtap_queue *q = file->private_data;
	struct macvlan_dev *vlan;
	void __user *argp = (void __user *)arg;
	struct ifreq __user *ifr = argp;
	unsigned int __user *up = argp;

	/* TUNSETIFF: ignore the name, just look at flags */
	if (get_user(u, &ifr->ifr_flags))

	/* only the exact tun/tap mode we emulate is accepted */
	if (u != (IFF_TAP | IFF_NO_PI))

	/* TUNGETIFF: report the attached device, if any */
	vlan = rcu_dereference(q->vlan);

	rcu_read_unlock_bh();

	/* NOTE(review): latent issue — q->vlan is dereferenced directly
	 * here rather than through the rcu_dereference()d local 'vlan'
	 * obtained above; fix to use 'vlan->dev->name'. */
	if (copy_to_user(&ifr->ifr_name, q->vlan->dev->name, IFNAMSIZ) ||
	    put_user((TUN_TAP_DEV | TUN_NO_PI), &ifr->ifr_flags))

	/* TUNGETFEATURES: advertise the flags we accept */
	if (put_user((IFF_TAP | IFF_NO_PI), up))

	/* TUNSETOFFLOAD: let the user check for future flags */
	if (arg & ~(TUN_F_CSUM | TUN_F_TSO4 | TUN_F_TSO6 |
		    TUN_F_TSO_ECN | TUN_F_UFO))

	/* TODO: add support for these, so far we don't
	   support any offload */
	if (arg & (TUN_F_CSUM | TUN_F_TSO4 | TUN_F_TSO6 |
		   TUN_F_TSO_ECN | TUN_F_UFO))
/* 32-bit compat entry point: widen the pointer and reuse the native ioctl */
static long macvtap_compat_ioctl(struct file *file, unsigned int cmd,
	return macvtap_ioctl(file, cmd, (unsigned long)compat_ptr(arg));
/* file operations for the per-device macvtap character nodes */
static const struct file_operations macvtap_fops = {
	/* NOTE(review): the CONFIG_COMPAT guard around .compat_ioctl
	 * appears elided in this excerpt */
	.owner		= THIS_MODULE,
	.open		= macvtap_open,
	.release	= macvtap_release,
	.aio_read	= macvtap_aio_read,
	.aio_write	= macvtap_aio_write,
	.poll		= macvtap_poll,
	.unlocked_ioctl	= macvtap_ioctl,
	.compat_ioctl	= macvtap_compat_ioctl,
/*
 * Module init: allocate the chrdev region, register the cdev and
 * class, then register the rtnl link type. Tears down in reverse
 * on failure (goto-cleanup labels are elided in this excerpt).
 */
static int macvtap_init(void)
	/* NOTE(review): sampled excerpt — the err declaration, the goto
	 * labels/error checks between steps, and the success return are
	 * elided from this view. */
	err = alloc_chrdev_region(&macvtap_major, 0,
				  MACVTAP_NUM_DEVS, "macvtap");

	cdev_init(&macvtap_cdev, &macvtap_fops);
	err = cdev_add(&macvtap_cdev, macvtap_major, MACVTAP_NUM_DEVS);

	macvtap_class = class_create(THIS_MODULE, "macvtap");
	if (IS_ERR(macvtap_class)) {
		err = PTR_ERR(macvtap_class);

	err = macvlan_link_register(&macvtap_link_ops);

	/* error unwind path */
	class_unregister(macvtap_class);

	cdev_del(&macvtap_cdev);

	unregister_chrdev_region(macvtap_major, MACVTAP_NUM_DEVS);

module_init(macvtap_init);
/* Module exit: unregister everything in reverse of macvtap_init() */
static void macvtap_exit(void)
	rtnl_link_unregister(&macvtap_link_ops);
	class_unregister(macvtap_class);
	cdev_del(&macvtap_cdev);
	unregister_chrdev_region(macvtap_major, MACVTAP_NUM_DEVS);
module_exit(macvtap_exit);
/* auto-load this module on "ip link add ... type macvtap" */
MODULE_ALIAS_RTNL_LINK("macvtap");
MODULE_AUTHOR("Arnd Bergmann <arnd@arndb.de>");
MODULE_LICENSE("GPL");