bbs.cooldavid.org Git - net-next-2.6.git/commitdiff
Merge branch 'vhost-net' of git://git.kernel.org/pub/scm/linux/kernel/git/mst/vhost
author    David S. Miller <davem@davemloft.net>  Fri, 10 Sep 2010 04:59:51 +0000 (21:59 -0700)
committer David S. Miller <davem@davemloft.net>  Fri, 10 Sep 2010 04:59:51 +0000 (21:59 -0700)
drivers/vhost/vhost.c
include/linux/cgroup.h
kernel/cgroup.c

diff --git a/drivers/vhost/vhost.c b/drivers/vhost/vhost.c
index 4b99117f3ecd209c63571c610f813bd41d50abba..c579dcc9200ccabe0f1bcf79afed59577e4a6f38 100644
--- a/drivers/vhost/vhost.c
+++ b/drivers/vhost/vhost.c
@@ -60,22 +60,25 @@ static int vhost_poll_wakeup(wait_queue_t *wait, unsigned mode, int sync,
        return 0;
 }
 
+static void vhost_work_init(struct vhost_work *work, vhost_work_fn_t fn)
+{
+       INIT_LIST_HEAD(&work->node);
+       work->fn = fn;
+       init_waitqueue_head(&work->done);
+       work->flushing = 0;
+       work->queue_seq = work->done_seq = 0;
+}
+
 /* Init poll structure */
 void vhost_poll_init(struct vhost_poll *poll, vhost_work_fn_t fn,
                     unsigned long mask, struct vhost_dev *dev)
 {
-       struct vhost_work *work = &poll->work;
-
        init_waitqueue_func_entry(&poll->wait, vhost_poll_wakeup);
        init_poll_funcptr(&poll->table, vhost_poll_func);
        poll->mask = mask;
        poll->dev = dev;
 
-       INIT_LIST_HEAD(&work->node);
-       work->fn = fn;
-       init_waitqueue_head(&work->done);
-       work->flushing = 0;
-       work->queue_seq = work->done_seq = 0;
+       vhost_work_init(&poll->work, fn);
 }
 
 /* Start polling a file. We add ourselves to file's wait queue. The caller must
@@ -95,35 +98,38 @@ void vhost_poll_stop(struct vhost_poll *poll)
        remove_wait_queue(poll->wqh, &poll->wait);
 }
 
-/* Flush any work that has been scheduled. When calling this, don't hold any
- * locks that are also used by the callback. */
-void vhost_poll_flush(struct vhost_poll *poll)
+static void vhost_work_flush(struct vhost_dev *dev, struct vhost_work *work)
 {
-       struct vhost_work *work = &poll->work;
        unsigned seq;
        int left;
        int flushing;
 
-       spin_lock_irq(&poll->dev->work_lock);
+       spin_lock_irq(&dev->work_lock);
        seq = work->queue_seq;
        work->flushing++;
-       spin_unlock_irq(&poll->dev->work_lock);
+       spin_unlock_irq(&dev->work_lock);
        wait_event(work->done, ({
-                  spin_lock_irq(&poll->dev->work_lock);
+                  spin_lock_irq(&dev->work_lock);
                   left = seq - work->done_seq <= 0;
-                  spin_unlock_irq(&poll->dev->work_lock);
+                  spin_unlock_irq(&dev->work_lock);
                   left;
        }));
-       spin_lock_irq(&poll->dev->work_lock);
+       spin_lock_irq(&dev->work_lock);
        flushing = --work->flushing;
-       spin_unlock_irq(&poll->dev->work_lock);
+       spin_unlock_irq(&dev->work_lock);
        BUG_ON(flushing < 0);
 }
 
-void vhost_poll_queue(struct vhost_poll *poll)
+/* Flush any work that has been scheduled. When calling this, don't hold any
+ * locks that are also used by the callback. */
+void vhost_poll_flush(struct vhost_poll *poll)
+{
+       vhost_work_flush(poll->dev, &poll->work);
+}
+
+static inline void vhost_work_queue(struct vhost_dev *dev,
+                                   struct vhost_work *work)
 {
-       struct vhost_dev *dev = poll->dev;
-       struct vhost_work *work = &poll->work;
        unsigned long flags;
 
        spin_lock_irqsave(&dev->work_lock, flags);
@@ -135,6 +141,11 @@ void vhost_poll_queue(struct vhost_poll *poll)
        spin_unlock_irqrestore(&dev->work_lock, flags);
 }
 
+void vhost_poll_queue(struct vhost_poll *poll)
+{
+       vhost_work_queue(poll->dev, &poll->work);
+}
+
 static void vhost_vq_reset(struct vhost_dev *dev,
                           struct vhost_virtqueue *vq)
 {
@@ -236,6 +247,29 @@ long vhost_dev_check_owner(struct vhost_dev *dev)
        return dev->mm == current->mm ? 0 : -EPERM;
 }
 
+struct vhost_attach_cgroups_struct {
+        struct vhost_work work;
+        struct task_struct *owner;
+        int ret;
+};
+
+static void vhost_attach_cgroups_work(struct vhost_work *work)
+{
+        struct vhost_attach_cgroups_struct *s;
+        s = container_of(work, struct vhost_attach_cgroups_struct, work);
+        s->ret = cgroup_attach_task_all(s->owner, current);
+}
+
+static int vhost_attach_cgroups(struct vhost_dev *dev)
+{
+        struct vhost_attach_cgroups_struct attach;
+        attach.owner = current;
+        vhost_work_init(&attach.work, vhost_attach_cgroups_work);
+        vhost_work_queue(dev, &attach.work);
+        vhost_work_flush(dev, &attach.work);
+        return attach.ret;
+}
+
 /* Caller should have device mutex */
 static long vhost_dev_set_owner(struct vhost_dev *dev)
 {
@@ -255,14 +289,16 @@ static long vhost_dev_set_owner(struct vhost_dev *dev)
        }
 
        dev->worker = worker;
-       err = cgroup_attach_task_current_cg(worker);
+       wake_up_process(worker);        /* avoid contributing to loadavg */
+
+       err = vhost_attach_cgroups(dev);
        if (err)
                goto err_cgroup;
-       wake_up_process(worker);        /* avoid contributing to loadavg */
 
        return 0;
 err_cgroup:
        kthread_stop(worker);
+       dev->worker = NULL;
 err_worker:
        if (dev->mm)
                mmput(dev->mm);
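
Note: the hunks above extract a generic work-item API (vhost_work_init(), vhost_work_queue(), vhost_work_flush()) from the poll-specific helpers. vhost_attach_cgroups() then uses that API to run cgroup_attach_task_all() on the worker thread itself, so that "current" inside the callback is the vhost kthread attaching itself to the owner's cgroups; the old cgroup_attach_task_current_cg(worker) call had that direction backwards. A minimal sketch of the resulting queue-then-flush pattern (only usable inside vhost.c, since the new helpers are static; the example_* names are hypothetical):

static void example_fn(struct vhost_work *work)
{
        /* Executes on the vhost worker kthread, not in the caller. */
}

static void example_run_on_worker(struct vhost_dev *dev)
{
        struct vhost_work work;        /* stack allocation is safe... */

        vhost_work_init(&work, example_fn);
        vhost_work_queue(dev, &work);  /* hand the item to the worker */
        vhost_work_flush(dev, &work);  /* ...because we block here until
                                        * the worker has run it */
}
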
diff --git a/include/linux/cgroup.h b/include/linux/cgroup.h
index ed3e92e41c6e5683ad3dbb823e48259f5150ac33..5a53d8f039a2934ab02abcf19b312aee80b88558 100644
--- a/include/linux/cgroup.h
+++ b/include/linux/cgroup.h
@@ -578,7 +578,11 @@ struct task_struct *cgroup_iter_next(struct cgroup *cgrp,
 void cgroup_iter_end(struct cgroup *cgrp, struct cgroup_iter *it);
 int cgroup_scan_tasks(struct cgroup_scanner *scan);
 int cgroup_attach_task(struct cgroup *, struct task_struct *);
-int cgroup_attach_task_current_cg(struct task_struct *);
+int cgroup_attach_task_all(struct task_struct *from, struct task_struct *);
+static inline int cgroup_attach_task_current_cg(struct task_struct *tsk)
+{
+       return cgroup_attach_task_all(current, tsk);
+}
 
 /*
  * CSS ID is ID for cgroup_subsys_state structs under subsys. This only works
@@ -636,6 +640,11 @@ static inline int cgroupstats_build(struct cgroupstats *stats,
 }
 
 /* No cgroups - nothing to do */
+static inline int cgroup_attach_task_all(struct task_struct *from,
+                                        struct task_struct *t)
+{
+       return 0;
+}
 static inline int cgroup_attach_task_current_cg(struct task_struct *t)
 {
        return 0;
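
Note: the old entry point survives as a static inline wrapper, so existing callers need no changes, and the CONFIG_CGROUPS=n stubs keep both names compiling to "return 0". After this patch the following two calls are equivalent:

        err = cgroup_attach_task_current_cg(tsk);    /* legacy wrapper */
        err = cgroup_attach_task_all(current, tsk);  /* what it expands to */
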
diff --git a/kernel/cgroup.c b/kernel/cgroup.c
index 192f88c5b0f9df29b80d51d4c5ceba5388d099eb..ed19afd9e3fe3f74eacada4c6b79deca210a689c 100644
--- a/kernel/cgroup.c
+++ b/kernel/cgroup.c
@@ -1791,10 +1791,11 @@ out:
 }
 
 /**
- * cgroup_attach_task_current_cg - attach task 'tsk' to current task's cgroup
+ * cgroup_attach_task_all - attach task 'tsk' to all cgroups of task 'from'
+ * @from: attach to all cgroups of a given task
  * @tsk: the task to be attached
  */
-int cgroup_attach_task_current_cg(struct task_struct *tsk)
+int cgroup_attach_task_all(struct task_struct *from, struct task_struct *tsk)
 {
        struct cgroupfs_root *root;
        struct cgroup *cur_cg;
@@ -1802,7 +1803,7 @@ int cgroup_attach_task_current_cg(struct task_struct *tsk)
 
        cgroup_lock();
        for_each_active_root(root) {
-               cur_cg = task_cgroup_from_root(current, root);
+               cur_cg = task_cgroup_from_root(from, root);
                retval = cgroup_attach_task(cur_cg, tsk);
                if (retval)
                        break;
@@ -1811,7 +1812,7 @@ int cgroup_attach_task_current_cg(struct task_struct *tsk)
 
        return retval;
 }
-EXPORT_SYMBOL_GPL(cgroup_attach_task_current_cg);
+EXPORT_SYMBOL_GPL(cgroup_attach_task_all);
 
 /*
  * Attach task with pid 'pid' to cgroup 'cgrp'. Call with cgroup_mutex
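
Note: cgroup_attach_task_all() walks every active hierarchy under cgroup_lock() and moves 'tsk' into whatever cgroup 'from' occupies in that hierarchy, stopping at the first error. The typical caller is a kernel thread that should follow a userspace owner's cgroup placement, mirroring vhost_attach_cgroups_work() above (a sketch under that assumption; 'owner' is whatever task_struct the caller captured):

#include <linux/cgroup.h>
#include <linux/sched.h>

static int example_follow_owner(struct task_struct *owner)
{
        /* 'current' is the kernel thread attaching itself, so any
         * permission checks are made against the kthread, not 'owner'. */
        return cgroup_attach_task_all(owner, current);
}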