net-next-2.6.git: kernel/cgroup_freezer.c

diff --git a/kernel/cgroup_freezer.c b/kernel/cgroup_freezer.c
index c287627bed3d26b2b9b90ff9ce262cb78aadf6e7..e7bebb7c6c38ff7ddaefb807d7e50cd1bcc1adbc 100644
--- a/kernel/cgroup_freezer.c
+++ b/kernel/cgroup_freezer.c
@@ -153,13 +153,6 @@ static void freezer_destroy(struct cgroup_subsys *ss,
        kfree(cgroup_freezer(cgroup));
 }
 
-/* Task is frozen or will freeze immediately when next it gets woken */
-static bool is_task_frozen_enough(struct task_struct *task)
-{
-       return frozen(task) ||
-               (task_is_stopped_or_traced(task) && freezing(task));
-}
-
 /*
  * The call to cgroup_lock() in the freezer.state write method prevents
  * a write to that file racing against an attach, and hence the
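The helper deleted above treated a stopped or ptrace-stopped task that had
merely been asked to freeze as if it were already frozen, so a cgroup could
be reported FROZEN while some of its tasks had not actually entered the
refrigerator. The rest of the patch replaces every caller with the plain
frozen() test. For reference, the two predicates involved read roughly as
follows in kernels of this generation (paraphrased from
include/linux/freezer.h; treat the exact definitions as an assumption, they
vary across versions):

  /* Task has actually entered the refrigerator: PF_FROZEN is set. */
  static inline int frozen(struct task_struct *p)
  {
          return p->flags & PF_FROZEN;
  }

  /* Task has been asked to freeze (TIF_FREEZE) but may not be frozen yet. */
  static inline int freezing(struct task_struct *p)
  {
          return test_tsk_thread_flag(p, TIF_FREEZE);
  }

Only frozen() guarantees the task is parked; freezing() is just the pending
request, which is why the combined test was too optimistic.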
@@ -236,31 +229,30 @@ static void freezer_fork(struct cgroup_subsys *ss, struct task_struct *task)
 /*
  * caller must hold freezer->lock
  */
-static void update_freezer_state(struct cgroup *cgroup,
+static void update_if_frozen(struct cgroup *cgroup,
                                 struct freezer *freezer)
 {
        struct cgroup_iter it;
        struct task_struct *task;
        unsigned int nfrozen = 0, ntotal = 0;
+       enum freezer_state old_state = freezer->state;
 
        cgroup_iter_start(cgroup, &it);
        while ((task = cgroup_iter_next(cgroup, &it))) {
                ntotal++;
-               if (is_task_frozen_enough(task))
+               if (frozen(task))
                        nfrozen++;
        }
 
-       /*
-        * Transition to FROZEN when no new tasks can be added ensures
-        * that we never exist in the FROZEN state while there are unfrozen
-        * tasks.
-        */
-       if (nfrozen == ntotal)
-               freezer->state = CGROUP_FROZEN;
-       else if (nfrozen > 0)
-               freezer->state = CGROUP_FREEZING;
-       else
-               freezer->state = CGROUP_THAWED;
+       if (old_state == CGROUP_THAWED) {
+               BUG_ON(nfrozen > 0);
+       } else if (old_state == CGROUP_FREEZING) {
+               if (nfrozen == ntotal)
+                       freezer->state = CGROUP_FROZEN;
+       } else { /* old_state == CGROUP_FROZEN */
+               BUG_ON(nfrozen != ntotal);
+       }
+
        cgroup_iter_end(cgroup, &it);
 }
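
After this rewrite, update_if_frozen() is monotonic: it only completes the
FREEZING -> FROZEN transition once every task in the cgroup is genuinely
frozen, and in the other two states it merely asserts the corresponding
invariant (a THAWED group contains no frozen tasks, a FROZEN group contains
nothing but frozen tasks). The old version recomputed the state from the raw
counts on every call, so it could bounce a FROZEN group back to FREEZING, or
a FREEZING group back to THAWED, behind the back of the writer that started
the freeze. For context, the states it walks, as declared near the top of
kernel/cgroup_freezer.c:

  enum freezer_state {
          CGROUP_THAWED = 0,
          CGROUP_FREEZING,
          CGROUP_FROZEN,
  };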
 
@@ -279,7 +271,7 @@ static int freezer_read(struct cgroup *cgroup, struct cftype *cft,
        if (state == CGROUP_FREEZING) {
                /* We change from FREEZING to FROZEN lazily if the cgroup was
                 * only partially frozen when we exitted write. */
-               update_freezer_state(cgroup, freezer);
+               update_if_frozen(cgroup, freezer);
                state = freezer->state;
        }
        spin_unlock_irq(&freezer->lock);
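
This is the lazy completion the comment describes: a write that left the
group only partially frozen parks it in FREEZING, and the next reader
finishes the FREEZING -> FROZEN transition under freezer->lock. A sketch of
the enclosing read handler, reconstructed around this hunk (the cgroup
locking is elided, and the seq_file plumbing and the freezer_state_strs[]
name are taken from kernels of this generation, so treat them as
assumptions):

  static int freezer_read(struct cgroup *cgroup, struct cftype *cft,
                          struct seq_file *m)
  {
          struct freezer *freezer = cgroup_freezer(cgroup);
          enum freezer_state state;

          /* cgroup-level locking elided for brevity */
          spin_lock_irq(&freezer->lock);
          state = freezer->state;
          if (state == CGROUP_FREEZING) {
                  /* finish FREEZING -> FROZEN if everyone froze meanwhile */
                  update_if_frozen(cgroup, freezer);
                  state = freezer->state;
          }
          spin_unlock_irq(&freezer->lock);

          /* report the (possibly just-updated) state to userspace */
          seq_puts(m, freezer_state_strs[state]);   /* assumed name */
          seq_putc(m, '\n');
          return 0;
  }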
@@ -301,7 +293,7 @@ static int try_to_freeze_cgroup(struct cgroup *cgroup, struct freezer *freezer)
        while ((task = cgroup_iter_next(cgroup, &it))) {
                if (!freeze_task(task, true))
                        continue;
-               if (is_task_frozen_enough(task))
+               if (frozen(task))
                        continue;
                if (!freezing(task) && !freezer_should_skip(task))
                        num_cant_freeze_now++;
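
The same substitution in the freeze path: once freeze_task() has issued the
request, a task that is already in the refrigerator needs nothing further,
and only tasks that neither picked up the request nor are exempt (vfork
parents, via freezer_should_skip()) count as failures. The loop in full as
it reads after this patch, reconstructed around the hunk; the -EBUSY tail is
an assumption from kernels of this generation:

  static int try_to_freeze_cgroup(struct cgroup *cgroup, struct freezer *freezer)
  {
          struct cgroup_iter it;
          struct task_struct *task;
          unsigned int num_cant_freeze_now = 0;

          freezer->state = CGROUP_FREEZING;
          cgroup_iter_start(cgroup, &it);
          while ((task = cgroup_iter_next(cgroup, &it))) {
                  if (!freeze_task(task, true))
                          continue;   /* already frozen or not signal-freezable */
                  if (frozen(task))
                          continue;   /* entered the refrigerator meanwhile */
                  if (!freezing(task) && !freezer_should_skip(task))
                          num_cant_freeze_now++;   /* not exempt, not responding */
          }
          cgroup_iter_end(cgroup, &it);

          return num_cant_freeze_now ? -EBUSY : 0;
  }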
@@ -335,7 +327,7 @@ static int freezer_change_state(struct cgroup *cgroup,
 
        spin_lock_irq(&freezer->lock);
 
-       update_freezer_state(cgroup, freezer);
+       update_if_frozen(cgroup, freezer);
        if (goal_state == freezer->state)
                goto out;
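
The hunk above shows freezer_change_state() reconciling the recorded state
under freezer->lock before comparing it with the writer's goal; if the group
is already where the writer wants it, the write is a no-op. For
completeness, a sketch of the dispatch that follows the shown context,
reconstructed from kernels of this generation (the unfreeze_cgroup()
counterpart and the exact exit path are assumptions):

          switch (goal_state) {
          case CGROUP_THAWED:
                  unfreeze_cgroup(cgroup, freezer);
                  break;
          case CGROUP_FROZEN:
                  retval = try_to_freeze_cgroup(cgroup, freezer);
                  break;
          default:
                  BUG();
          }
  out:
          spin_unlock_irq(&freezer->lock);
          return retval;

Because update_if_frozen() now runs first under the lock, the early exit
compares against a state that is current, so a redundant write to
freezer.state is a cheap no-op.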