lglock: make lg_lock_global() actually lock globally
author     Jonathan Corbet <corbet@lwn.net>
           Wed, 8 Sep 2010 22:54:54 +0000 (16:54 -0600)
committer  Linus Torvalds <torvalds@linux-foundation.org>
           Thu, 9 Sep 2010 16:09:43 +0000 (09:09 -0700)
lg_lock_global() currently only acquires spinlocks for online CPUs, but
it's meant to lock all possible CPUs.  Lglock-protected resources may be
associated with removed CPUs - and, indeed, that could happen with the
per-superblock open files lists.

At Nick's suggestion, change for_each_online_cpu() to
for_each_possible_cpu() to protect accesses to those resources.

Cc: Al Viro <viro@ZenIV.linux.org.uk>
Acked-by: Nick Piggin <npiggin@kernel.dk>
Signed-off-by: Jonathan Corbet <corbet@lwn.net>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
include/linux/lglock.h

index b288cb713b902182cca71156e5d9f75c7452a117..f549056fb20bd5533555918cc1b1f9805c2cdcc3 100644
        int i;                                                          \
        preempt_disable();                                              \
        rwlock_acquire(&name##_lock_dep_map, 0, 0, _RET_IP_);           \
-       for_each_online_cpu(i) {                                        \
+       for_each_possible_cpu(i) {                                      \
                arch_spinlock_t *lock;                                  \
                lock = &per_cpu(name##_lock, i);                        \
                arch_spin_lock(lock);                                   \
  void name##_global_unlock(void) {                                     \
        int i;                                                          \
        rwlock_release(&name##_lock_dep_map, 1, _RET_IP_);              \
-       for_each_online_cpu(i) {                                        \
+       for_each_possible_cpu(i) {                                      \
                arch_spinlock_t *lock;                                  \
                lock = &per_cpu(name##_lock, i);                        \
                arch_spin_unlock(lock);                                 \
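
For readers unfamiliar with lglocks, the following is a minimal, non-macro
sketch of the pattern the patch touches, assuming a single hypothetical lock
named my_lock (the real code is generated by the DEFINE_LGLOCK() macros above
and also carries lockdep annotations omitted here).  It illustrates why the
global path must iterate over all possible CPUs: the per-CPU locks of offline
CPUs may still guard data, such as open-file list entries left behind when a
CPU was removed.

/*
 * Minimal sketch of the lglock pattern, for illustration only.
 * my_lock, my_local_lock() and my_global_lock() are hypothetical names;
 * the real implementation is generated by DEFINE_LGLOCK()/DECLARE_LGLOCK().
 */
#include <linux/percpu.h>
#include <linux/spinlock.h>
#include <linux/cpumask.h>

static DEFINE_PER_CPU(arch_spinlock_t, my_lock) = __ARCH_SPIN_LOCK_UNLOCKED;

/* Fast path: protect only the current CPU's part of the data. */
static void my_local_lock(void)
{
	preempt_disable();
	arch_spin_lock(&__get_cpu_var(my_lock));
}

static void my_local_unlock(void)
{
	arch_spin_unlock(&__get_cpu_var(my_lock));
	preempt_enable();
}

/*
 * Slow path: take every per-CPU lock.  Iterating over possible CPUs,
 * not just online ones, keeps data left behind by an offlined CPU
 * covered by the global lock.
 */
static void my_global_lock(void)
{
	int i;

	preempt_disable();
	for_each_possible_cpu(i)
		arch_spin_lock(&per_cpu(my_lock, i));
}

static void my_global_unlock(void)
{
	int i;

	for_each_possible_cpu(i)
		arch_spin_unlock(&per_cpu(my_lock, i));
	preempt_enable();
}

Taking the locks of CPUs that are offline (or may never come online) is cheap,
since those locks are uncontended; skipping them, as the old code did, leaves
any data still associated with such CPUs unprotected while the global lock is
held.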