locking: Convert raw_spinlock to arch_spinlock
author    Thomas Gleixner <tglx@linutronix.de>
          Wed, 2 Dec 2009 18:49:50 +0000 (19:49 +0100)
committer Thomas Gleixner <tglx@linutronix.de>
          Mon, 14 Dec 2009 22:55:32 +0000 (23:55 +0100)
The raw_spin* namespace was taken by lockdep for the
architecture-specific implementations. raw_spin_* would be the ideal
namespace for the spinlocks which are not converted to sleeping locks
in preempt-rt.

Linus suggested converting the raw_ locks to arch_ locks and cleaning
up the namespace instead of using an artificial name like core_spin,
atomic_spin or whatever.

No functional change.

Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Acked-by: Peter Zijlstra <peterz@infradead.org>
Acked-by: David S. Miller <davem@davemloft.net>
Acked-by: Ingo Molnar <mingo@elte.hu>
Cc: linux-arch@vger.kernel.org
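
A minimal sketch of the naming layers after this patch (not kernel code:
the lock body below uses GCC __sync builtins as a stand-in for the
per-architecture assembly shown in the diff). The architecture-level lock
word is now arch_spinlock_t (previously raw_spinlock_t), and the generic
spinlock_t keeps embedding it as its raw_lock member, as the
include/linux/spinlock_types.h hunk further down shows:

#include <stdio.h>

/* was: raw_spinlock_t -- the architecture-level lock word */
typedef struct arch_spinlock {
        volatile unsigned int slock;
} arch_spinlock_t;

#define __RAW_SPIN_LOCK_UNLOCKED        { 0 }

/*
 * arch layer: the __raw_spin_* operations keep their names, only the
 * type they operate on was renamed.
 */
static inline void __raw_spin_lock(arch_spinlock_t *lock)
{
        /* stand-in for the arch asm: spin on an atomic test-and-set */
        while (__sync_lock_test_and_set(&lock->slock, 1))
                ;       /* the real code would cpu_relax() here */
}

static inline void __raw_spin_unlock(arch_spinlock_t *lock)
{
        __sync_lock_release(&lock->slock);
}

/* generic layer (include/linux/spinlock_types.h): wraps the arch type */
typedef struct {
        arch_spinlock_t raw_lock;
} spinlock_t;

int main(void)
{
        spinlock_t lock = { .raw_lock = __RAW_SPIN_LOCK_UNLOCKED };

        __raw_spin_lock(&lock.raw_lock);
        printf("locked:   slock=%u\n", lock.raw_lock.slock);
        __raw_spin_unlock(&lock.raw_lock);
        printf("unlocked: slock=%u\n", lock.raw_lock.slock);
        return 0;
}
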
51 files changed:
arch/alpha/include/asm/spinlock.h
arch/alpha/include/asm/spinlock_types.h
arch/arm/include/asm/spinlock.h
arch/arm/include/asm/spinlock_types.h
arch/blackfin/include/asm/spinlock.h
arch/blackfin/include/asm/spinlock_types.h
arch/cris/include/arch-v32/arch/spinlock.h
arch/ia64/include/asm/spinlock.h
arch/ia64/include/asm/spinlock_types.h
arch/m32r/include/asm/spinlock.h
arch/m32r/include/asm/spinlock_types.h
arch/mips/include/asm/spinlock.h
arch/mips/include/asm/spinlock_types.h
arch/parisc/include/asm/atomic.h
arch/parisc/include/asm/spinlock.h
arch/parisc/include/asm/spinlock_types.h
arch/parisc/lib/bitops.c
arch/powerpc/include/asm/rtas.h
arch/powerpc/include/asm/spinlock.h
arch/powerpc/include/asm/spinlock_types.h
arch/powerpc/kernel/rtas.c
arch/powerpc/lib/locks.c
arch/powerpc/platforms/pasemi/setup.c
arch/s390/include/asm/spinlock.h
arch/s390/include/asm/spinlock_types.h
arch/s390/lib/spinlock.c
arch/sh/include/asm/spinlock.h
arch/sh/include/asm/spinlock_types.h
arch/sparc/include/asm/spinlock_32.h
arch/sparc/include/asm/spinlock_64.h
arch/sparc/include/asm/spinlock_types.h
arch/x86/include/asm/paravirt.h
arch/x86/include/asm/paravirt_types.h
arch/x86/include/asm/spinlock.h
arch/x86/include/asm/spinlock_types.h
arch/x86/kernel/dumpstack.c
arch/x86/kernel/paravirt-spinlocks.c
arch/x86/kernel/tsc_sync.c
arch/x86/xen/spinlock.c
include/asm-generic/bitops/atomic.h
include/linux/spinlock.h
include/linux/spinlock_types.h
include/linux/spinlock_types_up.h
include/linux/spinlock_up.h
kernel/lockdep.c
kernel/trace/ring_buffer.c
kernel/trace/trace.c
kernel/trace/trace_clock.c
kernel/trace/trace_sched_wakeup.c
kernel/trace/trace_stack.c
lib/spinlock_debug.c

index e38fb95cb3352fd15c5bf3e505459bc8f702500f..bdb26a1940b4afdd8a674fc00cc0f5594aec18fe 100644 (file)
 #define __raw_spin_unlock_wait(x) \
                do { cpu_relax(); } while ((x)->lock)
 
-static inline void __raw_spin_unlock(raw_spinlock_t * lock)
+static inline void __raw_spin_unlock(arch_spinlock_t * lock)
 {
        mb();
        lock->lock = 0;
 }
 
-static inline void __raw_spin_lock(raw_spinlock_t * lock)
+static inline void __raw_spin_lock(arch_spinlock_t * lock)
 {
        long tmp;
 
@@ -43,7 +43,7 @@ static inline void __raw_spin_lock(raw_spinlock_t * lock)
        : "m"(lock->lock) : "memory");
 }
 
-static inline int __raw_spin_trylock(raw_spinlock_t *lock)
+static inline int __raw_spin_trylock(arch_spinlock_t *lock)
 {
        return !test_and_set_bit(0, &lock->lock);
 }
index 8141eb5ebf0d207705f74a29364aec095339864d..bb94a51e53d2b12411126749db51e9c97ed81acb 100644 (file)
@@ -7,7 +7,7 @@
 
 typedef struct {
        volatile unsigned int lock;
-} raw_spinlock_t;
+} arch_spinlock_t;
 
 #define __RAW_SPIN_LOCK_UNLOCKED       { 0 }
 
index c13681ac1ede0cada819fb8c577b2e0e7c82ae5c..4e7712ee93949f0e12fd0f668c6e07e016ca89c5 100644 (file)
@@ -23,7 +23,7 @@
 
 #define __raw_spin_lock_flags(lock, flags) __raw_spin_lock(lock)
 
-static inline void __raw_spin_lock(raw_spinlock_t *lock)
+static inline void __raw_spin_lock(arch_spinlock_t *lock)
 {
        unsigned long tmp;
 
@@ -43,7 +43,7 @@ static inline void __raw_spin_lock(raw_spinlock_t *lock)
        smp_mb();
 }
 
-static inline int __raw_spin_trylock(raw_spinlock_t *lock)
+static inline int __raw_spin_trylock(arch_spinlock_t *lock)
 {
        unsigned long tmp;
 
@@ -63,7 +63,7 @@ static inline int __raw_spin_trylock(raw_spinlock_t *lock)
        }
 }
 
-static inline void __raw_spin_unlock(raw_spinlock_t *lock)
+static inline void __raw_spin_unlock(arch_spinlock_t *lock)
 {
        smp_mb();
 
index 43e83f6d2ee5321e5ee08ddb1ef2b8dfb5904c1a..5e9d3eadd1675232feabf26ef9c0ef924cf570c6 100644 (file)
@@ -7,7 +7,7 @@
 
 typedef struct {
        volatile unsigned int lock;
-} raw_spinlock_t;
+} arch_spinlock_t;
 
 #define __RAW_SPIN_LOCK_UNLOCKED       { 0 }
 
index b0c7f0ee4b03e54d3d24004d9df75e1345c4ce8a..fc16b4c5309ba73ab1fd3531f2bc577f0e910d07 100644 (file)
@@ -24,29 +24,29 @@ asmlinkage void __raw_write_lock_asm(volatile int *ptr);
 asmlinkage int __raw_write_trylock_asm(volatile int *ptr);
 asmlinkage void __raw_write_unlock_asm(volatile int *ptr);
 
-static inline int __raw_spin_is_locked(raw_spinlock_t *lock)
+static inline int __raw_spin_is_locked(arch_spinlock_t *lock)
 {
        return __raw_spin_is_locked_asm(&lock->lock);
 }
 
-static inline void __raw_spin_lock(raw_spinlock_t *lock)
+static inline void __raw_spin_lock(arch_spinlock_t *lock)
 {
        __raw_spin_lock_asm(&lock->lock);
 }
 
 #define __raw_spin_lock_flags(lock, flags) __raw_spin_lock(lock)
 
-static inline int __raw_spin_trylock(raw_spinlock_t *lock)
+static inline int __raw_spin_trylock(arch_spinlock_t *lock)
 {
        return __raw_spin_trylock_asm(&lock->lock);
 }
 
-static inline void __raw_spin_unlock(raw_spinlock_t *lock)
+static inline void __raw_spin_unlock(arch_spinlock_t *lock)
 {
        __raw_spin_unlock_asm(&lock->lock);
 }
 
-static inline void __raw_spin_unlock_wait(raw_spinlock_t *lock)
+static inline void __raw_spin_unlock_wait(arch_spinlock_t *lock)
 {
        while (__raw_spin_is_locked(lock))
                cpu_relax();
index be75762c06100a3b2e19255a855c29dabd99c120..03b377abf5c07f11724ed250e4c686e7131f0137 100644 (file)
@@ -15,7 +15,7 @@
 
 typedef struct {
        volatile unsigned int lock;
-} raw_spinlock_t;
+} arch_spinlock_t;
 
 #define __RAW_SPIN_LOCK_UNLOCKED       { 0 }
 
index 367a53ea10c5714de719888a5ed12de83c97bc87..e253457765f2c0f284743906da257d4a7dc0a797 100644 (file)
@@ -9,12 +9,12 @@ extern void cris_spin_unlock(void *l, int val);
 extern void cris_spin_lock(void *l);
 extern int cris_spin_trylock(void *l);
 
-static inline int __raw_spin_is_locked(raw_spinlock_t *x)
+static inline int __raw_spin_is_locked(arch_spinlock_t *x)
 {
        return *(volatile signed char *)(&(x)->slock) <= 0;
 }
 
-static inline void __raw_spin_unlock(raw_spinlock_t *lock)
+static inline void __raw_spin_unlock(arch_spinlock_t *lock)
 {
        __asm__ volatile ("move.d %1,%0" \
                          : "=m" (lock->slock) \
@@ -22,24 +22,24 @@ static inline void __raw_spin_unlock(raw_spinlock_t *lock)
                          : "memory");
 }
 
-static inline void __raw_spin_unlock_wait(raw_spinlock_t *lock)
+static inline void __raw_spin_unlock_wait(arch_spinlock_t *lock)
 {
        while (__raw_spin_is_locked(lock))
                cpu_relax();
 }
 
-static inline int __raw_spin_trylock(raw_spinlock_t *lock)
+static inline int __raw_spin_trylock(arch_spinlock_t *lock)
 {
        return cris_spin_trylock((void *)&lock->slock);
 }
 
-static inline void __raw_spin_lock(raw_spinlock_t *lock)
+static inline void __raw_spin_lock(arch_spinlock_t *lock)
 {
        cris_spin_lock((void *)&lock->slock);
 }
 
 static inline void
-__raw_spin_lock_flags(raw_spinlock_t *lock, unsigned long flags)
+__raw_spin_lock_flags(arch_spinlock_t *lock, unsigned long flags)
 {
        __raw_spin_lock(lock);
 }
index 239ecdc9516d042e69fc5b1773cfa2fb29fcad4a..9fbdf7e6108739aa17a66478786148e2c71a9e1f 100644 (file)
@@ -38,7 +38,7 @@
 #define TICKET_BITS    15
 #define        TICKET_MASK     ((1 << TICKET_BITS) - 1)
 
-static __always_inline void __ticket_spin_lock(raw_spinlock_t *lock)
+static __always_inline void __ticket_spin_lock(arch_spinlock_t *lock)
 {
        int     *p = (int *)&lock->lock, ticket, serve;
 
@@ -58,7 +58,7 @@ static __always_inline void __ticket_spin_lock(raw_spinlock_t *lock)
        }
 }
 
-static __always_inline int __ticket_spin_trylock(raw_spinlock_t *lock)
+static __always_inline int __ticket_spin_trylock(arch_spinlock_t *lock)
 {
        int tmp = ACCESS_ONCE(lock->lock);
 
@@ -67,7 +67,7 @@ static __always_inline int __ticket_spin_trylock(raw_spinlock_t *lock)
        return 0;
 }
 
-static __always_inline void __ticket_spin_unlock(raw_spinlock_t *lock)
+static __always_inline void __ticket_spin_unlock(arch_spinlock_t *lock)
 {
        unsigned short  *p = (unsigned short *)&lock->lock + 1, tmp;
 
@@ -75,7 +75,7 @@ static __always_inline void __ticket_spin_unlock(raw_spinlock_t *lock)
        ACCESS_ONCE(*p) = (tmp + 2) & ~1;
 }
 
-static __always_inline void __ticket_spin_unlock_wait(raw_spinlock_t *lock)
+static __always_inline void __ticket_spin_unlock_wait(arch_spinlock_t *lock)
 {
        int     *p = (int *)&lock->lock, ticket;
 
@@ -89,53 +89,53 @@ static __always_inline void __ticket_spin_unlock_wait(raw_spinlock_t *lock)
        }
 }
 
-static inline int __ticket_spin_is_locked(raw_spinlock_t *lock)
+static inline int __ticket_spin_is_locked(arch_spinlock_t *lock)
 {
        long tmp = ACCESS_ONCE(lock->lock);
 
        return !!(((tmp >> TICKET_SHIFT) ^ tmp) & TICKET_MASK);
 }
 
-static inline int __ticket_spin_is_contended(raw_spinlock_t *lock)
+static inline int __ticket_spin_is_contended(arch_spinlock_t *lock)
 {
        long tmp = ACCESS_ONCE(lock->lock);
 
        return ((tmp - (tmp >> TICKET_SHIFT)) & TICKET_MASK) > 1;
 }
 
-static inline int __raw_spin_is_locked(raw_spinlock_t *lock)
+static inline int __raw_spin_is_locked(arch_spinlock_t *lock)
 {
        return __ticket_spin_is_locked(lock);
 }
 
-static inline int __raw_spin_is_contended(raw_spinlock_t *lock)
+static inline int __raw_spin_is_contended(arch_spinlock_t *lock)
 {
        return __ticket_spin_is_contended(lock);
 }
 #define __raw_spin_is_contended        __raw_spin_is_contended
 
-static __always_inline void __raw_spin_lock(raw_spinlock_t *lock)
+static __always_inline void __raw_spin_lock(arch_spinlock_t *lock)
 {
        __ticket_spin_lock(lock);
 }
 
-static __always_inline int __raw_spin_trylock(raw_spinlock_t *lock)
+static __always_inline int __raw_spin_trylock(arch_spinlock_t *lock)
 {
        return __ticket_spin_trylock(lock);
 }
 
-static __always_inline void __raw_spin_unlock(raw_spinlock_t *lock)
+static __always_inline void __raw_spin_unlock(arch_spinlock_t *lock)
 {
        __ticket_spin_unlock(lock);
 }
 
-static __always_inline void __raw_spin_lock_flags(raw_spinlock_t *lock,
+static __always_inline void __raw_spin_lock_flags(arch_spinlock_t *lock,
                                                  unsigned long flags)
 {
        __raw_spin_lock(lock);
 }
 
-static inline void __raw_spin_unlock_wait(raw_spinlock_t *lock)
+static inline void __raw_spin_unlock_wait(arch_spinlock_t *lock)
 {
        __ticket_spin_unlock_wait(lock);
 }
index 474e46f1ab4a0a19527ecb2b2a32b05d520a1400..447ccc6ca7a84012fa327c58ce898adcd763de6e 100644 (file)
@@ -7,7 +7,7 @@
 
 typedef struct {
        volatile unsigned int lock;
-} raw_spinlock_t;
+} arch_spinlock_t;
 
 #define __RAW_SPIN_LOCK_UNLOCKED       { 0 }
 
index dded923883b2549c52bebb63e5ba5fe664382cb8..0c0164225bc06311a1fac388b4ea843903ac8945 100644 (file)
@@ -36,7 +36,7 @@
  * __raw_spin_trylock() tries to get the lock and returns a result.
  * On the m32r, the result value is 1 (= Success) or 0 (= Failure).
  */
-static inline int __raw_spin_trylock(raw_spinlock_t *lock)
+static inline int __raw_spin_trylock(arch_spinlock_t *lock)
 {
        int oldval;
        unsigned long tmp1, tmp2;
@@ -69,7 +69,7 @@ static inline int __raw_spin_trylock(raw_spinlock_t *lock)
        return (oldval > 0);
 }
 
-static inline void __raw_spin_lock(raw_spinlock_t *lock)
+static inline void __raw_spin_lock(arch_spinlock_t *lock)
 {
        unsigned long tmp0, tmp1;
 
@@ -111,7 +111,7 @@ static inline void __raw_spin_lock(raw_spinlock_t *lock)
        );
 }
 
-static inline void __raw_spin_unlock(raw_spinlock_t *lock)
+static inline void __raw_spin_unlock(arch_spinlock_t *lock)
 {
        mb();
        lock->slock = 1;
index 83f52105c0e4a33f5ab282a78f9c25ce6f05a3bb..17d15bd6322d37af48586619e8645a0b8d6677eb 100644 (file)
@@ -7,7 +7,7 @@
 
 typedef struct {
        volatile int slock;
-} raw_spinlock_t;
+} arch_spinlock_t;
 
 #define __RAW_SPIN_LOCK_UNLOCKED       { 1 }
 
index 5b60a09a0f0894e8c8b1301683064b841a5b8e4b..0f16d0673b4aed9bd6d1f5d6d44ff69309d341ba 100644 (file)
@@ -34,7 +34,7 @@
  * becomes equal to the the initial value of the tail.
  */
 
-static inline int __raw_spin_is_locked(raw_spinlock_t *lock)
+static inline int __raw_spin_is_locked(arch_spinlock_t *lock)
 {
        unsigned int counters = ACCESS_ONCE(lock->lock);
 
@@ -45,7 +45,7 @@ static inline int __raw_spin_is_locked(raw_spinlock_t *lock)
 #define __raw_spin_unlock_wait(x) \
        while (__raw_spin_is_locked(x)) { cpu_relax(); }
 
-static inline int __raw_spin_is_contended(raw_spinlock_t *lock)
+static inline int __raw_spin_is_contended(arch_spinlock_t *lock)
 {
        unsigned int counters = ACCESS_ONCE(lock->lock);
 
@@ -53,7 +53,7 @@ static inline int __raw_spin_is_contended(raw_spinlock_t *lock)
 }
 #define __raw_spin_is_contended        __raw_spin_is_contended
 
-static inline void __raw_spin_lock(raw_spinlock_t *lock)
+static inline void __raw_spin_lock(arch_spinlock_t *lock)
 {
        int my_ticket;
        int tmp;
@@ -134,7 +134,7 @@ static inline void __raw_spin_lock(raw_spinlock_t *lock)
        smp_llsc_mb();
 }
 
-static inline void __raw_spin_unlock(raw_spinlock_t *lock)
+static inline void __raw_spin_unlock(arch_spinlock_t *lock)
 {
        int tmp;
 
@@ -174,7 +174,7 @@ static inline void __raw_spin_unlock(raw_spinlock_t *lock)
        }
 }
 
-static inline unsigned int __raw_spin_trylock(raw_spinlock_t *lock)
+static inline unsigned int __raw_spin_trylock(arch_spinlock_t *lock)
 {
        int tmp, tmp2, tmp3;
 
index adeedaa116c14ef379c7b8fb8fcc1237b4c66bfd..2e1060892d3bca10c2e02bb70700f5b5b2ca14c6 100644 (file)
@@ -12,7 +12,7 @@ typedef struct {
         * bits 15..28: ticket
         */
        unsigned int lock;
-} raw_spinlock_t;
+} arch_spinlock_t;
 
 #define __RAW_SPIN_LOCK_UNLOCKED       { 0 }
 
index 8bc9e96699b2a690456aa75585a9f89c05cad7bf..3a4ea778d4b6d679a4f28355352a7396a91523a6 100644 (file)
 #  define ATOMIC_HASH_SIZE 4
 #  define ATOMIC_HASH(a) (&(__atomic_hash[ (((unsigned long) (a))/L1_CACHE_BYTES) & (ATOMIC_HASH_SIZE-1) ]))
 
-extern raw_spinlock_t __atomic_hash[ATOMIC_HASH_SIZE] __lock_aligned;
+extern arch_spinlock_t __atomic_hash[ATOMIC_HASH_SIZE] __lock_aligned;
 
 /* Can't use raw_spin_lock_irq because of #include problems, so
  * this is the substitute */
 #define _atomic_spin_lock_irqsave(l,f) do {    \
-       raw_spinlock_t *s = ATOMIC_HASH(l);             \
+       arch_spinlock_t *s = ATOMIC_HASH(l);            \
        local_irq_save(f);                      \
        __raw_spin_lock(s);                     \
 } while(0)
 
 #define _atomic_spin_unlock_irqrestore(l,f) do {       \
-       raw_spinlock_t *s = ATOMIC_HASH(l);                     \
+       arch_spinlock_t *s = ATOMIC_HASH(l);                    \
        __raw_spin_unlock(s);                           \
        local_irq_restore(f);                           \
 } while(0)
index fae03e136fa8619b6d1efc09d750992949ec3dc2..69e8dca26744702c27e5a43b70f4ed48b4d86536 100644 (file)
@@ -5,7 +5,7 @@
 #include <asm/processor.h>
 #include <asm/spinlock_types.h>
 
-static inline int __raw_spin_is_locked(raw_spinlock_t *x)
+static inline int __raw_spin_is_locked(arch_spinlock_t *x)
 {
        volatile unsigned int *a = __ldcw_align(x);
        return *a == 0;
@@ -15,7 +15,7 @@ static inline int __raw_spin_is_locked(raw_spinlock_t *x)
 #define __raw_spin_unlock_wait(x) \
                do { cpu_relax(); } while (__raw_spin_is_locked(x))
 
-static inline void __raw_spin_lock_flags(raw_spinlock_t *x,
+static inline void __raw_spin_lock_flags(arch_spinlock_t *x,
                                         unsigned long flags)
 {
        volatile unsigned int *a;
@@ -33,7 +33,7 @@ static inline void __raw_spin_lock_flags(raw_spinlock_t *x,
        mb();
 }
 
-static inline void __raw_spin_unlock(raw_spinlock_t *x)
+static inline void __raw_spin_unlock(arch_spinlock_t *x)
 {
        volatile unsigned int *a;
        mb();
@@ -42,7 +42,7 @@ static inline void __raw_spin_unlock(raw_spinlock_t *x)
        mb();
 }
 
-static inline int __raw_spin_trylock(raw_spinlock_t *x)
+static inline int __raw_spin_trylock(arch_spinlock_t *x)
 {
        volatile unsigned int *a;
        int ret;
index 3f72f47cf4b20c798ac09a0b3ac520a069062046..735caafb81f5531239f3449e8761df71017040d3 100644 (file)
@@ -9,10 +9,10 @@ typedef struct {
        volatile unsigned int lock[4];
 # define __RAW_SPIN_LOCK_UNLOCKED      { { 1, 1, 1, 1 } }
 #endif
-} raw_spinlock_t;
+} arch_spinlock_t;
 
 typedef struct {
-       raw_spinlock_t lock;
+       arch_spinlock_t lock;
        volatile int counter;
 } raw_rwlock_t;
 
index e3eb739fab19d3eb20c6d3ae2a837abb860ed7db..fdd7f583de540cbce8cebc0193c835aa9eae3e37 100644 (file)
@@ -12,7 +12,7 @@
 #include <asm/atomic.h>
 
 #ifdef CONFIG_SMP
-raw_spinlock_t __atomic_hash[ATOMIC_HASH_SIZE] __lock_aligned = {
+arch_spinlock_t __atomic_hash[ATOMIC_HASH_SIZE] __lock_aligned = {
        [0 ... (ATOMIC_HASH_SIZE-1)]  = __RAW_SPIN_LOCK_UNLOCKED
 };
 #endif
index 168fce726201d3799fc3878e3a7620e1575a6dfd..20de73c36682e6cb7e75dbc2889a8304cc55afd7 100644 (file)
@@ -58,7 +58,7 @@ struct rtas_t {
        unsigned long entry;            /* physical address pointer */
        unsigned long base;             /* physical address pointer */
        unsigned long size;
-       raw_spinlock_t lock;
+       arch_spinlock_t lock;
        struct rtas_args args;
        struct device_node *dev;        /* virtual address pointer */
 };
index 198266cf9e2d2c95326d5ec9416bc6a2067d2fed..c0d44c92ff0ef45a8e6c661e23a9ff6ebe4d9aba 100644 (file)
@@ -54,7 +54,7 @@
  * This returns the old value in the lock, so we succeeded
  * in getting the lock if the return value is 0.
  */
-static inline unsigned long arch_spin_trylock(raw_spinlock_t *lock)
+static inline unsigned long arch_spin_trylock(arch_spinlock_t *lock)
 {
        unsigned long tmp, token;
 
@@ -73,7 +73,7 @@ static inline unsigned long arch_spin_trylock(raw_spinlock_t *lock)
        return tmp;
 }
 
-static inline int __raw_spin_trylock(raw_spinlock_t *lock)
+static inline int __raw_spin_trylock(arch_spinlock_t *lock)
 {
        CLEAR_IO_SYNC;
        return arch_spin_trylock(lock) == 0;
@@ -96,7 +96,7 @@ static inline int __raw_spin_trylock(raw_spinlock_t *lock)
 #if defined(CONFIG_PPC_SPLPAR) || defined(CONFIG_PPC_ISERIES)
 /* We only yield to the hypervisor if we are in shared processor mode */
 #define SHARED_PROCESSOR (get_lppaca()->shared_proc)
-extern void __spin_yield(raw_spinlock_t *lock);
+extern void __spin_yield(arch_spinlock_t *lock);
 extern void __rw_yield(raw_rwlock_t *lock);
 #else /* SPLPAR || ISERIES */
 #define __spin_yield(x)        barrier()
@@ -104,7 +104,7 @@ extern void __rw_yield(raw_rwlock_t *lock);
 #define SHARED_PROCESSOR       0
 #endif
 
-static inline void __raw_spin_lock(raw_spinlock_t *lock)
+static inline void __raw_spin_lock(arch_spinlock_t *lock)
 {
        CLEAR_IO_SYNC;
        while (1) {
@@ -120,7 +120,7 @@ static inline void __raw_spin_lock(raw_spinlock_t *lock)
 }
 
 static inline
-void __raw_spin_lock_flags(raw_spinlock_t *lock, unsigned long flags)
+void __raw_spin_lock_flags(arch_spinlock_t *lock, unsigned long flags)
 {
        unsigned long flags_dis;
 
@@ -140,7 +140,7 @@ void __raw_spin_lock_flags(raw_spinlock_t *lock, unsigned long flags)
        }
 }
 
-static inline void __raw_spin_unlock(raw_spinlock_t *lock)
+static inline void __raw_spin_unlock(arch_spinlock_t *lock)
 {
        SYNC_IO;
        __asm__ __volatile__("# __raw_spin_unlock\n\t"
@@ -149,7 +149,7 @@ static inline void __raw_spin_unlock(raw_spinlock_t *lock)
 }
 
 #ifdef CONFIG_PPC64
-extern void __raw_spin_unlock_wait(raw_spinlock_t *lock);
+extern void __raw_spin_unlock_wait(arch_spinlock_t *lock);
 #else
 #define __raw_spin_unlock_wait(lock) \
        do { while (__raw_spin_is_locked(lock)) cpu_relax(); } while (0)
index 74236c9f05b1ca85983ce74f504a903f2edc2fcb..4312e5baaf88610fb0871730007ad12a10557928 100644 (file)
@@ -7,7 +7,7 @@
 
 typedef struct {
        volatile unsigned int slock;
-} raw_spinlock_t;
+} arch_spinlock_t;
 
 #define __RAW_SPIN_LOCK_UNLOCKED       { 0 }
 
index bf90361bb70f3664fb02742bc3514f145207b26d..579069c1215283519a6090b2b14ec742d4f906e6 100644 (file)
@@ -978,7 +978,7 @@ int __init early_init_dt_scan_rtas(unsigned long node,
        return 1;
 }
 
-static raw_spinlock_t timebase_lock;
+static arch_spinlock_t timebase_lock;
 static u64 timebase = 0;
 
 void __cpuinit rtas_give_timebase(void)
index 79d0fa3a470d9eaf4f0811652775e16c7f1634ec..b06294cde499c3b8db937abad8080094fc46e58b 100644 (file)
@@ -25,7 +25,7 @@
 #include <asm/smp.h>
 #include <asm/firmware.h>
 
-void __spin_yield(raw_spinlock_t *lock)
+void __spin_yield(arch_spinlock_t *lock)
 {
        unsigned int lock_value, holder_cpu, yield_count;
 
@@ -82,7 +82,7 @@ void __rw_yield(raw_rwlock_t *rw)
 }
 #endif
 
-void __raw_spin_unlock_wait(raw_spinlock_t *lock)
+void __raw_spin_unlock_wait(arch_spinlock_t *lock)
 {
        while (lock->slock) {
                HMT_low();
index a4619347aa7e61a702e7a7f6c584080a31d5f4fb..be36fece41d7106b59a45b4652fe05fd4398c469 100644 (file)
@@ -71,7 +71,7 @@ static void pas_restart(char *cmd)
 }
 
 #ifdef CONFIG_SMP
-static raw_spinlock_t timebase_lock;
+static arch_spinlock_t timebase_lock;
 static unsigned long timebase;
 
 static void __devinit pas_give_timebase(void)
index c9af0d19c7ab7b4fc726528797be910ef7d7c0f3..6121fa4b83d98fb52fd32d1d9dc56582c5194d74 100644 (file)
@@ -57,12 +57,12 @@ _raw_compare_and_swap(volatile unsigned int *lock,
        do { while (__raw_spin_is_locked(lock)) \
                 _raw_spin_relax(lock); } while (0)
 
-extern void _raw_spin_lock_wait(raw_spinlock_t *);
-extern void _raw_spin_lock_wait_flags(raw_spinlock_t *, unsigned long flags);
-extern int _raw_spin_trylock_retry(raw_spinlock_t *);
-extern void _raw_spin_relax(raw_spinlock_t *lock);
+extern void _raw_spin_lock_wait(arch_spinlock_t *);
+extern void _raw_spin_lock_wait_flags(arch_spinlock_t *, unsigned long flags);
+extern int _raw_spin_trylock_retry(arch_spinlock_t *);
+extern void _raw_spin_relax(arch_spinlock_t *lock);
 
-static inline void __raw_spin_lock(raw_spinlock_t *lp)
+static inline void __raw_spin_lock(arch_spinlock_t *lp)
 {
        int old;
 
@@ -72,7 +72,7 @@ static inline void __raw_spin_lock(raw_spinlock_t *lp)
        _raw_spin_lock_wait(lp);
 }
 
-static inline void __raw_spin_lock_flags(raw_spinlock_t *lp,
+static inline void __raw_spin_lock_flags(arch_spinlock_t *lp,
                                         unsigned long flags)
 {
        int old;
@@ -83,7 +83,7 @@ static inline void __raw_spin_lock_flags(raw_spinlock_t *lp,
        _raw_spin_lock_wait_flags(lp, flags);
 }
 
-static inline int __raw_spin_trylock(raw_spinlock_t *lp)
+static inline int __raw_spin_trylock(arch_spinlock_t *lp)
 {
        int old;
 
@@ -93,7 +93,7 @@ static inline int __raw_spin_trylock(raw_spinlock_t *lp)
        return _raw_spin_trylock_retry(lp);
 }
 
-static inline void __raw_spin_unlock(raw_spinlock_t *lp)
+static inline void __raw_spin_unlock(arch_spinlock_t *lp)
 {
        _raw_compare_and_swap(&lp->owner_cpu, lp->owner_cpu, 0);
 }
index 654abc40de04a7b7729e3802b9d9b86244982da2..a93638eee3f79174e769b4680e5f99f9be05c063 100644 (file)
@@ -7,7 +7,7 @@
 
 typedef struct {
        volatile unsigned int owner_cpu;
-} __attribute__ ((aligned (4))) raw_spinlock_t;
+} __attribute__ ((aligned (4))) arch_spinlock_t;
 
 #define __RAW_SPIN_LOCK_UNLOCKED       { 0 }
 
index f7e0d30250b707b1ed46f7878f0f6790e7295e30..d4cbf71a6077d68201054db878d4de37247432ac 100644 (file)
@@ -39,7 +39,7 @@ static inline void _raw_yield_cpu(int cpu)
                _raw_yield();
 }
 
-void _raw_spin_lock_wait(raw_spinlock_t *lp)
+void _raw_spin_lock_wait(arch_spinlock_t *lp)
 {
        int count = spin_retry;
        unsigned int cpu = ~smp_processor_id();
@@ -59,7 +59,7 @@ void _raw_spin_lock_wait(raw_spinlock_t *lp)
 }
 EXPORT_SYMBOL(_raw_spin_lock_wait);
 
-void _raw_spin_lock_wait_flags(raw_spinlock_t *lp, unsigned long flags)
+void _raw_spin_lock_wait_flags(arch_spinlock_t *lp, unsigned long flags)
 {
        int count = spin_retry;
        unsigned int cpu = ~smp_processor_id();
@@ -82,7 +82,7 @@ void _raw_spin_lock_wait_flags(raw_spinlock_t *lp, unsigned long flags)
 }
 EXPORT_SYMBOL(_raw_spin_lock_wait_flags);
 
-int _raw_spin_trylock_retry(raw_spinlock_t *lp)
+int _raw_spin_trylock_retry(arch_spinlock_t *lp)
 {
        unsigned int cpu = ~smp_processor_id();
        int count;
@@ -97,7 +97,7 @@ int _raw_spin_trylock_retry(raw_spinlock_t *lp)
 }
 EXPORT_SYMBOL(_raw_spin_trylock_retry);
 
-void _raw_spin_relax(raw_spinlock_t *lock)
+void _raw_spin_relax(arch_spinlock_t *lock)
 {
        unsigned int cpu = lock->owner_cpu;
        if (cpu != 0)
index a28c9f0053fd4663f52d8c9fc1fb5a78feb31f48..5a05b3fcefbe699bbd7104ca9cfa36607afac436 100644 (file)
@@ -34,7 +34,7 @@
  *
  * We make no fairness assumptions.  They have a cost.
  */
-static inline void __raw_spin_lock(raw_spinlock_t *lock)
+static inline void __raw_spin_lock(arch_spinlock_t *lock)
 {
        unsigned long tmp;
        unsigned long oldval;
@@ -54,7 +54,7 @@ static inline void __raw_spin_lock(raw_spinlock_t *lock)
        );
 }
 
-static inline void __raw_spin_unlock(raw_spinlock_t *lock)
+static inline void __raw_spin_unlock(arch_spinlock_t *lock)
 {
        unsigned long tmp;
 
@@ -67,7 +67,7 @@ static inline void __raw_spin_unlock(raw_spinlock_t *lock)
        );
 }
 
-static inline int __raw_spin_trylock(raw_spinlock_t *lock)
+static inline int __raw_spin_trylock(arch_spinlock_t *lock)
 {
        unsigned long tmp, oldval;
 
index b4d244e7b60ca7532f7193e7365a9a67a293482f..37712c32ba9906fa06be096ade98ad660c1310c1 100644 (file)
@@ -7,7 +7,7 @@
 
 typedef struct {
        volatile unsigned int lock;
-} raw_spinlock_t;
+} arch_spinlock_t;
 
 #define __RAW_SPIN_LOCK_UNLOCKED               { 1 }
 
index 857630cff6364ff7bf2caf09761224f930daf6c2..b2d8a67f727eb62982914ee370dd3dd765f46636 100644 (file)
@@ -15,7 +15,7 @@
 #define __raw_spin_unlock_wait(lock) \
        do { while (__raw_spin_is_locked(lock)) cpu_relax(); } while (0)
 
-static inline void __raw_spin_lock(raw_spinlock_t *lock)
+static inline void __raw_spin_lock(arch_spinlock_t *lock)
 {
        __asm__ __volatile__(
        "\n1:\n\t"
@@ -35,7 +35,7 @@ static inline void __raw_spin_lock(raw_spinlock_t *lock)
        : "g2", "memory", "cc");
 }
 
-static inline int __raw_spin_trylock(raw_spinlock_t *lock)
+static inline int __raw_spin_trylock(arch_spinlock_t *lock)
 {
        unsigned int result;
        __asm__ __volatile__("ldstub [%1], %0"
@@ -45,7 +45,7 @@ static inline int __raw_spin_trylock(raw_spinlock_t *lock)
        return (result == 0);
 }
 
-static inline void __raw_spin_unlock(raw_spinlock_t *lock)
+static inline void __raw_spin_unlock(arch_spinlock_t *lock)
 {
        __asm__ __volatile__("stb %%g0, [%0]" : : "r" (lock) : "memory");
 }
index 43e514783582d4722e2aa61249e249bbde5e6fc2..38e16c40efc49a695166c952099bf72c1ef54e36 100644 (file)
@@ -27,7 +27,7 @@
        do {    rmb();                  \
        } while((lp)->lock)
 
-static inline void __raw_spin_lock(raw_spinlock_t *lock)
+static inline void __raw_spin_lock(arch_spinlock_t *lock)
 {
        unsigned long tmp;
 
@@ -46,7 +46,7 @@ static inline void __raw_spin_lock(raw_spinlock_t *lock)
        : "memory");
 }
 
-static inline int __raw_spin_trylock(raw_spinlock_t *lock)
+static inline int __raw_spin_trylock(arch_spinlock_t *lock)
 {
        unsigned long result;
 
@@ -59,7 +59,7 @@ static inline int __raw_spin_trylock(raw_spinlock_t *lock)
        return (result == 0UL);
 }
 
-static inline void __raw_spin_unlock(raw_spinlock_t *lock)
+static inline void __raw_spin_unlock(arch_spinlock_t *lock)
 {
        __asm__ __volatile__(
 "      stb             %%g0, [%0]"
@@ -68,7 +68,7 @@ static inline void __raw_spin_unlock(raw_spinlock_t *lock)
        : "memory");
 }
 
-static inline void __raw_spin_lock_flags(raw_spinlock_t *lock, unsigned long flags)
+static inline void __raw_spin_lock_flags(arch_spinlock_t *lock, unsigned long flags)
 {
        unsigned long tmp1, tmp2;
 
index 37cbe01c585b5c9e91bdab7c013946bf11b87e65..41d9a8fec13d8ff99ebe3610f0e974b62a0a8fb0 100644 (file)
@@ -7,7 +7,7 @@
 
 typedef struct {
        volatile unsigned char lock;
-} raw_spinlock_t;
+} arch_spinlock_t;
 
 #define __RAW_SPIN_LOCK_UNLOCKED       { 0 }
 
index efb38994859c6c961c68a0cf301106098e0b86cc..5655f75f10b73497cbf7350ebe9d528855e7a0bc 100644 (file)
@@ -731,34 +731,34 @@ static inline void __set_fixmap(unsigned /* enum fixed_addresses */ idx,
 
 #if defined(CONFIG_SMP) && defined(CONFIG_PARAVIRT_SPINLOCKS)
 
-static inline int __raw_spin_is_locked(struct raw_spinlock *lock)
+static inline int __raw_spin_is_locked(struct arch_spinlock *lock)
 {
        return PVOP_CALL1(int, pv_lock_ops.spin_is_locked, lock);
 }
 
-static inline int __raw_spin_is_contended(struct raw_spinlock *lock)
+static inline int __raw_spin_is_contended(struct arch_spinlock *lock)
 {
        return PVOP_CALL1(int, pv_lock_ops.spin_is_contended, lock);
 }
 #define __raw_spin_is_contended        __raw_spin_is_contended
 
-static __always_inline void __raw_spin_lock(struct raw_spinlock *lock)
+static __always_inline void __raw_spin_lock(struct arch_spinlock *lock)
 {
        PVOP_VCALL1(pv_lock_ops.spin_lock, lock);
 }
 
-static __always_inline void __raw_spin_lock_flags(struct raw_spinlock *lock,
+static __always_inline void __raw_spin_lock_flags(struct arch_spinlock *lock,
                                                  unsigned long flags)
 {
        PVOP_VCALL2(pv_lock_ops.spin_lock_flags, lock, flags);
 }
 
-static __always_inline int __raw_spin_trylock(struct raw_spinlock *lock)
+static __always_inline int __raw_spin_trylock(struct arch_spinlock *lock)
 {
        return PVOP_CALL1(int, pv_lock_ops.spin_trylock, lock);
 }
 
-static __always_inline void __raw_spin_unlock(struct raw_spinlock *lock)
+static __always_inline void __raw_spin_unlock(struct arch_spinlock *lock)
 {
        PVOP_VCALL1(pv_lock_ops.spin_unlock, lock);
 }
index 9357473c8da06b7d37f76fa22ea610942cbc90e7..b1e70d51e40c87ba57f8a6a6e6df197a27b56f58 100644 (file)
@@ -318,14 +318,14 @@ struct pv_mmu_ops {
                           phys_addr_t phys, pgprot_t flags);
 };
 
-struct raw_spinlock;
+struct arch_spinlock;
 struct pv_lock_ops {
-       int (*spin_is_locked)(struct raw_spinlock *lock);
-       int (*spin_is_contended)(struct raw_spinlock *lock);
-       void (*spin_lock)(struct raw_spinlock *lock);
-       void (*spin_lock_flags)(struct raw_spinlock *lock, unsigned long flags);
-       int (*spin_trylock)(struct raw_spinlock *lock);
-       void (*spin_unlock)(struct raw_spinlock *lock);
+       int (*spin_is_locked)(struct arch_spinlock *lock);
+       int (*spin_is_contended)(struct arch_spinlock *lock);
+       void (*spin_lock)(struct arch_spinlock *lock);
+       void (*spin_lock_flags)(struct arch_spinlock *lock, unsigned long flags);
+       int (*spin_trylock)(struct arch_spinlock *lock);
+       void (*spin_unlock)(struct arch_spinlock *lock);
 };
 
 /* This contains all the paravirt structures: we get a convenient
index 4e77853321dbcca412c52cf35c437bbff2442af8..204b524fcf572610c80fa4ae5715fc61a7b63edf 100644 (file)
@@ -58,7 +58,7 @@
 #if (NR_CPUS < 256)
 #define TICKET_SHIFT 8
 
-static __always_inline void __ticket_spin_lock(raw_spinlock_t *lock)
+static __always_inline void __ticket_spin_lock(arch_spinlock_t *lock)
 {
        short inc = 0x0100;
 
@@ -77,7 +77,7 @@ static __always_inline void __ticket_spin_lock(raw_spinlock_t *lock)
                : "memory", "cc");
 }
 
-static __always_inline int __ticket_spin_trylock(raw_spinlock_t *lock)
+static __always_inline int __ticket_spin_trylock(arch_spinlock_t *lock)
 {
        int tmp, new;
 
@@ -96,7 +96,7 @@ static __always_inline int __ticket_spin_trylock(raw_spinlock_t *lock)
        return tmp;
 }
 
-static __always_inline void __ticket_spin_unlock(raw_spinlock_t *lock)
+static __always_inline void __ticket_spin_unlock(arch_spinlock_t *lock)
 {
        asm volatile(UNLOCK_LOCK_PREFIX "incb %0"
                     : "+m" (lock->slock)
@@ -106,7 +106,7 @@ static __always_inline void __ticket_spin_unlock(raw_spinlock_t *lock)
 #else
 #define TICKET_SHIFT 16
 
-static __always_inline void __ticket_spin_lock(raw_spinlock_t *lock)
+static __always_inline void __ticket_spin_lock(arch_spinlock_t *lock)
 {
        int inc = 0x00010000;
        int tmp;
@@ -127,7 +127,7 @@ static __always_inline void __ticket_spin_lock(raw_spinlock_t *lock)
                     : "memory", "cc");
 }
 
-static __always_inline int __ticket_spin_trylock(raw_spinlock_t *lock)
+static __always_inline int __ticket_spin_trylock(arch_spinlock_t *lock)
 {
        int tmp;
        int new;
@@ -149,7 +149,7 @@ static __always_inline int __ticket_spin_trylock(raw_spinlock_t *lock)
        return tmp;
 }
 
-static __always_inline void __ticket_spin_unlock(raw_spinlock_t *lock)
+static __always_inline void __ticket_spin_unlock(arch_spinlock_t *lock)
 {
        asm volatile(UNLOCK_LOCK_PREFIX "incw %0"
                     : "+m" (lock->slock)
@@ -158,14 +158,14 @@ static __always_inline void __ticket_spin_unlock(raw_spinlock_t *lock)
 }
 #endif
 
-static inline int __ticket_spin_is_locked(raw_spinlock_t *lock)
+static inline int __ticket_spin_is_locked(arch_spinlock_t *lock)
 {
        int tmp = ACCESS_ONCE(lock->slock);
 
        return !!(((tmp >> TICKET_SHIFT) ^ tmp) & ((1 << TICKET_SHIFT) - 1));
 }
 
-static inline int __ticket_spin_is_contended(raw_spinlock_t *lock)
+static inline int __ticket_spin_is_contended(arch_spinlock_t *lock)
 {
        int tmp = ACCESS_ONCE(lock->slock);
 
@@ -174,33 +174,33 @@ static inline int __ticket_spin_is_contended(raw_spinlock_t *lock)
 
 #ifndef CONFIG_PARAVIRT_SPINLOCKS
 
-static inline int __raw_spin_is_locked(raw_spinlock_t *lock)
+static inline int __raw_spin_is_locked(arch_spinlock_t *lock)
 {
        return __ticket_spin_is_locked(lock);
 }
 
-static inline int __raw_spin_is_contended(raw_spinlock_t *lock)
+static inline int __raw_spin_is_contended(arch_spinlock_t *lock)
 {
        return __ticket_spin_is_contended(lock);
 }
 #define __raw_spin_is_contended        __raw_spin_is_contended
 
-static __always_inline void __raw_spin_lock(raw_spinlock_t *lock)
+static __always_inline void __raw_spin_lock(arch_spinlock_t *lock)
 {
        __ticket_spin_lock(lock);
 }
 
-static __always_inline int __raw_spin_trylock(raw_spinlock_t *lock)
+static __always_inline int __raw_spin_trylock(arch_spinlock_t *lock)
 {
        return __ticket_spin_trylock(lock);
 }
 
-static __always_inline void __raw_spin_unlock(raw_spinlock_t *lock)
+static __always_inline void __raw_spin_unlock(arch_spinlock_t *lock)
 {
        __ticket_spin_unlock(lock);
 }
 
-static __always_inline void __raw_spin_lock_flags(raw_spinlock_t *lock,
+static __always_inline void __raw_spin_lock_flags(arch_spinlock_t *lock,
                                                  unsigned long flags)
 {
        __raw_spin_lock(lock);
@@ -208,7 +208,7 @@ static __always_inline void __raw_spin_lock_flags(raw_spinlock_t *lock,
 
 #endif /* CONFIG_PARAVIRT_SPINLOCKS */
 
-static inline void __raw_spin_unlock_wait(raw_spinlock_t *lock)
+static inline void __raw_spin_unlock_wait(arch_spinlock_t *lock)
 {
        while (__raw_spin_is_locked(lock))
                cpu_relax();
index 845f81c87091b6d63075faefecc1d74a93c45d85..2ae7637ed5248b2644788464ef955c3be29ad4b4 100644 (file)
@@ -5,9 +5,9 @@
 # error "please don't include this file directly"
 #endif
 
-typedef struct raw_spinlock {
+typedef struct arch_spinlock {
        unsigned int slock;
-} raw_spinlock_t;
+} arch_spinlock_t;
 
 #define __RAW_SPIN_LOCK_UNLOCKED       { 0 }
 
index b8ce165dde5dc42f4a4b0e8c9a1cc53ade630bfa..0862d9d89c9239da2b79e329d339bcc6d437c412 100644 (file)
@@ -188,7 +188,7 @@ void dump_stack(void)
 }
 EXPORT_SYMBOL(dump_stack);
 
-static raw_spinlock_t die_lock = __RAW_SPIN_LOCK_UNLOCKED;
+static arch_spinlock_t die_lock = __RAW_SPIN_LOCK_UNLOCKED;
 static int die_owner = -1;
 static unsigned int die_nest_count;
 
index 3a7c5a44082eee1639fa695e70d8fa55bea4389c..a0f39e090684f656783d951b7e8b81e63a46b3ce 100644 (file)
@@ -8,7 +8,7 @@
 #include <asm/paravirt.h>
 
 static inline void
-default_spin_lock_flags(raw_spinlock_t *lock, unsigned long flags)
+default_spin_lock_flags(arch_spinlock_t *lock, unsigned long flags)
 {
        __raw_spin_lock(lock);
 }
index eed156851f5d6f85df71907cfeb5e73b62730d40..9f908b9d1abe8b3cdb444144fe024d071b3ce503 100644 (file)
@@ -33,7 +33,7 @@ static __cpuinitdata atomic_t stop_count;
  * we want to have the fastest, inlined, non-debug version
  * of a critical section, to be able to prove TSC time-warps:
  */
-static __cpuinitdata raw_spinlock_t sync_lock = __RAW_SPIN_LOCK_UNLOCKED;
+static __cpuinitdata arch_spinlock_t sync_lock = __RAW_SPIN_LOCK_UNLOCKED;
 
 static __cpuinitdata cycles_t last_tsc;
 static __cpuinitdata cycles_t max_warp;
index 36a5141108dfac6f6ad72a0591d40cded14d23a4..24ded31b5aeceeabfadc05fc5c5ff7aa95fa2b97 100644 (file)
@@ -120,14 +120,14 @@ struct xen_spinlock {
        unsigned short spinners;        /* count of waiting cpus */
 };
 
-static int xen_spin_is_locked(struct raw_spinlock *lock)
+static int xen_spin_is_locked(struct arch_spinlock *lock)
 {
        struct xen_spinlock *xl = (struct xen_spinlock *)lock;
 
        return xl->lock != 0;
 }
 
-static int xen_spin_is_contended(struct raw_spinlock *lock)
+static int xen_spin_is_contended(struct arch_spinlock *lock)
 {
        struct xen_spinlock *xl = (struct xen_spinlock *)lock;
 
@@ -136,7 +136,7 @@ static int xen_spin_is_contended(struct raw_spinlock *lock)
        return xl->spinners != 0;
 }
 
-static int xen_spin_trylock(struct raw_spinlock *lock)
+static int xen_spin_trylock(struct arch_spinlock *lock)
 {
        struct xen_spinlock *xl = (struct xen_spinlock *)lock;
        u8 old = 1;
@@ -181,7 +181,7 @@ static inline void unspinning_lock(struct xen_spinlock *xl, struct xen_spinlock
        __get_cpu_var(lock_spinners) = prev;
 }
 
-static noinline int xen_spin_lock_slow(struct raw_spinlock *lock, bool irq_enable)
+static noinline int xen_spin_lock_slow(struct arch_spinlock *lock, bool irq_enable)
 {
        struct xen_spinlock *xl = (struct xen_spinlock *)lock;
        struct xen_spinlock *prev;
@@ -254,7 +254,7 @@ out:
        return ret;
 }
 
-static inline void __xen_spin_lock(struct raw_spinlock *lock, bool irq_enable)
+static inline void __xen_spin_lock(struct arch_spinlock *lock, bool irq_enable)
 {
        struct xen_spinlock *xl = (struct xen_spinlock *)lock;
        unsigned timeout;
@@ -291,12 +291,12 @@ static inline void __xen_spin_lock(struct raw_spinlock *lock, bool irq_enable)
        spin_time_accum_total(start_spin);
 }
 
-static void xen_spin_lock(struct raw_spinlock *lock)
+static void xen_spin_lock(struct arch_spinlock *lock)
 {
        __xen_spin_lock(lock, false);
 }
 
-static void xen_spin_lock_flags(struct raw_spinlock *lock, unsigned long flags)
+static void xen_spin_lock_flags(struct arch_spinlock *lock, unsigned long flags)
 {
        __xen_spin_lock(lock, !raw_irqs_disabled_flags(flags));
 }
@@ -317,7 +317,7 @@ static noinline void xen_spin_unlock_slow(struct xen_spinlock *xl)
        }
 }
 
-static void xen_spin_unlock(struct raw_spinlock *lock)
+static void xen_spin_unlock(struct arch_spinlock *lock)
 {
        struct xen_spinlock *xl = (struct xen_spinlock *)lock;
 
index c8946465e63ac872113c74241e06028aba8086f3..dcf0afad4a7f0a5ced25d7907dfee7702e3c49ec 100644 (file)
 #  define ATOMIC_HASH_SIZE 4
 #  define ATOMIC_HASH(a) (&(__atomic_hash[ (((unsigned long) a)/L1_CACHE_BYTES) & (ATOMIC_HASH_SIZE-1) ]))
 
-extern raw_spinlock_t __atomic_hash[ATOMIC_HASH_SIZE] __lock_aligned;
+extern arch_spinlock_t __atomic_hash[ATOMIC_HASH_SIZE] __lock_aligned;
 
 /* Can't use raw_spin_lock_irq because of #include problems, so
  * this is the substitute */
 #define _atomic_spin_lock_irqsave(l,f) do {    \
-       raw_spinlock_t *s = ATOMIC_HASH(l);     \
+       arch_spinlock_t *s = ATOMIC_HASH(l);    \
        local_irq_save(f);                      \
        __raw_spin_lock(s);                     \
 } while(0)
 
 #define _atomic_spin_unlock_irqrestore(l,f) do {       \
-       raw_spinlock_t *s = ATOMIC_HASH(l);             \
+       arch_spinlock_t *s = ATOMIC_HASH(l);            \
        __raw_spin_unlock(s);                           \
        local_irq_restore(f);                           \
 } while(0)
index a9aaa709fb937dfa999e8dc3363b44b2d8af8dfa..5ef7a4c060b5979a285163340d2483774262b67d 100644 (file)
@@ -8,7 +8,7 @@
  *
  * on SMP builds:
  *
- *  asm/spinlock_types.h: contains the raw_spinlock_t/raw_rwlock_t and the
+ *  asm/spinlock_types.h: contains the arch_spinlock_t/raw_rwlock_t and the
  *                        initializers
  *
  *  linux/spinlock_types.h:
@@ -75,7 +75,7 @@
 #define __lockfunc __attribute__((section(".spinlock.text")))
 
 /*
- * Pull the raw_spinlock_t and raw_rwlock_t definitions:
+ * Pull the arch_spinlock_t and raw_rwlock_t definitions:
  */
 #include <linux/spinlock_types.h>
 
index f979d5d8a160e64f7a06767d308490fbc16bb895..d4af2d7a86ea00420cc3b5ee710a02603769d45d 100644 (file)
@@ -18,7 +18,7 @@
 #include <linux/lockdep.h>
 
 typedef struct {
-       raw_spinlock_t raw_lock;
+       arch_spinlock_t raw_lock;
 #ifdef CONFIG_GENERIC_LOCKBREAK
        unsigned int break_lock;
 #endif
index 04135b0e198e31bfca6c3d1bea508d9af638f4b4..34d36691c4ec28c863b5c9d6818071283bf30856 100644 (file)
 
 typedef struct {
        volatile unsigned int slock;
-} raw_spinlock_t;
+} arch_spinlock_t;
 
 #define __RAW_SPIN_LOCK_UNLOCKED { 1 }
 
 #else
 
-typedef struct { } raw_spinlock_t;
+typedef struct { } arch_spinlock_t;
 
 #define __RAW_SPIN_LOCK_UNLOCKED { }
 
index d4841ed8215b755ae2a291245687187479d4d27c..8ee2ac1bf6361937b164b5b050714b345fa69a3e 100644 (file)
 #ifdef CONFIG_DEBUG_SPINLOCK
 #define __raw_spin_is_locked(x)                ((x)->slock == 0)
 
-static inline void __raw_spin_lock(raw_spinlock_t *lock)
+static inline void __raw_spin_lock(arch_spinlock_t *lock)
 {
        lock->slock = 0;
 }
 
 static inline void
-__raw_spin_lock_flags(raw_spinlock_t *lock, unsigned long flags)
+__raw_spin_lock_flags(arch_spinlock_t *lock, unsigned long flags)
 {
        local_irq_save(flags);
        lock->slock = 0;
 }
 
-static inline int __raw_spin_trylock(raw_spinlock_t *lock)
+static inline int __raw_spin_trylock(arch_spinlock_t *lock)
 {
        char oldval = lock->slock;
 
@@ -41,7 +41,7 @@ static inline int __raw_spin_trylock(raw_spinlock_t *lock)
        return oldval > 0;
 }
 
-static inline void __raw_spin_unlock(raw_spinlock_t *lock)
+static inline void __raw_spin_unlock(arch_spinlock_t *lock)
 {
        lock->slock = 1;
 }
index 429540c70d3f497ae7fc58998b0f248817a2cbf9..7cc50c62af598fd420577a344b6fc1291e648835 100644 (file)
@@ -73,7 +73,7 @@ module_param(lock_stat, int, 0644);
  * to use a raw spinlock - we really dont want the spinlock
  * code to recurse back into the lockdep code...
  */
-static raw_spinlock_t lockdep_lock = (raw_spinlock_t)__RAW_SPIN_LOCK_UNLOCKED;
+static arch_spinlock_t lockdep_lock = (arch_spinlock_t)__RAW_SPIN_LOCK_UNLOCKED;
 
 static int graph_lock(void)
 {
index a1ca4956ab5ec3673b04ddd839dd9edec26d1bf4..5ac8ee0a9e351455134fef43ab2b7e7db9718eb5 100644 (file)
@@ -423,7 +423,7 @@ struct ring_buffer_per_cpu {
        int                             cpu;
        struct ring_buffer              *buffer;
        spinlock_t                      reader_lock;    /* serialize readers */
-       raw_spinlock_t                  lock;
+       arch_spinlock_t                 lock;
        struct lock_class_key           lock_key;
        struct list_head                *pages;
        struct buffer_page              *head_page;     /* read from head */
@@ -998,7 +998,7 @@ rb_allocate_cpu_buffer(struct ring_buffer *buffer, int cpu)
        cpu_buffer->buffer = buffer;
        spin_lock_init(&cpu_buffer->reader_lock);
        lockdep_set_class(&cpu_buffer->reader_lock, buffer->reader_lock_key);
-       cpu_buffer->lock = (raw_spinlock_t)__RAW_SPIN_LOCK_UNLOCKED;
+       cpu_buffer->lock = (arch_spinlock_t)__RAW_SPIN_LOCK_UNLOCKED;
 
        bpage = kzalloc_node(ALIGN(sizeof(*bpage), cache_line_size()),
                            GFP_KERNEL, cpu_to_node(cpu));
index c82dfd92fdfd8d663b6146ea6a584d42ea715508..7d56cecc2c6ea8bba5bc87d5d1e86d4e2c393c6a 100644 (file)
@@ -493,15 +493,15 @@ static ssize_t trace_seq_to_buffer(struct trace_seq *s, void *buf, size_t cnt)
  * protected by per_cpu spinlocks. But the action of the swap
  * needs its own lock.
  *
- * This is defined as a raw_spinlock_t in order to help
+ * This is defined as a arch_spinlock_t in order to help
  * with performance when lockdep debugging is enabled.
  *
  * It is also used in other places outside the update_max_tr
  * so it needs to be defined outside of the
  * CONFIG_TRACER_MAX_TRACE.
  */
-static raw_spinlock_t ftrace_max_lock =
-       (raw_spinlock_t)__RAW_SPIN_LOCK_UNLOCKED;
+static arch_spinlock_t ftrace_max_lock =
+       (arch_spinlock_t)__RAW_SPIN_LOCK_UNLOCKED;
 
 #ifdef CONFIG_TRACER_MAX_TRACE
 unsigned long __read_mostly    tracing_max_latency;
@@ -802,7 +802,7 @@ static unsigned map_pid_to_cmdline[PID_MAX_DEFAULT+1];
 static unsigned map_cmdline_to_pid[SAVED_CMDLINES];
 static char saved_cmdlines[SAVED_CMDLINES][TASK_COMM_LEN];
 static int cmdline_idx;
-static raw_spinlock_t trace_cmdline_lock = __RAW_SPIN_LOCK_UNLOCKED;
+static arch_spinlock_t trace_cmdline_lock = __RAW_SPIN_LOCK_UNLOCKED;
 
 /* temporary disable recording */
 static atomic_t trace_record_cmdline_disabled __read_mostly;
@@ -1251,8 +1251,8 @@ ftrace_special(unsigned long arg1, unsigned long arg2, unsigned long arg3)
  */
 int trace_vbprintk(unsigned long ip, const char *fmt, va_list args)
 {
-       static raw_spinlock_t trace_buf_lock =
-               (raw_spinlock_t)__RAW_SPIN_LOCK_UNLOCKED;
+       static arch_spinlock_t trace_buf_lock =
+               (arch_spinlock_t)__RAW_SPIN_LOCK_UNLOCKED;
        static u32 trace_buf[TRACE_BUF_SIZE];
 
        struct ftrace_event_call *call = &event_bprint;
@@ -1334,7 +1334,7 @@ int trace_array_printk(struct trace_array *tr,
 int trace_array_vprintk(struct trace_array *tr,
                        unsigned long ip, const char *fmt, va_list args)
 {
-       static raw_spinlock_t trace_buf_lock = __RAW_SPIN_LOCK_UNLOCKED;
+       static arch_spinlock_t trace_buf_lock = __RAW_SPIN_LOCK_UNLOCKED;
        static char trace_buf[TRACE_BUF_SIZE];
 
        struct ftrace_event_call *call = &event_print;
@@ -4307,8 +4307,8 @@ trace_printk_seq(struct trace_seq *s)
 
 static void __ftrace_dump(bool disable_tracing)
 {
-       static raw_spinlock_t ftrace_dump_lock =
-               (raw_spinlock_t)__RAW_SPIN_LOCK_UNLOCKED;
+       static arch_spinlock_t ftrace_dump_lock =
+               (arch_spinlock_t)__RAW_SPIN_LOCK_UNLOCKED;
        /* use static because iter can be a bit big for the stack */
        static struct trace_iterator iter;
        unsigned int old_userobj;
index 878c03f386baef4b998cd9e0f6a7db2d02e6d692..206ec3d4b3c25650f701c6b6351896e9d84b1dc8 100644 (file)
@@ -71,10 +71,10 @@ u64 notrace trace_clock(void)
 /* keep prev_time and lock in the same cacheline. */
 static struct {
        u64 prev_time;
-       raw_spinlock_t lock;
+       arch_spinlock_t lock;
 } trace_clock_struct ____cacheline_aligned_in_smp =
        {
-               .lock = (raw_spinlock_t)__RAW_SPIN_LOCK_UNLOCKED,
+               .lock = (arch_spinlock_t)__RAW_SPIN_LOCK_UNLOCKED,
        };
 
 u64 notrace trace_clock_global(void)
index 26185d727676b0620496a9eb5a4832e9c3fdbd01..4cf7e83ec2357ff8f5f954a14ab29af929939ea3 100644 (file)
@@ -28,8 +28,8 @@ static int                    wakeup_current_cpu;
 static unsigned                        wakeup_prio = -1;
 static int                     wakeup_rt;
 
-static raw_spinlock_t wakeup_lock =
-       (raw_spinlock_t)__RAW_SPIN_LOCK_UNLOCKED;
+static arch_spinlock_t wakeup_lock =
+       (arch_spinlock_t)__RAW_SPIN_LOCK_UNLOCKED;
 
 static void __wakeup_reset(struct trace_array *tr);
 
index 8504ac71e4e8f9831edd7cc0d3186f98fdcad6c1..9a82d568fdec604bb72e03c36f7bebea606c82bf 100644 (file)
@@ -27,8 +27,8 @@ static struct stack_trace max_stack_trace = {
 };
 
 static unsigned long max_stack_size;
-static raw_spinlock_t max_stack_lock =
-       (raw_spinlock_t)__RAW_SPIN_LOCK_UNLOCKED;
+static arch_spinlock_t max_stack_lock =
+       (arch_spinlock_t)__RAW_SPIN_LOCK_UNLOCKED;
 
 static int stack_trace_disabled __read_mostly;
 static DEFINE_PER_CPU(int, trace_active);
index 9c4b0256490bd240b37f8811915e873463c88537..2acd501b382602baf9aa2c8879f1de4777d24986 100644 (file)
@@ -23,7 +23,7 @@ void __spin_lock_init(spinlock_t *lock, const char *name,
        debug_check_no_locks_freed((void *)lock, sizeof(*lock));
        lockdep_init_map(&lock->dep_map, name, key, 0);
 #endif
-       lock->raw_lock = (raw_spinlock_t)__RAW_SPIN_LOCK_UNLOCKED;
+       lock->raw_lock = (arch_spinlock_t)__RAW_SPIN_LOCK_UNLOCKED;
        lock->magic = SPINLOCK_MAGIC;
        lock->owner = SPINLOCK_OWNER_INIT;
        lock->owner_cpu = -1;