bbs.cooldavid.org Git - net-next-2.6.git/commitdiff
locking: Convert raw_rwlock functions to arch_rwlock
author Thomas Gleixner <tglx@linutronix.de>
Thu, 3 Dec 2009 19:08:46 +0000 (20:08 +0100)
committer Thomas Gleixner <tglx@linutronix.de>
Mon, 14 Dec 2009 22:55:32 +0000 (23:55 +0100)
Name space cleanup for rwlock functions. No functional change.

Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Acked-by: Peter Zijlstra <peterz@infradead.org>
Acked-by: David S. Miller <davem@davemloft.net>
Acked-by: Ingo Molnar <mingo@elte.hu>
Cc: linux-arch@vger.kernel.org
18 files changed:
arch/alpha/include/asm/spinlock.h
arch/arm/include/asm/spinlock.h
arch/blackfin/include/asm/spinlock.h
arch/cris/include/arch-v32/arch/spinlock.h
arch/ia64/include/asm/spinlock.h
arch/m32r/include/asm/spinlock.h
arch/mips/include/asm/spinlock.h
arch/parisc/include/asm/spinlock.h
arch/powerpc/include/asm/spinlock.h
arch/s390/include/asm/spinlock.h
arch/s390/lib/spinlock.c
arch/sh/include/asm/spinlock.h
arch/sparc/include/asm/spinlock_32.h
arch/sparc/include/asm/spinlock_64.h
arch/x86/include/asm/spinlock.h
include/linux/rwlock.h
include/linux/spinlock_up.h
lib/spinlock_debug.c

index e8b2970f037ba9fd5a19db50b2196c679cb3cfe6..d0faca1e992df0ffc9ca0ed12408e132da9b798a 100644 (file)
@@ -50,17 +50,17 @@ static inline int arch_spin_trylock(arch_spinlock_t *lock)
 
 /***********************************************************/
 
-static inline int __raw_read_can_lock(arch_rwlock_t *lock)
+static inline int arch_read_can_lock(arch_rwlock_t *lock)
 {
        return (lock->lock & 1) == 0;
 }
 
-static inline int __raw_write_can_lock(arch_rwlock_t *lock)
+static inline int arch_write_can_lock(arch_rwlock_t *lock)
 {
        return lock->lock == 0;
 }
 
-static inline void __raw_read_lock(arch_rwlock_t *lock)
+static inline void arch_read_lock(arch_rwlock_t *lock)
 {
        long regx;
 
@@ -80,7 +80,7 @@ static inline void __raw_read_lock(arch_rwlock_t *lock)
        : "m" (*lock) : "memory");
 }
 
-static inline void __raw_write_lock(arch_rwlock_t *lock)
+static inline void arch_write_lock(arch_rwlock_t *lock)
 {
        long regx;
 
@@ -100,7 +100,7 @@ static inline void __raw_write_lock(arch_rwlock_t *lock)
        : "m" (*lock) : "memory");
 }
 
-static inline int __raw_read_trylock(arch_rwlock_t * lock)
+static inline int arch_read_trylock(arch_rwlock_t * lock)
 {
        long regx;
        int success;
@@ -122,7 +122,7 @@ static inline int __raw_read_trylock(arch_rwlock_t * lock)
        return success;
 }
 
-static inline int __raw_write_trylock(arch_rwlock_t * lock)
+static inline int arch_write_trylock(arch_rwlock_t * lock)
 {
        long regx;
        int success;
@@ -144,7 +144,7 @@ static inline int __raw_write_trylock(arch_rwlock_t * lock)
        return success;
 }
 
-static inline void __raw_read_unlock(arch_rwlock_t * lock)
+static inline void arch_read_unlock(arch_rwlock_t * lock)
 {
        long regx;
        __asm__ __volatile__(
@@ -160,14 +160,14 @@ static inline void __raw_read_unlock(arch_rwlock_t * lock)
        : "m" (*lock) : "memory");
 }
 
-static inline void __raw_write_unlock(arch_rwlock_t * lock)
+static inline void arch_write_unlock(arch_rwlock_t * lock)
 {
        mb();
        lock->lock = 0;
 }
 
-#define __raw_read_lock_flags(lock, flags) __raw_read_lock(lock)
-#define __raw_write_lock_flags(lock, flags) __raw_write_lock(lock)
+#define arch_read_lock_flags(lock, flags) arch_read_lock(lock)
+#define arch_write_lock_flags(lock, flags) arch_write_lock(lock)
 
 #define arch_spin_relax(lock)  cpu_relax()
 #define arch_read_relax(lock)  cpu_relax()
index a8671d8bc7d408247209ee9f9a4b36b73ad2d870..c91c64cab922c908186511a530f72673dd82d3b0 100644 (file)
@@ -86,7 +86,7 @@ static inline void arch_spin_unlock(arch_spinlock_t *lock)
  * just write zero since the lock is exclusively held.
  */
 
-static inline void __raw_write_lock(arch_rwlock_t *rw)
+static inline void arch_write_lock(arch_rwlock_t *rw)
 {
        unsigned long tmp;
 
@@ -106,7 +106,7 @@ static inline void __raw_write_lock(arch_rwlock_t *rw)
        smp_mb();
 }
 
-static inline int __raw_write_trylock(arch_rwlock_t *rw)
+static inline int arch_write_trylock(arch_rwlock_t *rw)
 {
        unsigned long tmp;
 
@@ -126,7 +126,7 @@ static inline int __raw_write_trylock(arch_rwlock_t *rw)
        }
 }
 
-static inline void __raw_write_unlock(arch_rwlock_t *rw)
+static inline void arch_write_unlock(arch_rwlock_t *rw)
 {
        smp_mb();
 
@@ -142,7 +142,7 @@ static inline void __raw_write_unlock(arch_rwlock_t *rw)
 }
 
 /* write_can_lock - would write_trylock() succeed? */
-#define __raw_write_can_lock(x)                ((x)->lock == 0)
+#define arch_write_can_lock(x)         ((x)->lock == 0)
 
 /*
  * Read locks are a bit more hairy:
@@ -156,7 +156,7 @@ static inline void __raw_write_unlock(arch_rwlock_t *rw)
  * currently active.  However, we know we won't have any write
  * locks.
  */
-static inline void __raw_read_lock(arch_rwlock_t *rw)
+static inline void arch_read_lock(arch_rwlock_t *rw)
 {
        unsigned long tmp, tmp2;
 
@@ -176,7 +176,7 @@ static inline void __raw_read_lock(arch_rwlock_t *rw)
        smp_mb();
 }
 
-static inline void __raw_read_unlock(arch_rwlock_t *rw)
+static inline void arch_read_unlock(arch_rwlock_t *rw)
 {
        unsigned long tmp, tmp2;
 
@@ -198,7 +198,7 @@ static inline void __raw_read_unlock(arch_rwlock_t *rw)
        : "cc");
 }
 
-static inline int __raw_read_trylock(arch_rwlock_t *rw)
+static inline int arch_read_trylock(arch_rwlock_t *rw)
 {
        unsigned long tmp, tmp2 = 1;
 
@@ -215,10 +215,10 @@ static inline int __raw_read_trylock(arch_rwlock_t *rw)
 }
 
 /* read_can_lock - would read_trylock() succeed? */
-#define __raw_read_can_lock(x)         ((x)->lock < 0x80000000)
+#define arch_read_can_lock(x)          ((x)->lock < 0x80000000)
 
-#define __raw_read_lock_flags(lock, flags) __raw_read_lock(lock)
-#define __raw_write_lock_flags(lock, flags) __raw_write_lock(lock)
+#define arch_read_lock_flags(lock, flags) arch_read_lock(lock)
+#define arch_write_lock_flags(lock, flags) arch_write_lock(lock)
 
 #define arch_spin_relax(lock)  cpu_relax()
 #define arch_read_relax(lock)  cpu_relax()
index 7e1c56b0a571c7e4e03cb105dc9dfc7e96b10b2f..1942ccfedbe01fc0b9dfe5681aec33e5e2258a8b 100644 (file)
@@ -17,12 +17,12 @@ asmlinkage int __raw_spin_is_locked_asm(volatile int *ptr);
 asmlinkage void __raw_spin_lock_asm(volatile int *ptr);
 asmlinkage int __raw_spin_trylock_asm(volatile int *ptr);
 asmlinkage void __raw_spin_unlock_asm(volatile int *ptr);
-asmlinkage void __raw_read_lock_asm(volatile int *ptr);
-asmlinkage int __raw_read_trylock_asm(volatile int *ptr);
-asmlinkage void __raw_read_unlock_asm(volatile int *ptr);
-asmlinkage void __raw_write_lock_asm(volatile int *ptr);
-asmlinkage int __raw_write_trylock_asm(volatile int *ptr);
-asmlinkage void __raw_write_unlock_asm(volatile int *ptr);
+asmlinkage void arch_read_lock_asm(volatile int *ptr);
+asmlinkage int arch_read_trylock_asm(volatile int *ptr);
+asmlinkage void arch_read_unlock_asm(volatile int *ptr);
+asmlinkage void arch_write_lock_asm(volatile int *ptr);
+asmlinkage int arch_write_trylock_asm(volatile int *ptr);
+asmlinkage void arch_write_unlock_asm(volatile int *ptr);
 
 static inline int arch_spin_is_locked(arch_spinlock_t *lock)
 {
@@ -52,44 +52,44 @@ static inline void arch_spin_unlock_wait(arch_spinlock_t *lock)
                cpu_relax();
 }
 
-static inline int __raw_read_can_lock(arch_rwlock_t *rw)
+static inline int arch_read_can_lock(arch_rwlock_t *rw)
 {
        return __raw_uncached_fetch_asm(&rw->lock) > 0;
 }
 
-static inline int __raw_write_can_lock(arch_rwlock_t *rw)
+static inline int arch_write_can_lock(arch_rwlock_t *rw)
 {
        return __raw_uncached_fetch_asm(&rw->lock) == RW_LOCK_BIAS;
 }
 
-static inline void __raw_read_lock(arch_rwlock_t *rw)
+static inline void arch_read_lock(arch_rwlock_t *rw)
 {
-       __raw_read_lock_asm(&rw->lock);
+       arch_read_lock_asm(&rw->lock);
 }
 
-static inline int __raw_read_trylock(arch_rwlock_t *rw)
+static inline int arch_read_trylock(arch_rwlock_t *rw)
 {
-       return __raw_read_trylock_asm(&rw->lock);
+       return arch_read_trylock_asm(&rw->lock);
 }
 
-static inline void __raw_read_unlock(arch_rwlock_t *rw)
+static inline void arch_read_unlock(arch_rwlock_t *rw)
 {
-       __raw_read_unlock_asm(&rw->lock);
+       arch_read_unlock_asm(&rw->lock);
 }
 
-static inline void __raw_write_lock(arch_rwlock_t *rw)
+static inline void arch_write_lock(arch_rwlock_t *rw)
 {
-       __raw_write_lock_asm(&rw->lock);
+       arch_write_lock_asm(&rw->lock);
 }
 
-static inline int __raw_write_trylock(arch_rwlock_t *rw)
+static inline int arch_write_trylock(arch_rwlock_t *rw)
 {
-       return __raw_write_trylock_asm(&rw->lock);
+       return arch_write_trylock_asm(&rw->lock);
 }
 
-static inline void __raw_write_unlock(arch_rwlock_t *rw)
+static inline void arch_write_unlock(arch_rwlock_t *rw)
 {
-       __raw_write_unlock_asm(&rw->lock);
+       arch_write_unlock_asm(&rw->lock);
 }
 
 #define arch_spin_relax(lock)          cpu_relax()
index 1d7d3a8046cb2bae38d294672c4685e86a310e12..f171a6600fbcac6424376d65aeb35424d57e2b6e 100644 (file)
@@ -56,17 +56,17 @@ arch_spin_lock_flags(arch_spinlock_t *lock, unsigned long flags)
  *
  */
 
-static inline int __raw_read_can_lock(arch_rwlock_t *x)
+static inline int arch_read_can_lock(arch_rwlock_t *x)
 {
        return (int)(x)->lock > 0;
 }
 
-static inline int __raw_write_can_lock(arch_rwlock_t *x)
+static inline int arch_write_can_lock(arch_rwlock_t *x)
 {
        return (x)->lock == RW_LOCK_BIAS;
 }
 
-static  inline void __raw_read_lock(arch_rwlock_t *rw)
+static  inline void arch_read_lock(arch_rwlock_t *rw)
 {
        arch_spin_lock(&rw->slock);
        while (rw->lock == 0);
@@ -74,7 +74,7 @@ static  inline void __raw_read_lock(arch_rwlock_t *rw)
        arch_spin_unlock(&rw->slock);
 }
 
-static  inline void __raw_write_lock(arch_rwlock_t *rw)
+static  inline void arch_write_lock(arch_rwlock_t *rw)
 {
        arch_spin_lock(&rw->slock);
        while (rw->lock != RW_LOCK_BIAS);
@@ -82,14 +82,14 @@ static  inline void __raw_write_lock(arch_rwlock_t *rw)
        arch_spin_unlock(&rw->slock);
 }
 
-static  inline void __raw_read_unlock(arch_rwlock_t *rw)
+static  inline void arch_read_unlock(arch_rwlock_t *rw)
 {
        arch_spin_lock(&rw->slock);
        rw->lock++;
        arch_spin_unlock(&rw->slock);
 }
 
-static  inline void __raw_write_unlock(arch_rwlock_t *rw)
+static  inline void arch_write_unlock(arch_rwlock_t *rw)
 {
        arch_spin_lock(&rw->slock);
        while (rw->lock != RW_LOCK_BIAS);
@@ -97,7 +97,7 @@ static  inline void __raw_write_unlock(arch_rwlock_t *rw)
        arch_spin_unlock(&rw->slock);
 }
 
-static  inline int __raw_read_trylock(arch_rwlock_t *rw)
+static  inline int arch_read_trylock(arch_rwlock_t *rw)
 {
        int ret = 0;
        arch_spin_lock(&rw->slock);
@@ -109,7 +109,7 @@ static  inline int __raw_read_trylock(arch_rwlock_t *rw)
        return ret;
 }
 
-static  inline int __raw_write_trylock(arch_rwlock_t *rw)
+static  inline int arch_write_trylock(arch_rwlock_t *rw)
 {
        int ret = 0;
        arch_spin_lock(&rw->slock);
index 6715b6a8ebc3d43ffae3127f49b6a6682ddde509..1a91c9121d17b5f032b192905370a69d7d24bee8 100644 (file)
@@ -140,13 +140,13 @@ static inline void arch_spin_unlock_wait(arch_spinlock_t *lock)
        __ticket_spin_unlock_wait(lock);
 }
 
-#define __raw_read_can_lock(rw)                (*(volatile int *)(rw) >= 0)
-#define __raw_write_can_lock(rw)       (*(volatile int *)(rw) == 0)
+#define arch_read_can_lock(rw)         (*(volatile int *)(rw) >= 0)
+#define arch_write_can_lock(rw)        (*(volatile int *)(rw) == 0)
 
 #ifdef ASM_SUPPORTED
 
 static __always_inline void
-__raw_read_lock_flags(arch_rwlock_t *lock, unsigned long flags)
+arch_read_lock_flags(arch_rwlock_t *lock, unsigned long flags)
 {
        __asm__ __volatile__ (
                "tbit.nz p6, p0 = %1,%2\n"
@@ -169,13 +169,13 @@ __raw_read_lock_flags(arch_rwlock_t *lock, unsigned long flags)
                : "p6", "p7", "r2", "memory");
 }
 
-#define __raw_read_lock(lock) __raw_read_lock_flags(lock, 0)
+#define arch_read_lock(lock) arch_read_lock_flags(lock, 0)
 
 #else /* !ASM_SUPPORTED */
 
-#define __raw_read_lock_flags(rw, flags) __raw_read_lock(rw)
+#define arch_read_lock_flags(rw, flags) arch_read_lock(rw)
 
-#define __raw_read_lock(rw)                                                            \
+#define arch_read_lock(rw)                                                             \
 do {                                                                                   \
        arch_rwlock_t *__read_lock_ptr = (rw);                                          \
                                                                                        \
@@ -188,7 +188,7 @@ do {                                                                                        \
 
 #endif /* !ASM_SUPPORTED */
 
-#define __raw_read_unlock(rw)                                  \
+#define arch_read_unlock(rw)                                   \
 do {                                                           \
        arch_rwlock_t *__read_lock_ptr = (rw);                  \
        ia64_fetchadd(-1, (int *) __read_lock_ptr, rel);        \
@@ -197,7 +197,7 @@ do {                                                                \
 #ifdef ASM_SUPPORTED
 
 static __always_inline void
-__raw_write_lock_flags(arch_rwlock_t *lock, unsigned long flags)
+arch_write_lock_flags(arch_rwlock_t *lock, unsigned long flags)
 {
        __asm__ __volatile__ (
                "tbit.nz p6, p0 = %1, %2\n"
@@ -221,9 +221,9 @@ __raw_write_lock_flags(arch_rwlock_t *lock, unsigned long flags)
                : "ar.ccv", "p6", "p7", "r2", "r29", "memory");
 }
 
-#define __raw_write_lock(rw) __raw_write_lock_flags(rw, 0)
+#define arch_write_lock(rw) arch_write_lock_flags(rw, 0)
 
-#define __raw_write_trylock(rw)                                                        \
+#define arch_write_trylock(rw)                                                 \
 ({                                                                             \
        register long result;                                                   \
                                                                                \
@@ -235,7 +235,7 @@ __raw_write_lock_flags(arch_rwlock_t *lock, unsigned long flags)
        (result == 0);                                                          \
 })
 
-static inline void __raw_write_unlock(arch_rwlock_t *x)
+static inline void arch_write_unlock(arch_rwlock_t *x)
 {
        u8 *y = (u8 *)x;
        barrier();
@@ -244,9 +244,9 @@ static inline void __raw_write_unlock(arch_rwlock_t *x)
 
 #else /* !ASM_SUPPORTED */
 
-#define __raw_write_lock_flags(l, flags) __raw_write_lock(l)
+#define arch_write_lock_flags(l, flags) arch_write_lock(l)
 
-#define __raw_write_lock(l)                                                            \
+#define arch_write_lock(l)                                                             \
 ({                                                                                     \
        __u64 ia64_val, ia64_set_val = ia64_dep_mi(-1, 0, 31, 1);                       \
        __u32 *ia64_write_lock_ptr = (__u32 *) (l);                                     \
@@ -257,7 +257,7 @@ static inline void __raw_write_unlock(arch_rwlock_t *x)
        } while (ia64_val);                                                             \
 })
 
-#define __raw_write_trylock(rw)                                                \
+#define arch_write_trylock(rw)                                         \
 ({                                                                     \
        __u64 ia64_val;                                                 \
        __u64 ia64_set_val = ia64_dep_mi(-1, 0, 31,1);                  \
@@ -265,7 +265,7 @@ static inline void __raw_write_unlock(arch_rwlock_t *x)
        (ia64_val == 0);                                                \
 })
 
-static inline void __raw_write_unlock(arch_rwlock_t *x)
+static inline void arch_write_unlock(arch_rwlock_t *x)
 {
        barrier();
        x->write_lock = 0;
@@ -273,7 +273,7 @@ static inline void __raw_write_unlock(arch_rwlock_t *x)
 
 #endif /* !ASM_SUPPORTED */
 
-static inline int __raw_read_trylock(arch_rwlock_t *x)
+static inline int arch_read_trylock(arch_rwlock_t *x)
 {
        union {
                arch_rwlock_t lock;
index 1c76af8c8e1b5e19249a63d7806261123c30e10b..179a06489b1086036cae78a0fadd59801f2da8f1 100644 (file)
@@ -140,15 +140,15 @@ static inline void arch_spin_unlock(arch_spinlock_t *lock)
  * read_can_lock - would read_trylock() succeed?
  * @lock: the rwlock in question.
  */
-#define __raw_read_can_lock(x) ((int)(x)->lock > 0)
+#define arch_read_can_lock(x) ((int)(x)->lock > 0)
 
 /**
  * write_can_lock - would write_trylock() succeed?
  * @lock: the rwlock in question.
  */
-#define __raw_write_can_lock(x) ((x)->lock == RW_LOCK_BIAS)
+#define arch_write_can_lock(x) ((x)->lock == RW_LOCK_BIAS)
 
-static inline void __raw_read_lock(arch_rwlock_t *rw)
+static inline void arch_read_lock(arch_rwlock_t *rw)
 {
        unsigned long tmp0, tmp1;
 
@@ -199,7 +199,7 @@ static inline void __raw_read_lock(arch_rwlock_t *rw)
        );
 }
 
-static inline void __raw_write_lock(arch_rwlock_t *rw)
+static inline void arch_write_lock(arch_rwlock_t *rw)
 {
        unsigned long tmp0, tmp1, tmp2;
 
@@ -252,7 +252,7 @@ static inline void __raw_write_lock(arch_rwlock_t *rw)
        );
 }
 
-static inline void __raw_read_unlock(arch_rwlock_t *rw)
+static inline void arch_read_unlock(arch_rwlock_t *rw)
 {
        unsigned long tmp0, tmp1;
 
@@ -274,7 +274,7 @@ static inline void __raw_read_unlock(arch_rwlock_t *rw)
        );
 }
 
-static inline void __raw_write_unlock(arch_rwlock_t *rw)
+static inline void arch_write_unlock(arch_rwlock_t *rw)
 {
        unsigned long tmp0, tmp1, tmp2;
 
@@ -298,7 +298,7 @@ static inline void __raw_write_unlock(arch_rwlock_t *rw)
        );
 }
 
-static inline int __raw_read_trylock(arch_rwlock_t *lock)
+static inline int arch_read_trylock(arch_rwlock_t *lock)
 {
        atomic_t *count = (atomic_t*)lock;
        if (atomic_dec_return(count) >= 0)
@@ -307,7 +307,7 @@ static inline int __raw_read_trylock(arch_rwlock_t *lock)
        return 0;
 }
 
-static inline int __raw_write_trylock(arch_rwlock_t *lock)
+static inline int arch_write_trylock(arch_rwlock_t *lock)
 {
        atomic_t *count = (atomic_t *)lock;
        if (atomic_sub_and_test(RW_LOCK_BIAS, count))
@@ -316,8 +316,8 @@ static inline int __raw_write_trylock(arch_rwlock_t *lock)
        return 0;
 }
 
-#define __raw_read_lock_flags(lock, flags) __raw_read_lock(lock)
-#define __raw_write_lock_flags(lock, flags) __raw_write_lock(lock)
+#define arch_read_lock_flags(lock, flags) arch_read_lock(lock)
+#define arch_write_lock_flags(lock, flags) arch_write_lock(lock)
 
 #define arch_spin_relax(lock)  cpu_relax()
 #define arch_read_relax(lock)  cpu_relax()
index 7bf27c8a336462914301050e34c223ba4e862f86..21ef9efbde43f9e61cf726961e81e91c7252d6b5 100644 (file)
@@ -248,21 +248,21 @@ static inline unsigned int arch_spin_trylock(arch_spinlock_t *lock)
  * read_can_lock - would read_trylock() succeed?
  * @lock: the rwlock in question.
  */
-#define __raw_read_can_lock(rw)        ((rw)->lock >= 0)
+#define arch_read_can_lock(rw) ((rw)->lock >= 0)
 
 /*
  * write_can_lock - would write_trylock() succeed?
  * @lock: the rwlock in question.
  */
-#define __raw_write_can_lock(rw)       (!(rw)->lock)
+#define arch_write_can_lock(rw)        (!(rw)->lock)
 
-static inline void __raw_read_lock(arch_rwlock_t *rw)
+static inline void arch_read_lock(arch_rwlock_t *rw)
 {
        unsigned int tmp;
 
        if (R10000_LLSC_WAR) {
                __asm__ __volatile__(
-               "       .set    noreorder       # __raw_read_lock       \n"
+               "       .set    noreorder       # arch_read_lock        \n"
                "1:     ll      %1, %2                                  \n"
                "       bltz    %1, 1b                                  \n"
                "        addu   %1, 1                                   \n"
@@ -275,7 +275,7 @@ static inline void __raw_read_lock(arch_rwlock_t *rw)
                : "memory");
        } else {
                __asm__ __volatile__(
-               "       .set    noreorder       # __raw_read_lock       \n"
+               "       .set    noreorder       # arch_read_lock        \n"
                "1:     ll      %1, %2                                  \n"
                "       bltz    %1, 2f                                  \n"
                "        addu   %1, 1                                   \n"
@@ -301,7 +301,7 @@ static inline void __raw_read_lock(arch_rwlock_t *rw)
 /* Note the use of sub, not subu which will make the kernel die with an
    overflow exception if we ever try to unlock an rwlock that is already
    unlocked or is being held by a writer.  */
-static inline void __raw_read_unlock(arch_rwlock_t *rw)
+static inline void arch_read_unlock(arch_rwlock_t *rw)
 {
        unsigned int tmp;
 
@@ -309,7 +309,7 @@ static inline void __raw_read_unlock(arch_rwlock_t *rw)
 
        if (R10000_LLSC_WAR) {
                __asm__ __volatile__(
-               "1:     ll      %1, %2          # __raw_read_unlock     \n"
+               "1:     ll      %1, %2          # arch_read_unlock      \n"
                "       sub     %1, 1                                   \n"
                "       sc      %1, %0                                  \n"
                "       beqzl   %1, 1b                                  \n"
@@ -318,7 +318,7 @@ static inline void __raw_read_unlock(arch_rwlock_t *rw)
                : "memory");
        } else {
                __asm__ __volatile__(
-               "       .set    noreorder       # __raw_read_unlock     \n"
+               "       .set    noreorder       # arch_read_unlock      \n"
                "1:     ll      %1, %2                                  \n"
                "       sub     %1, 1                                   \n"
                "       sc      %1, %0                                  \n"
@@ -335,13 +335,13 @@ static inline void __raw_read_unlock(arch_rwlock_t *rw)
        }
 }
 
-static inline void __raw_write_lock(arch_rwlock_t *rw)
+static inline void arch_write_lock(arch_rwlock_t *rw)
 {
        unsigned int tmp;
 
        if (R10000_LLSC_WAR) {
                __asm__ __volatile__(
-               "       .set    noreorder       # __raw_write_lock      \n"
+               "       .set    noreorder       # arch_write_lock       \n"
                "1:     ll      %1, %2                                  \n"
                "       bnez    %1, 1b                                  \n"
                "        lui    %1, 0x8000                              \n"
@@ -354,7 +354,7 @@ static inline void __raw_write_lock(arch_rwlock_t *rw)
                : "memory");
        } else {
                __asm__ __volatile__(
-               "       .set    noreorder       # __raw_write_lock      \n"
+               "       .set    noreorder       # arch_write_lock       \n"
                "1:     ll      %1, %2                                  \n"
                "       bnez    %1, 2f                                  \n"
                "        lui    %1, 0x8000                              \n"
@@ -377,26 +377,26 @@ static inline void __raw_write_lock(arch_rwlock_t *rw)
        smp_llsc_mb();
 }
 
-static inline void __raw_write_unlock(arch_rwlock_t *rw)
+static inline void arch_write_unlock(arch_rwlock_t *rw)
 {
        smp_mb();
 
        __asm__ __volatile__(
-       "                               # __raw_write_unlock    \n"
+       "                               # arch_write_unlock     \n"
        "       sw      $0, %0                                  \n"
        : "=m" (rw->lock)
        : "m" (rw->lock)
        : "memory");
 }
 
-static inline int __raw_read_trylock(arch_rwlock_t *rw)
+static inline int arch_read_trylock(arch_rwlock_t *rw)
 {
        unsigned int tmp;
        int ret;
 
        if (R10000_LLSC_WAR) {
                __asm__ __volatile__(
-               "       .set    noreorder       # __raw_read_trylock    \n"
+               "       .set    noreorder       # arch_read_trylock     \n"
                "       li      %2, 0                                   \n"
                "1:     ll      %1, %3                                  \n"
                "       bltz    %1, 2f                                  \n"
@@ -413,7 +413,7 @@ static inline int __raw_read_trylock(arch_rwlock_t *rw)
                : "memory");
        } else {
                __asm__ __volatile__(
-               "       .set    noreorder       # __raw_read_trylock    \n"
+               "       .set    noreorder       # arch_read_trylock     \n"
                "       li      %2, 0                                   \n"
                "1:     ll      %1, %3                                  \n"
                "       bltz    %1, 2f                                  \n"
@@ -433,14 +433,14 @@ static inline int __raw_read_trylock(arch_rwlock_t *rw)
        return ret;
 }
 
-static inline int __raw_write_trylock(arch_rwlock_t *rw)
+static inline int arch_write_trylock(arch_rwlock_t *rw)
 {
        unsigned int tmp;
        int ret;
 
        if (R10000_LLSC_WAR) {
                __asm__ __volatile__(
-               "       .set    noreorder       # __raw_write_trylock   \n"
+               "       .set    noreorder       # arch_write_trylock    \n"
                "       li      %2, 0                                   \n"
                "1:     ll      %1, %3                                  \n"
                "       bnez    %1, 2f                                  \n"
@@ -457,7 +457,7 @@ static inline int __raw_write_trylock(arch_rwlock_t *rw)
                : "memory");
        } else {
                __asm__ __volatile__(
-               "       .set    noreorder       # __raw_write_trylock   \n"
+               "       .set    noreorder       # arch_write_trylock    \n"
                "       li      %2, 0                                   \n"
                "1:     ll      %1, %3                                  \n"
                "       bnez    %1, 2f                                  \n"
@@ -480,8 +480,8 @@ static inline int __raw_write_trylock(arch_rwlock_t *rw)
        return ret;
 }
 
-#define __raw_read_lock_flags(lock, flags) __raw_read_lock(lock)
-#define __raw_write_lock_flags(lock, flags) __raw_write_lock(lock)
+#define arch_read_lock_flags(lock, flags) arch_read_lock(lock)
+#define arch_write_lock_flags(lock, flags) arch_write_lock(lock)
 
 #define arch_spin_relax(lock)  cpu_relax()
 #define arch_read_relax(lock)  cpu_relax()
index 1ff3a0a94a433b2048fca719cd6774bc5c0514f6..74036f436a3b51e9f6c6f1b55ba1f56502aa07f7 100644 (file)
@@ -69,7 +69,7 @@ static inline int arch_spin_trylock(arch_spinlock_t *x)
 
 /* Note that we have to ensure interrupts are disabled in case we're
  * interrupted by some other code that wants to grab the same read lock */
-static  __inline__ void __raw_read_lock(arch_rwlock_t *rw)
+static  __inline__ void arch_read_lock(arch_rwlock_t *rw)
 {
        unsigned long flags;
        local_irq_save(flags);
@@ -81,7 +81,7 @@ static  __inline__ void __raw_read_lock(arch_rwlock_t *rw)
 
 /* Note that we have to ensure interrupts are disabled in case we're
  * interrupted by some other code that wants to grab the same read lock */
-static  __inline__ void __raw_read_unlock(arch_rwlock_t *rw)
+static  __inline__ void arch_read_unlock(arch_rwlock_t *rw)
 {
        unsigned long flags;
        local_irq_save(flags);
@@ -93,7 +93,7 @@ static  __inline__ void __raw_read_unlock(arch_rwlock_t *rw)
 
 /* Note that we have to ensure interrupts are disabled in case we're
  * interrupted by some other code that wants to grab the same read lock */
-static __inline__ int __raw_read_trylock(arch_rwlock_t *rw)
+static __inline__ int arch_read_trylock(arch_rwlock_t *rw)
 {
        unsigned long flags;
  retry:
@@ -119,7 +119,7 @@ static __inline__ int __raw_read_trylock(arch_rwlock_t *rw)
 
 /* Note that we have to ensure interrupts are disabled in case we're
  * interrupted by some other code that wants to read_trylock() this lock */
-static __inline__ void __raw_write_lock(arch_rwlock_t *rw)
+static __inline__ void arch_write_lock(arch_rwlock_t *rw)
 {
        unsigned long flags;
 retry:
@@ -141,7 +141,7 @@ retry:
        local_irq_restore(flags);
 }
 
-static __inline__ void __raw_write_unlock(arch_rwlock_t *rw)
+static __inline__ void arch_write_unlock(arch_rwlock_t *rw)
 {
        rw->counter = 0;
        arch_spin_unlock(&rw->lock);
@@ -149,7 +149,7 @@ static __inline__ void __raw_write_unlock(arch_rwlock_t *rw)
 
 /* Note that we have to ensure interrupts are disabled in case we're
  * interrupted by some other code that wants to read_trylock() this lock */
-static __inline__ int __raw_write_trylock(arch_rwlock_t *rw)
+static __inline__ int arch_write_trylock(arch_rwlock_t *rw)
 {
        unsigned long flags;
        int result = 0;
@@ -173,7 +173,7 @@ static __inline__ int __raw_write_trylock(arch_rwlock_t *rw)
  * read_can_lock - would read_trylock() succeed?
  * @lock: the rwlock in question.
  */
-static __inline__ int __raw_read_can_lock(arch_rwlock_t *rw)
+static __inline__ int arch_read_can_lock(arch_rwlock_t *rw)
 {
        return rw->counter >= 0;
 }
@@ -182,13 +182,13 @@ static __inline__ int __raw_read_can_lock(arch_rwlock_t *rw)
  * write_can_lock - would write_trylock() succeed?
  * @lock: the rwlock in question.
  */
-static __inline__ int __raw_write_can_lock(arch_rwlock_t *rw)
+static __inline__ int arch_write_can_lock(arch_rwlock_t *rw)
 {
        return !rw->counter;
 }
 
-#define __raw_read_lock_flags(lock, flags) __raw_read_lock(lock)
-#define __raw_write_lock_flags(lock, flags) __raw_write_lock(lock)
+#define arch_read_lock_flags(lock, flags) arch_read_lock(lock)
+#define arch_write_lock_flags(lock, flags) arch_write_lock(lock)
 
 #define arch_spin_relax(lock)  cpu_relax()
 #define arch_read_relax(lock)  cpu_relax()
index 2fad2c07c5934b16bf636ef48c72cbac8ccea540..764094cff68172609db5ce6d4e05a1a2b8229598 100644 (file)
@@ -166,8 +166,8 @@ extern void arch_spin_unlock_wait(arch_spinlock_t *lock);
  * read-locks.
  */
 
-#define __raw_read_can_lock(rw)                ((rw)->lock >= 0)
-#define __raw_write_can_lock(rw)       (!(rw)->lock)
+#define arch_read_can_lock(rw)         ((rw)->lock >= 0)
+#define arch_write_can_lock(rw)        (!(rw)->lock)
 
 #ifdef CONFIG_PPC64
 #define __DO_SIGN_EXTEND       "extsw  %0,%0\n"
@@ -181,7 +181,7 @@ extern void arch_spin_unlock_wait(arch_spinlock_t *lock);
  * This returns the old value in the lock + 1,
  * so we got a read lock if the return value is > 0.
  */
-static inline long arch_read_trylock(arch_rwlock_t *rw)
+static inline long __arch_read_trylock(arch_rwlock_t *rw)
 {
        long tmp;
 
@@ -205,7 +205,7 @@ static inline long arch_read_trylock(arch_rwlock_t *rw)
  * This returns the old value in the lock,
  * so we got the write lock if the return value is 0.
  */
-static inline long arch_write_trylock(arch_rwlock_t *rw)
+static inline long __arch_write_trylock(arch_rwlock_t *rw)
 {
        long tmp, token;
 
@@ -225,10 +225,10 @@ static inline long arch_write_trylock(arch_rwlock_t *rw)
        return tmp;
 }
 
-static inline void __raw_read_lock(arch_rwlock_t *rw)
+static inline void arch_read_lock(arch_rwlock_t *rw)
 {
        while (1) {
-               if (likely(arch_read_trylock(rw) > 0))
+               if (likely(__arch_read_trylock(rw) > 0))
                        break;
                do {
                        HMT_low();
@@ -239,10 +239,10 @@ static inline void __raw_read_lock(arch_rwlock_t *rw)
        }
 }
 
-static inline void __raw_write_lock(arch_rwlock_t *rw)
+static inline void arch_write_lock(arch_rwlock_t *rw)
 {
        while (1) {
-               if (likely(arch_write_trylock(rw) == 0))
+               if (likely(__arch_write_trylock(rw) == 0))
                        break;
                do {
                        HMT_low();
@@ -253,17 +253,17 @@ static inline void __raw_write_lock(arch_rwlock_t *rw)
        }
 }
 
-static inline int __raw_read_trylock(arch_rwlock_t *rw)
+static inline int arch_read_trylock(arch_rwlock_t *rw)
 {
-       return arch_read_trylock(rw) > 0;
+       return __arch_read_trylock(rw) > 0;
 }
 
-static inline int __raw_write_trylock(arch_rwlock_t *rw)
+static inline int arch_write_trylock(arch_rwlock_t *rw)
 {
-       return arch_write_trylock(rw) == 0;
+       return __arch_write_trylock(rw) == 0;
 }
 
-static inline void __raw_read_unlock(arch_rwlock_t *rw)
+static inline void arch_read_unlock(arch_rwlock_t *rw)
 {
        long tmp;
 
@@ -280,15 +280,15 @@ static inline void __raw_read_unlock(arch_rwlock_t *rw)
        : "cr0", "xer", "memory");
 }
 
-static inline void __raw_write_unlock(arch_rwlock_t *rw)
+static inline void arch_write_unlock(arch_rwlock_t *rw)
 {
        __asm__ __volatile__("# write_unlock\n\t"
                                LWSYNC_ON_SMP: : :"memory");
        rw->lock = 0;
 }
 
-#define __raw_read_lock_flags(lock, flags) __raw_read_lock(lock)
-#define __raw_write_lock_flags(lock, flags) __raw_write_lock(lock)
+#define arch_read_lock_flags(lock, flags) arch_read_lock(lock)
+#define arch_write_lock_flags(lock, flags) arch_write_lock(lock)
 
 #define arch_spin_relax(lock)  __spin_yield(lock)
 #define arch_read_relax(lock)  __rw_yield(lock)
index 7f98f0e48acb7a92c11f4eedb5a7ec9afe866a86..a587907d77f32e208cc0c8ed6e8da87f050c757e 100644 (file)
@@ -113,13 +113,13 @@ static inline void arch_spin_unlock(arch_spinlock_t *lp)
  * read_can_lock - would read_trylock() succeed?
  * @lock: the rwlock in question.
  */
-#define __raw_read_can_lock(x) ((int)(x)->lock >= 0)
+#define arch_read_can_lock(x) ((int)(x)->lock >= 0)
 
 /**
  * write_can_lock - would write_trylock() succeed?
  * @lock: the rwlock in question.
  */
-#define __raw_write_can_lock(x) ((x)->lock == 0)
+#define arch_write_can_lock(x) ((x)->lock == 0)
 
 extern void _raw_read_lock_wait(arch_rwlock_t *lp);
 extern void _raw_read_lock_wait_flags(arch_rwlock_t *lp, unsigned long flags);
@@ -128,7 +128,7 @@ extern void _raw_write_lock_wait(arch_rwlock_t *lp);
 extern void _raw_write_lock_wait_flags(arch_rwlock_t *lp, unsigned long flags);
 extern int _raw_write_trylock_retry(arch_rwlock_t *lp);
 
-static inline void __raw_read_lock(arch_rwlock_t *rw)
+static inline void arch_read_lock(arch_rwlock_t *rw)
 {
        unsigned int old;
        old = rw->lock & 0x7fffffffU;
@@ -136,7 +136,7 @@ static inline void __raw_read_lock(arch_rwlock_t *rw)
                _raw_read_lock_wait(rw);
 }
 
-static inline void __raw_read_lock_flags(arch_rwlock_t *rw, unsigned long flags)
+static inline void arch_read_lock_flags(arch_rwlock_t *rw, unsigned long flags)
 {
        unsigned int old;
        old = rw->lock & 0x7fffffffU;
@@ -144,7 +144,7 @@ static inline void __raw_read_lock_flags(arch_rwlock_t *rw, unsigned long flags)
                _raw_read_lock_wait_flags(rw, flags);
 }
 
-static inline void __raw_read_unlock(arch_rwlock_t *rw)
+static inline void arch_read_unlock(arch_rwlock_t *rw)
 {
        unsigned int old, cmp;
 
@@ -155,24 +155,24 @@ static inline void __raw_read_unlock(arch_rwlock_t *rw)
        } while (cmp != old);
 }
 
-static inline void __raw_write_lock(arch_rwlock_t *rw)
+static inline void arch_write_lock(arch_rwlock_t *rw)
 {
        if (unlikely(_raw_compare_and_swap(&rw->lock, 0, 0x80000000) != 0))
                _raw_write_lock_wait(rw);
 }
 
-static inline void __raw_write_lock_flags(arch_rwlock_t *rw, unsigned long flags)
+static inline void arch_write_lock_flags(arch_rwlock_t *rw, unsigned long flags)
 {
        if (unlikely(_raw_compare_and_swap(&rw->lock, 0, 0x80000000) != 0))
                _raw_write_lock_wait_flags(rw, flags);
 }
 
-static inline void __raw_write_unlock(arch_rwlock_t *rw)
+static inline void arch_write_unlock(arch_rwlock_t *rw)
 {
        _raw_compare_and_swap(&rw->lock, 0x80000000, 0);
 }
 
-static inline int __raw_read_trylock(arch_rwlock_t *rw)
+static inline int arch_read_trylock(arch_rwlock_t *rw)
 {
        unsigned int old;
        old = rw->lock & 0x7fffffffU;
@@ -181,7 +181,7 @@ static inline int __raw_read_trylock(arch_rwlock_t *rw)
        return _raw_read_trylock_retry(rw);
 }
 
-static inline int __raw_write_trylock(arch_rwlock_t *rw)
+static inline int arch_write_trylock(arch_rwlock_t *rw)
 {
        if (likely(_raw_compare_and_swap(&rw->lock, 0, 0x80000000) == 0))
                return 1;
index 09fee9a1aa156e01b1309c2e9b275dea2b0d51b8..10754a3756684900ea1570cc1c21b5d6bc800240 100644 (file)
@@ -115,7 +115,7 @@ void _raw_read_lock_wait(arch_rwlock_t *rw)
                        _raw_yield();
                        count = spin_retry;
                }
-               if (!__raw_read_can_lock(rw))
+               if (!arch_read_can_lock(rw))
                        continue;
                old = rw->lock & 0x7fffffffU;
                if (_raw_compare_and_swap(&rw->lock, old, old + 1) == old)
@@ -135,7 +135,7 @@ void _raw_read_lock_wait_flags(arch_rwlock_t *rw, unsigned long flags)
                        _raw_yield();
                        count = spin_retry;
                }
-               if (!__raw_read_can_lock(rw))
+               if (!arch_read_can_lock(rw))
                        continue;
                old = rw->lock & 0x7fffffffU;
                local_irq_disable();
@@ -151,7 +151,7 @@ int _raw_read_trylock_retry(arch_rwlock_t *rw)
        int count = spin_retry;
 
        while (count-- > 0) {
-               if (!__raw_read_can_lock(rw))
+               if (!arch_read_can_lock(rw))
                        continue;
                old = rw->lock & 0x7fffffffU;
                if (_raw_compare_and_swap(&rw->lock, old, old + 1) == old)
@@ -170,7 +170,7 @@ void _raw_write_lock_wait(arch_rwlock_t *rw)
                        _raw_yield();
                        count = spin_retry;
                }
-               if (!__raw_write_can_lock(rw))
+               if (!arch_write_can_lock(rw))
                        continue;
                if (_raw_compare_and_swap(&rw->lock, 0, 0x80000000) == 0)
                        return;
@@ -188,7 +188,7 @@ void _raw_write_lock_wait_flags(arch_rwlock_t *rw, unsigned long flags)
                        _raw_yield();
                        count = spin_retry;
                }
-               if (!__raw_write_can_lock(rw))
+               if (!arch_write_can_lock(rw))
                        continue;
                local_irq_disable();
                if (_raw_compare_and_swap(&rw->lock, 0, 0x80000000) == 0)
@@ -202,7 +202,7 @@ int _raw_write_trylock_retry(arch_rwlock_t *rw)
        int count = spin_retry;
 
        while (count-- > 0) {
-               if (!__raw_write_can_lock(rw))
+               if (!arch_write_can_lock(rw))
                        continue;
                if (_raw_compare_and_swap(&rw->lock, 0, 0x80000000) == 0)
                        return 1;
index 7f3626aac869c3fb3eb1422c1630b5495b4119a6..bdc0f3b6c56afdf23d6b628128e6171cc2552b74 100644 (file)
@@ -100,21 +100,21 @@ static inline int arch_spin_trylock(arch_spinlock_t *lock)
  * read_can_lock - would read_trylock() succeed?
  * @lock: the rwlock in question.
  */
-#define __raw_read_can_lock(x) ((x)->lock > 0)
+#define arch_read_can_lock(x)  ((x)->lock > 0)
 
 /**
  * write_can_lock - would write_trylock() succeed?
  * @lock: the rwlock in question.
  */
-#define __raw_write_can_lock(x)        ((x)->lock == RW_LOCK_BIAS)
+#define arch_write_can_lock(x) ((x)->lock == RW_LOCK_BIAS)
 
-static inline void __raw_read_lock(arch_rwlock_t *rw)
+static inline void arch_read_lock(arch_rwlock_t *rw)
 {
        unsigned long tmp;
 
        __asm__ __volatile__ (
                "1:                                             \n\t"
-               "movli.l        @%1, %0 ! __raw_read_lock       \n\t"
+               "movli.l        @%1, %0 ! arch_read_lock        \n\t"
                "cmp/pl         %0                              \n\t"
                "bf             1b                              \n\t"
                "add            #-1, %0                         \n\t"
@@ -126,13 +126,13 @@ static inline void __raw_read_lock(arch_rwlock_t *rw)
        );
 }
 
-static inline void __raw_read_unlock(arch_rwlock_t *rw)
+static inline void arch_read_unlock(arch_rwlock_t *rw)
 {
        unsigned long tmp;
 
        __asm__ __volatile__ (
                "1:                                             \n\t"
-               "movli.l        @%1, %0 ! __raw_read_unlock     \n\t"
+               "movli.l        @%1, %0 ! arch_read_unlock      \n\t"
                "add            #1, %0                          \n\t"
                "movco.l        %0, @%1                         \n\t"
                "bf             1b                              \n\t"
@@ -142,13 +142,13 @@ static inline void __raw_read_unlock(arch_rwlock_t *rw)
        );
 }
 
-static inline void __raw_write_lock(arch_rwlock_t *rw)
+static inline void arch_write_lock(arch_rwlock_t *rw)
 {
        unsigned long tmp;
 
        __asm__ __volatile__ (
                "1:                                             \n\t"
-               "movli.l        @%1, %0 ! __raw_write_lock      \n\t"
+               "movli.l        @%1, %0 ! arch_write_lock       \n\t"
                "cmp/hs         %2, %0                          \n\t"
                "bf             1b                              \n\t"
                "sub            %2, %0                          \n\t"
@@ -160,23 +160,23 @@ static inline void __raw_write_lock(arch_rwlock_t *rw)
        );
 }
 
-static inline void __raw_write_unlock(arch_rwlock_t *rw)
+static inline void arch_write_unlock(arch_rwlock_t *rw)
 {
        __asm__ __volatile__ (
-               "mov.l          %1, @%0 ! __raw_write_unlock    \n\t"
+               "mov.l          %1, @%0 ! arch_write_unlock     \n\t"
                :
                : "r" (&rw->lock), "r" (RW_LOCK_BIAS)
                : "t", "memory"
        );
 }
 
-static inline int __raw_read_trylock(arch_rwlock_t *rw)
+static inline int arch_read_trylock(arch_rwlock_t *rw)
 {
        unsigned long tmp, oldval;
 
        __asm__ __volatile__ (
                "1:                                             \n\t"
-               "movli.l        @%2, %0 ! __raw_read_trylock    \n\t"
+               "movli.l        @%2, %0 ! arch_read_trylock     \n\t"
                "mov            %0, %1                          \n\t"
                "cmp/pl         %0                              \n\t"
                "bf             2f                              \n\t"
@@ -193,13 +193,13 @@ static inline int __raw_read_trylock(arch_rwlock_t *rw)
        return (oldval > 0);
 }
 
-static inline int __raw_write_trylock(arch_rwlock_t *rw)
+static inline int arch_write_trylock(arch_rwlock_t *rw)
 {
        unsigned long tmp, oldval;
 
        __asm__ __volatile__ (
                "1:                                             \n\t"
-               "movli.l        @%2, %0 ! __raw_write_trylock   \n\t"
+               "movli.l        @%2, %0 ! arch_write_trylock    \n\t"
                "mov            %0, %1                          \n\t"
                "cmp/hs         %3, %0                          \n\t"
                "bf             2f                              \n\t"
@@ -216,8 +216,8 @@ static inline int __raw_write_trylock(arch_rwlock_t *rw)
        return (oldval > (RW_LOCK_BIAS - 1));
 }
 
-#define __raw_read_lock_flags(lock, flags) __raw_read_lock(lock)
-#define __raw_write_lock_flags(lock, flags) __raw_write_lock(lock)
+#define arch_read_lock_flags(lock, flags) arch_read_lock(lock)
+#define arch_write_lock_flags(lock, flags) arch_write_lock(lock)
 
 #define arch_spin_relax(lock)  cpu_relax()
 #define arch_read_relax(lock)  cpu_relax()
index 06d37e588fde7658cc490504595869b0bb4bade3..7f9b9dba38a64896705ecb85b9d897e71125096d 100644 (file)
@@ -76,7 +76,7 @@ static inline void arch_spin_unlock(arch_spinlock_t *lock)
  *
  * Unfortunately this scheme limits us to ~16,000,000 cpus.
  */
-static inline void arch_read_lock(arch_rwlock_t *rw)
+static inline void __arch_read_lock(arch_rwlock_t *rw)
 {
        register arch_rwlock_t *lp asm("g1");
        lp = rw;
@@ -89,14 +89,14 @@ static inline void arch_read_lock(arch_rwlock_t *rw)
        : "g2", "g4", "memory", "cc");
 }
 
-#define __raw_read_lock(lock) \
+#define arch_read_lock(lock) \
 do {   unsigned long flags; \
        local_irq_save(flags); \
-       arch_read_lock(lock); \
+       __arch_read_lock(lock); \
        local_irq_restore(flags); \
 } while(0)
 
-static inline void arch_read_unlock(arch_rwlock_t *rw)
+static inline void __arch_read_unlock(arch_rwlock_t *rw)
 {
        register arch_rwlock_t *lp asm("g1");
        lp = rw;
@@ -109,14 +109,14 @@ static inline void arch_read_unlock(arch_rwlock_t *rw)
        : "g2", "g4", "memory", "cc");
 }
 
-#define __raw_read_unlock(lock) \
+#define arch_read_unlock(lock) \
 do {   unsigned long flags; \
        local_irq_save(flags); \
-       arch_read_unlock(lock); \
+       __arch_read_unlock(lock); \
        local_irq_restore(flags); \
 } while(0)
 
-static inline void __raw_write_lock(arch_rwlock_t *rw)
+static inline void arch_write_lock(arch_rwlock_t *rw)
 {
        register arch_rwlock_t *lp asm("g1");
        lp = rw;
@@ -130,7 +130,7 @@ static inline void __raw_write_lock(arch_rwlock_t *rw)
        *(volatile __u32 *)&lp->lock = ~0U;
 }
 
-static inline int __raw_write_trylock(arch_rwlock_t *rw)
+static inline int arch_write_trylock(arch_rwlock_t *rw)
 {
        unsigned int val;
 
@@ -150,7 +150,7 @@ static inline int __raw_write_trylock(arch_rwlock_t *rw)
        return (val == 0);
 }
 
-static inline int arch_read_trylock(arch_rwlock_t *rw)
+static inline int __arch_read_trylock(arch_rwlock_t *rw)
 {
        register arch_rwlock_t *lp asm("g1");
        register int res asm("o0");
@@ -165,27 +165,27 @@ static inline int arch_read_trylock(arch_rwlock_t *rw)
        return res;
 }
 
-#define __raw_read_trylock(lock) \
+#define arch_read_trylock(lock) \
 ({     unsigned long flags; \
        int res; \
        local_irq_save(flags); \
-       res = arch_read_trylock(lock); \
+       res = __arch_read_trylock(lock); \
        local_irq_restore(flags); \
        res; \
 })
 
-#define __raw_write_unlock(rw) do { (rw)->lock = 0; } while(0)
+#define arch_write_unlock(rw)  do { (rw)->lock = 0; } while(0)
 
 #define arch_spin_lock_flags(lock, flags) arch_spin_lock(lock)
-#define __raw_read_lock_flags(rw, flags)   __raw_read_lock(rw)
-#define __raw_write_lock_flags(rw, flags)  __raw_write_lock(rw)
+#define arch_read_lock_flags(rw, flags)   arch_read_lock(rw)
+#define arch_write_lock_flags(rw, flags)  arch_write_lock(rw)
 
 #define arch_spin_relax(lock)  cpu_relax()
 #define arch_read_relax(lock)  cpu_relax()
 #define arch_write_relax(lock) cpu_relax()
 
-#define __raw_read_can_lock(rw) (!((rw)->lock & 0xff))
-#define __raw_write_can_lock(rw) (!(rw)->lock)
+#define arch_read_can_lock(rw) (!((rw)->lock & 0xff))
+#define arch_write_can_lock(rw) (!(rw)->lock)
 
 #endif /* !(__ASSEMBLY__) */
 
index 2b22d7f2c2fb623ac62cb92cda31cc04c06ea50a..073936a8b2755e1c15e637ff28c5bc607876f4dc 100644 (file)
@@ -210,17 +210,17 @@ static int inline arch_write_trylock(arch_rwlock_t *lock)
        return result;
 }
 
-#define __raw_read_lock(p)     arch_read_lock(p)
-#define __raw_read_lock_flags(p, f) arch_read_lock(p)
-#define __raw_read_trylock(p)  arch_read_trylock(p)
-#define __raw_read_unlock(p)   arch_read_unlock(p)
-#define __raw_write_lock(p)    arch_write_lock(p)
-#define __raw_write_lock_flags(p, f) arch_write_lock(p)
-#define __raw_write_unlock(p)  arch_write_unlock(p)
-#define __raw_write_trylock(p) arch_write_trylock(p)
-
-#define __raw_read_can_lock(rw)                (!((rw)->lock & 0x80000000UL))
-#define __raw_write_can_lock(rw)       (!(rw)->lock)
+#define arch_read_lock(p)      arch_read_lock(p)
+#define arch_read_lock_flags(p, f) arch_read_lock(p)
+#define arch_read_trylock(p)   arch_read_trylock(p)
+#define arch_read_unlock(p)    arch_read_unlock(p)
+#define arch_write_lock(p)     arch_write_lock(p)
+#define arch_write_lock_flags(p, f) arch_write_lock(p)
+#define arch_write_unlock(p)   arch_write_unlock(p)
+#define arch_write_trylock(p)  arch_write_trylock(p)
+
+#define arch_read_can_lock(rw)         (!((rw)->lock & 0x80000000UL))
+#define arch_write_can_lock(rw)        (!(rw)->lock)
 
 #define arch_spin_relax(lock)  cpu_relax()
 #define arch_read_relax(lock)  cpu_relax()
index 99cb86e843a0136ebfc9ca3a0daf3d1e7b8b53fe..3089f70c0c52059e569c8745d1dcca089daee8af 100644 (file)
@@ -232,7 +232,7 @@ static inline void arch_spin_unlock_wait(arch_spinlock_t *lock)
  * read_can_lock - would read_trylock() succeed?
  * @lock: the rwlock in question.
  */
-static inline int __raw_read_can_lock(arch_rwlock_t *lock)
+static inline int arch_read_can_lock(arch_rwlock_t *lock)
 {
        return (int)(lock)->lock > 0;
 }
@@ -241,12 +241,12 @@ static inline int __raw_read_can_lock(arch_rwlock_t *lock)
  * write_can_lock - would write_trylock() succeed?
  * @lock: the rwlock in question.
  */
-static inline int __raw_write_can_lock(arch_rwlock_t *lock)
+static inline int arch_write_can_lock(arch_rwlock_t *lock)
 {
        return (lock)->lock == RW_LOCK_BIAS;
 }
 
-static inline void __raw_read_lock(arch_rwlock_t *rw)
+static inline void arch_read_lock(arch_rwlock_t *rw)
 {
        asm volatile(LOCK_PREFIX " subl $1,(%0)\n\t"
                     "jns 1f\n"
@@ -255,7 +255,7 @@ static inline void __raw_read_lock(arch_rwlock_t *rw)
                     ::LOCK_PTR_REG (rw) : "memory");
 }
 
-static inline void __raw_write_lock(arch_rwlock_t *rw)
+static inline void arch_write_lock(arch_rwlock_t *rw)
 {
        asm volatile(LOCK_PREFIX " subl %1,(%0)\n\t"
                     "jz 1f\n"
@@ -264,7 +264,7 @@ static inline void __raw_write_lock(arch_rwlock_t *rw)
                     ::LOCK_PTR_REG (rw), "i" (RW_LOCK_BIAS) : "memory");
 }
 
-static inline int __raw_read_trylock(arch_rwlock_t *lock)
+static inline int arch_read_trylock(arch_rwlock_t *lock)
 {
        atomic_t *count = (atomic_t *)lock;
 
@@ -274,7 +274,7 @@ static inline int __raw_read_trylock(arch_rwlock_t *lock)
        return 0;
 }
 
-static inline int __raw_write_trylock(arch_rwlock_t *lock)
+static inline int arch_write_trylock(arch_rwlock_t *lock)
 {
        atomic_t *count = (atomic_t *)lock;
 
@@ -284,19 +284,19 @@ static inline int __raw_write_trylock(arch_rwlock_t *lock)
        return 0;
 }
 
-static inline void __raw_read_unlock(arch_rwlock_t *rw)
+static inline void arch_read_unlock(arch_rwlock_t *rw)
 {
        asm volatile(LOCK_PREFIX "incl %0" :"+m" (rw->lock) : : "memory");
 }
 
-static inline void __raw_write_unlock(arch_rwlock_t *rw)
+static inline void arch_write_unlock(arch_rwlock_t *rw)
 {
        asm volatile(LOCK_PREFIX "addl %1, %0"
                     : "+m" (rw->lock) : "i" (RW_LOCK_BIAS) : "memory");
 }
 
-#define __raw_read_lock_flags(lock, flags) __raw_read_lock(lock)
-#define __raw_write_lock_flags(lock, flags) __raw_write_lock(lock)
+#define arch_read_lock_flags(lock, flags) arch_read_lock(lock)
+#define arch_write_lock_flags(lock, flags) arch_write_lock(lock)
 
 #define arch_spin_relax(lock)  cpu_relax()
 #define arch_read_relax(lock)  cpu_relax()
index 73785b0bd6b935bb928e59a5fc2f16dde5b0bbcb..5725b034defec94f1d7b2139a031753ca86a96e1 100644 (file)
@@ -38,20 +38,20 @@ do {                                                                \
  extern int _raw_write_trylock(rwlock_t *lock);
  extern void _raw_write_unlock(rwlock_t *lock);
 #else
-# define _raw_read_lock(rwlock)                __raw_read_lock(&(rwlock)->raw_lock)
+# define _raw_read_lock(rwlock)                arch_read_lock(&(rwlock)->raw_lock)
 # define _raw_read_lock_flags(lock, flags) \
-               __raw_read_lock_flags(&(lock)->raw_lock, *(flags))
-# define _raw_read_trylock(rwlock)     __raw_read_trylock(&(rwlock)->raw_lock)
-# define _raw_read_unlock(rwlock)      __raw_read_unlock(&(rwlock)->raw_lock)
-# define _raw_write_lock(rwlock)       __raw_write_lock(&(rwlock)->raw_lock)
+               arch_read_lock_flags(&(lock)->raw_lock, *(flags))
+# define _raw_read_trylock(rwlock)     arch_read_trylock(&(rwlock)->raw_lock)
+# define _raw_read_unlock(rwlock)      arch_read_unlock(&(rwlock)->raw_lock)
+# define _raw_write_lock(rwlock)       arch_write_lock(&(rwlock)->raw_lock)
 # define _raw_write_lock_flags(lock, flags) \
-               __raw_write_lock_flags(&(lock)->raw_lock, *(flags))
-# define _raw_write_trylock(rwlock)    __raw_write_trylock(&(rwlock)->raw_lock)
-# define _raw_write_unlock(rwlock)     __raw_write_unlock(&(rwlock)->raw_lock)
+               arch_write_lock_flags(&(lock)->raw_lock, *(flags))
+# define _raw_write_trylock(rwlock)    arch_write_trylock(&(rwlock)->raw_lock)
+# define _raw_write_unlock(rwlock)     arch_write_unlock(&(rwlock)->raw_lock)
 #endif
 
-#define read_can_lock(rwlock)          __raw_read_can_lock(&(rwlock)->raw_lock)
-#define write_can_lock(rwlock)         __raw_write_can_lock(&(rwlock)->raw_lock)
+#define read_can_lock(rwlock)          arch_read_can_lock(&(rwlock)->raw_lock)
+#define write_can_lock(rwlock)         arch_write_can_lock(&(rwlock)->raw_lock)
 
 /*
  * Define the various rw_lock methods.  Note we define these
index 1d3bcc3cf7c665eb14cc107260efab90192281d2..b14f6a91e19f33c214446d442667665aa2cd6310 100644 (file)
@@ -49,12 +49,12 @@ static inline void arch_spin_unlock(arch_spinlock_t *lock)
 /*
  * Read-write spinlocks. No debug version.
  */
-#define __raw_read_lock(lock)          do { (void)(lock); } while (0)
-#define __raw_write_lock(lock)         do { (void)(lock); } while (0)
-#define __raw_read_trylock(lock)       ({ (void)(lock); 1; })
-#define __raw_write_trylock(lock)      ({ (void)(lock); 1; })
-#define __raw_read_unlock(lock)                do { (void)(lock); } while (0)
-#define __raw_write_unlock(lock)       do { (void)(lock); } while (0)
+#define arch_read_lock(lock)           do { (void)(lock); } while (0)
+#define arch_write_lock(lock)          do { (void)(lock); } while (0)
+#define arch_read_trylock(lock)        ({ (void)(lock); 1; })
+#define arch_write_trylock(lock)       ({ (void)(lock); 1; })
+#define arch_read_unlock(lock)         do { (void)(lock); } while (0)
+#define arch_write_unlock(lock)        do { (void)(lock); } while (0)
 
 #else /* DEBUG_SPINLOCK */
 #define arch_spin_is_locked(lock)      ((void)(lock), 0)
@@ -67,8 +67,8 @@ static inline void arch_spin_unlock(arch_spinlock_t *lock)
 
 #define arch_spin_is_contended(lock)   (((void)(lock), 0))
 
-#define __raw_read_can_lock(lock)      (((void)(lock), 1))
-#define __raw_write_can_lock(lock)     (((void)(lock), 1))
+#define arch_read_can_lock(lock)       (((void)(lock), 1))
+#define arch_write_can_lock(lock)      (((void)(lock), 1))
 
 #define arch_spin_unlock_wait(lock) \
                do { cpu_relax(); } while (arch_spin_is_locked(lock))
index 3f72f10d9cb0adf3c1f99b994ab65ffc7d607acd..0cea0bf6114eff4434edf248a10bd27d3c74f067 100644 (file)
@@ -176,7 +176,7 @@ static void __read_lock_debug(rwlock_t *lock)
 
        for (;;) {
                for (i = 0; i < loops; i++) {
-                       if (__raw_read_trylock(&lock->raw_lock))
+                       if (arch_read_trylock(&lock->raw_lock))
                                return;
                        __delay(1);
                }
@@ -196,12 +196,12 @@ static void __read_lock_debug(rwlock_t *lock)
 void _raw_read_lock(rwlock_t *lock)
 {
        RWLOCK_BUG_ON(lock->magic != RWLOCK_MAGIC, lock, "bad magic");
-       __raw_read_lock(&lock->raw_lock);
+       arch_read_lock(&lock->raw_lock);
 }
 
 int _raw_read_trylock(rwlock_t *lock)
 {
-       int ret = __raw_read_trylock(&lock->raw_lock);
+       int ret = arch_read_trylock(&lock->raw_lock);
 
 #ifndef CONFIG_SMP
        /*
@@ -215,7 +215,7 @@ int _raw_read_trylock(rwlock_t *lock)
 void _raw_read_unlock(rwlock_t *lock)
 {
        RWLOCK_BUG_ON(lock->magic != RWLOCK_MAGIC, lock, "bad magic");
-       __raw_read_unlock(&lock->raw_lock);
+       arch_read_unlock(&lock->raw_lock);
 }
 
 static inline void debug_write_lock_before(rwlock_t *lock)
@@ -251,7 +251,7 @@ static void __write_lock_debug(rwlock_t *lock)
 
        for (;;) {
                for (i = 0; i < loops; i++) {
-                       if (__raw_write_trylock(&lock->raw_lock))
+                       if (arch_write_trylock(&lock->raw_lock))
                                return;
                        __delay(1);
                }
@@ -271,13 +271,13 @@ static void __write_lock_debug(rwlock_t *lock)
 void _raw_write_lock(rwlock_t *lock)
 {
        debug_write_lock_before(lock);
-       __raw_write_lock(&lock->raw_lock);
+       arch_write_lock(&lock->raw_lock);
        debug_write_lock_after(lock);
 }
 
 int _raw_write_trylock(rwlock_t *lock)
 {
-       int ret = __raw_write_trylock(&lock->raw_lock);
+       int ret = arch_write_trylock(&lock->raw_lock);
 
        if (ret)
                debug_write_lock_after(lock);
@@ -293,5 +293,5 @@ int _raw_write_trylock(rwlock_t *lock)
 void _raw_write_unlock(rwlock_t *lock)
 {
        debug_write_unlock(lock);
-       __raw_write_unlock(&lock->raw_lock);
+       arch_write_unlock(&lock->raw_lock);
 }