patch-2.4.10 linux/include/asm-ppc/spinlock.h

diff -u --recursive --new-file v2.4.9/linux/include/asm-ppc/spinlock.h linux/include/asm-ppc/spinlock.h
@@ -1,33 +1,79 @@
 /*
- * BK Id: SCCS/s.spinlock.h 1.5 05/17/01 18:14:25 cort
+ * BK Id: SCCS/s.spinlock.h 1.9 08/21/01 16:07:48 trini
  */
-#ifdef __KERNEL__
 #ifndef __ASM_SPINLOCK_H
 #define __ASM_SPINLOCK_H
 
+#include <asm/system.h>
+
+#undef SPINLOCK_DEBUG
+
 /*
  * Simple spin lock operations.
  */
 
 typedef struct {
 	volatile unsigned long lock;
+#ifdef SPINLOCK_DEBUG
 	volatile unsigned long owner_pc;
 	volatile unsigned long owner_cpu;
+#endif
 } spinlock_t;
 
-#define SPIN_LOCK_UNLOCKED	(spinlock_t) { 0, 0, 0 }
-#define spin_lock_init(lp) 	do { (lp)->lock = 0; } while(0)
-#define spin_unlock_wait(lp)	do { barrier(); } while((lp)->lock)
+#ifdef __KERNEL__
+#if SPINLOCK_DEBUG
+#define SPINLOCK_DEBUG_INIT     , 0, 0
+#else
+#define SPINLOCK_DEBUG_INIT     /* */
+#endif
+
+#define SPIN_LOCK_UNLOCKED	(spinlock_t) { 0 SPINLOCK_DEBUG_INIT }
+
+#define spin_lock_init(x) 	do { *(x) = SPIN_LOCK_UNLOCKED; } while(0)
 #define spin_is_locked(x)	((x)->lock != 0)
+#define spin_unlock_wait(x)	do { barrier(); } while(spin_is_locked(x))
+
+#ifndef SPINLOCK_DEBUG
+
+static inline void spin_lock(spinlock_t *lock)
+{
+	unsigned long tmp;
+
+	__asm__ __volatile__(
+	"b	1f			# spin_lock\n\
+2:	lwzx	%0,0,%1\n\
+	cmpwi	0,%0,0\n\
+	bne+	2b\n\
+1:	lwarx	%0,0,%1\n\
+	cmpwi	0,%0,0\n\
+	bne-	2b\n\
+	stwcx.	%2,0,%1\n\
+	bne-	2b\n\
+	isync"
+	: "=&r"(tmp)
+	: "r"(&lock->lock), "r"(1)
+	: "cr0", "memory");
+}
+
+static inline void spin_unlock(spinlock_t *lock)
+{
+	__asm__ __volatile__("eieio		# spin_unlock": : :"memory");
+	lock->lock = 0;
+}
+
+#define spin_trylock(lock) (!test_and_set_bit(0,(lock)))
+
+#else
 
 extern void _spin_lock(spinlock_t *lock);
 extern void _spin_unlock(spinlock_t *lock);
 extern int spin_trylock(spinlock_t *lock);
+extern unsigned long __spin_trylock(volatile unsigned long *lock);
 
 #define spin_lock(lp)			_spin_lock(lp)
 #define spin_unlock(lp)			_spin_unlock(lp)
 
-extern unsigned long __spin_trylock(volatile unsigned long *lock);
+#endif
 
 /*
  * Read-write spinlocks, allowing multiple readers
@@ -41,12 +87,85 @@
  */
 typedef struct {
 	volatile unsigned long lock;
+#ifdef SPINLOCK_DEBUG
 	volatile unsigned long owner_pc;
+#endif
 } rwlock_t;
 
-#define RW_LOCK_UNLOCKED (rwlock_t) { 0, 0 }
+#if SPINLOCK_DEBUG
+#define RWLOCK_DEBUG_INIT     , 0
+#else
+#define RWLOCK_DEBUG_INIT     /* */
+#endif
+
+#define RW_LOCK_UNLOCKED (rwlock_t) { 0 RWLOCK_DEBUG_INIT }
 #define rwlock_init(lp) do { *(lp) = RW_LOCK_UNLOCKED; } while(0)
 
+#ifndef SPINLOCK_DEBUG
+
+static __inline__ void read_lock(rwlock_t *rw)
+{
+	unsigned int tmp;
+
+	__asm__ __volatile__(
+	"b		2f		# read_lock\n\
+1:	lwzx		%0,0,%1\n\
+	cmpwi		0,%0,0\n\
+	blt+		1b\n\
+2:	lwarx		%0,0,%1\n\
+	addic.		%0,%0,1\n\
+	ble-		1b\n\
+	stwcx.		%0,0,%1\n\
+	bne-		2b\n\
+	isync"
+	: "=&r"(tmp)
+	: "r"(&rw->lock)
+	: "cr0", "memory");
+}
+
+static __inline__ void read_unlock(rwlock_t *rw)
+{
+	unsigned int tmp;
+
+	__asm__ __volatile__(
+	"eieio				# read_unlock\n\
+1:	lwarx		%0,0,%1\n\
+	addic		%0,%0,-1\n\
+	stwcx.		%0,0,%1\n\
+	bne-		1b"
+	: "=&r"(tmp)
+	: "r"(&rw->lock)
+	: "cr0", "memory");
+}
+
+static __inline__ void write_lock(rwlock_t *rw)
+{
+	unsigned int tmp;
+
+	__asm__ __volatile__(
+	"b		2f		# write_lock\n\
+1:	lwzx		%0,0,%1\n\
+	cmpwi		0,%0,0\n\
+	bne+		1b\n\
+2:	lwarx		%0,0,%1\n\
+	cmpwi		0,%0,0\n\
+	bne-		1b\n\
+	stwcx.		%2,0,%1\n\
+	bne-		2b\n\
+	isync"
+	: "=&r"(tmp)
+	: "r"(&rw->lock), "r"(-1)
+	: "cr0", "memory");
+}
+
+static __inline__ void write_unlock(rwlock_t *rw)
+{
+	__asm__ __volatile__("eieio		# write_unlock": : :"memory");
+	rw->lock = 0;
+}
+
+#else
+
 extern void _read_lock(rwlock_t *rw);
 extern void _read_unlock(rwlock_t *rw);
 extern void _write_lock(rwlock_t *rw);
@@ -56,6 +175,8 @@
 #define write_lock(rw)		_write_lock(rw)
 #define write_unlock(rw)	_write_unlock(rw)
 #define read_unlock(rw)		_read_unlock(rw)
+
+#endif
 
 #endif /* __ASM_SPINLOCK_H */
 #endif /* __KERNEL__ */
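
For readers new to PowerPC atomics: in the non-debug spin_lock() above, lwarx loads the lock word and sets a reservation, and stwcx. stores back only if the reservation still holds, making the 0 -> 1 transition atomic; the leading lwzx loop spins cheaply on plain loads while the lock is busy, and the trailing isync keeps accesses in the critical section from being performed before the lock is owned. A rough C analogue of that logic, using GCC's modern __atomic builtins purely for illustration (my_spinlock_t, my_spin_lock and my_spin_unlock are made-up names; the kernel itself uses the inline assembly shown):

typedef struct { volatile unsigned long lock; } my_spinlock_t;

static inline void my_spin_lock(my_spinlock_t *lp)
{
	unsigned long expected;

	for (;;) {
		/* the lwzx loop: spin on plain loads while held */
		while (lp->lock != 0)
			;
		/* the lwarx/stwcx. pair: atomic 0 -> 1 attempt */
		expected = 0;
		if (__atomic_compare_exchange_n(&lp->lock, &expected,
				1UL, 0, __ATOMIC_ACQUIRE,
				__ATOMIC_RELAXED))
			return;	/* ACQUIRE stands in for the isync */
	}
}

static inline void my_spin_unlock(my_spinlock_t *lp)
{
	/* RELEASE stands in for the eieio before the clearing store */
	__atomic_store_n(&lp->lock, 0UL, __ATOMIC_RELEASE);
}

spin_trylock() takes a different route entirely, reusing test_and_set_bit() on bit 0 of the lock word and succeeding when the bit was previously clear.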

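The calling convention is unchanged by the patch; a minimal user of the API looks like this (a hypothetical example, with foo_lock and foo_count as made-up names):

#include <linux/spinlock.h>

static spinlock_t foo_lock = SPIN_LOCK_UNLOCKED;
static unsigned long foo_count;

static void foo_inc(void)
{
	spin_lock(&foo_lock);	/* spins in lwarx/stwcx. until owned */
	foo_count++;
	spin_unlock(&foo_lock);	/* eieio, then store 0 to the lock word */
}

With SPINLOCK_DEBUG defined, the same calls route to the out-of-line _spin_lock()/_spin_unlock() helpers instead, which additionally track the owner_pc and owner_cpu fields.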
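The read-write variants rely on a sign convention in the lock word: a positive value is the count of active readers and -1 marks a writer, which is why read_lock() retries while the incremented value is non-positive (ble-) and write_lock() insists on finding exactly 0 before storing -1. A hedged C sketch of the reader side under that encoding (my_rwlock_t and friends are invented names, and the __atomic builtins again stand in for lwarx/stwcx.):

typedef struct { volatile long lock; } my_rwlock_t;

static inline void my_read_lock(my_rwlock_t *rw)
{
	long old;

	for (;;) {
		/* a writer (-1) holds the lock; spin on plain loads */
		while ((old = rw->lock) < 0)
			;
		/* atomically bump the reader count */
		if (__atomic_compare_exchange_n(&rw->lock, &old, old + 1,
				0, __ATOMIC_ACQUIRE, __ATOMIC_RELAXED))
			return;
	}
}

static inline void my_read_unlock(my_rwlock_t *rw)
{
	/* drop our reader; RELEASE mirrors the eieio in the assembly */
	__atomic_fetch_sub(&rw->lock, 1, __ATOMIC_RELEASE);
}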