patch-2.4.14 linux/include/asm-i386/spinlock.h

diff -u --recursive --new-file v2.4.13/linux/include/asm-i386/spinlock.h linux/include/asm-i386/spinlock.h
@@ -66,9 +66,52 @@
 
 /*
  * This works. Despite all the confusion.
+ * (except on PPro SMP or if we are using OOSTORE)
+ * (PPro errata 66, 92)
  */
+ 
+#if !defined(CONFIG_X86_OOSTORE) && !defined(CONFIG_X86_PPRO_FENCE)
+
+#define spin_unlock_string \
+	"movb $1,%0" \
+		:"=m" (lock->lock) : : "memory"
+
+
+static inline void spin_unlock(spinlock_t *lock)
+{
+#if SPINLOCK_DEBUG
+	if (lock->magic != SPINLOCK_MAGIC)
+		BUG();
+	if (!spin_is_locked(lock))
+		BUG();
+#endif
+	__asm__ __volatile__(
+		spin_unlock_string
+	);
+}
+
+#else
+
 #define spin_unlock_string \
-	"movb $1,%0"
+	"xchgb %b0, %1" \
+		:"=q" (oldval), "=m" (lock->lock) \
+		:"0" (oldval) : "memory"
+
+static inline void spin_unlock(spinlock_t *lock)
+{
+	char oldval = 1;
+#if SPINLOCK_DEBUG
+	if (lock->magic != SPINLOCK_MAGIC)
+		BUG();
+	if (!spin_is_locked(lock))
+		BUG();
+#endif
+	__asm__ __volatile__(
+		spin_unlock_string
+	);
+}
+
+#endif
 
 static inline int spin_trylock(spinlock_t *lock)
 {
@@ -95,18 +138,6 @@
 		:"=m" (lock->lock) : : "memory");
 }
 
-static inline void spin_unlock(spinlock_t *lock)
-{
-#if SPINLOCK_DEBUG
-	if (lock->magic != SPINLOCK_MAGIC)
-		BUG();
-	if (!spin_is_locked(lock))
-		BUG();
-#endif
-	__asm__ __volatile__(
-		spin_unlock_string
-		:"=m" (lock->lock) : : "memory");
-}
 
 /*
  * Read-write spinlocks, allowing multiple readers
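
For readers following the change outside the kernel tree: the patch splits spin_unlock into two variants. On ordinary x86 a plain byte store ("movb $1,%0") is a sufficient release, because stores are not reordered against earlier stores. With CONFIG_X86_OOSTORE (CPUs configured for out-of-order stores) or on Pentium Pro SMP systems affected by errata 66 and 92, the plain store is not a safe release, so the unlock instead uses a locked "xchgb". Below is a minimal user-space sketch of the two strategies, assuming GCC inline assembly on i386; demo_spinlock_t, demo_spin_lock and the two unlock helpers are made-up names for illustration, and only the asm bodies are taken from the patch.

	#include <stdio.h>

	/* 1 = unlocked, 0 = locked, matching the 2.4 convention (SPIN_LOCK_UNLOCKED is 1). */
	typedef struct { volatile char lock; } demo_spinlock_t;

	static inline void demo_spin_lock(demo_spinlock_t *l)
	{
		char got;
		do {
			/* xchg with a memory operand is implicitly locked; the old
			 * value tells us whether the lock was free (1) when we
			 * swapped in 0. */
			__asm__ __volatile__("xchgb %b0,%1"
				: "=q" (got), "=m" (l->lock)
				: "0" (0) : "memory");
		} while (!got);
	}

	/* Fast path (the !OOSTORE && !PPRO_FENCE case): a plain byte store releases the lock. */
	static inline void demo_spin_unlock_fast(demo_spinlock_t *l)
	{
		__asm__ __volatile__("movb $1,%0" : "=m" (l->lock) : : "memory");
	}

	/* Strict path (OOSTORE or PPro errata 66/92): release with a locked xchg so the
	 * store that frees the lock cannot be observed before the critical section's stores. */
	static inline void demo_spin_unlock_strict(demo_spinlock_t *l)
	{
		char oldval = 1;
		__asm__ __volatile__("xchgb %b0,%1"
			: "=q" (oldval), "=m" (l->lock)
			: "0" (oldval) : "memory");
	}

	int main(void)
	{
		demo_spinlock_t l = { 1 };
		demo_spin_lock(&l);
		/* ... critical section ... */
		demo_spin_unlock_strict(&l);
		printf("lock byte is now %d\n", l.lock);
		return 0;
	}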
