patch-2.3.48 linux/include/asm-mips/semaphore-helper.h
- Lines: 255
- Date: Thu Feb 24 22:52:30 2000
- Orig file: v2.3.47/linux/include/asm-mips/semaphore-helper.h
- Orig date: Mon Jul 19 13:12:47 1999
diff -u --recursive --new-file v2.3.47/linux/include/asm-mips/semaphore-helper.h linux/include/asm-mips/semaphore-helper.h
@@ -1,13 +1,16 @@
-/* $Id: semaphore-helper.h,v 1.3 1999/06/11 14:30:15 ralf Exp $
+/* $Id: semaphore-helper.h,v 1.7 1999/10/21 00:23:05 ralf Exp $
*
* SMP- and interrupt-safe semaphores helper functions.
*
* (C) Copyright 1996 Linus Torvalds
* (C) Copyright 1999 Andrea Arcangeli
* (C) Copyright 1999 Ralf Baechle
+ * (C) Copyright 1999 Silicon Graphics, Inc.
*/
-#ifndef __ASM_MIPS_SEMAPHORE_HELPER_H
-#define __ASM_MIPS_SEMAPHORE_HELPER_H
+#ifndef _ASM_SEMAPHORE_HELPER_H
+#define _ASM_SEMAPHORE_HELPER_H
+
+#include <linux/config.h>
/*
* These two _must_ execute atomically wrt each other.
@@ -17,17 +20,77 @@
atomic_inc(&sem->waking);
}
+#if !defined(CONFIG_CPU_HAS_LLSC)
+
+/*
+ * It doesn't make sense, IMHO, to endlessly turn interrupts off and on again.
+ * Do it once and that's it. ll/sc *has* its advantages. HK
+ */
+#define read(a) ((a)->counter)
+#define inc(a) (((a)->counter)++)
+#define dec(a) (((a)->counter)--)
+
+static inline int waking_non_zero(struct semaphore *sem)
+{
+ unsigned long flags;
+ int ret = 0;
+
+ save_and_cli(flags);
+ if (read(&sem->waking) > 0) {
+ dec(&sem->waking);
+ ret = 1;
+ }
+ restore_flags(flags);
+ return ret;
+}
+
+static inline int waking_non_zero_interruptible(struct semaphore *sem,
+ struct task_struct *tsk)
+{
+ int ret = 0;
+ unsigned long flags;
+
+ save_and_cli(flags);
+ if (read(&sem->waking) > 0) {
+ dec(&sem->waking);
+ ret = 1;
+ } else if (signal_pending(tsk)) {
+ inc(&sem->count);
+ ret = -EINTR;
+ }
+ restore_flags(flags);
+ return ret;
+}
+
+static inline int waking_non_zero_trylock(struct semaphore *sem)
+{
+ int ret = 1;
+ unsigned long flags;
+
+ save_and_cli(flags);
+ if (read(&sem->waking) <= 0)
+ inc(&sem->count);
+ else {
+ dec(&sem->waking);
+ ret = 0;
+ }
+ restore_flags(flags);
+ return ret;
+}
+
+#else /* CONFIG_CPU_HAS_LLSC */
+
static inline int
waking_non_zero(struct semaphore *sem)
{
int ret, tmp;
__asm__ __volatile__(
- "1:\tll\t%1,%2\n\t"
- "blez\t%1,2f\n\t"
- "subu\t%0,%1,1\n\t"
- "sc\t%0,%2\n\t"
- "beqz\t%0,1b\n\t"
+ "1:\tll\t%1, %2\n\t"
+ "blez\t%1, 2f\n\t"
+ "subu\t%0, %1, 1\n\t"
+ "sc\t%0, %2\n\t"
+ "beqz\t%0, 1b\n\t"
"2:"
".text"
: "=r"(ret), "=r"(tmp), "=m"(__atomic_fool_gcc(&sem->waking))
@@ -48,78 +111,113 @@
*
* This is accomplished by doing a 64-bit ll/sc on the 2 32-bit words.
*
- * This is crazy. Normally it stricly forbidden to use 64-bit operation
+ * This is crazy. Normally it is strictly forbidden to use 64-bit operations
* in the 32-bit MIPS kernel. In this case it's however ok because if an
* interrupt has destroyed the upper half of registers sc will fail.
+ * Note also that this will not work for MIPS32 CPUs!
+ *
+ * Pseudocode:
+ *
+ * If(sem->waking > 0) {
+ * Decrement(sem->waking)
+ * Return(SUCCESS)
+ * } else If(signal_pending(tsk)) {
+ * Increment(sem->count)
+ * Return(-EINTR)
+ * } else {
+ * Return(SLEEP)
+ * }
*/
+
static inline int
waking_non_zero_interruptible(struct semaphore *sem, struct task_struct *tsk)
{
long ret, tmp;
#ifdef __MIPSEB__
+
__asm__ __volatile__("
- .set mips3
.set push
+ .set mips3
.set noat
-0: lld %1,%2
- li %0,0
+0: lld %1, %2
+ li %0, 0
+ sll $1, %1, 0
+ blez $1, 1f
+ daddiu %1, %1, -1
+ li %0, 1
+ b 2f
+1:
+ beqz %3, 2f
+ li %0, %4
+ dli $1, 0x0000000100000000
+ daddu %1, %1, $1
+2:
+ scd %1, %2
+ beqz %1, 0b
+
+ .set pop"
+ : "=&r"(ret), "=&r"(tmp), "=m"(*sem)
+ : "r"(signal_pending(tsk)), "i"(-EINTR));
+
+#elif defined(__MIPSEL__)
- bltz %1, 1f
- dli $1, 0xffffffff00000000
- daddu %1, $1
+ __asm__ __volatile__("
+ .set mips3
+ .set push
+ .set noat
+0:
+ lld %1, %2
+ li %0, 0
+ blez %1, 1f
+ dli $1, 0x0000000100000000
+ dsubu %1, %1, $1
li %0, 1
b 2f
1:
-
- beqz %3, 1f
- addiu $1, %1, 1
+ beqz %3, 2f
+ li %0, %4
+ /*
+ * It would be nice to assume that sem->count
+ * is != -1, but we will guard against that case
+ */
+ daddiu $1, %1, 1
dsll32 $1, $1, 0
dsrl32 $1, $1, 0
dsrl32 %1, %1, 0
dsll32 %1, %1, 0
- or %1, $1
- li %0, %4
- b 2f
-1:
- scd %1, %2
+ or %1, %1, $1
2:
- beqz %1,0b
+ scd %1, %2
+ beqz %1, 0b
+
.set pop
.set mips0"
: "=&r"(ret), "=&r"(tmp), "=m"(*sem)
: "r"(signal_pending(tsk)), "i"(-EINTR));
-#endif
-#ifdef __MIPSEL__
-#error "FIXME: waking_non_zero_interruptible doesn't support little endian machines yet."
+#else
+#error "MIPS but neither __MIPSEL__ nor __MIPSEB__?"
#endif
return ret;
}
/*
- * waking_non_zero_trylock:
- * 1 failed to lock
- * 0 got the lock
- *
- * XXX SMP ALERT
+ * waking_non_zero_trylock is unused. We do everything in
+ * down_trylock and let non-ll/sc hosts bounce around.
*/
-#ifdef __SMP__
-#error FIXME, waking_non_zero_trylock is broken for SMP.
-#endif
-static inline int waking_non_zero_trylock(struct semaphore *sem)
-{
- int ret = 1;
- if (atomic_read(&sem->waking) <= 0)
- atomic_inc(&sem->count);
- else {
- atomic_dec(&sem->waking);
- ret = 0;
- }
+static inline int
+waking_non_zero_trylock(struct semaphore *sem)
+{
+#if WAITQUEUE_DEBUG
+ CHECK_MAGIC(sem->__magic);
+#endif
- return ret;
+ return 0;
}
-#endif /* __ASM_MIPS_SEMAPHORE_HELPER_H */
+#endif /* CONFIG_CPU_HAS_LLSC */
+
+#endif /* _ASM_SEMAPHORE_HELPER_H */
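
The lld/scd variants in the last hunk work because count and waking are adjacent 32-bit words, so one doubleword load covers both and a single scd publishes updates to both at once. Here is a small host-side sketch of that layout assumption; the field pair mirrors the count/waking pseudocode above, and the union and demo program are illustrative only, not kernel code.

#include <stdio.h>
#include <stdint.h>

/* Two adjacent 32-bit counters, in the order the patch assumes for
 * struct semaphore: 'count' first, 'waking' second. */
struct sem_pair {
    int32_t count;
    int32_t waking;
};

union sem_view {
    struct sem_pair pair;
    uint64_t word;              /* what a single lld/scd would see */
};

int main(void)
{
    union sem_view v = { .pair = { .count = 1, .waking = 0 } };

    /* On a big-endian host (the __MIPSEB__ branch) 'count' sits in the
     * upper 32 bits of the doubleword, so adding 0x0000000100000000,
     * the constant used in the assembly above, increments count.
     * On a little-endian host the halves swap, which is why the
     * __MIPSEL__ branch needs a different instruction sequence. */
    v.word += UINT64_C(0x0000000100000000);

    printf("count=%d waking=%d\n", (int)v.pair.count, (int)v.pair.waking);
    return 0;
}

Running this on a big-endian machine prints count=2 waking=0, while a little-endian machine prints count=1 waking=1, which is exactly why the patch carries separate __MIPSEB__ and __MIPSEL__ code paths.
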