patch-2.3.30 linux/include/asm-i386/semaphore.h

diff -u --recursive --new-file v2.3.29/linux/include/asm-i386/semaphore.h linux/include/asm-i386/semaphore.h
@@ -30,7 +30,7 @@
 
 #include <asm/system.h>
 #include <asm/atomic.h>
-#include <linux/spinlock.h>
+#include <asm/rwlock.h>
 #include <linux/wait.h>
 
 struct semaphore {
@@ -111,10 +111,7 @@
 
 	__asm__ __volatile__(
 		"# atomic down operation\n\t"
-#ifdef __SMP__
-		"lock ; "
-#endif
-		"decl (%0)\n\t"     /* --sem->count */
+		LOCK "decl (%0)\n\t"     /* --sem->count */
 		"js 2f\n"
 		"1:\n"
 		".section .text.lock,\"ax\"\n"
@@ -136,10 +133,7 @@
 
 	__asm__ __volatile__(
 		"# atomic interruptible down operation\n\t"
-#ifdef __SMP__
-		"lock ; "
-#endif
-		"decl (%1)\n\t"     /* --sem->count */
+		LOCK "decl (%1)\n\t"     /* --sem->count */
 		"js 2f\n\t"
 		"xorl %0,%0\n"
 		"1:\n"
@@ -163,10 +157,7 @@
 
 	__asm__ __volatile__(
 		"# atomic interruptible down operation\n\t"
-#ifdef __SMP__
-		"lock ; "
-#endif
-		"decl (%1)\n\t"     /* --sem->count */
+		LOCK "decl (%1)\n\t"     /* --sem->count */
 		"js 2f\n\t"
 		"xorl %0,%0\n"
 		"1:\n"
@@ -193,10 +184,7 @@
 #endif
 	__asm__ __volatile__(
 		"# atomic up operation\n\t"
-#ifdef __SMP__
-		"lock ; "
-#endif
-		"incl (%0)\n\t"     /* ++sem->count */
+		LOCK "incl (%0)\n\t"     /* ++sem->count */
 		"jle 2f\n"
 		"1:\n"
 		".section .text.lock,\"ax\"\n"
@@ -206,6 +194,175 @@
 		:/* no outputs */
 		:"c" (sem)
 		:"memory");
+}
+
+/* rw mutexes (should that be mutices? =) -- throw rw
+ * spinlocks and semaphores together, and this is what we
+ * end up with...
+ *
+ * The lock is initialized to BIAS.  This way, a writer
+ * subtracts BIAS and gets 0 for the case of an uncontended
+ * lock.  Readers decrement by 1 and see a positive value
+ * when uncontended, negative if there are writers waiting
+ * (in which case the reader goes to sleep).
+ *
+ * The value 0x01000000 supports up to 128 processors and
+ * lots of processes.  BIAS must be chosen such that subl'ing
+ * BIAS once per CPU will result in the long remaining
+ * negative.
+ *
+ * In terms of fairness, this should result in the lock
+ * flopping back and forth between readers and writers
+ * under heavy use.
+ *
+ *		-ben
+ */
+struct rw_semaphore {
+	atomic_t		count;
+	volatile unsigned char	write_bias_granted;
+	volatile unsigned char	read_bias_granted;
+	volatile unsigned char	pad1;
+	volatile unsigned char	pad2;
+	wait_queue_head_t	wait;
+	wait_queue_head_t	write_bias_wait;
+#if WAITQUEUE_DEBUG
+	long			__magic;
+	atomic_t		readers;
+	atomic_t		writers;
+#endif
+};
+
+#if WAITQUEUE_DEBUG
+#define __RWSEM_DEBUG_INIT	, ATOMIC_INIT(0), ATOMIC_INIT(0)
+#else
+#define __RWSEM_DEBUG_INIT	/* */
+#endif
+
+#define __RWSEM_INITIALIZER(name) \
+{ ATOMIC_INIT(RW_LOCK_BIAS), 0, 0, 0, 0, __WAIT_QUEUE_HEAD_INITIALIZER((name).wait), \
+	__WAIT_QUEUE_HEAD_INITIALIZER((name).write_bias_wait) \
+	__SEM_DEBUG_INIT(name) __RWSEM_DEBUG_INIT }
+
+extern inline void init_rwsem(struct rw_semaphore *sem)
+{
+	atomic_set(&sem->count, RW_LOCK_BIAS);
+	sem->read_bias_granted = 0;
+	sem->write_bias_granted = 0;
+	init_waitqueue_head(&sem->wait);
+	init_waitqueue_head(&sem->write_bias_wait);
+#if WAITQUEUE_DEBUG
+	sem->__magic = (long)&sem->__magic;
+	atomic_set(&sem->readers, 0);
+	atomic_set(&sem->writers, 0);
+#endif
+}
+
+/* we use FASTCALL convention for the helpers */
+extern struct rw_semaphore *FASTCALL(__down_read_failed(struct rw_semaphore *sem));
+extern struct rw_semaphore *FASTCALL(__down_write_failed(struct rw_semaphore *sem));
+extern struct rw_semaphore *FASTCALL(__rwsem_wake(struct rw_semaphore *sem));
+
+extern inline void down_read(struct rw_semaphore *sem)
+{
+#if WAITQUEUE_DEBUG
+	if (sem->__magic != (long)&sem->__magic)
+		BUG();
+#endif
+	__build_read_lock(sem, "__down_read_failed");
+#if WAITQUEUE_DEBUG
+	if (sem->write_bias_granted)
+		BUG();
+	if (atomic_read(&sem->writers))
+		BUG();
+	atomic_inc(&sem->readers);
+#endif
+}
+
+extern inline void down_write(struct rw_semaphore *sem)
+{
+#if WAITQUEUE_DEBUG
+	if (sem->__magic != (long)&sem->__magic)
+		BUG();
+#endif
+	__build_write_lock(sem, "__down_write_failed");
+#if WAITQUEUE_DEBUG
+	if (atomic_read(&sem->writers))
+		BUG();
+	if (atomic_read(&sem->readers))
+		BUG();
+	if (sem->read_bias_granted)
+		BUG();
+	if (sem->write_bias_granted)
+		BUG();
+	atomic_inc(&sem->writers);
+#endif
+}
+
+/* When a reader does a release, the only significant
+ * case is when there was a writer waiting, and we've
+ * bumped the count to 0: we must wake the writer up.
+ */
+extern inline void __up_read(struct rw_semaphore *sem)
+{
+	__asm__ __volatile__(
+		"# up_read\n\t"
+		LOCK "incl (%%eax)\n\t"
+		"jz 2f\n"			/* only do the wake if result == 0 (ie, a writer) */
+		"1:\n\t"
+		".section .text.lock,\"ax\"\n"
+		"2:\tcall __rwsem_wake\n\t"
+		"jmp 1b\n"
+		".previous"
+		::"a" (sem)
+		:"memory"
+		);
+}
+
+/* releasing the writer is easy -- just release it and
+ * wake up any sleepers.
+ */
+extern inline void __up_write(struct rw_semaphore *sem)
+{
+	__asm__ __volatile__(
+		"# up_write\n\t"
+		LOCK "addl $" RW_LOCK_BIAS_STR ",(%%eax)\n"
+		"jc 2f\n"			/* only do the wake if the result went from negative to zero/positive */
+		"1:\n\t"
+		".section .text.lock,\"ax\"\n"
+		"2:\tcall __rwsem_wake\n\t"
+		"jmp 1b\n"
+		".previous"
+		::"a" (sem)
+		:"memory"
+		);
+}
+
+extern inline void up_read(struct rw_semaphore *sem)
+{
+#if WAITQUEUE_DEBUG
+	if (sem->write_bias_granted)
+		BUG();
+	if (atomic_read(&sem->writers))
+		BUG();
+	atomic_dec(&sem->readers);
+#endif
+	__up_read(sem);
+}
+
+extern inline void up_write(struct rw_semaphore *sem)
+{
+#if WAITQUEUE_DEBUG
+	if (sem->read_bias_granted)
+		BUG();
+	if (sem->write_bias_granted)
+		BUG();
+	if (atomic_read(&sem->readers))
+		BUG();
+	if (atomic_read(&sem->writers) != 1)
+		BUG();
+	atomic_dec(&sem->writers);
+#endif
+	__up_write(sem);
 }
 
 #endif
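
The LOCK macro used throughout the new code comes in via the
<asm/rwlock.h> include that replaces <linux/spinlock.h> above; as the
down()/up() hunks show, it simply expands to the "lock ; " prefix on
SMP builds and to nothing on UP, replacing the old #ifdef __SMP__
blocks.  To make the BIAS counting scheme concrete, here is a minimal
user-space sketch (not part of the patch) that models the count
transitions with a plain integer, assuming RW_LOCK_BIAS is 0x01000000
as the comment states:

	#include <stdio.h>

	#define RW_LOCK_BIAS 0x01000000		/* assumed value */

	int main(void)
	{
		int count = RW_LOCK_BIAS;	/* uncontended lock */

		count -= 1;			/* a reader takes the lock */
		/* still positive: the read fast path proceeds */
		printf("reader in:  %08x\n", (unsigned)count);

		count -= RW_LOCK_BIAS;		/* a writer tries the lock */
		/* negative: the writer sleeps in __down_write_failed */
		printf("writer in:  %08x\n", (unsigned)count);

		count += 1;			/* the reader releases */
		/* exactly zero: the "jz 2f" in __up_read fires and
		   __rwsem_wake wakes the sleeping writer */
		printf("reader out: %08x\n", (unsigned)count);
		return 0;
	}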

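For reference, a sketch of how a caller would use the new interface;
the names list_sem, walk_list and change_list are hypothetical, not
from the patch.  Both down_read() and down_write() may sleep, so
neither can be used from interrupt context:

	extern void walk_list(void);	/* hypothetical read-only work */
	extern void change_list(void);	/* hypothetical modification */

	static struct rw_semaphore list_sem = __RWSEM_INITIALIZER(list_sem);

	void reader_path(void)
	{
		down_read(&list_sem);	/* any number of readers at once */
		walk_list();
		up_read(&list_sem);
	}

	void writer_path(void)
	{
		down_write(&list_sem);	/* exclusive: waits for all readers */
		change_list();
		up_write(&list_sem);
	}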