patch-2.3.50 linux/include/asm-sh/semaphore.h

diff -u --recursive --new-file v2.3.49/linux/include/asm-sh/semaphore.h linux/include/asm-sh/semaphore.h
@@ -9,7 +9,7 @@
  * (C) Copyright 1996 Linus Torvalds
  *
  * SuperH version by Niibe Yutaka
- *
+ *  (Currently no asm implementation but generic C code...)
  */
 
 #include <linux/spinlock.h>
@@ -19,7 +19,7 @@
 
 struct semaphore {
 	atomic_t count;
-	int waking;
+	int sleepers;
 	wait_queue_head_t wait;
 #if WAITQUEUE_DEBUG
 	long __magic;
@@ -55,7 +55,7 @@
  * GCC 2.7.2.3 emits a bogus warning. EGCS doesn't. Oh well.
  */
 	atomic_set(&sem->count, val);
-	sem->waking = 0;
+	sem->sleepers = 0;
 	init_waitqueue_head(&sem->wait);
 #if WAITQUEUE_DEBUG
 	sem->__magic = (int)&sem->__magic;
@@ -72,15 +72,20 @@
 	sema_init(sem, 0);
 }
 
+#if 0
 asmlinkage void __down_failed(void /* special register calling convention */);
 asmlinkage int  __down_failed_interruptible(void  /* params in registers */);
 asmlinkage int  __down_failed_trylock(void  /* params in registers */);
 asmlinkage void __up_wakeup(void /* special register calling convention */);
+#endif
 
 asmlinkage void __down(struct semaphore * sem);
 asmlinkage int  __down_interruptible(struct semaphore * sem);
 asmlinkage int  __down_trylock(struct semaphore * sem);
 asmlinkage void __up(struct semaphore * sem);
+extern struct rw_semaphore *__down_read(struct rw_semaphore *sem, int carry);
+extern struct rw_semaphore *__down_write(struct rw_semaphore *sem, int carry);
+asmlinkage struct rw_semaphore *__rwsem_wake(struct rw_semaphore *sem);
 
 extern spinlock_t semaphore_wake_lock;
 
@@ -129,6 +134,151 @@
 #endif
 	if (atomic_inc_return(&sem->count) <= 0)
 		__up(sem);
+}
+
+/* rw mutexes (should that be mutices? =) -- throw rw
+ * spinlocks and semaphores together, and this is what we
+ * end up with...
+ *
+ * SuperH version by Niibe Yutaka
+ */
+struct rw_semaphore {
+	atomic_t		count;
+	volatile unsigned char	write_bias_granted;
+	volatile unsigned char	read_bias_granted;
+	volatile unsigned char	pad1;
+	volatile unsigned char	pad2;
+	wait_queue_head_t	wait;
+	wait_queue_head_t	write_bias_wait;
+#if WAITQUEUE_DEBUG
+	long			__magic;
+	atomic_t		readers;
+	atomic_t		writers;
+#endif
+};
+
+#define RW_LOCK_BIAS		 0x01000000
+
+#if WAITQUEUE_DEBUG
+#define __RWSEM_DEBUG_INIT	, ATOMIC_INIT(0), ATOMIC_INIT(0)
+#else
+#define __RWSEM_DEBUG_INIT	/* */
+#endif
+
+#define __RWSEM_INITIALIZER(name,count) \
+{ ATOMIC_INIT(count), 0, 0, 0, 0, __WAIT_QUEUE_HEAD_INITIALIZER((name).wait), \
+	__WAIT_QUEUE_HEAD_INITIALIZER((name).write_bias_wait) \
+	__SEM_DEBUG_INIT(name) __RWSEM_DEBUG_INIT }
+
+#define __DECLARE_RWSEM_GENERIC(name,count) \
+	struct rw_semaphore name = __RWSEM_INITIALIZER(name,count)
+
+#define DECLARE_RWSEM(name) __DECLARE_RWSEM_GENERIC(name,RW_LOCK_BIAS)
+#define DECLARE_RWSEM_READ_LOCKED(name) __DECLARE_RWSEM_GENERIC(name,RW_LOCK_BIAS-1)
+#define DECLARE_RWSEM_WRITE_LOCKED(name) __DECLARE_RWSEM_GENERIC(name,0)
+
+extern inline void init_rwsem(struct rw_semaphore *sem)
+{
+	atomic_set(&sem->count, RW_LOCK_BIAS);
+	sem->read_bias_granted = 0;
+	sem->write_bias_granted = 0;
+	init_waitqueue_head(&sem->wait);
+	init_waitqueue_head(&sem->write_bias_wait);
+#if WAITQUEUE_DEBUG
+	sem->__magic = (long)&sem->__magic;
+	atomic_set(&sem->readers, 0);
+	atomic_set(&sem->writers, 0);
+#endif
+}
+
+extern inline void down_read(struct rw_semaphore *sem)
+{
+	int saved = atomic_read(&sem->count), new;
+#if WAITQUEUE_DEBUG
+	if (sem->__magic != (long)&sem->__magic)
+		BUG();
+#endif
+	if ((new = atomic_dec_return(&sem->count)) < 0)
+		__down_read(sem, (new < 0 && saved >=0));
+#if WAITQUEUE_DEBUG
+	if (sem->write_bias_granted)
+		BUG();
+	if (atomic_read(&sem->writers))
+		BUG();
+	atomic_inc(&sem->readers);
+#endif
+}
+
+extern inline void down_write(struct rw_semaphore *sem)
+{
+	int saved = atomic_read(&sem->count), new;
+#if WAITQUEUE_DEBUG
+	if (sem->__magic != (long)&sem->__magic)
+		BUG();
+#endif
+	if ((new = atomic_sub_return(RW_LOCK_BIAS, &sem->count)) != 0)
+		__down_write(sem, (new < 0 && saved >=0));
+#if WAITQUEUE_DEBUG
+	if (atomic_read(&sem->writers))
+		BUG();
+	if (atomic_read(&sem->readers))
+		BUG();
+	if (sem->read_bias_granted)
+		BUG();
+	if (sem->write_bias_granted)
+		BUG();
+	atomic_inc(&sem->writers);
+#endif
+}
+
+/* When a reader does a release, the only significant
+ * case is when there was a writer waiting, and we've
+ * bumped the count to 0: we must wake the writer up.
+ */
+extern inline void __up_read(struct rw_semaphore *sem)
+{
+	if (atomic_inc_return(&sem->count) == 0)
+		__rwsem_wake(sem);
+}
+
+/* releasing the writer is easy -- just release it and
+ * wake up any sleepers.
+ */
+extern inline void __up_write(struct rw_semaphore *sem)
+{
+	int saved = atomic_read(&sem->count), new;
+
+	new = atomic_add_return(RW_LOCK_BIAS, &sem->count);
+	if (saved < 0 && new >= 0)
+		__rwsem_wake(sem);
+}
+
+extern inline void up_read(struct rw_semaphore *sem)
+{
+#if WAITQUEUE_DEBUG
+	if (sem->write_bias_granted)
+		BUG();
+	if (atomic_read(&sem->writers))
+		BUG();
+	atomic_dec(&sem->readers);
+#endif
+	__up_read(sem);
+}
+
+extern inline void up_write(struct rw_semaphore *sem)
+{
+#if WAITQUEUE_DEBUG
+	if (sem->read_bias_granted)
+		BUG();
+	if (sem->write_bias_granted)
+		BUG();
+	if (atomic_read(&sem->readers))
+		BUG();
+	if (atomic_read(&sem->writers) != 1)
+		BUG();
+	atomic_dec(&sem->writers);
+#endif
+	__up_write(sem);
 }
 
 #endif /* __ASM_SH_SEMAPHORE_H */
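
The down_read()/down_write() fast paths added above are driven entirely by
the count field and the RW_LOCK_BIAS constant: a reader moves the count by
one, a writer by the whole bias, and a result that crosses zero sends the
caller into the slow path (__down_read, __down_write, __rwsem_wake).  The
standalone program below only illustrates that arithmetic; it was written
for this note rather than taken from the kernel, uses plain ints, and
leaves out the atomics, the wait queues and the actual sleeping.

    /* Illustration of the rw_semaphore count arithmetic -- not kernel code. */
    #include <stdio.h>

    #define RW_LOCK_BIAS 0x01000000

    int main(void)
    {
        int count = RW_LOCK_BIAS;      /* free: no readers, no writer */

        count -= 1;                    /* down_read(): one reader */
        printf("one reader: 0x%08x (positive -> fast path)\n", (unsigned) count);

        count += 1;                    /* up_read(): back to the bias */

        count -= RW_LOCK_BIAS;         /* down_write(): uncontended writer */
        printf("one writer: 0x%08x (zero -> fast path)\n", (unsigned) count);

        count -= 1;                    /* down_read() against that writer */
        printf("contended:  0x%08x (negative -> __down_read)\n", (unsigned) count);

        return 0;
    }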

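For completeness, a sketch of how callers are expected to use the interface
defined above.  The lock name and the two functions are invented for the
example; only the DECLARE_RWSEM, down_read/up_read and down_write/up_write
calls come from this header.

    /* Hypothetical caller -- illustration only. */
    static DECLARE_RWSEM(example_sem);     /* starts out unlocked */

    void example_reader(void)
    {
        down_read(&example_sem);           /* shared: many readers at once */
        /* ... read the protected data ... */
        up_read(&example_sem);
    }

    void example_writer(void)
    {
        down_write(&example_sem);          /* exclusive: no readers or other writers */
        /* ... modify the protected data ... */
        up_write(&example_sem);
    }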