patch-2.3.50 linux/arch/sh/kernel/semaphore.c


diff -u --recursive --new-file v2.3.49/linux/arch/sh/kernel/semaphore.c linux/arch/sh/kernel/semaphore.c
@@ -8,6 +8,8 @@
  */
 
 #include <linux/sched.h>
+#include <linux/wait.h>
+#include <asm/semaphore.h>
 #include <asm/semaphore-helper.h>
 
 /*
@@ -130,4 +132,163 @@
 int __down_trylock(struct semaphore * sem)
 {
 	return waking_non_zero_trylock(sem);
+}
+
+/* Called when someone has done an up that transitioned from
+ * negative to non-negative, meaning that the lock has been
+ * granted to whoever owned the bias.
+ */
+struct rw_semaphore *rwsem_wake_readers(struct rw_semaphore *sem)
+{
+	if (xchg(&sem->read_bias_granted, 1))
+		BUG();
+	wake_up(&sem->wait);
+	return sem;
+}
+
+struct rw_semaphore *rwsem_wake_writer(struct rw_semaphore *sem)
+{
+	if (xchg(&sem->write_bias_granted, 1))
+		BUG();
+	wake_up(&sem->write_bias_wait);
+	return sem;
+}
+
+struct rw_semaphore * __rwsem_wake(struct rw_semaphore *sem)
+{
+	if (atomic_read(&sem->count) == 0)
+		return rwsem_wake_writer(sem);
+	else
+		return rwsem_wake_readers(sem);
+}
+
+struct rw_semaphore *down_read_failed_biased(struct rw_semaphore *sem)
+{
+	struct task_struct *tsk = current;
+	DECLARE_WAITQUEUE(wait, tsk);
+
+	add_wait_queue(&sem->wait, &wait);	/* put ourselves at the head of the list */
+
+	for (;;) {
+		if (sem->read_bias_granted && xchg(&sem->read_bias_granted, 0))
+			break;
+		set_task_state(tsk, TASK_UNINTERRUPTIBLE);
+		if (!sem->read_bias_granted)
+			schedule();
+	}
+
+	remove_wait_queue(&sem->wait, &wait);
+	tsk->state = TASK_RUNNING;
+
+	return sem;
+}
+
+struct rw_semaphore *down_write_failed_biased(struct rw_semaphore *sem)
+{
+	struct task_struct *tsk = current;
+	DECLARE_WAITQUEUE(wait, tsk);
+
+	add_wait_queue_exclusive(&sem->write_bias_wait, &wait);	/* put ourselves at the end of the list */
+
+	for (;;) {
+		if (sem->write_bias_granted && xchg(&sem->write_bias_granted, 0))
+			break;
+		set_task_state(tsk, TASK_UNINTERRUPTIBLE | TASK_EXCLUSIVE);
+		if (!sem->write_bias_granted)
+			schedule();
+	}
+
+	remove_wait_queue(&sem->write_bias_wait, &wait);
+	tsk->state = TASK_RUNNING;
+
+	/* if the lock is currently unbiased, awaken the sleepers
+	 * FIXME: this wakes up the readers early in a bit of a
+	 * stampede -> bad!
+	 */
+	if (atomic_read(&sem->count) >= 0)
+		wake_up(&sem->wait);
+
+	return sem;
+}
+
+/* Wait for the lock to become unbiased.  Readers
+ * are non-exclusive. =)
+ */
+struct rw_semaphore *down_read_failed(struct rw_semaphore *sem)
+{
+	struct task_struct *tsk = current;
+	DECLARE_WAITQUEUE(wait, tsk);
+
+	__up_read(sem);	/* this takes care of granting the lock */
+
+	add_wait_queue(&sem->wait, &wait);
+
+	while (atomic_read(&sem->count) < 0) {
+		set_task_state(tsk, TASK_UNINTERRUPTIBLE);
+		if (atomic_read(&sem->count) >= 0)
+			break;
+		schedule();
+	}
+
+	remove_wait_queue(&sem->wait, &wait);
+	tsk->state = TASK_RUNNING;
+
+	return sem;
+}
+
+/* Wait for the lock to become unbiased. Since we're
+ * a writer, we'll make ourselves exclusive.
+ */
+struct rw_semaphore *down_write_failed(struct rw_semaphore *sem)
+{
+	struct task_struct *tsk = current;
+	DECLARE_WAITQUEUE(wait, tsk);
+
+	__up_write(sem);	/* this takes care of granting the lock */
+
+	add_wait_queue_exclusive(&sem->wait, &wait);
+
+	while (atomic_read(&sem->count) < 0) {
+		set_task_state(tsk, TASK_UNINTERRUPTIBLE | TASK_EXCLUSIVE);
+		if (atomic_read(&sem->count) >= 0)
+			break;	/* we must attempt to acquire or bias the lock */
+		schedule();
+	}
+
+	remove_wait_queue(&sem->wait, &wait);
+	tsk->state = TASK_RUNNING;
+
+	return sem;
+}
+
+struct rw_semaphore *__down_read(struct rw_semaphore *sem, int carry)
+{
+	if (carry) {
+		int saved, new;
+
+		do {
+			down_read_failed(sem);
+			saved = atomic_read(&sem->count);
+			if ((new = atomic_dec_return(&sem->count)) >= 0)
+				return sem;
+		} while (!(new < 0 && saved >= 0));
+	}
+
+	return down_read_failed_biased(sem);
+}
+
+struct rw_semaphore *__down_write(struct rw_semaphore *sem, int carry)
+{
+	if (carry) {
+		int saved, new;
+
+		do {
+			down_write_failed(sem);
+			saved = atomic_read(&sem->count);
+			if ((new = atomic_sub_return(RW_LOCK_BIAS, &sem->count) ) == 0)
+				return sem;
+		} while (!(new < 0 && saved >= 0));
+	}
+
+	return down_write_failed_biased(sem);
 }

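For context: the functions added above are the out-of-line slow paths behind the
rw-semaphore primitives declared in <asm/semaphore.h> (now included at the top of
the file). The fast paths adjust sem->count atomically and only drop into
__down_read()/__down_write() when the count indicates contention, while the up()
side hands wake-ups to __rwsem_wake(). A minimal usage sketch, assuming the
generic DECLARE_RWSEM()/down_read()/down_write() interface of this kernel series
(my_rwsem, read_value and write_value are hypothetical names):

#include <asm/semaphore.h>

static DECLARE_RWSEM(my_rwsem);		/* hypothetical shared lock */
static int shared_value;		/* data it protects */

static int read_value(void)
{
	int v;

	down_read(&my_rwsem);		/* contended case ends up in __down_read() */
	v = shared_value;		/* many readers may hold the lock at once */
	up_read(&my_rwsem);
	return v;
}

static void write_value(int v)
{
	down_write(&my_rwsem);		/* contended case ends up in __down_write() */
	shared_value = v;		/* writers get exclusive access */
	up_write(&my_rwsem);		/* may wake sleepers via __rwsem_wake() */
}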