patch-2.3.36 linux/arch/sparc64/kernel/semaphore.c

diff -u --recursive --new-file v2.3.35/linux/arch/sparc64/kernel/semaphore.c linux/arch/sparc64/kernel/semaphore.c
@@ -1,4 +1,4 @@
-/* $Id: semaphore.c,v 1.1 1999/08/30 10:00:50 davem Exp $
+/* $Id: semaphore.c,v 1.2 1999/12/23 17:12:03 jj Exp $
  *  Generic semaphore code. Buyer beware. Do your own
  * specific changes in <asm/semaphore-helper.h>
  */
@@ -62,8 +62,7 @@
 
 #define DOWN_VAR				\
 	struct task_struct *tsk = current;	\
-	wait_queue_t wait;			\
-	init_waitqueue_entry(&wait, tsk);
+	DECLARE_WAITQUEUE(wait, tsk);
 
 #define DOWN_HEAD(task_state)						\
 									\
@@ -126,4 +125,173 @@
 int __down_trylock(struct semaphore * sem)
 {
 	return waking_non_zero_trylock(sem);
+}
+
+/* rw mutexes
+ * Implemented by Jakub Jelinek (jakub@redhat.com) based on
+ * i386 implementation by Ben LaHaise (bcrl@redhat.com).
+ */
+
+asm("
+	.text
+	.align	32
+	.globl	__down_read_failed
+__down_read_failed:
+	save		%sp, -160, %sp
+	membar		#StoreStore
+	brz,pt		%g5, 3f
+	 mov		%g7, %l0
+1:	call		down_read_failed
+	 mov		%l0, %o0
+2:	lduw		[%l0], %l1
+	sub		%l1, 1, %l2
+	cas		[%l0], %l1, %l2
+
+	cmp		%l1, %l2
+	bne,pn		%icc, 2b
+	 membar		#StoreStore
+	subcc		%l1, 1, %g0
+	bpos,pt		%icc, 4f
+	 nop
+	bcc,pn		%icc, 1b
+	 nop
+
+3:	call		down_read_failed_biased
+	 mov		%l0, %o0
+4:	ret
+	 restore
+	.previous
+");
+
+asm("
+	.text
+	.align	32
+	.globl	__down_write_failed
+__down_write_failed:
+	save		%sp, -160, %sp
+	membar		#StoreStore
+	tst		%g5
+	bge,pt		%icc, 3f
+	 mov		%g7, %l0
+1:	call		down_write_failed
+	 mov		%l0, %o0
+2:	lduw		[%l0], %l1
+	sethi		%hi (" RW_LOCK_BIAS_STR "), %l3
+	sub		%l1, %l3, %l2
+	cas		[%l0], %l1, %l2
+
+	cmp		%l1, %l2
+	bne,pn		%icc, 2b
+	 membar		#StoreStore
+	subcc		%l1, %l3, %g0
+	be,pt		%icc, 4f
+	 nop
+	bcc,pn		%icc, 1b
+	 nop
+
+3:	call		down_write_failed_biased
+	 mov		%l0, %o0
+4:	ret
+	 restore
+	.previous
+");
+
+void down_read_failed_biased(struct rw_semaphore *sem)
+{
+	DOWN_VAR
+
+	add_wait_queue(&sem->wait, &wait);	/* put ourselves at the head of the list */
+
+	for (;;) {
+		if (clear_le_bit(0, &sem->granted))
+			break;
+		set_task_state(tsk, TASK_UNINTERRUPTIBLE);
+		if (!test_le_bit(0, &sem->granted))
+			schedule();
+	}
+
+	remove_wait_queue(&sem->wait, &wait);
+	tsk->state = TASK_RUNNING;
+}
+
+void down_write_failed_biased(struct rw_semaphore *sem)
+{
+	DOWN_VAR
+
+	add_wait_queue_exclusive(&sem->write_bias_wait, &wait); /* put ourselves at the end of the list */
+
+	for (;;) {
+		if (clear_le_bit(1, &sem->granted))
+			break;
+		set_task_state(tsk, TASK_UNINTERRUPTIBLE | TASK_EXCLUSIVE);
+		if (!test_le_bit(1, &sem->granted))
+			schedule();
+	}
+
+	remove_wait_queue(&sem->write_bias_wait, &wait);
+	tsk->state = TASK_RUNNING;
+
+	/* if the lock is currently unbiased, awaken the sleepers
+	 * FIXME: this wakes up the readers early in a bit of a
+	 * stampede -> bad!
+	 */
+	if (sem->count >= 0)
+		wake_up(&sem->wait);
+}
+
+/* Wait for the lock to become unbiased.  Readers
+ * are non-exclusive. =)
+ */
+void down_read_failed(struct rw_semaphore *sem)
+{
+	DOWN_VAR
+
+	__up_read(sem); /* this takes care of granting the lock */
+
+	add_wait_queue(&sem->wait, &wait);
+
+	while (sem->count < 0) {
+		set_task_state(tsk, TASK_UNINTERRUPTIBLE);
+		if (sem->count >= 0)
+			break;
+		schedule();
+	}
+
+	remove_wait_queue(&sem->wait, &wait);
+	tsk->state = TASK_RUNNING;
+}
+
+/* Wait for the lock to become unbiased. Since we're
+ * a writer, we'll make ourselves exclusive.
+ */
+void down_write_failed(struct rw_semaphore *sem)
+{
+	DOWN_VAR
+
+	__up_write(sem);	/* this takes care of granting the lock */
+
+	add_wait_queue_exclusive(&sem->wait, &wait);
+
+	while (sem->count < 0) {
+		set_task_state(tsk, TASK_UNINTERRUPTIBLE | TASK_EXCLUSIVE);
+		if (sem->count >= 0)
+			break;  /* we must attempt to acquire or bias the lock */
+		schedule();
+	}
+
+	remove_wait_queue(&sem->wait, &wait);
+	tsk->state = TASK_RUNNING;
+}
+
+void __rwsem_wake(struct rw_semaphore *sem, unsigned long readers)
+{
+	if (readers) {
+		if (set_le_bit(0, &sem->granted))
+			BUG();
+		wake_up(&sem->wait);
+	} else {
+		if (set_le_bit(1, &sem->granted))
+			BUG();
+		wake_up(&sem->write_bias_wait);
+	}
 }

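For readers unfamiliar with the 2.3-era rw-semaphore design, the sketch below is a hedged, userspace-only illustration (not part of the patch and not kernel code) of the counter discipline the new slow paths assume: the count starts at RW_LOCK_BIAS, a reader subtracts 1, a writer subtracts the whole bias, and a subtraction that does not leave the expected value is what sends the kernel into __down_read_failed()/__down_write_failed() above. All names and the exact bias value here are illustrative assumptions.

	/* Hedged illustration only -- mimics the counter discipline behind
	 * the slow paths above.  Plain C11, compiles and runs in userspace.
	 */
	#include <stdatomic.h>
	#include <stdio.h>

	#define RW_LOCK_BIAS 0x01000000	/* assumed bias value, as on i386 of this era */

	static atomic_int count = RW_LOCK_BIAS;	/* semaphore starts out free */

	static int try_down_read(void)
	{
		/* A reader succeeds while the count stays non-negative,
		 * i.e. no writer holds or is biased on the lock. */
		return atomic_fetch_sub(&count, 1) - 1 >= 0;
	}

	static int try_down_write(void)
	{
		/* A writer needs the entire bias: it only succeeds when the
		 * count was exactly RW_LOCK_BIAS (no readers, no writer). */
		return atomic_fetch_sub(&count, RW_LOCK_BIAS) == RW_LOCK_BIAS;
	}

	int main(void)
	{
		printf("writer on a free lock: %d\n", try_down_write());	/* 1: acquired */
		printf("reader against writer: %d\n", try_down_read());	/* 0: would enter the slow path */
		return 0;
	}

In the kernel patch itself, a failed acquirer does not spin on this counter: it falls into the biased or unbiased sleep loops added above, and __rwsem_wake() hands the lock over by setting the corresponding bit in sem->granted before waking the sleepers.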