patch-2.3.15 linux/arch/i386/kernel/semaphore.c

diff -u --recursive --new-file v2.3.14/linux/arch/i386/kernel/semaphore.c linux/arch/i386/kernel/semaphore.c
@@ -0,0 +1,223 @@
+/*
+ * i386 semaphore implementation.
+ *
+ * (C) Copyright 1999 Linus Torvalds
+ */
+#include <linux/sched.h>
+
+#include <asm/semaphore.h>
+
+/*
+ * Semaphores are implemented using a two-way counter:
+ * The "count" variable is decremented for each process
+ * that tries to acquire the semaphore, while the "sleepers"
+ * variable is a count of such acquires.
+ *
+ * Notably, the inline "up()" and "down()" functions can
+ * efficiently test if they need to do any extra work (up
+ * needs to do something only if count was negative before
+ * the increment operation).
+ *
+ * "sleeping" and the contention routine ordering is
+ * protected by the semaphore spinlock.
+ *
+ * Note that these functions are only called when there is
+ * contention on the lock, and as such all this is the
+ * "non-critical" part of the whole semaphore business. The
+ * critical part is the inline stuff in <asm/semaphore.h>
+ * where we want to avoid any extra jumps and calls.
+ */
+
+/*
+ * Logic:
+ *  - only on a boundary condition do we need to care. When we go
+ *    from a negative count to a non-negative, we wake people up.
+ *  - when we go from a non-negative count to a negative one, we
+ *    (a) synchronize with the "sleepers" count and (b) make sure
+ *    that we're on the wakeup list before we synchronize so that
+ *    we cannot lose wakeup events.
+ */
+
+void __up(struct semaphore *sem)
+{
+	wake_up(&sem->wait);
+}
+
+static spinlock_t semaphore_lock = SPIN_LOCK_UNLOCKED;
+
+void __down(struct semaphore * sem)
+{
+	struct task_struct *tsk = current;
+	DECLARE_WAITQUEUE(wait, tsk);
+	tsk->state = TASK_UNINTERRUPTIBLE;
+	add_wait_queue(&sem->wait, &wait);
+
+	spin_lock_irq(&semaphore_lock);
+	sem->sleepers++;
+	for (;;) {
+		int sleepers = sem->sleepers;
+
+		/*
+		 * Add "everybody else" into it. They aren't
+		 * playing, because we own the spinlock.
+		 */
+		if (!atomic_add_negative(sleepers - 1, &sem->count)) {
+			sem->sleepers = 0;
+			wake_up(&sem->wait);
+			break;
+		}
+		sem->sleepers = 1;	/* us - see -1 above */
+		spin_unlock_irq(&semaphore_lock);
+
+		schedule();
+		tsk->state = TASK_UNINTERRUPTIBLE;
+		spin_lock_irq(&semaphore_lock);
+	}
+	spin_unlock_irq(&semaphore_lock);
+	remove_wait_queue(&sem->wait, &wait);
+	tsk->state = TASK_RUNNING;
+}
+
+int __down_interruptible(struct semaphore * sem)
+{
+	int retval;
+	struct task_struct *tsk = current;
+	DECLARE_WAITQUEUE(wait, tsk);
+	tsk->state = TASK_INTERRUPTIBLE;
+	add_wait_queue(&sem->wait, &wait);
+
+	spin_lock_irq(&semaphore_lock);
+	sem->sleepers++;
+	for (;;) {
+		int sleepers = sem->sleepers;
+
+		/*
+		 * With signals pending, this turns into
+		 * the trylock failure case - we won't be
+		 * sleeping, and we can't get the lock as
+		 * it has contention. Just correct the count
+		 * and exit.
+		 */
+		retval = -EINTR;
+		if (signal_pending(current)) {
+			sem->sleepers = 0;
+			if (atomic_add_negative(sleepers, &sem->count))
+				break;
+			wake_up(&sem->wait);
+			break;
+		}
+
+		/*
+		 * Add "everybody else" into it. They aren't
+		 * playing, because we own the spinlock. The
+		 * "-1" is because we're still hoping to get
+		 * the lock.
+		 */
+		if (!atomic_add_negative(sleepers - 1, &sem->count)) {
+			wake_up(&sem->wait);
+			retval = 0;
+			sem->sleepers = 0;
+			break;
+		}
+		sem->sleepers = 1;	/* us - see -1 above */
+		spin_unlock_irq(&semaphore_lock);
+
+		schedule();
+		tsk->state = TASK_INTERRUPTIBLE;
+		spin_lock_irq(&semaphore_lock);
+	}
+	spin_unlock_irq(&semaphore_lock);
+	tsk->state = TASK_RUNNING;
+	remove_wait_queue(&sem->wait, &wait);
+	return retval;
+}
+
+/*
+ * Trylock failed - make sure we correct for
+ * having decremented the count.
+ *
+ * We could have done the trylock with a
+ * single "cmpxchg" without failure cases,
+ * but then it wouldn't work on a 386.
+ */
+int __down_trylock(struct semaphore * sem)
+{
+	int retval, sleepers;
+
+	spin_lock_irq(&semaphore_lock);
+	sleepers = sem->sleepers + 1;
+	sem->sleepers = 0;
+
+	/*
+	 * Add "everybody else" and us into it. They aren't
+	 * playing, because we own the spinlock.
+	 */
+	if (!atomic_add_negative(sleepers, &sem->count))
+		wake_up(&sem->wait);
+
+	spin_unlock_irq(&semaphore_lock);
+	return 1;
+}
+
+
+/*
+ * The semaphore operations have a special calling sequence that
+ * allows us to do a simpler in-line version of them. These routines
+ * need to convert that sequence back into the C sequence when
+ * there is contention on the semaphore.
+ *
+ * %ecx contains the semaphore pointer on entry. Save the C-clobbered
+ * registers (%eax, %edx and %ecx), except %eax when it is used as a
+ * return value.
+ */
+asm(
+".align 4\n"
+".globl __down_failed\n"
+"__down_failed:\n\t"
+	"pushl %eax\n\t"
+	"pushl %edx\n\t"
+	"pushl %ecx\n\t"
+	"call __down\n\t"
+	"popl %ecx\n\t"
+	"popl %edx\n\t"
+	"popl %eax\n\t"
+	"ret"
+);
+
+asm(
+".align 4\n"
+".globl __down_failed_interruptible\n"
+"__down_failed_interruptible:\n\t"
+	"pushl %edx\n\t"
+	"pushl %ecx\n\t"
+	"call __down_interruptible\n\t"
+	"popl %ecx\n\t"
+	"popl %edx\n\t"
+	"ret"
+);
+
+asm(
+".align 4\n"
+".globl __down_failed_trylock\n"
+"__down_failed_trylock:\n\t"
+	"pushl %edx\n\t"
+	"pushl %ecx\n\t"
+	"call __down_trylock\n\t"
+	"popl %ecx\n\t"
+	"popl %edx\n\t"
+	"ret"
+);
+
+asm(
+".align 4\n"
+".globl __up_wakeup\n"
+"__up_wakeup:\n\t"
+	"pushl %eax\n\t"
+	"pushl %edx\n\t"
+	"pushl %ecx\n\t"
+	"call __up\n\t"
+	"popl %ecx\n\t"
+	"popl %edx\n\t"
+	"popl %eax\n\t"
+	"ret"
+);
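
The comments in the patch describe the count/sleepers bookkeeping abstractly. The sketch below is a minimal userspace model of the same two-way counter, for readers who want to experiment with it outside the kernel. It is an illustration under stated assumptions, not the kernel code: all names (model_sem, model_down, add_negative and so on) are made up for this sketch, a single pthread mutex stands in for both the atomic count operations and semaphore_lock, and a condition variable stands in for the wait queue. The trylock correction from __down_trylock() is modelled as well; the interruptible variant is omitted.

/*
 * Hypothetical userspace model of the two-way counter above.  A single
 * pthread mutex stands in for the atomic count operations and for
 * semaphore_lock; a condition variable stands in for the wait queue.
 * Illustrative only - this is not the kernel implementation.
 */
#include <pthread.h>
#include <stdio.h>

struct model_sem {
	int count;		/* models the atomic count */
	int sleepers;		/* models sem->sleepers */
	pthread_mutex_t lock;	/* models semaphore_lock */
	pthread_cond_t wait;	/* models the sem->wait queue */
};

#define MODEL_SEM_INIT(n) \
	{ (n), 0, PTHREAD_MUTEX_INITIALIZER, PTHREAD_COND_INITIALIZER }

/* models atomic_add_negative(): add i, report whether the result is negative */
static int add_negative(int i, int *v)
{
	*v += i;
	return *v < 0;
}

static void model_down(struct model_sem *sem)
{
	pthread_mutex_lock(&sem->lock);
	if (--sem->count >= 0)		/* inline fast path: we got it */
		goto out;

	sem->sleepers++;		/* slow path, mirroring __down() */
	for (;;) {
		int sleepers = sem->sleepers;

		/* fold the other sleepers' decrements back into count */
		if (!add_negative(sleepers - 1, &sem->count)) {
			sem->sleepers = 0;
			pthread_cond_broadcast(&sem->wait);
			break;
		}
		sem->sleepers = 1;	/* us - see -1 above */
		pthread_cond_wait(&sem->wait, &sem->lock);	/* schedule() */
	}
out:
	pthread_mutex_unlock(&sem->lock);
}

/* models the fast path plus __down_trylock(): 0 on success, 1 on failure */
static int model_down_trylock(struct model_sem *sem)
{
	int failed = 0;

	pthread_mutex_lock(&sem->lock);
	if (--sem->count < 0) {
		/* give our decrement back, folding the sleepers in as well */
		int sleepers = sem->sleepers + 1;
		sem->sleepers = 0;
		if (!add_negative(sleepers, &sem->count))
			pthread_cond_broadcast(&sem->wait);
		failed = 1;
	}
	pthread_mutex_unlock(&sem->lock);
	return failed;
}

static void model_up(struct model_sem *sem)
{
	pthread_mutex_lock(&sem->lock);
	if (++sem->count <= 0)			/* somebody is waiting */
		pthread_cond_broadcast(&sem->wait);	/* models __up() */
	pthread_mutex_unlock(&sem->lock);
}

/* demonstration: two threads taking turns on a count-of-one semaphore */
static struct model_sem demo = MODEL_SEM_INIT(1);

static void *contender(void *name)
{
	model_down(&demo);
	printf("%s holds the semaphore\n", (char *)name);
	model_up(&demo);
	return NULL;
}

int main(void)
{
	pthread_t a, b;
	pthread_create(&a, NULL, contender, "A");
	pthread_create(&b, NULL, contender, "B");
	pthread_join(a, NULL);
	pthread_join(b, NULL);
	printf("idle again: count %d, sleepers %d\n", demo.count, demo.sleepers);
	printf("trylock on the idle semaphore: %s\n",
	       model_down_trylock(&demo) ? "failed" : "ok");
	return 0;
}

Built with "cc -pthread", the demonstration at the end simply shows the two threads taking turns and the counters returning to their idle values; it is a toy harness, not a test of the kernel code.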
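
To make the "Logic:" comment concrete, here is the bookkeeping for a semaphore initialised with count = 1 and two blocked waiters. The holder's down() leaves count at 0. The first waiter drops it to -1 and sleeps with sleepers = 1 (its atomic_add_negative(sleepers - 1, ...) adds 0 and changes nothing). The second waiter drops count to -2, folds the first waiter's decrement back in (sleepers - 1 = 1, giving count = -1) and also sleeps with sleepers = 1. up() raises count to 0; since the count was negative, __up_wakeup runs and both waiters are woken. Whichever waiter takes semaphore_lock first adds sleepers - 1 = 0, sees the non-negative count and owns the semaphore; the other then finds sleepers = 0, adds -1, drives count back to -1 and goes back to sleep.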
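
A small point about the assembly stubs that is easy to miss: pushing %ecx last does double duty. It preserves the register, and it also leaves the semaphore pointer at the top of the stack when the C routine is called, which is where the plain C calling convention expects the first argument. At the moment __down() starts executing via __down_failed, the stack looks like this:

	  (%esp)   return address back into __down_failed
	 4(%esp)   saved %ecx = the struct semaphore pointer (the argument)
	 8(%esp)   saved %edx
	12(%esp)   saved %eax

The interruptible and trylock stubs lay things out the same way, minus the saved %eax, since %eax carries their return value back to the caller.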
