patch-2.3.99-pre9 linux/include/asm-mips64/smplock.h

diff -u --recursive --new-file v2.3.99-pre8/linux/include/asm-mips64/smplock.h linux/include/asm-mips64/smplock.h
@@ -0,0 +1,56 @@
+/*
+ * <asm/smplock.h>
+ *
+ * Default SMP lock implementation
+ */
+#ifndef _ASM_SMPLOCK_H
+#define _ASM_SMPLOCK_H
+
+#include <linux/sched.h>
+#include <linux/interrupt.h>
+#include <linux/spinlock.h>
+
+extern spinlock_t kernel_flag;
+
+#define kernel_locked()			spin_is_locked(&kernel_flag)
+
+/*
+ * Release global kernel lock and global interrupt lock
+ */
+static __inline__ void release_kernel_lock(struct task_struct *task, int cpu)
+{
+	if (task->lock_depth >= 0)
+		spin_unlock(&kernel_flag);
+	release_irqlock(cpu);
+	__sti();
+}
+
+/*
+ * Re-acquire the kernel lock
+ */
+static __inline__ void reacquire_kernel_lock(struct task_struct *task)
+{
+	if (task->lock_depth >= 0)
+		spin_lock(&kernel_flag);
+}
+
+/*
+ * Getting the big kernel lock.
+ *
+ * This cannot happen asynchronously,
+ * so we only need to worry about other
+ * CPU's.
+ */
+static __inline__ void lock_kernel(void)
+{
+	if (!++current->lock_depth)
+		spin_lock(&kernel_flag);
+}
+
+static __inline__ void unlock_kernel(void)
+{
+	if (--current->lock_depth < 0)
+		spin_unlock(&kernel_flag);
+}
+
+#endif /* _ASM_SMPLOCK_H */
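
The new header implements the Big Kernel Lock recursion count for MIPS64: a task's lock_depth starts at -1, only the -1 -> 0 transition in lock_kernel() actually takes kernel_flag, and only the 0 -> -1 transition in unlock_kernel() drops it, so nested lock/unlock pairs on the same task are cheap counter updates. The following standalone sketch is not part of the patch; the mock_* names and the plain-C variables standing in for spin_lock(&kernel_flag) and current->lock_depth are illustrative assumptions, used only to model that counting outside the kernel.

/*
 * Illustrative sketch (not kernel code): models how lock_depth makes
 * lock_kernel()/unlock_kernel() safe to nest on one task.  lock_depth
 * starts at -1; only -1 -> 0 takes the lock, only 0 -> -1 releases it.
 */
#include <assert.h>
#include <stdio.h>

static int kernel_flag_held;	/* stands in for spin_lock(&kernel_flag) */
static int lock_depth = -1;	/* stands in for current->lock_depth */

static void mock_lock_kernel(void)
{
	if (!++lock_depth)	/* -1 -> 0: first acquisition by this task */
		kernel_flag_held = 1;
}

static void mock_unlock_kernel(void)
{
	if (--lock_depth < 0)	/* 0 -> -1: last release by this task */
		kernel_flag_held = 0;
}

int main(void)
{
	mock_lock_kernel();	/* outer caller takes the lock */
	mock_lock_kernel();	/* nested caller: only the counter moves */
	assert(kernel_flag_held && lock_depth == 1);

	mock_unlock_kernel();	/* inner release: lock still held */
	assert(kernel_flag_held && lock_depth == 0);

	mock_unlock_kernel();	/* outer release: lock actually dropped */
	assert(!kernel_flag_held && lock_depth == -1);

	printf("nested lock/unlock balanced correctly\n");
	return 0;
}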
