patch-2.4.21 linux-2.4.21/include/asm-ia64/mmu_context.h

diff -urN linux-2.4.20/include/asm-ia64/mmu_context.h linux-2.4.21/include/asm-ia64/mmu_context.h
@@ -44,26 +44,32 @@
 {
 }
 
-static inline void
-get_new_mmu_context (struct mm_struct *mm)
+static inline mm_context_t
+get_mmu_context (struct mm_struct *mm)
 {
+	mm_context_t context = mm->context;
+
+	if (context)
+		return context;
+
 	spin_lock(&ia64_ctx.lock);
 	{
-		if (ia64_ctx.next >= ia64_ctx.limit)
-			wrap_mmu_context(mm);
-		mm->context = ia64_ctx.next++;
+		/* re-check, now that we've got the lock: */
+		context = mm->context;
+		if (context == 0) {
+			if (ia64_ctx.next >= ia64_ctx.limit)
+				wrap_mmu_context(mm);
+			mm->context = context = ia64_ctx.next++;
+		}
 	}
 	spin_unlock(&ia64_ctx.lock);
-
-}
-
-static inline void
-get_mmu_context (struct mm_struct *mm)
-{
-	if (mm->context == 0)
-		get_new_mmu_context(mm);
+	return context;
 }
 
+/*
+ * Initialize context number to some sane value.  MM is guaranteed to be a brand-new
+ * address-space, so no TLB flushing is needed, ever.
+ */
 static inline int
 init_new_context (struct task_struct *p, struct mm_struct *mm)
 {
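The new get_mmu_context() is a plain double-checked allocation: read mm->context without the lock, and only fall back to ia64_ctx.lock (re-checking once the lock is held) when no context has been handed out yet. A rough user-space analogue of the same pattern follows; the names (get_context, ctx_lock, ctx_next, struct mm) are made up for illustration, and the wrap_mmu_context() limit handling is left out:

	#include <pthread.h>

	typedef unsigned long mm_context_t;

	/* made-up stand-ins for ia64_ctx.lock and ia64_ctx.next */
	static pthread_mutex_t ctx_lock = PTHREAD_MUTEX_INITIALIZER;
	static mm_context_t ctx_next = 1;	/* 0 means "no context allocated yet" */

	struct mm { mm_context_t context; };

	static mm_context_t
	get_context(struct mm *mm)
	{
		/* unlocked fast-path read, mirroring the patch; a strict C11 reading
		 * calls this a data race, the kernel relies on it being benign */
		mm_context_t context = mm->context;

		if (context)
			return context;

		pthread_mutex_lock(&ctx_lock);
		/* re-check, now that we've got the lock: another caller may have
		 * allocated a context while we were waiting */
		context = mm->context;
		if (context == 0)
			mm->context = context = ctx_next++;
		pthread_mutex_unlock(&ctx_lock);

		return context;
	}

The unlocked first read is the point of the rewrite: callers whose mm already has a context never touch the spinlock.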
@@ -78,13 +84,13 @@
 }
 
 static inline void
-reload_context (struct mm_struct *mm)
+reload_context (mm_context_t context)
 {
 	unsigned long rid;
 	unsigned long rid_incr = 0;
 	unsigned long rr0, rr1, rr2, rr3, rr4;
 
-	rid = mm->context << 3;	/* make space for encoding the region number */
+	rid = context << 3;	/* make space for encoding the region number */
 	rid_incr = 1 << 8;
 
 	/* encode the region id, preferred page size, and VHPT enable bit: */
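The "context << 3" shift and "rid_incr = 1 << 8" only make sense against the ia64 region-register layout, which this hunk does not show: the rest of reload_context() builds rr0..rr4 from these two values. A sketch of that encoding, assuming the usual rr format (VHPT enable in bit 0, preferred page size in bits 2-7, region ID from bit 8 up); encode_rr() and its parameters are invented for illustration:

	/* illustrative only: how region 'region' of an address space with the
	 * given context number would be encoded into a region register */
	static unsigned long
	encode_rr(unsigned long context, unsigned int region, unsigned int page_shift)
	{
		unsigned long rid      = context << 3;	/* low 3 bits of the RID pick the region */
		unsigned long rid_incr = 1UL << 8;	/* +1 in the RID field of the register */

		/* RID from bit 8, preferred page size in bits 2-7, VHPT enable in bit 0 */
		return ((rid << 8) | (page_shift << 2) | 1) + region * rid_incr;
	}

Net effect: the address space with context C uses region ID 8*C + n for region n, which is why the context is shifted left by three to leave room for the three-bit region number.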
@@ -103,6 +109,18 @@
 	ia64_insn_group_barrier();
 }
 
+static inline void
+activate_context (struct mm_struct *mm)
+{
+	mm_context_t context;
+
+	do {
+		context = get_mmu_context(mm);
+		reload_context(context);
+		/* in the unlikely event of a TLB-flush by another thread, redo the load: */
+	} while (unlikely(context != mm->context));
+}
+
 /*
  * Switch from address space PREV to address space NEXT.
  */
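activate_context() closes a small race with wrap_mmu_context() running on another CPU: after programming the hardware it re-reads mm->context and redoes the load if the context was reassigned underneath it. Continuing the user-space sketch from above (reusing struct mm, mm_context_t and get_context(); reload_hw_context() and activate() are invented stand-ins for reload_context() and activate_context()):

	static mm_context_t current_hw_context;	/* stands in for the ia64 region registers */

	static void
	reload_hw_context(mm_context_t context)
	{
		/* the real reload_context() programs rr0..rr4 here */
		current_hw_context = context;
	}

	static void
	activate(struct mm *mm)
	{
		mm_context_t context;

		do {
			context = get_context(mm);
			reload_hw_context(context);
			/* if a concurrent flush gave mm a new context meanwhile, load that one: */
		} while (context != mm->context);
	}

The loop almost always runs once; the retry only fires if another thread flushed and reallocated the context between get_context() and the final check.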
@@ -110,12 +128,11 @@
 activate_mm (struct mm_struct *prev, struct mm_struct *next)
 {
 	/*
-	 * We may get interrupts here, but that's OK because interrupt
-	 * handlers cannot touch user-space.
+	 * We may get interrupts here, but that's OK because interrupt handlers cannot
+	 * touch user-space.
 	 */
 	ia64_set_kr(IA64_KR_PT_BASE, __pa(next->pgd));
-	get_mmu_context(next);
-	reload_context(next);
+	activate_context(next);
 }
 
 #define switch_mm(prev_mm,next_mm,next_task,cpu)	activate_mm(prev_mm, next_mm)
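For what it's worth, the user-space sketches above can be pasted into one file (C99, link with -pthread) and exercised with a small driver like this; the thread and iteration counts are arbitrary:

	#include <stdio.h>

	static struct mm test_mm;		/* one shared address space for all threads */

	static void *
	worker(void *arg)
	{
		(void)arg;
		for (int i = 0; i < 1000; i++)
			activate(&test_mm);	/* first caller allocates, the rest take the fast path */
		return NULL;
	}

	int
	main(void)
	{
		pthread_t t[4];

		for (int i = 0; i < 4; i++)
			pthread_create(&t[i], NULL, worker, NULL);
		for (int i = 0; i < 4; i++)
			pthread_join(t[i], NULL);

		printf("context = %lu\n", (unsigned long)test_mm.context);
		return 0;
	}

However the threads race, the context is allocated exactly once, so the program should always print "context = 1".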
