patch-2.3.12 linux/include/asm-i386/mmu_context.h


diff -u --recursive --new-file v2.3.11/linux/include/asm-i386/mmu_context.h linux/include/asm-i386/mmu_context.h
@@ -2,26 +2,29 @@
 #define __I386_MMU_CONTEXT_H
 
 #include <asm/desc.h>
+#include <asm/atomic.h>
 
 /*
- * get a new mmu context.. x86's don't know much about contexts,
- * but we have to reload the new LDT in exec().
- *
- * We implement lazy MMU context-switching on x86 to optimize context
- * switches done to/from kernel threads. Kernel threads 'inherit' the
- * previous MM, so Linux doesnt have to flush the TLB. In most cases
- * we switch back to the same process so we preserve the TLB cache.
- * This all means that kernel threads have about as much overhead as
- * a function call ...
- */
-#define get_mmu_context(next) do { } while (0)
-#define set_mmu_context(prev,next) do { next->thread.cr3 = prev->thread.cr3; } while(0)
-
-#define init_new_context(mm)	do { } while(0)
-/*
  * possibly do the LDT unload here?
  */
-#define destroy_context(mm)	do { } while(0)
-#define activate_context(x)	load_LDT((x)->mm)
+#define destroy_context(mm)		do { } while(0)
+#define init_new_context(tsk,mm)	do { } while (0)
+
+static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next, unsigned cpu)
+{
+
+	if (prev != next) {
+		/*
+		 * Re-load LDT if necessary
+		 */
+		if (prev->segments != next->segments)
+			load_LDT(next);
+
+		/* Re-load page tables */
+		asm volatile("movl %0,%%cr3": :"r" (__pa(next->pgd)));
+		clear_bit(cpu, &prev->cpu_vm_mask);
+	}
+	set_bit(cpu, &next->cpu_vm_mask);
+}
 
 #endif
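
What the new code does: switch_mm() collapses the old get_mmu_context()/set_mmu_context()/activate_context() macros into one inline. On a real address-space change (prev != next) it reloads the LDT only if the two mms actually have different segment tables, reloads %cr3 (which flushes the TLB on i386), and clears this CPU's bit in prev->cpu_vm_mask; in every case it sets the bit in next->cpu_vm_mask. The lazy case the removed comment described (a kernel thread inheriting the previous mm, so prev == next) therefore costs nothing but a set_bit(). The per-mm CPU mask lets the SMP TLB-flush paths direct cross-CPU flushes only at processors that may be caching entries for that mm.

Below is a minimal userspace sketch of the same bookkeeping, handy for tracing the mask transitions. The struct layout, bit helpers, and mock_switch_mm() are hypothetical stand-ins for illustration: they mirror the patch's control flow but are not the kernel's definitions, and the hardware reloads are reduced to printf()s.

#include <stdio.h>

/* Hypothetical userspace stand-ins for the kernel types and bit
 * helpers in this patch -- a mock for illustration, not kernel code. */
struct mm_struct {
	unsigned long cpu_vm_mask;  /* one bit per CPU possibly caching this mm's TLB entries */
	void *segments;             /* the mm's LDT, if it has one */
};

static void set_bit(int nr, unsigned long *addr)   { *addr |=  1UL << nr; }
static void clear_bit(int nr, unsigned long *addr) { *addr &= ~(1UL << nr); }

/* Mirrors the control flow of the new switch_mm(): reloads happen only
 * on a real mm change, the LDT is reloaded only when the segment
 * tables differ, and the CPU's mask bit always ends up set in the
 * incoming mm. */
static void mock_switch_mm(struct mm_struct *prev, struct mm_struct *next,
			   unsigned cpu)
{
	if (prev != next) {
		if (prev->segments != next->segments)
			printf("cpu%u: reload LDT\n", cpu);
		printf("cpu%u: reload %%cr3 (TLB flushed)\n", cpu);
		clear_bit(cpu, &prev->cpu_vm_mask);
	}
	set_bit(cpu, &next->cpu_vm_mask);
}

int main(void)
{
	int dummy_ldt;
	struct mm_struct a = { 1UL << 0, NULL };   /* cpu0 currently runs a */
	struct mm_struct b = { 0, &dummy_ldt };    /* b has its own LDT */

	mock_switch_mm(&a, &b, 0);  /* real switch: LDT + %cr3 reload, bit moves a -> b */
	mock_switch_mm(&b, &b, 0);  /* lazy switch: nothing reloaded */
	printf("a.cpu_vm_mask=%#lx b.cpu_vm_mask=%#lx\n",
	       a.cpu_vm_mask, b.cpu_vm_mask);
	return 0;
}

Run, the first call reports both reloads and moves the bit (a.cpu_vm_mask=0, b.cpu_vm_mask=0x1); the second call, the lazy prev == next case, touches only the mask.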
