patch-2.4.1 linux/arch/i386/kernel/i387.c

diff -u --recursive --new-file v2.4.0/linux/arch/i386/kernel/i387.c linux/arch/i386/kernel/i387.c
@@ -18,14 +18,6 @@
 #include <asm/ptrace.h>
 #include <asm/uaccess.h>
 
-#if defined(CONFIG_X86_FXSR)
-#define HAVE_FXSR 1
-#elif defined(CONFIG_X86_RUNTIME_FXSR)
-#define HAVE_FXSR (cpu_has_fxsr)
-#else
-#define HAVE_FXSR 0
-#endif
-
 #ifdef CONFIG_MATH_EMULATION
 #define HAVE_HWFP (boot_cpu_data.hard_math)
 #else
@@ -35,13 +27,13 @@
 /*
  * The _current_ task is using the FPU for the first time
  * so initialize it and set the mxcsr to its default
- * value at reset if we support FXSR and then
+ * value at reset if we support XMM instructions and then
  * remember the current task has used the FPU.
  */
 void init_fpu(void)
 {
 	__asm__("fninit");
-	if ( HAVE_FXSR )
+	if ( cpu_has_xmm )
 		load_mxcsr(0x1f80);
 		
 	current->used_math = 1;
@@ -51,9 +43,9 @@
  * FPU lazy state save handling.
  */
 
-void save_init_fpu( struct task_struct *tsk )
+static inline void __save_init_fpu( struct task_struct *tsk )
 {
-	if ( HAVE_FXSR ) {
+	if ( cpu_has_fxsr ) {
 		asm volatile( "fxsave %0 ; fnclex"
 			      : "=m" (tsk->thread.i387.fxsave) );
 	} else {
@@ -61,12 +53,28 @@
 			      : "=m" (tsk->thread.i387.fsave) );
 	}
 	tsk->flags &= ~PF_USEDFPU;
+}
+
+void save_init_fpu( struct task_struct *tsk )
+{
+	__save_init_fpu(tsk);
 	stts();
 }
 
+void kernel_fpu_begin(void)
+{
+	struct task_struct *tsk = current;
+
+	if (tsk->flags & PF_USEDFPU) {
+		__save_init_fpu(tsk);
+		return;
+	}
+	clts();
+}
+
 void restore_fpu( struct task_struct *tsk )
 {
-	if ( HAVE_FXSR ) {
+	if ( cpu_has_fxsr ) {
 		asm volatile( "fxrstor %0"
 			      : : "m" (tsk->thread.i387.fxsave) );
 	} else {
@@ -144,7 +152,7 @@
 
 unsigned short get_fpu_cwd( struct task_struct *tsk )
 {
-	if ( HAVE_FXSR ) {
+	if ( cpu_has_fxsr ) {
 		return tsk->thread.i387.fxsave.cwd;
 	} else {
 		return (unsigned short)tsk->thread.i387.fsave.cwd;
@@ -153,7 +161,7 @@
 
 unsigned short get_fpu_swd( struct task_struct *tsk )
 {
-	if ( HAVE_FXSR ) {
+	if ( cpu_has_fxsr ) {
 		return tsk->thread.i387.fxsave.swd;
 	} else {
 		return (unsigned short)tsk->thread.i387.fsave.swd;
@@ -162,7 +170,7 @@
 
 unsigned short get_fpu_twd( struct task_struct *tsk )
 {
-	if ( HAVE_FXSR ) {
+	if ( cpu_has_fxsr ) {
 		return tsk->thread.i387.fxsave.twd;
 	} else {
 		return (unsigned short)tsk->thread.i387.fsave.twd;
@@ -171,7 +179,7 @@
 
 unsigned short get_fpu_mxcsr( struct task_struct *tsk )
 {
-	if ( HAVE_FXSR ) {
+	if ( cpu_has_fxsr ) {
 		return tsk->thread.i387.fxsave.mxcsr;
 	} else {
 		return 0x1f80;
@@ -180,7 +188,7 @@
 
 void set_fpu_cwd( struct task_struct *tsk, unsigned short cwd )
 {
-	if ( HAVE_FXSR ) {
+	if ( cpu_has_fxsr ) {
 		tsk->thread.i387.fxsave.cwd = cwd;
 	} else {
 		tsk->thread.i387.fsave.cwd = ((long)cwd | 0xffff0000);
@@ -189,7 +197,7 @@
 
 void set_fpu_swd( struct task_struct *tsk, unsigned short swd )
 {
-	if ( HAVE_FXSR ) {
+	if ( cpu_has_fxsr ) {
 		tsk->thread.i387.fxsave.swd = swd;
 	} else {
 		tsk->thread.i387.fsave.swd = ((long)swd | 0xffff0000);
@@ -198,7 +206,7 @@
 
 void set_fpu_twd( struct task_struct *tsk, unsigned short twd )
 {
-	if ( HAVE_FXSR ) {
+	if ( cpu_has_fxsr ) {
 		tsk->thread.i387.fxsave.twd = twd_i387_to_fxsr(twd);
 	} else {
 		tsk->thread.i387.fsave.twd = ((long)twd | 0xffff0000);
@@ -207,7 +215,7 @@
 
 void set_fpu_mxcsr( struct task_struct *tsk, unsigned short mxcsr )
 {
-	if ( HAVE_FXSR ) {
+	if ( cpu_has_xmm ) {
 		tsk->thread.i387.fxsave.mxcsr = mxcsr;
 	}
 }
@@ -321,7 +329,7 @@
 	current->used_math = 0;
 
 	if ( HAVE_HWFP ) {
-		if ( HAVE_FXSR ) {
+		if ( cpu_has_fxsr ) {
 			return save_i387_fxsave( buf );
 		} else {
 			return save_i387_fsave( buf );
@@ -354,7 +362,7 @@
 	int err;
 
 	if ( HAVE_HWFP ) {
-		if ( HAVE_FXSR ) {
+		if ( cpu_has_fxsr ) {
 			err =  restore_i387_fxsave( buf );
 		} else {
 			err = restore_i387_fsave( buf );
@@ -387,7 +395,7 @@
 int get_fpregs( struct user_i387_struct *buf, struct task_struct *tsk )
 {
 	if ( HAVE_HWFP ) {
-		if ( HAVE_FXSR ) {
+		if ( cpu_has_fxsr ) {
 			return get_fpregs_fxsave( buf, tsk );
 		} else {
 			return get_fpregs_fsave( buf, tsk );
@@ -415,7 +423,7 @@
 int set_fpregs( struct task_struct *tsk, struct user_i387_struct *buf )
 {
 	if ( HAVE_HWFP ) {
-		if ( HAVE_FXSR ) {
+		if ( cpu_has_fxsr ) {
 			return set_fpregs_fxsave( tsk, buf );
 		} else {
 			return set_fpregs_fsave( tsk, buf );
@@ -428,9 +436,10 @@
 
 int get_fpxregs( struct user_fxsr_struct *buf, struct task_struct *tsk )
 {
-	if ( HAVE_FXSR ) {
-		__copy_to_user( (void *)buf, &tsk->thread.i387.fxsave,
-				sizeof(struct user_fxsr_struct) );
+	if ( cpu_has_fxsr ) {
+		if (__copy_to_user( (void *)buf, &tsk->thread.i387.fxsave,
+				    sizeof(struct user_fxsr_struct) ))
+			return -EFAULT;
 		return 0;
 	} else {
 		return -EIO;
@@ -439,7 +448,7 @@
 
 int set_fpxregs( struct task_struct *tsk, struct user_fxsr_struct *buf )
 {
-	if ( HAVE_FXSR ) {
+	if ( cpu_has_fxsr ) {
 		__copy_from_user( &tsk->thread.i387.fxsave, (void *)buf,
 				  sizeof(struct user_fxsr_struct) );
 		/* mxcsr bit 6 and 31-16 must be zero for security reasons */
@@ -485,7 +494,7 @@
 	fpvalid = tsk->used_math;
 	if ( fpvalid ) {
 		unlazy_fpu( tsk );
-		if ( HAVE_FXSR ) {
+		if ( cpu_has_fxsr ) {
 			copy_fpu_fxsave( tsk, fpu );
 		} else {
 			copy_fpu_fsave( tsk, fpu );
@@ -500,7 +509,7 @@
 	int fpvalid;
 	struct task_struct *tsk = current;
 
-	fpvalid = tsk->used_math && HAVE_FXSR;
+	fpvalid = tsk->used_math && cpu_has_fxsr;
 	if ( fpvalid ) {
 		unlazy_fpu( tsk );
 		memcpy( fpu, &tsk->thread.i387.fxsave,

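The notable addition in this patch is kernel_fpu_begin(): it lets kernel code use the FPU/MMX/SSE registers by saving the current task's lazy FPU state (when PF_USEDFPU is set) or otherwise clearing TS with clts(). Below is a minimal sketch of how a caller might use it, assuming the matching kernel_fpu_end() (which restores TS with stts()) is provided by <asm/i387.h>; the fast_clear_page() routine and its MMX loop are purely illustrative, loosely in the style of arch/i386/lib/mmx.c, and are not part of this patch.

	/*
	 * Illustrative sketch only: clear one 4096-byte page with MMX stores
	 * inside a kernel_fpu_begin()/kernel_fpu_end() pair.  kernel_fpu_end()
	 * is assumed to come from <asm/i387.h>; fast_clear_page() is a made-up
	 * name for this example.
	 */
	#include <asm/i387.h>

	static void fast_clear_page(void *page)
	{
		char *p = page;
		int i;

		kernel_fpu_begin();	/* save the user's FPU state or clear TS */

		__asm__ __volatile__ ("pxor %%mm0, %%mm0" : : );

		for (i = 0; i < 4096 / 64; i++) {
			__asm__ __volatile__ (
				"movq %%mm0, (%0)\n\t"
				"movq %%mm0, 8(%0)\n\t"
				"movq %%mm0, 16(%0)\n\t"
				"movq %%mm0, 24(%0)\n\t"
				"movq %%mm0, 32(%0)\n\t"
				"movq %%mm0, 40(%0)\n\t"
				"movq %%mm0, 48(%0)\n\t"
				"movq %%mm0, 56(%0)"
				: : "r" (p) : "memory");
			p += 64;
		}

		kernel_fpu_end();	/* set TS again; next user FPU use re-faults state in */
	}

Note that kernel_fpu_begin() deliberately does not call stts() after saving the task's state, so the FPU stays usable by the kernel until the matching kernel_fpu_end().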