patch-2.3.48 linux/arch/ia64/lib/copy_user.S

diff -u --recursive --new-file v2.3.47/linux/arch/ia64/lib/copy_user.S linux/arch/ia64/lib/copy_user.S
@@ -1,71 +1,375 @@
-/*
- * This routine copies a linear memory buffer across the user/kernel boundary.  When
- * reading a byte from the source causes a fault, the remainder of the destination
- * buffer is zeroed out.  Note that this can happen only when copying from user
- * to kernel memory and we do this to absolutely guarantee that the
- * kernel doesn't operate on random data.
- *
- * This file is derived from arch/alpha/lib/copy_user.S.
- *
- * Inputs:
- *	in0:	address of destination buffer
- *	in1:	address of source buffer
- *	in2:	length of buffer in bytes
- * Outputs:
- *	r8:	number of bytes that didn't get copied due to a fault
- * 
- * Copyright (C) 1999 Hewlett-Packard Co
- * Copyright (C) 1998, 1999 David Mosberger-Tang <davidm@hpl.hp.com>
- */
-
-#define EXI(x...)				\
-99:	x;					\
+// The recovery label comes first in EX() because our store instruction
+// contains a comma and would otherwise confuse the preprocessor
+//
+#undef DEBUG
+#ifdef DEBUG
+#define EX(y,x...)				\
+99:	x
+#else
+#define EX(y,x...)				\
 	.section __ex_table,"a";		\
-	data4 @gprel(99b);			\
-	data4 .Lexit_in-99b;			\
-	.previous
+	data4 @gprel(99f);			\
+	data4 y-99f;				\
+	.previous;				\
+99:	x
+#endif
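+
+// In rough C terms, each EX() emits one entry like the following into
+// __ex_table (a sketch only; struct and field names are illustrative,
+// not taken from the kernel sources):
+//
+//	struct ex_table_entry {
+//		int addr;	/* data4 @gprel(99f): gp-relative address
+//				   of the instruction that may fault */
+//		int fixup;	/* data4 y-99f: signed offset from that
+//				   instruction to its recovery label y */
+//	};
+//
+// so on a fault the handler can resume at addr + fixup, i.e. at the
+// recovery label y passed as the first macro argument.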
 
-#define EXO(x...)				\
-99:	x;					\
-	.section __ex_table,"a";		\
-	data4 @gprel(99b);			\
-	data4 .Lexit_out-99b;			\
-	.previous
-
-	.text
-	.psr abi64
-	.psr lsb
-	.lsb
-
-	.align 32
-	.global __copy_user
-	.proc __copy_user
+//
+// Tuneable parameters
+//
+#define COPY_BREAK	16	// below this length we do byte copy (must be >=16)
+#define PIPE_DEPTH	4	// pipe depth
+
+#define EPI		p[PIPE_DEPTH-1] // last-stage predicate, i.e. PASTE(p,16+PIPE_DEPTH-1)
+
+//
+// arguments
+//
+#define dst		in0
+#define src		in1
+#define len		in2
+
+//
+// local registers
+//
+#define cnt		r18
+#define len2		r19
+#define saved_lc	r20
+#define saved_pr	r21
+#define tmp		r22
+#define val		r23
+#define src1		r24
+#define dst1		r25
+#define src2		r26
+#define dst2		r27
+#define len1		r28
+#define enddst		r29
+#define endsrc		r30
+#define saved_pfs	r31
+ 	.text
+ 	.psr	abi64
+ 	.psr	lsb
+
+ 	.align	16
+ 	.global	__copy_user
+ 	.proc	__copy_user
 __copy_user:
-	alloc r10=ar.pfs,3,0,0,0
-	mov r9=ar.lc		// save ar.lc
-	mov ar.lc=in2		// set ar.lc to length of buffer
-	br.sptk.few .Lentr
-
-	// XXX braindead copy loop---this needs to be optimized
-.Loop1:
-	EXI(ld1 r8=[in1],1)
-	;;
-	EXO(st1 [in0]=r8,1)
-.Lentr:	br.cloop.dptk.few .Loop1	// repeat unless ar.lc--==0
-	;;			// avoid RAW on ar.lc
-.Lexit_out:
-	mov r8=ar.lc		// return how many bytes we _didn't_ copy
-	mov ar.lc=r9
-	br.ret.sptk.few rp
-
-.Lexit_in:
-	// clear the remainder of the buffer:
-	mov r8=ar.lc	// return how many bytes we _didn't_ copy
-.Loop2:
-	st1 [in0]=r0,1	// this cannot fault because we get here only on user->kernel copies
-	br.cloop.dptk.few .Loop2
-	;;				// avoid RAW on ar.lc
-	mov ar.lc=r9
-	br.ret.sptk.few rp
+	alloc saved_pfs=ar.pfs,3,((2*PIPE_DEPTH+7)&~7),0,((2*PIPE_DEPTH+7)&~7)
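+	// 3 inputs, 0 outputs; the 2*PIPE_DEPTH rotating locals (val1[],
+	// val2[] below) are rounded up to a multiple of 8, as register
+	// rotation requires.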
+
+	.rotr val1[PIPE_DEPTH],val2[PIPE_DEPTH]
+	.rotp p[PIPE_DEPTH]
+
+	adds len2=-1,len	// br.ctop is repeat/until
+	mov ret0=r0
+
+	;;			// RAW of cfm when len=0
+	cmp.eq p8,p0=r0,len	// check for zero length
+	mov saved_lc=ar.lc	// preserve ar.lc (slow)
+(p8)	br.ret.spnt.few rp	// empty memcpy()
+	;;
+	add enddst=dst,len	// first byte after end of destination
+	add endsrc=src,len	// first byte after end of source
+	mov saved_pr=pr		// preserve predicates
+
+	mov dst1=dst		// copy because of rotation
+	mov ar.ec=PIPE_DEPTH
+	mov pr.rot=1<<16	// p16=true all others are false
+
+	mov src1=src		// copy because of rotation
+	mov ar.lc=len2		// initialize lc for small count
+	cmp.lt p10,p7=COPY_BREAK,len	// if len > COPY_BREAK then long copy 
+
+	xor tmp=src,dst		// same alignment test prepare
+(p10)	br.cond.dptk.few long_memcpy
+	;;			// RAW pr.rot/p16 ?
+	//
+	// Now we do the byte by byte loop with software pipeline
+	//
+	// p7 is necessarily false by now
+1:				
+	EX(failure_in_pipe1,(p16) ld1 val1[0]=[src1],1)
+
+	EX(failure_out,(EPI) st1 [dst1]=val1[PIPE_DEPTH-1],1)
+	br.ctop.dptk.few 1b
+	;;
+	mov ar.lc=saved_lc
+	mov pr=saved_pr,0xffffffffffff0000
+	mov ar.pfs=saved_pfs		// restore ar.ec
+	br.ret.sptk.few rp	// end of short memcpy
+
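+	// In C terms the software-pipelined loop above behaves roughly like
+	// this (a sketch only, with PIPE_DEPTH=4; val[] plays the rotating
+	// val1[] registers):
+	//
+	//	for (i = 0; i < len + PIPE_DEPTH-1; i++) {
+	//		if (i < len)			/* (p16) ld1, first stage */
+	//			val[i % PIPE_DEPTH] = src[i];
+	//		if (i >= PIPE_DEPTH-1)		/* (EPI) st1, last stage */
+	//			dst[i-(PIPE_DEPTH-1)] =
+	//				val[(i-(PIPE_DEPTH-1)) % PIPE_DEPTH];
+	//	}
+	//
+	// i.e. br.ctop runs len + PIPE_DEPTH-1 times: len loads plus
+	// PIPE_DEPTH-1 drain iterations for the stores still in flight.
+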
+	//
+	// Beginning of long memcpy (i.e. > 16 bytes)
+	//
+long_memcpy:
+	tbit.nz p6,p7=src1,0	// odd alignment
+	and tmp=7,tmp
+	;;
+	cmp.eq p10,p8=r0,tmp
+	mov len1=len		// copy because of rotation
+(p8)	br.cond.dpnt.few 1b	// XXX Fixme. memcpy_diff_align 
+	;;
+	// At this point we know we have more than 16 bytes to copy
+	// and also that both src and dest have the same alignment,
+	// which may not be the one we want. So for now we must move
+	// forward slowly until we reach 16-byte alignment: no need to
+	// worry about reaching the end of the buffer.
+	//
+	EX(failure_in1,(p6) ld1 val1[0]=[src1],1)	// 1-byte aligned
+(p6)	adds len1=-1,len1;;
+	tbit.nz p7,p0=src1,1
+	;;
+	EX(failure_in1,(p7) ld2 val1[1]=[src1],2)	// 2-byte aligned
+(p7)	adds len1=-2,len1;;
+	tbit.nz p8,p0=src1,2
+	;;
+	//
+	// Stop bit not required after ld4 because if we fail on ld4
+	// we have never executed the ld1, therefore st1 is not executed.
+	//
+	EX(failure_in1,(p8) ld4 val2[0]=[src1],4)	// 4-byte aligned
+	EX(failure_out,(p6) st1 [dst1]=val1[0],1)
+	tbit.nz p9,p0=src1,3
+	;;
+	//
+	// Stop bit not required after ld8 because if we fail on ld8
+	// we have never executed the ld2, therefore st2 is not executed.
+	//
+	EX(failure_in1,(p9) ld8 val2[1]=[src1],8)	// 8-byte aligned
+	EX(failure_out,(p7) st2 [dst1]=val1[1],2)
+(p8)	adds len1=-4,len1
+	;;
+	EX(failure_out, (p8) st4 [dst1]=val2[0],4)
+(p9)	adds len1=-8,len1;;
+	shr.u cnt=len1,4		// number of 128-bit (2x64bit) words
+	;;
+	EX(failure_out, (p9) st8 [dst1]=val2[1],8)
+	tbit.nz p6,p0=len1,3	
+	cmp.eq p7,p0=r0,cnt
+	adds tmp=-1,cnt			// br.ctop is repeat/until
+(p7)	br.cond.dpnt.few .dotail	// we have less than 16 bytes left
+	;;
+	adds src2=8,src1	
+	adds dst2=8,dst1
+	mov ar.lc=tmp
+	;;
+	//
+	// 16bytes/iteration
+	//
+2:
+	EX(failure_in3,(p16) ld8 val1[0]=[src1],16)
+(p16)	ld8 val2[0]=[src2],16
+
+	EX(failure_out, (EPI)	st8 [dst1]=val1[PIPE_DEPTH-1],16)
+(EPI)	st8 [dst2]=val2[PIPE_DEPTH-1],16
+	br.ctop.dptk.few 2b
+	;;			// RAW on src1 when fall through from loop
+	//
+	// Tail correction based on len only
+	//
+	// No matter where we come from (loop or test) the src1 pointer
+	// is 16 byte aligned AND we have less than 16 bytes to copy.
+	//
+.dotail:			
+	EX(failure_in1,(p6) ld8 val1[0]=[src1],8)	// at least 8 bytes
+	tbit.nz p7,p0=len1,2
+	;;
+	EX(failure_in1,(p7) ld4 val1[1]=[src1],4)	// at least 4 bytes
+ 	tbit.nz p8,p0=len1,1
+	;;
+	EX(failure_in1,(p8) ld2 val2[0]=[src1],2)	// at least 2 bytes
+	tbit.nz p9,p0=len1,0
+	;;
+	EX(failure_out, (p6) st8 [dst1]=val1[0],8)
+	;;
+	EX(failure_in1,(p9) ld1 val2[1]=[src1])		// only 1 byte left
+	mov ar.lc=saved_lc
+	;;
+	EX(failure_out,(p7) st4 [dst1]=val1[1],4)
+	mov pr=saved_pr,0xffffffffffff0000
+	;;
+	EX(failure_out, (p8)	st2 [dst1]=val2[0],2)
+	mov ar.pfs=saved_pfs
+	;;
+	EX(failure_out, (p9)	st1 [dst1]=val2[1])
+	br.ret.dptk.few rp
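+
+	// Shape of the whole long-copy path in rough C (a sketch; copyN()
+	// stands for a hypothetical N-byte load/store pair that advances
+	// both pointers and, in the head, subtracts N from len1):
+	//
+	//	if (src1 & 1) copy1();		/* head: ld1/st1 */
+	//	if (src1 & 2) copy2();		/*       ld2/st2 */
+	//	if (src1 & 4) copy4();		/*       ld4/st4 */
+	//	if (src1 & 8) copy8();		/*       ld8/st8 */
+	//	for (cnt = len1 >> 4; cnt; cnt--)
+	//		copy16();		/* 2x ld8/st8, pipelined */
+	//	if (len1 & 8) copy8();		/* .dotail */
+	//	if (len1 & 4) copy4();
+	//	if (len1 & 2) copy2();
+	//	if (len1 & 1) copy1();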
+
+
+
+	//
+	// Here we handle the case where the byte by byte copy fails
+	// on the load.
+	// Several factors make the zeroing of the rest of the buffer kind of
+	// tricky:
+	//	- the pipeline: loads and stores are not in sync
+	//
+	//	  In the same loop iteration, the dst1 pointer does not directly
+	//	  reflect where the faulty load was.
+	//	  
+	//	- pipeline effect
+	//	  When you get a fault on a load, you may have valid data from
+	//	  previous loads still in transit, not yet stored. Such data must
+	//	  be stored normally before moving on to zeroing the rest.
+	//
+	//	- single/multi dispersal independence.
+	//
+	// solution:
+	//	- we don't disrupt the pipeline, i.e. data in transit in
+	//	  the software pipeline will eventually be moved to memory.
+	//	  We simply replace the load with a simple mov and keep the
+	//	  pipeline going. We can't really do this inline because 
+	//	  p16 is always reset to 1 when lc > 0.
+	//
+failure_in_pipe1:
+	sub ret0=endsrc,src1	// number of bytes to zero, i.e. not copied
+1:
+(p16)	mov val1[0]=r0
+(EPI)	st1 [dst1]=val1[PIPE_DEPTH-1],1
+	br.ctop.dptk.few 1b
+	;;
+	mov pr=saved_pr,0xffffffffffff0000
+	mov ar.lc=saved_lc
+	mov ar.pfs=saved_pfs
+	br.ret.dptk.few rp
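+
+	// In rough C (a sketch): the counted loop resumes from the faulting
+	// iteration with the load stage turned into a zero fill, so values
+	// already in flight still get stored before the zeroes follow:
+	//
+	//	ret0 = endsrc - src1;		/* bytes not copied */
+	//	for (; i < len + PIPE_DEPTH-1; i++) {
+	//		if (i < len)			/* (p16) mov val1[0]=r0 */
+	//			val[i % PIPE_DEPTH] = 0;
+	//		if (i >= PIPE_DEPTH-1)		/* (EPI) st1, unchanged */
+	//			dst[i-(PIPE_DEPTH-1)] =
+	//				val[(i-(PIPE_DEPTH-1)) % PIPE_DEPTH];
+	//	}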
+
+
+	//
+	// Here we handle the head & tail part when we check for alignment.
+	// The following code handles only the load failures. The
+	// main difficulty comes from the fact that loads/stores are
+	// scheduled. So when you fail on a load, the stores corresponding
+	// to previous successful loads must be executed.
+	//
+	// However some simplifications are possible given the way
+	// things work.
+	// 
+	// 1) HEAD
+	// Theory of operation:
+	//
+	//  Page A   | Page B
+	//  ---------|-----
+	//          1|8 x
+	//	  1 2|8 x
+	//	    4|8 x
+	//	  1 4|8 x
+	//        2 4|8 x
+	//      1 2 4|8 x
+	//	     |1
+	//	     |2 x
+	//	     |4 x
+	//
+	// page_size >= 4k (2^12).  (x means 4, 2, 1)
+	// Here we suppose Page A exists and Page B does not.
+	//
+	// As we move towards eight byte alignment we may encounter faults.
+	// The numbers on each page show the size of the load (current alignment).
+	//
+	// Key point:
+	//	- if you fail on 1, 2, 4 then you have never executed any smaller
+	//	  size loads, e.g. failing ld4 means no ld1 nor ld2 executed 
+	//	  before.
+	//
+	// This allows us to simplify the cleanup code, because basically you
+	// only have to worry about "pending" stores in the case of a failing
+	// ld8(). Given the way the code is written today, this means we only
+	// need to worry about st2 and st4. There we can use the information
+	// encapsulated in the predicates.
+	// 
+	// Other key point:
+	// 	- if you fail on the ld8 in the head, it means you went straight
+	//	  to it, i.e. you were 8-byte aligned within a nonexistent page.
+	// Again this comes from the fact that if you crossed just for the ld8 then
+	// you are not only 8-byte but also 16-byte aligned, therefore you would
+	// either go for the 16-byte copy loop OR the ld8 in the tail part.
+	// The combination ld1, ld2, ld4, ld8 where you fail on the ld8 is impossible
+	// because it would mean you had 15 bytes to copy, in which case you
+	// would have defaulted to the byte by byte copy.
+	//
+	//
+	// 2) TAIL
+	// Here we know we have less than 16 bytes AND we are either 8- or 16-byte
+	// aligned.
+	//
+	// Key point:
+	// This means that we either:
+	//		- are right on a page boundary
+	//	OR 
+	//		- are at more than 16 bytes from a page boundary with 
+	//		  at most 15 bytes to copy: no chance of crossing.
+	//
+	// This allows us to assume that if we fail on a load we cannot possibly
+	// have executed any of the previous (tail) ones, so we don't need to do
+	// any stores. For instance, if we fail on ld2, this means we had
+	// 2 or 3 bytes left to copy and we did not execute the ld8 nor the ld4.
+	//
+	// This means that we are in a situation similar to a fault in the
+	// head part. That's nice!
+	// 
+failure_in1:
+//	sub ret0=enddst,dst1	// number of bytes to zero, i.e. not copied
+//	sub len=enddst,dst1,1
+	sub ret0=endsrc,src1	// number of bytes to zero, i.e. not copied
+	sub len=endsrc,src1,1
+	//
+	// we know that ret0 can never be zero at this point
+	// because we failed while trying to do a load, i.e. there is still
+	// some work to do.
+	// The failure_in1bis and length problem is taken care of at the
+	// calling side.
+	//
+	;;
+failure_in1bis:			// from (failure_in3)
+	mov ar.lc=len		// Continue with a stupid byte store.
+	;;
+5:
+	st1 [dst1]=r0,1
+	br.cloop.dptk.few 5b	
+	;;
+skip_loop:
+	mov pr=saved_pr,0xffffffffffff0000
+	mov ar.lc=saved_lc
+	mov ar.pfs=saved_pfs
+	br.ret.dptk.few rp
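+
+	// For the failure_in1 path this amounts to, in rough C (a sketch):
+	//
+	//	ret0 = endsrc - src1;	/* bytes not copied, never 0 here */
+	//	for (n = ret0; n; n--)	/* ar.lc = ret0 - 1 (repeat/until) */
+	//		*dst1++ = 0;	/* the st1 [dst1]=r0,1 loop */
+	//	return ret0;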
+
+	//
+	// Here we simply restart the loop but instead
+	// of doing loads we fill the pipeline with zeroes
+	// We can't simply store r0 because we may have valid 
+	// data in transit in the pipeline.
+	// ar.lc and ar.ec are setup correctly at this point
+	//
+	// we MUST use src1/endsrc here and not dst1/enddst because
+	// of the pipeline effect.
+	//
+failure_in3:
+	sub ret0=endsrc,src1	// number of bytes to zero, i.e. not copied
+	;;
+2:
+(p16)	mov val1[0]=r0
+(p16)	mov val2[0]=r0
+(EPI)	st8 [dst1]=val1[PIPE_DEPTH-1],16
+(EPI)	st8 [dst2]=val2[PIPE_DEPTH-1],16
+	br.ctop.dptk.few 2b
+	;;
+	cmp.ne p6,p0=dst1,enddst	// Do we need to finish the tail?
+	sub len=enddst,dst1,1		// precompute len
+(p6)	br.cond.dptk.few failure_in1bis	
+	;;
+	mov pr=saved_pr,0xffffffffffff0000
+	mov ar.lc=saved_lc
+	mov ar.pfs=saved_pfs
+	br.ret.dptk.few rp
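+
+	// Same recovery idea as failure_in_pipe1 above, only with two zeroed
+	// 8-byte slots per iteration; any remainder past the 16-byte loop is
+	// then zeroed byte by byte via failure_in1bis.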
+
+	//
+	// handling of failures on stores: that's the easy part
+	//
+failure_out:
+	sub ret0=enddst,dst1
+	mov pr=saved_pr,0xffffffffffff0000
+	mov ar.lc=saved_lc
+
+	mov ar.pfs=saved_pfs
+	br.ret.dptk.few rp
+
+
+ 	.endp __copy_user
 
-	.endp __copy_user
