patch-1.3.4 linux/include/asm-sparc/bitops.h


diff -u --recursive --new-file v1.3.3/linux/include/asm-sparc/bitops.h linux/include/asm-sparc/bitops.h
@@ -1,8 +1,11 @@
 #ifndef _SPARC_BITOPS_H
 #define _SPARC_BITOPS_H
 
+#include <linux/kernel.h>
+#include <asm/system.h>
+
 /*
- * Copyright 1994, David S. Miller (davem@caip.rutgers.edu).
+ * Copyright 1995, David S. Miller (davem@caip.rutgers.edu).
  */
 
 
@@ -17,105 +20,259 @@
  * for sun4m (ie. SMP) no doubt.
  */
 
-extern __inline__ unsigned int set_bit(unsigned int nr, void *addr)
+/* These routines now do things in little endian byte order. */
+
+/* Our unsigned long accesses on the Sparc look like this:
+ * Big Endian:
+ *    byte 0    byte 1      byte 2    byte 3
+ *  0000 0000  0000 0000  0000 0000  0000 0000
+ *  31     24  23     16  15      8  7       0
+ *
+ * We want to set the bits in a little-endian fashion:
+ * Little Endian:
+ *    byte 3    byte 2      byte 1    byte 0
+ *  0000 0000  0000 0000  0000 0000  0000 0000
+ *  31     24  23     16  15      8  7       0
+ */
+
+/* #define LITTLE_ENDIAN_BITOPS */
+
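The comment above fixes the convention: bit number nr always selects byte nr>>3 and bit nr&7 within that byte, so a little-endian bitmap keeps the same byte layout no matter which end of the word the CPU calls bit 0. A minimal standalone sketch of that mapping (illustration only; le_bit_addr is a made-up helper, and unsigned int stands in for sparc's 32-bit unsigned long):

	#include <stdio.h>

	/* Hypothetical helper: resolve a little-endian bit number into a
	 * byte pointer plus mask, the same arithmetic set_bit() uses below. */
	static unsigned char *le_bit_addr(void *vaddr, unsigned int nr,
	                                  unsigned char *mask)
	{
		unsigned char *addr = (unsigned char *) vaddr;

		addr += nr >> 3;	/* which byte */
		*mask = 1 << (nr & 7);	/* which bit within that byte */
		return addr;
	}

	int main(void)
	{
		unsigned int word = 0;	/* assume 32 bits, as on sparc */
		unsigned char mask;

		*le_bit_addr(&word, 9, &mask) |= mask;	/* byte 1, bit 1 */
		printf("%08x\n", word);	/* 00020000 big-endian, 00000200 little */
		return 0;
	}
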
+extern __inline__ unsigned int set_bit(unsigned int nr, void *vaddr)
 {
-  register unsigned long retval, tmp, mask, psr;
 
-  __asm__ __volatile__("or %%g0, 0x1, %3\n\t"     /* produce the mask */
-		       "sll %3, %4, %3\n\t"
-		       "rd %%psr, %5\n\t"         /* read the psr */
-		       "wr %5, 0x20, %%psr\n\t"   /* traps disabled */
-		       "ld [%1], %2\n\t"          /* critical section */
-		       "and %3, %2, %0\n\t"
-		       "or  %3, %2, %2\n\t"
-		       "st  %2, [%1]\n\t"
-		       "wr %5, 0x0, %%psr\n\t" :  /* re-enable traps */
-                       "=r" (retval) :
-                       "r" (addr), "r" (tmp=0), "r" (mask=0),
-                       "r" (nr), "r" (psr=0));
 
-  return retval; /* confuse gcc :-) */
+#ifdef LITTLE_ENDIAN_BITOPS
+
+
+        int retval;
+        unsigned char *addr = (unsigned char *)vaddr;
+	unsigned char mask;
+#ifndef TEST_BITOPS
+        unsigned long flags;
+#endif
+
+        addr += nr >> 3;
+        mask = 1 << (nr & 0x7);
+
+#ifndef TEST_BITOPS
+	save_flags(flags);
+	cli();
+#endif
+
+        retval = (mask & *addr) != 0;
+        *addr |= mask;
+
+#ifndef TEST_BITOPS
+	restore_flags(flags);
+#endif
+
+        return retval;
+
+#else  /* BIG ENDIAN BITOPS */
+
+
+	int retval;
+	unsigned long *addr = vaddr;
+	unsigned long mask;
+#ifndef TEST_BITOPS
+	unsigned long flags;
+#endif
 
+	addr += nr>>5;
+	mask = 1 << (nr&31);
+
+#ifndef TEST_BITOPS
+	save_flags(flags);
+	cli();
+#endif
+
+	retval = (mask & *addr) != 0;
+	*addr |= mask;
+
+#ifndef TEST_BITOPS
+	restore_flags(flags);
+#endif
+
+	return retval;
+
+
+#endif
 }
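Both halves of set_bit() share one shape: compute the address and mask, then test and set under save_flags()/cli()/restore_flags() so no interrupt can slip between the load and the store. Because the old value comes back, the routine works as a test-and-set primitive. A usage sketch (busy_map and claim_slot are hypothetical, not from this patch):

	/* Hypothetical caller: claim a slot in a 64-entry allocation
	 * bitmap; a zero return from set_bit() means the slot was free. */
	static unsigned long busy_map[2];

	static int claim_slot(int slot)
	{
		return set_bit(slot, busy_map) == 0;	/* 1 if we got it */
	}
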
 
-extern __inline__ unsigned int clear_bit(unsigned int nr, void *addr)
+extern __inline__ unsigned int clear_bit(unsigned int nr, void *vaddr)
 {
-  register unsigned long retval, tmp, mask, psr;
+#ifdef LITTLE_ENDIAN_BITOPS
+
+
+        int retval;
+        unsigned char *addr = (unsigned char *)vaddr;
+	unsigned char mask;
+#ifndef TEST_BITOPS
+        unsigned long flags;
+#endif
+
+        addr += nr >> 3;
+        mask = 1 << (nr & 7);
+
+#ifndef TEST_BITOPS
+	save_flags(flags);
+	cli();
+#endif
+
+        retval = (mask & *addr) != 0;
+        *addr &= ~mask;
+
+#ifndef TEST_BITOPS
+	restore_flags(flags);
+#endif
+
+        return retval;
+
 
-  __asm__ __volatile__("or %%g0, 0x1, %3\n\t"
-		       "sll %3, %4, %3\n\t"
-		       "rd %%psr, %5\n\t"
-		       "wr %5, 0x20, %%psr\n\t"   /* disable traps */
-                       "ld [%1], %2\n\t"
-		       "and %2, %3, %0\n\t"       /* get old bit */
-		       "andn %2, %3, %2\n\t"      /* set new val */
-		       "st  %2, [%1]\n\t"
-		       "wr %5, 0x0, %%psr\n\t" :  /* enable traps */
-		       "=r" (retval) :
-		       "r" (addr), "r" (tmp=0), "r" (mask=0),
-		       "r" (nr), "r" (psr=0));
+#else   /* BIG ENDIAN BITOPS */
 
-  return retval; /* confuse gcc ;-) */
 
+	int retval;
+	unsigned long *addr = vaddr;
+	unsigned long mask;
+#ifndef TEST_BITOPS
+	unsigned long flags;
+#endif
+
+	addr += nr>>5;
+	mask = 1 << (nr&31);
+
+#ifndef TEST_BITOPS
+	save_flags(flags);
+	cli();
+#endif
+
+	retval = (mask & *addr) != 0;
+	*addr &= ~mask;
+
+#ifndef TEST_BITOPS
+	restore_flags(flags);
+#endif
+
+	return retval;
+
+
+#endif
 }
 
-extern __inline__ unsigned int change_bit(unsigned int nr, void *addr)
+extern __inline__ unsigned int change_bit(unsigned int nr, void *vaddr)
 {
-  register unsigned long retval, tmp, mask, psr;
+#ifdef LITTLE_ENDIAN_BITOPS
+
+
+        int retval;
+        unsigned char *addr = (unsigned char *)vaddr;
+	unsigned char mask;
+#ifndef TEST_BITOPS
+        unsigned long flags;
+#endif
+
+        addr += nr >> 3;
+        mask = 1 << (nr & 7);
+
+#ifndef TEST_BITOPS
+	save_flags(flags);
+	cli();
+#endif
+
+        retval = (mask & *addr) != 0;
+        *addr ^= mask;
+
+#ifndef TEST_BITOPS
+	restore_flags(flags);
+#endif
+
+        return retval;
 
-  __asm__ __volatile__("or %%g0, 0x1, %3\n\t"
-		       "sll %3, %4, %3\n\t"
-		       "rd %%psr, %5\n\t"
-		       "wr %5, 0x20, %%psr\n\t"   /* disable traps */
-                       "ld [%1], %2\n\t"
-		       "and %3, %2, %0\n\t"       /* get old bit val */
-		       "xor %3, %2, %2\n\t"       /* set new val */
-		       "st  %2, [%1]\n\t"
-		       "wr %5, 0x0, %%psr\n\t" :  /* enable traps */
-		       "=r" (retval) :
-		       "r" (addr), "r" (tmp=0), "r" (mask=0),
-		       "r" (nr), "r" (psr=0));
 
-  return retval; /* confuse gcc ;-) */
+#else   /* BIG ENDIAN BITOPS */
 
+
+	int retval;
+	unsigned long *addr = vaddr;
+	unsigned long mask;
+#ifndef TEST_BITOPS
+	unsigned long flags;
+#endif
+
+	addr += nr>>5;
+	mask = 1 << (nr&31);
+
+#ifndef TEST_BITOPS
+	save_flags(flags);
+	cli();
+#endif
+
+	retval = (mask & *addr) != 0;
+	*addr ^= mask;
+
+#ifndef TEST_BITOPS
+	restore_flags(flags);
+#endif
+
+	return retval;
+
+
+#endif
 }
 
 /* The following routine need not be atomic. */
 
-extern __inline__ unsigned int test_bit(int nr, void *addr)
+extern __inline__ unsigned int test_bit(int nr, void *vaddr)
 {
-  register unsigned long retval, tmp;
+#ifdef LITTLE_ENDIAN_BITOPS
+
+        unsigned char mask;
+        unsigned char *addr = (unsigned char *)vaddr;
+
+        addr += nr >> 3;
+        mask = 1 << (nr & 7);
+        return ((mask & *addr) != 0);
+
+#else   /* BIG ENDIAN BITOPS */
 
-  __asm__ __volatile__("ld [%1], %2\n\t"
-		       "or %%g0, 0x1, %0\n\t"
-		       "sll %0, %3, %0\n\t"
-		       "and %0, %2, %0\n\t" :
-		       "=r" (retval) :
-		       "r" (addr), "r" (tmp=0),
-		       "r" (nr));
+	unsigned long mask;
+	unsigned long *addr = vaddr;
 
-  return retval; /* confuse gcc :> */
+	addr += (nr>>5);
+	mask = 1 << (nr&31);
+	return ((mask & *addr) != 0);
 
+#endif
 }
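test_bit() is a single load with no store, so there is no read-modify-write window to protect; that is why it can skip the flag juggling above. The index arithmetic is unchanged: in the big-endian variant, bit 37 resolves to word 37>>5 = 1 and mask 1 << (37 & 31) = 1 << 5. A quick check of that arithmetic (illustration only):

	#include <assert.h>

	int main(void)
	{
		/* bit 37 lives in word 1, at bit 5 of that word */
		assert((37 >> 5) == 1);
		assert((1 << (37 & 31)) == (1 << 5));
		return 0;
	}
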
 
 /* There has to be a faster way to do this, sigh... */
 
 extern __inline__ unsigned long ffz(unsigned long word)
 {
-  register unsigned long cnt, tmp, tmp2;
+  register unsigned long cnt;
 
   cnt = 0;
 
-  __asm__("or %%g0, %3, %2\n\t"
-	  "1: and %2, 0x1, %1\n\t"
-	  "srl %2, 0x1, %2\n\t"
-	  "cmp %1, 0\n\t"
-	  "bne,a 1b\n\t"
-	  "add %0, 0x1, %0\n\t" :
-	  "=r" (cnt) :
-	  "r" (tmp=0), "r" (tmp2=0), "r" (word));
+#ifdef LITTLE_ENDIAN_BITOPS
 
+  {
+	  int byte_bit, bit;	/* C89: declare outside the for loops */
+
+	  for(byte_bit = 24; byte_bit >= 0; byte_bit -= 8)
+		  for(bit = 0; bit < 8; bit++)
+			  if((word>>(byte_bit+bit))&1)
+				  cnt++;
+			  else
+				  return cnt;
+  }
+  return cnt;	/* all 32 bits set, no zero bit found */
+
+#else /* BIG ENDIAN BITOPS */
+  while(cnt<32) {
+	  if(!((word>>cnt)&1))
+		  return cnt;
+	  else
+		  cnt++;
+  }
   return cnt;
+#endif
+
 }
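The faster way the comment sighs for is a binary search: test half the remaining range at each step, and five tests replace up to 32 loop iterations. A sketch matching the big-endian branch's numbering (assumes a 32-bit word and, like the loop above, returns 32 when no zero bit exists; an illustration, not what the patch ships):

	/* Logarithmic ffz() sketch: the first zero bit of word is the
	 * first set bit of ~word, found by halving the search range. */
	static unsigned long ffz_fast(unsigned long word)
	{
		unsigned long cnt = 0;

		word = ~word & 0xffffffffUL;
		if (!word)
			return 32;	/* all ones: no zero bit */
		if (!(word & 0xffff)) { cnt += 16; word >>= 16; }
		if (!(word & 0x00ff)) { cnt +=  8; word >>=  8; }
		if (!(word & 0x000f)) { cnt +=  4; word >>=  4; }
		if (!(word & 0x0003)) { cnt +=  2; word >>=  2; }
		if (!(word & 0x0001)) cnt += 1;
		return cnt;
	}
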
 
 /* find_next_zero_bit() finds the first zero bit in a bit string of length
@@ -123,42 +280,49 @@
  * on Linus's ALPHA routines, which are pretty portable BTW.
  */
 
-extern __inline__ unsigned long find_next_zero_bit(void *addr, unsigned long size, unsigned long offset)
+extern __inline__ unsigned long
+find_next_zero_bit(void *addr, unsigned long size, unsigned long offset)
 {
-  unsigned long *p = ((unsigned long *) addr) + (offset >> 6);
-  unsigned long result = offset & ~31UL;
-  unsigned long tmp;
-
-  if (offset >= size)
-    return size;
-  size -= result;
-  offset &= 31UL;
-  if (offset) 
-    {
-      tmp = *(p++);
-      tmp |= ~0UL >> (32-offset);
-      if (size < 32)
-	goto found_first;
-      if (~tmp)
-	goto found_middle;
-      size -= 32;
-      result += 32;
-    }
-  while (size & ~32UL) 
-    {
-      if (~(tmp = *(p++)))
-	goto found_middle;
-      result += 32;
-      size -= 32;
-    }
-  if (!size)
-    return result;
-  tmp = *p;
+#ifdef LITTLE_ENDIAN_BITOPS
+
+	/* FOO, needs to be written */
+
+#else   /* BIG ENDIAN BITOPS */
+	unsigned long *p = ((unsigned long *) addr) + (offset >> 5);	/* 32-bit words */
+	unsigned long result = offset & ~31UL;
+	unsigned long tmp;
+
+	if (offset >= size)
+		return size;
+	size -= result;
+	offset &= 31UL;
+	if (offset) 
+	{
+		tmp = *(p++);
+		tmp |= ~0UL >> (32-offset);
+		if (size < 32)
+			goto found_first;
+		if (~tmp)
+			goto found_middle;
+		size -= 32;
+		result += 32;
+	}
+	while (size & ~31UL)	/* whole words remain */
+	{
+		if (~(tmp = *(p++)))
+			goto found_middle;
+		result += 32;
+		size -= 32;
+	}
+	if (!size)
+		return result;
+	tmp = *p;
 
 found_first:
-  tmp |= ~0UL << size;
+	tmp |= ~0UL << size;
 found_middle:
-  return result + ffz(tmp);
+	return result + ffz(tmp);
+#endif
 }
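Taken together, ffz() finds a zero inside one word and find_next_zero_bit() strings words into an arbitrarily long bitmap, filling in the bits below offset in the first word and the bits past size in the last so they are never reported. A usage sketch of the pair (free_map and alloc_slot are hypothetical callers):

	/* Hypothetical caller: grab the first free slot in a 64-bit map
	 * using the routines above. */
	static unsigned long free_map[2];

	static long alloc_slot(void)
	{
		unsigned long bit = find_next_zero_bit(free_map, 64, 0);

		if (bit >= 64)
			return -1;	/* map is full */
		set_bit(bit, free_map);
		return (long) bit;
	}
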
 
 /* Linus sez that gcc can optimize the following correctly, we'll see if this
