patch-2.3.43 linux/include/asm-ia64/atomic.h
- Lines: 101
- Date: Thu Feb 10 12:37:22 2000
- Orig file: v2.3.42/linux/include/asm-ia64/atomic.h
- Orig date: Wed Dec 31 16:00:00 1969
diff -u --recursive --new-file v2.3.42/linux/include/asm-ia64/atomic.h linux/include/asm-ia64/atomic.h
@@ -0,0 +1,100 @@
+#ifndef _ASM_IA64_ATOMIC_H
+#define _ASM_IA64_ATOMIC_H
+
+/*
+ * Atomic operations that C can't guarantee us. Useful for
+ * resource counting etc.
+ *
+ * NOTE: don't mess with the types below! The "unsigned long" and
+ * "int" types were carefully placed so as to ensure proper operation
+ * of the macros.
+ *
+ * Copyright (C) 1998, 1999 Hewlett-Packard Co
+ * Copyright (C) 1998, 1999 David Mosberger-Tang <davidm@hpl.hp.com>
+ */
+#include <linux/types.h>
+
+#include <asm/system.h>
+
+/*
+ * Make sure gcc doesn't try to be clever and move things around
+ * on us. We need to use _exactly_ the address the user gave us,
+ * not some alias that contains the same information.
+ */
+#define __atomic_fool_gcc(x) (*(volatile struct { int a[100]; } *)x)
+
+/*
+ * On IA-64, counter must always be volatile to ensure that the
+ * memory accesses are ordered.
+ */
+typedef struct { volatile __s32 counter; } atomic_t;
+
+#define ATOMIC_INIT(i) ((atomic_t) { (i) })
+
+#define atomic_read(v) ((v)->counter)
+#define atomic_set(v,i) (((v)->counter) = (i))
+
+static __inline__ int
+ia64_atomic_add (int i, atomic_t *v)
+{
+ __s32 old, new;
+ CMPXCHG_BUGCHECK_DECL
+
+ do {
+ CMPXCHG_BUGCHECK(v);
+ old = atomic_read(v);
+ new = old + i;
+ } while (ia64_cmpxchg(v, old, new, sizeof(atomic_t)) != old);
+ return new;
+}
+
+static __inline__ int
+ia64_atomic_sub (int i, atomic_t *v)
+{
+ __s32 old, new;
+ CMPXCHG_BUGCHECK_DECL
+
+ do {
+ CMPXCHG_BUGCHECK(v);
+ old = atomic_read(v);
+ new = old - i;
+ } while (ia64_cmpxchg(v, old, new, sizeof(atomic_t)) != old);
+ return new;
+}
+
+/*
+ * Atomically add I to V and return TRUE if the resulting value is
+ * negative.
+ */
+static __inline__ int
+atomic_add_negative (int i, atomic_t *v)
+{
+ return ia64_atomic_add(i, v) < 0;
+}
+
+#define atomic_add_return(i,v) \
+ ((__builtin_constant_p(i) && \
+ ( ((i) == 1) || ((i) == 4) || ((i) == 8) || ((i) == 16) \
+ || ((i) == -1) || ((i) == -4) || ((i) == -8) || ((i) == -16))) \
+ ? ia64_fetch_and_add(i, &(v)->counter) \
+ : ia64_atomic_add(i, v))
+
+#define atomic_sub_return(i,v) \
+ ((__builtin_constant_p(i) && \
+ ( ((i) == 1) || ((i) == 4) || ((i) == 8) || ((i) == 16) \
+ || ((i) == -1) || ((i) == -4) || ((i) == -8) || ((i) == -16))) \
+ ? ia64_fetch_and_add(-(i), &(v)->counter) \
+ : ia64_atomic_sub(i, v))
+
+#define atomic_dec_return(v) atomic_sub_return(1, (v))
+#define atomic_inc_return(v) atomic_add_return(1, (v))
+
+#define atomic_sub_and_test(i,v) (atomic_sub_return((i), (v)) == 0)
+#define atomic_dec_and_test(v) (atomic_sub_return(1, (v)) == 0)
+
+#define atomic_add(i,v) atomic_add_return((i), (v))
+#define atomic_sub(i,v) atomic_sub_return((i), (v))
+#define atomic_inc(v) atomic_add(1, (v))
+#define atomic_dec(v) atomic_sub(1, (v))
+
+#endif /* _ASM_IA64_ATOMIC_H */
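
For readers without the rest of the tree at hand, here is a minimal user-space sketch of the cmpxchg retry loop used by ia64_atomic_add above. It uses GCC's __sync_val_compare_and_swap builtin as a stand-in for ia64_cmpxchg; the demo_atomic_t and demo_atomic_add names are illustrative, not part of the patch:

	#include <stdio.h>

	typedef struct { volatile int counter; } demo_atomic_t;

	/* Retry loop mirroring ia64_atomic_add: re-read the counter and
	 * retry the swap until no one else changed it in between. */
	static int
	demo_atomic_add (int i, demo_atomic_t *v)
	{
		int old, new;

		do {
			old = v->counter;
			new = old + i;
			/* The builtin returns the value that was in *ptr;
			 * the swap succeeded only if that equals old. */
		} while (__sync_val_compare_and_swap(&v->counter, old, new) != old);
		return new;
	}

	int
	main (void)
	{
		demo_atomic_t v = { 0 };

		demo_atomic_add(5, &v);
		demo_atomic_add(-2, &v);
		printf("counter = %d\n", v.counter);	/* prints 3 */
		return 0;
	}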
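Likewise, the __builtin_constant_p dispatch in atomic_add_return can be demonstrated outside the kernel. IA-64's fetchadd instruction only encodes the immediates +/-1, +/-4, +/-8 and +/-16, so only those compile-time constants may take the single-instruction fast path; everything else falls back to the cmpxchg loop. In this sketch, fast_path, slow_path and demo_add_return are hypothetical stand-ins for ia64_fetch_and_add, ia64_atomic_add and atomic_add_return:

	#include <stdio.h>

	/* Stand-ins for the two paths in the real header. */
	static int fast_path (int i) { printf("fetchadd path (i=%d)\n", i); return i; }
	static int slow_path (int i) { printf("cmpxchg path  (i=%d)\n", i); return i; }

	/* Same shape as atomic_add_return: take the fast path only for
	 * compile-time constants that fetchadd can encode. */
	#define demo_add_return(i)						\
		((__builtin_constant_p(i) &&					\
		  (((i) == 1) || ((i) == 4) || ((i) == 8) || ((i) == 16)	\
		   || ((i) == -1) || ((i) == -4) || ((i) == -8) || ((i) == -16))) \
		 ? fast_path(i)							\
		 : slow_path(i))

	int
	main (int argc, char **argv)
	{
		demo_add_return(4);	/* constant in the fetchadd set -> fast path */
		demo_add_return(3);	/* constant, but not encodable  -> slow path */
		demo_add_return(argc);	/* not a compile-time constant  -> slow path */
		return 0;
	}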