author    | Eric Biederman <ebiederm@xmission.com> | 2003-07-21 20:13:45 +0000
committer | Eric Biederman <ebiederm@xmission.com> | 2003-07-21 20:13:45 +0000
commit    | 2c018fba95a5f40c4eaaa20421e8c893dffdb62e (patch)
tree      | 3b6ecc6eb72d145dd70bb549fe0130370d7e40fb /src/arch/i386/include
parent    | 6d4512cdf976fc071720dbec686cf8a1a40f1db0 (diff)
download  | coreboot-2c018fba95a5f40c4eaaa20421e8c893dffdb62e.tar.xz
- First pass at S2880 support.
- SMP cleanups (remove SMP; use only CONFIG_SMP).
- Minor tweaks to romcc to keep it from taking forever to compile.
- Failover fixes.
- Get a good implementation of k8_cpufixup and sizeram for the Opteron.
git-svn-id: svn://svn.coreboot.org/coreboot/trunk@998 2b7e53f0-3cfb-0310-b3e9-8179ed1497e1
Diffstat (limited to 'src/arch/i386/include')
-rw-r--r-- | src/arch/i386/include/arch/smp/atomic.h   | 69
-rw-r--r-- | src/arch/i386/include/arch/smp/spinlock.h | 57
2 files changed, 126 insertions, 0 deletions
```diff
diff --git a/src/arch/i386/include/arch/smp/atomic.h b/src/arch/i386/include/arch/smp/atomic.h
new file mode 100644
index 0000000000..7b68377c22
--- /dev/null
+++ b/src/arch/i386/include/arch/smp/atomic.h
@@ -0,0 +1,69 @@
+#ifndef ARCH_SMP_ATOMIC_H
+#define ARCH_SMP_ATOMIC_H
+
+/*
+ * Make sure gcc doesn't try to be clever and move things around
+ * on us. We need to use _exactly_ the address the user gave us,
+ * not some alias that contains the same information.
+ */
+typedef struct { volatile int counter; } atomic_t;
+
+#define ATOMIC_INIT(i)	{ (i) }
+
+/*
+ * Atomic operations that C can't guarantee us. Useful for
+ * resource counting etc..
+ */
+
+/**
+ * atomic_read - read atomic variable
+ * @v: pointer of type atomic_t
+ *
+ * Atomically reads the value of @v. Note that the guaranteed
+ * useful range of an atomic_t is only 24 bits.
+ */
+#define atomic_read(v)	((v)->counter)
+
+/**
+ * atomic_set - set atomic variable
+ * @v: pointer of type atomic_t
+ * @i: required value
+ *
+ * Atomically sets the value of @v to @i. Note that the guaranteed
+ * useful range of an atomic_t is only 24 bits.
+ */
+#define atomic_set(v,i)	(((v)->counter) = (i))
+
+/**
+ * atomic_inc - increment atomic variable
+ * @v: pointer of type atomic_t
+ *
+ * Atomically increments @v by 1. Note that the guaranteed
+ * useful range of an atomic_t is only 24 bits.
+ */
+static __inline__ void atomic_inc(atomic_t *v)
+{
+	__asm__ __volatile__(
+		"lock ; incl %0"
+		:"=m" (v->counter)
+		:"m" (v->counter));
+}
+
+/**
+ * atomic_dec - decrement atomic variable
+ * @v: pointer of type atomic_t
+ *
+ * Atomically decrements @v by 1. Note that the guaranteed
+ * useful range of an atomic_t is only 24 bits.
+ */
+static __inline__ void atomic_dec(atomic_t *v)
+{
+	__asm__ __volatile__(
+		"lock ; decl %0"
+		:"=m" (v->counter)
+		:"m" (v->counter));
+}
+
+
+
+#endif /* ARCH_SMP_ATOMIC_H */
diff --git a/src/arch/i386/include/arch/smp/spinlock.h b/src/arch/i386/include/arch/smp/spinlock.h
new file mode 100644
index 0000000000..d0cc11c6a7
--- /dev/null
+++ b/src/arch/i386/include/arch/smp/spinlock.h
@@ -0,0 +1,57 @@
+#ifndef ARCH_SMP_SPINLOCK_H
+#define ARCH_SMP_SPINLOCK_H
+
+/*
+ * Your basic SMP spinlocks, allowing only a single CPU anywhere
+ */
+
+typedef struct {
+	volatile unsigned int lock;
+} spinlock_t;
+
+
+#define SPIN_LOCK_UNLOCKED (spinlock_t) { 1 }
+
+/*
+ * Simple spin lock operations. There are two variants, one clears IRQ's
+ * on the local processor, one does not.
+ *
+ * We make no fairness assumptions. They have a cost.
+ */
+#define barrier() __asm__ __volatile__("": : :"memory")
+#define spin_is_locked(x)	(*(volatile char *)(&(x)->lock) <= 0)
+#define spin_unlock_wait(x)	do { barrier(); } while(spin_is_locked(x))
+
+#define spin_lock_string \
+	"\n1:\t" \
+	"lock ; decb %0\n\t" \
+	"js 2f\n" \
+	".section .text.lock,\"ax\"\n" \
+	"2:\t" \
+	"cmpb $0,%0\n\t" \
+	"rep;nop\n\t" \
+	"jle 2b\n\t" \
+	"jmp 1b\n" \
+	".previous"
+
+/*
+ * This works. Despite all the confusion.
+ */
+#define spin_unlock_string \
+	"movb $1,%0"
+
+static inline void spin_lock(spinlock_t *lock)
+{
+	__asm__ __volatile__(
+		spin_lock_string
+		:"=m" (lock->lock) : : "memory");
+}
+
+static inline void spin_unlock(spinlock_t *lock)
+{
+	__asm__ __volatile__(
+		spin_unlock_string
+		:"=m" (lock->lock) : : "memory");
+}
+
+#endif /* ARCH_SMP_SPINLOCK_H */
```
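The atomic.h added above wraps a `volatile int` so the compiler cannot cache or reorder accesses, and uses the x86 `lock` prefix to make the increment and decrement single indivisible bus transactions. As a minimal usage sketch (not part of the commit; `cpu_count`, `secondary_cpu_init`, and `cpus_online` are hypothetical names for illustration), this is how code bringing up SMP might count CPUs safely:

```c
#include <arch/smp/atomic.h>

/* Hypothetical example: counting CPUs as they come online.
 * "lock ; incl" makes each increment atomic, so two CPUs
 * incrementing concurrently cannot lose an update the way a
 * plain "cpu_count++" read-modify-write sequence could. */
static atomic_t cpu_count = ATOMIC_INIT(0);

void secondary_cpu_init(void)
{
	atomic_inc(&cpu_count);		/* atomic: safe from any CPU */
}

int cpus_online(void)
{
	return atomic_read(&cpu_count);	/* plain volatile read */
}
```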
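The spinlock protocol in spinlock.h is the classic decrement-byte lock: an unlocked lock holds 1, so the winner's `lock ; decb` leaves 0 and falls through, while a contending CPU sees a negative result (`js`) and spins in the out-of-line `.text.lock` section on `cmpb`/`rep;nop` (the PAUSE hint) until the byte turns positive, then jumps back to retry the decrement. `spin_unlock` just stores 1 back, and `spin_is_locked` reads the byte as `<= 0`. A minimal usage sketch under those semantics (not from the commit; `console_lock` and `print_cpu_banner` are hypothetical):

```c
#include <arch/smp/spinlock.h>

/* Hypothetical example: serializing console output between CPUs. */
static spinlock_t console_lock = SPIN_LOCK_UNLOCKED;

void print_cpu_banner(int cpu)
{
	(void)cpu;
	spin_lock(&console_lock);	/* spins until it wins the 1 -> 0 decrement */
	/* ... critical section: only one CPU prints at a time ... */
	spin_unlock(&console_lock);	/* "movb $1,%0" releases the lock */
}
```

The `"memory"` clobber on both asm statements keeps the compiler from hoisting loads or stores across the lock boundaries, which is what makes the critical section actually contain its accesses.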