--- v2.6.18-rc2-mm1~ori/include/linux/lw_rwlock.h	1970-01-01 01:00:00.000000000 +0100
+++ v2.6.18-rc2-mm1/include/linux/lw_rwlock.h	2006-07-28 17:25:04.000000000 +0200
@@ -0,0 +1,71 @@
+#ifndef __LINUX_LW_RWLOCK_H
+#define __LINUX_LW_RWLOCK_H
+
+/*
+ * Lightweight reader-writer lock.
+ * The strategy is: modifications while the lock is held are short, do not
+ * sleep and are very rare, but read access should be free of any exclusive
+ * locks.
+ * The original implementation was written for net/socket.c
+ */
+
+#include <linux/spinlock.h>
+
+
+struct lw_rwlock {
+	/* tracks the number of current readers */
+	atomic_t readers;
+	/* the actual lock, only held by writers */
+	spinlock_t lock;
+};
+
+#define __LW_RWLOCK_UNLOCKED(lockname) \
+	{ { 0 }, __SPIN_LOCK_UNLOCKED(lockname) }
+
+#define lw_rwlock_init(x) \
+	do { *(x) = (struct lw_rwlock) __LW_RWLOCK_UNLOCKED(x); } while (0)
+
+#define DEFINE_LW_RWLOCK(x) \
+	struct lw_rwlock x = __LW_RWLOCK_UNLOCKED(x)
+
+#if defined(CONFIG_SMP) || defined(CONFIG_PREEMPT)
+
+static inline void lw_write_lock(struct lw_rwlock *l)
+{
+	spin_lock(&l->lock);
+	while (atomic_read(&l->readers) != 0) {
+		spin_unlock(&l->lock);
+
+		yield();
+
+		spin_lock(&l->lock);
+	}
+}
+
+static inline void lw_write_unlock(struct lw_rwlock *l)
+{
+	spin_unlock(&l->lock);
+}
+
+static inline void lw_read_lock(struct lw_rwlock *l)
+{
+	atomic_inc(&l->readers);
+	spin_unlock_wait(&l->lock);
+}
+
+static inline void lw_read_unlock(struct lw_rwlock *l)
+{
+	atomic_dec(&l->readers);
+}
+
+#else
+
+#define lw_write_lock(x)	do { } while (0)
+#define lw_write_unlock(x)	do { } while (0)
+#define lw_read_lock(x)		do { } while (0)
+#define lw_read_unlock(x)	do { } while (0)
+
+#endif /* CONFIG_SMP || CONFIG_PREEMPT */
+
+
+#endif /* __LINUX_LW_RWLOCK_H */
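
A minimal usage sketch (not part of the patch itself): example_lock, example_stats,
example_read_stats() and example_set_stats() are hypothetical names that only
illustrate the intended pattern of frequent, cheap readers and a rare, short,
non-sleeping writer. Since lw_write_lock() may call yield(), writers have to run
in process context.

#include <linux/lw_rwlock.h>

static DEFINE_LW_RWLOCK(example_lock);
static int example_stats;		/* hypothetical shared state */

/* Hot path: readers never take the spinlock, they only bump the reader
 * count and then wait for any writer that already holds the lock. */
static int example_read_stats(void)
{
	int val;

	lw_read_lock(&example_lock);
	val = example_stats;
	lw_read_unlock(&example_lock);

	return val;
}

/* Rare update: lw_write_lock() takes the spinlock and yields until the
 * reader count drops to zero, so this must be called from process
 * context and the critical section has to stay short and non-sleeping. */
static void example_set_stats(int val)
{
	lw_write_lock(&example_lock);
	example_stats = val;
	lw_write_unlock(&example_lock);
}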