--- v2.6.18-rc2-mm1~ori/net/dccp/ccid.c	2006-06-18 03:49:35.000000000 +0200
+++ v2.6.18-rc2-mm1/net/dccp/ccid.c	2006-07-28 16:19:32.000000000 +0200
@@ -12,48 +12,11 @@
  */
 
 #include "ccid.h"
+#include <linux/lw_rwlock.h>
 
 static struct ccid_operations *ccids[CCID_MAX];
-#if defined(CONFIG_SMP) || defined(CONFIG_PREEMPT)
-static atomic_t ccids_lockct = ATOMIC_INIT(0);
-static DEFINE_SPINLOCK(ccids_lock);
-
-/*
- * The strategy is: modifications ccids vector are short, do not sleep and
- * veeery rare, but read access should be free of any exclusive locks.
- */
-static void ccids_write_lock(void)
-{
-	spin_lock(&ccids_lock);
-	while (atomic_read(&ccids_lockct) != 0) {
-		spin_unlock(&ccids_lock);
-		yield();
-		spin_lock(&ccids_lock);
-	}
-}
-
-static inline void ccids_write_unlock(void)
-{
-	spin_unlock(&ccids_lock);
-}
+static DEFINE_LW_RWLOCK(ccids_lock);
 
-static inline void ccids_read_lock(void)
-{
-	atomic_inc(&ccids_lockct);
-	spin_unlock_wait(&ccids_lock);
-}
-
-static inline void ccids_read_unlock(void)
-{
-	atomic_dec(&ccids_lockct);
-}
-
-#else
-#define ccids_write_lock() do { } while(0)
-#define ccids_write_unlock() do { } while(0)
-#define ccids_read_lock() do { } while(0)
-#define ccids_read_unlock() do { } while(0)
-#endif
 
 static kmem_cache_t *ccid_kmem_cache_create(int obj_size, const char *fmt,...)
{ @@ -103,13 +66,13 @@ int ccid_register(struct ccid_operations if (ccid_ops->ccid_hc_tx_slab == NULL) goto out_free_rx_slab; - ccids_write_lock(); + lw_write_lock(&ccids_lock); err = -EEXIST; if (ccids[ccid_ops->ccid_id] == NULL) { ccids[ccid_ops->ccid_id] = ccid_ops; err = 0; } - ccids_write_unlock(); + lw_write_unlock(&ccids_lock); if (err != 0) goto out_free_tx_slab; @@ -131,9 +94,9 @@ EXPORT_SYMBOL_GPL(ccid_register); int ccid_unregister(struct ccid_operations *ccid_ops) { - ccids_write_lock(); + lw_write_lock(&ccids_lock); ccids[ccid_ops->ccid_id] = NULL; - ccids_write_unlock(); + lw_write_unlock(&ccids_lock); ccid_kmem_cache_destroy(ccid_ops->ccid_hc_tx_slab); ccid_ops->ccid_hc_tx_slab = NULL; @@ -152,15 +115,15 @@ struct ccid *ccid_new(unsigned char id, struct ccid_operations *ccid_ops; struct ccid *ccid = NULL; - ccids_read_lock(); + lw_read_lock(&ccids_lock); #ifdef CONFIG_KMOD if (ccids[id] == NULL) { /* We only try to load if in process context */ - ccids_read_unlock(); + lw_read_unlock(&ccids_lock); if (gfp & GFP_ATOMIC) goto out; request_module("net-dccp-ccid-%d", id); - ccids_read_lock(); + lw_read_lock(&ccids_lock); } #endif ccid_ops = ccids[id]; @@ -170,7 +133,7 @@ struct ccid *ccid_new(unsigned char id, if (!try_module_get(ccid_ops->ccid_owner)) goto out_unlock; - ccids_read_unlock(); + lw_read_unlock(&ccids_lock); ccid = kmem_cache_alloc(rx ? ccid_ops->ccid_hc_rx_slab : ccid_ops->ccid_hc_tx_slab, gfp); @@ -191,7 +154,7 @@ struct ccid *ccid_new(unsigned char id, out: return ccid; out_unlock: - ccids_read_unlock(); + lw_read_unlock(&ccids_lock); goto out; out_free_ccid: kmem_cache_free(rx ? 
ccid_ops->ccid_hc_rx_slab : @@ -235,10 +198,10 @@ static void ccid_delete(struct ccid *cci ccid_ops->ccid_hc_tx_exit(sk); kmem_cache_free(ccid_ops->ccid_hc_tx_slab, ccid); } - ccids_read_lock(); + lw_read_lock(&ccids_lock); if (ccids[ccid_ops->ccid_id] != NULL) module_put(ccid_ops->ccid_owner); - ccids_read_unlock(); + lw_read_unlock(&ccids_lock); } void ccid_hc_rx_delete(struct ccid *ccid, struct sock *sk)