
From: Thomas Gleixner <tglx@linutronix.de>

Use the new lock initializers DEFINE_SPINLOCK and DEFINE_RWLOCK instead of assigning the SPIN_LOCK_UNLOCKED and RW_LOCK_UNLOCKED constants at definition time.
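
For reference, the conversion pattern applied throughout is sketched below
(illustrative only; example_lock and example_rwlock are made-up names, not
taken from any of the files touched by this patch):

	/* old style: assign the UNLOCKED constants at definition time */
	static spinlock_t example_lock = SPIN_LOCK_UNLOCKED;
	static rwlock_t example_rwlock = RW_LOCK_UNLOCKED;

	/* new style: the DEFINE_* macros declare and initialize in one step */
	static DEFINE_SPINLOCK(example_lock);
	static DEFINE_RWLOCK(example_rwlock);

Locks embedded in runtime-initialized structures (e.g. the ulog_buffers[]
array in ebt_ulog.c) are switched to spin_lock_init() instead, since the
static initializer macros only apply to lock definitions.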

Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Andrew Morton <akpm@osdl.org>
---

 25-akpm/net/802/psnap.c                        |    2 +-
 25-akpm/net/802/tr.c                           |    2 +-
 25-akpm/net/appletalk/aarp.c                   |    2 +-
 25-akpm/net/appletalk/ddp.c                    |    6 +++---
 25-akpm/net/atm/br2684.c                       |    2 +-
 25-akpm/net/atm/common.c                       |    2 +-
 25-akpm/net/atm/resources.c                    |    2 +-
 25-akpm/net/ax25/af_ax25.c                     |    2 +-
 25-akpm/net/ax25/ax25_dev.c                    |    2 +-
 25-akpm/net/ax25/ax25_iface.c                  |    6 +++---
 25-akpm/net/ax25/ax25_out.c                    |    2 +-
 25-akpm/net/ax25/ax25_route.c                  |    2 +-
 25-akpm/net/ax25/ax25_uid.c                    |    2 +-
 25-akpm/net/bridge/netfilter/ebt_limit.c       |    2 +-
 25-akpm/net/bridge/netfilter/ebt_log.c         |    2 +-
 25-akpm/net/bridge/netfilter/ebt_ulog.c        |    2 +-
 25-akpm/net/core/dev.c                         |    6 +++---
 25-akpm/net/core/dst.c                         |    2 +-
 25-akpm/net/core/gen_estimator.c               |    2 +-
 25-akpm/net/core/link_watch.c                  |    2 +-
 25-akpm/net/core/neighbour.c                   |    2 +-
 25-akpm/net/core/netfilter.c                   |    6 +++---
 25-akpm/net/core/netpoll.c                     |    6 +++---
 25-akpm/net/decnet/af_decnet.c                 |    2 +-
 25-akpm/net/decnet/dn_dev.c                    |    2 +-
 25-akpm/net/decnet/dn_fib.c                    |    4 ++--
 25-akpm/net/decnet/dn_route.c                  |    2 +-
 25-akpm/net/decnet/dn_rules.c                  |    2 +-
 25-akpm/net/decnet/dn_table.c                  |    2 +-
 25-akpm/net/econet/af_econet.c                 |    2 +-
 25-akpm/net/ipv4/af_inet.c                     |    2 +-
 25-akpm/net/ipv4/fib_hash.c                    |    2 +-
 25-akpm/net/ipv4/fib_rules.c                   |    2 +-
 25-akpm/net/ipv4/fib_semantics.c               |    4 ++--
 25-akpm/net/ipv4/inetpeer.c                    |    6 +++---
 25-akpm/net/ipv4/ip_fragment.c                 |    2 +-
 25-akpm/net/ipv4/ip_gre.c                      |    2 +-
 25-akpm/net/ipv4/ip_sockglue.c                 |    2 +-
 25-akpm/net/ipv4/ipconfig.c                    |    2 +-
 25-akpm/net/ipv4/ipip.c                        |    2 +-
 25-akpm/net/ipv4/ipmr.c                        |    4 ++--
 25-akpm/net/ipv4/ipvs/ip_vs_ctl.c              |   10 +++++-----
 25-akpm/net/ipv4/ipvs/ip_vs_est.c              |    2 +-
 25-akpm/net/ipv4/ipvs/ip_vs_proto_tcp.c        |    2 +-
 25-akpm/net/ipv4/ipvs/ip_vs_proto_udp.c        |    2 +-
 25-akpm/net/ipv4/ipvs/ip_vs_sched.c            |    2 +-
 25-akpm/net/ipv4/ipvs/ip_vs_sync.c             |    4 ++--
 25-akpm/net/ipv4/netfilter/ip_nat_snmp_basic.c |    2 +-
 25-akpm/net/ipv4/netfilter/ip_queue.c          |    2 +-
 25-akpm/net/ipv4/netfilter/ipt_LOG.c           |    2 +-
 25-akpm/net/ipv4/netfilter/ipt_limit.c         |    2 +-
 25-akpm/net/ipv4/netfilter/ipt_recent.c        |    2 +-
 25-akpm/net/ipv4/protocol.c                    |    2 +-
 25-akpm/net/ipv4/raw.c                         |    2 +-
 25-akpm/net/ipv4/route.c                       |    6 +++---
 25-akpm/net/ipv4/tcp_minisocks.c               |    2 +-
 25-akpm/net/ipv4/udp.c                         |    2 +-
 25-akpm/net/ipv6/addrconf.c                    |    8 ++++----
 25-akpm/net/ipv6/af_inet6.c                    |    2 +-
 25-akpm/net/ipv6/anycast.c                     |    2 +-
 25-akpm/net/ipv6/ip6_fib.c                     |    4 ++--
 25-akpm/net/ipv6/ip6_flowlabel.c               |    4 ++--
 25-akpm/net/ipv6/ip6_output.c                  |    2 +-
 25-akpm/net/ipv6/ip6_tunnel.c                  |    2 +-
 25-akpm/net/ipv6/ipv6_sockglue.c               |    2 +-
 25-akpm/net/ipv6/mcast.c                       |    2 +-
 25-akpm/net/ipv6/netfilter/ip6_queue.c         |    2 +-
 25-akpm/net/ipv6/netfilter/ip6t_LOG.c          |    2 +-
 25-akpm/net/ipv6/netfilter/ip6t_limit.c        |    2 +-
 25-akpm/net/ipv6/protocol.c                    |    2 +-
 25-akpm/net/ipv6/raw.c                         |    2 +-
 25-akpm/net/ipv6/reassembly.c                  |    2 +-
 25-akpm/net/ipv6/route.c                       |    4 ++--
 25-akpm/net/ipv6/sit.c                         |    2 +-
 25-akpm/net/ipv6/xfrm6_tunnel.c                |    2 +-
 25-akpm/net/ipx/af_ipx.c                       |    2 +-
 25-akpm/net/ipx/ipx_route.c                    |    2 +-
 25-akpm/net/key/af_key.c                       |    4 ++--
 25-akpm/net/lapb/lapb_iface.c                  |    2 +-
 25-akpm/net/llc/llc_core.c                     |    2 +-
 25-akpm/net/netlink/af_netlink.c               |    4 ++--
 25-akpm/net/netrom/af_netrom.c                 |    2 +-
 25-akpm/net/netrom/nr_route.c                  |    4 ++--
 25-akpm/net/packet/af_packet.c                 |    2 +-
 25-akpm/net/rose/af_rose.c                     |    2 +-
 25-akpm/net/rose/rose_route.c                  |    6 +++---
 25-akpm/net/rxrpc/krxiod.c                     |    4 ++--
 25-akpm/net/rxrpc/krxsecd.c                    |    2 +-
 25-akpm/net/rxrpc/krxtimod.c                   |    2 +-
 25-akpm/net/rxrpc/transport.c                  |    2 +-
 25-akpm/net/sched/act_api.c                    |    2 +-
 25-akpm/net/sched/cls_api.c                    |    2 +-
 25-akpm/net/sched/estimator.c                  |    2 +-
 25-akpm/net/sched/gact.c                       |    2 +-
 25-akpm/net/sched/ipt.c                        |    2 +-
 25-akpm/net/sched/mirred.c                     |    2 +-
 25-akpm/net/sched/pedit.c                      |    2 +-
 25-akpm/net/sched/police.c                     |    2 +-
 25-akpm/net/sched/sch_api.c                    |    2 +-
 25-akpm/net/sched/sch_generic.c                |    2 +-
 25-akpm/net/sctp/protocol.c                    |    2 +-
 25-akpm/net/socket.c                           |    2 +-
 25-akpm/net/sunrpc/auth.c                      |    2 +-
 25-akpm/net/sunrpc/auth_gss/auth_gss.c         |    2 +-
 25-akpm/net/sunrpc/auth_gss/gss_mech_switch.c  |    2 +-
 25-akpm/net/sunrpc/cache.c                     |    6 +++---
 25-akpm/net/sunrpc/pmap_clnt.c                 |    2 +-
 25-akpm/net/sunrpc/sched.c                     |    2 +-
 25-akpm/net/sunrpc/svcauth.c                   |    2 +-
 25-akpm/net/unix/af_unix.c                     |    2 +-
 25-akpm/net/wanrouter/af_wanpipe.c             |    2 +-
 25-akpm/net/x25/af_x25.c                       |    2 +-
 25-akpm/net/x25/x25_link.c                     |    2 +-
 25-akpm/net/x25/x25_route.c                    |    2 +-
 25-akpm/net/xfrm/xfrm_policy.c                 |    6 +++---
 25-akpm/net/xfrm/xfrm_state.c                  |   10 +++++-----
 116 files changed, 158 insertions(+), 158 deletions(-)

diff -puN net/802/psnap.c~lock-initializer-cleanup-networking net/802/psnap.c
--- 25/net/802/psnap.c~lock-initializer-cleanup-networking	Wed Jan 12 16:54:32 2005
+++ 25-akpm/net/802/psnap.c	Wed Jan 12 16:54:32 2005
@@ -22,7 +22,7 @@
 #include <linux/init.h>
 
 static LIST_HEAD(snap_list);
-static spinlock_t snap_lock = SPIN_LOCK_UNLOCKED;
+static DEFINE_SPINLOCK(snap_lock);
 static struct llc_sap *snap_sap;
 
 /*
diff -puN net/802/tr.c~lock-initializer-cleanup-networking net/802/tr.c
--- 25/net/802/tr.c~lock-initializer-cleanup-networking	Wed Jan 12 16:54:32 2005
+++ 25-akpm/net/802/tr.c	Wed Jan 12 16:54:32 2005
@@ -66,7 +66,7 @@ struct rif_cache_s {	
  
 static struct rif_cache_s *rif_table[RIF_TABLE_SIZE];
 
-static spinlock_t rif_lock = SPIN_LOCK_UNLOCKED;
+static DEFINE_SPINLOCK(rif_lock);
 
 
 /*
diff -puN net/appletalk/aarp.c~lock-initializer-cleanup-networking net/appletalk/aarp.c
--- 25/net/appletalk/aarp.c~lock-initializer-cleanup-networking	Wed Jan 12 16:54:32 2005
+++ 25-akpm/net/appletalk/aarp.c	Wed Jan 12 16:54:32 2005
@@ -77,7 +77,7 @@ static struct aarp_entry *proxies[AARP_H
 static int unresolved_count;
 
 /* One lock protects it all. */
-static rwlock_t aarp_lock = RW_LOCK_UNLOCKED;
+static DEFINE_RWLOCK(aarp_lock);
 
 /* Used to walk the list and purge/kick entries.  */
 static struct timer_list aarp_timer;
diff -puN net/appletalk/ddp.c~lock-initializer-cleanup-networking net/appletalk/ddp.c
--- 25/net/appletalk/ddp.c~lock-initializer-cleanup-networking	Wed Jan 12 16:54:32 2005
+++ 25-akpm/net/appletalk/ddp.c	Wed Jan 12 16:54:32 2005
@@ -71,7 +71,7 @@ static struct proto_ops atalk_dgram_ops;
 \**************************************************************************/
 
 HLIST_HEAD(atalk_sockets);
-rwlock_t atalk_sockets_lock = RW_LOCK_UNLOCKED;
+DEFINE_RWLOCK(atalk_sockets_lock);
 
 static inline void __atalk_insert_socket(struct sock *sk)
 {
@@ -193,10 +193,10 @@ static inline void atalk_destroy_socket(
 
 /* Anti-deadlock ordering is atalk_routes_lock --> iface_lock -DaveM */
 struct atalk_route *atalk_routes;
-rwlock_t atalk_routes_lock = RW_LOCK_UNLOCKED;
+DEFINE_RWLOCK(atalk_routes_lock);
 
 struct atalk_iface *atalk_interfaces;
-rwlock_t atalk_interfaces_lock = RW_LOCK_UNLOCKED;
+DEFINE_RWLOCK(atalk_interfaces_lock);
 
 /* For probing devices or in a routerless network */
 struct atalk_route atrtr_default;
diff -puN net/atm/br2684.c~lock-initializer-cleanup-networking net/atm/br2684.c
--- 25/net/atm/br2684.c~lock-initializer-cleanup-networking	Wed Jan 12 16:54:32 2005
+++ 25-akpm/net/atm/br2684.c	Wed Jan 12 16:54:32 2005
@@ -97,7 +97,7 @@ struct br2684_dev {
  * do read-locking under interrupt context, so write locking must block
  * the current CPU's interrupts
  */
-static rwlock_t devs_lock = RW_LOCK_UNLOCKED;
+static DEFINE_RWLOCK(devs_lock);
 
 static LIST_HEAD(br2684_devs);
 
diff -puN net/atm/common.c~lock-initializer-cleanup-networking net/atm/common.c
--- 25/net/atm/common.c~lock-initializer-cleanup-networking	Wed Jan 12 16:54:32 2005
+++ 25-akpm/net/atm/common.c	Wed Jan 12 16:54:32 2005
@@ -39,7 +39,7 @@
 #endif
 
 struct hlist_head vcc_hash[VCC_HTABLE_SIZE];
-rwlock_t vcc_sklist_lock = RW_LOCK_UNLOCKED;
+DEFINE_RWLOCK(vcc_sklist_lock);
 
 void __vcc_insert_socket(struct sock *sk)
 {
diff -puN net/atm/resources.c~lock-initializer-cleanup-networking net/atm/resources.c
--- 25/net/atm/resources.c~lock-initializer-cleanup-networking	Wed Jan 12 16:54:32 2005
+++ 25-akpm/net/atm/resources.c	Wed Jan 12 16:54:32 2005
@@ -24,7 +24,7 @@
 
 
 LIST_HEAD(atm_devs);
-spinlock_t atm_dev_lock = SPIN_LOCK_UNLOCKED;
+DEFINE_SPINLOCK(atm_dev_lock);
 
 static struct atm_dev *__alloc_atm_dev(const char *type)
 {
diff -puN net/ax25/af_ax25.c~lock-initializer-cleanup-networking net/ax25/af_ax25.c
--- 25/net/ax25/af_ax25.c~lock-initializer-cleanup-networking	Wed Jan 12 16:54:32 2005
+++ 25-akpm/net/ax25/af_ax25.c	Wed Jan 12 16:54:32 2005
@@ -52,7 +52,7 @@
 
 
 HLIST_HEAD(ax25_list);
-spinlock_t ax25_list_lock = SPIN_LOCK_UNLOCKED;
+DEFINE_SPINLOCK(ax25_list_lock);
 
 static struct proto_ops ax25_proto_ops;
 
diff -puN net/ax25/ax25_dev.c~lock-initializer-cleanup-networking net/ax25/ax25_dev.c
--- 25/net/ax25/ax25_dev.c~lock-initializer-cleanup-networking	Wed Jan 12 16:54:32 2005
+++ 25-akpm/net/ax25/ax25_dev.c	Wed Jan 12 16:54:32 2005
@@ -32,7 +32,7 @@
 #include <linux/init.h>
 
 ax25_dev *ax25_dev_list;
-spinlock_t ax25_dev_lock = SPIN_LOCK_UNLOCKED;
+DEFINE_SPINLOCK(ax25_dev_lock);
 
 ax25_dev *ax25_addr_ax25dev(ax25_address *addr)
 {
diff -puN net/ax25/ax25_iface.c~lock-initializer-cleanup-networking net/ax25/ax25_iface.c
--- 25/net/ax25/ax25_iface.c~lock-initializer-cleanup-networking	Wed Jan 12 16:54:32 2005
+++ 25-akpm/net/ax25/ax25_iface.c	Wed Jan 12 16:54:32 2005
@@ -34,20 +34,20 @@ static struct protocol_struct {
 	unsigned int pid;
 	int (*func)(struct sk_buff *, ax25_cb *);
 } *protocol_list = NULL;
-static rwlock_t protocol_list_lock = RW_LOCK_UNLOCKED;
+static DEFINE_RWLOCK(protocol_list_lock);
 
 static struct linkfail_struct {
 	struct linkfail_struct *next;
 	void (*func)(ax25_cb *, int);
 } *linkfail_list = NULL;
-static spinlock_t linkfail_lock = SPIN_LOCK_UNLOCKED;
+static DEFINE_SPINLOCK(linkfail_lock);
 
 static struct listen_struct {
 	struct listen_struct *next;
 	ax25_address  callsign;
 	struct net_device *dev;
 } *listen_list = NULL;
-static spinlock_t listen_lock = SPIN_LOCK_UNLOCKED;
+static DEFINE_SPINLOCK(listen_lock);
 
 int ax25_protocol_register(unsigned int pid,
 	int (*func)(struct sk_buff *, ax25_cb *))
diff -puN net/ax25/ax25_out.c~lock-initializer-cleanup-networking net/ax25/ax25_out.c
--- 25/net/ax25/ax25_out.c~lock-initializer-cleanup-networking	Wed Jan 12 16:54:32 2005
+++ 25-akpm/net/ax25/ax25_out.c	Wed Jan 12 16:54:32 2005
@@ -32,7 +32,7 @@
 #include <linux/mm.h>
 #include <linux/interrupt.h>
 
-static spinlock_t ax25_frag_lock = SPIN_LOCK_UNLOCKED;
+static DEFINE_SPINLOCK(ax25_frag_lock);
 
 ax25_cb *ax25_send_frame(struct sk_buff *skb, int paclen, ax25_address *src, ax25_address *dest, ax25_digi *digi, struct net_device *dev)
 {
diff -puN net/ax25/ax25_route.c~lock-initializer-cleanup-networking net/ax25/ax25_route.c
--- 25/net/ax25/ax25_route.c~lock-initializer-cleanup-networking	Wed Jan 12 16:54:32 2005
+++ 25-akpm/net/ax25/ax25_route.c	Wed Jan 12 16:54:32 2005
@@ -37,7 +37,7 @@
 #include <linux/seq_file.h>
 
 static ax25_route *ax25_route_list;
-static rwlock_t ax25_route_lock = RW_LOCK_UNLOCKED;
+static DEFINE_RWLOCK(ax25_route_lock);
 
 static ax25_route *ax25_get_route(ax25_address *, struct net_device *);
 
diff -puN net/ax25/ax25_uid.c~lock-initializer-cleanup-networking net/ax25/ax25_uid.c
--- 25/net/ax25/ax25_uid.c~lock-initializer-cleanup-networking	Wed Jan 12 16:54:32 2005
+++ 25-akpm/net/ax25/ax25_uid.c	Wed Jan 12 16:54:32 2005
@@ -42,7 +42,7 @@
  */
 
 static ax25_uid_assoc *ax25_uid_list;
-static rwlock_t ax25_uid_lock = RW_LOCK_UNLOCKED;
+static DEFINE_RWLOCK(ax25_uid_lock);
 
 int ax25_uid_policy = 0;
 
diff -puN net/bridge/netfilter/ebt_limit.c~lock-initializer-cleanup-networking net/bridge/netfilter/ebt_limit.c
--- 25/net/bridge/netfilter/ebt_limit.c~lock-initializer-cleanup-networking	Wed Jan 12 16:54:32 2005
+++ 25-akpm/net/bridge/netfilter/ebt_limit.c	Wed Jan 12 16:54:32 2005
@@ -18,7 +18,7 @@
 #include <linux/netdevice.h>
 #include <linux/spinlock.h>
 
-static spinlock_t limit_lock = SPIN_LOCK_UNLOCKED;
+static DEFINE_SPINLOCK(limit_lock);
 
 #define MAX_CPJ (0xFFFFFFFF / (HZ*60*60*24))
 
diff -puN net/bridge/netfilter/ebt_log.c~lock-initializer-cleanup-networking net/bridge/netfilter/ebt_log.c
--- 25/net/bridge/netfilter/ebt_log.c~lock-initializer-cleanup-networking	Wed Jan 12 16:54:32 2005
+++ 25-akpm/net/bridge/netfilter/ebt_log.c	Wed Jan 12 16:54:32 2005
@@ -15,7 +15,7 @@
 #include <linux/if_arp.h>
 #include <linux/spinlock.h>
 
-static spinlock_t ebt_log_lock = SPIN_LOCK_UNLOCKED;
+static DEFINE_SPINLOCK(ebt_log_lock);
 
 static int ebt_log_check(const char *tablename, unsigned int hookmask,
    const struct ebt_entry *e, void *data, unsigned int datalen)
diff -puN net/bridge/netfilter/ebt_ulog.c~lock-initializer-cleanup-networking net/bridge/netfilter/ebt_ulog.c
--- 25/net/bridge/netfilter/ebt_ulog.c~lock-initializer-cleanup-networking	Wed Jan 12 16:54:32 2005
+++ 25-akpm/net/bridge/netfilter/ebt_ulog.c	Wed Jan 12 16:54:32 2005
@@ -255,7 +255,7 @@ static int __init init(void)
 		init_timer(&ulog_buffers[i].timer);
 		ulog_buffers[i].timer.function = ulog_timer;
 		ulog_buffers[i].timer.data = i;
-		ulog_buffers[i].lock = SPIN_LOCK_UNLOCKED;
+		spin_lock_init(&ulog_buffers[i].lock);
 	}
 
 	ebtulognl = netlink_kernel_create(NETLINK_NFLOG, NULL);
diff -puN net/core/dev.c~lock-initializer-cleanup-networking net/core/dev.c
--- 25/net/core/dev.c~lock-initializer-cleanup-networking	Wed Jan 12 16:54:32 2005
+++ 25-akpm/net/core/dev.c	Wed Jan 12 16:54:32 2005
@@ -154,7 +154,7 @@
  *		86DD	IPv6
  */
 
-static spinlock_t ptype_lock = SPIN_LOCK_UNLOCKED;
+static DEFINE_SPINLOCK(ptype_lock);
 static struct list_head ptype_base[16];	/* 16 way hashed list */
 static struct list_head ptype_all;		/* Taps */
 
@@ -184,7 +184,7 @@ static struct timer_list samp_timer = TI
  */
 struct net_device *dev_base;
 struct net_device **dev_tail = &dev_base;
-rwlock_t dev_base_lock = RW_LOCK_UNLOCKED;
+DEFINE_RWLOCK(dev_base_lock);
 
 EXPORT_SYMBOL(dev_base);
 EXPORT_SYMBOL(dev_base_lock);
@@ -2674,7 +2674,7 @@ static int dev_new_index(void)
 static int dev_boot_phase = 1;
 
 /* Delayed registration/unregisteration */
-static spinlock_t net_todo_list_lock = SPIN_LOCK_UNLOCKED;
+static DEFINE_SPINLOCK(net_todo_list_lock);
 static struct list_head net_todo_list = LIST_HEAD_INIT(net_todo_list);
 
 static inline void net_set_todo(struct net_device *dev)
diff -puN net/core/dst.c~lock-initializer-cleanup-networking net/core/dst.c
--- 25/net/core/dst.c~lock-initializer-cleanup-networking	Wed Jan 12 16:54:32 2005
+++ 25-akpm/net/core/dst.c	Wed Jan 12 16:54:32 2005
@@ -32,7 +32,7 @@ static struct dst_entry 	*dst_garbage_li
 #if RT_CACHE_DEBUG >= 2 
 static atomic_t			 dst_total = ATOMIC_INIT(0);
 #endif
-static spinlock_t		 dst_lock = SPIN_LOCK_UNLOCKED;
+static DEFINE_SPINLOCK(dst_lock);
 
 static unsigned long dst_gc_timer_expires;
 static unsigned long dst_gc_timer_inc = DST_GC_MAX;
diff -puN net/core/gen_estimator.c~lock-initializer-cleanup-networking net/core/gen_estimator.c
--- 25/net/core/gen_estimator.c~lock-initializer-cleanup-networking	Wed Jan 12 16:54:32 2005
+++ 25-akpm/net/core/gen_estimator.c	Wed Jan 12 16:54:32 2005
@@ -100,7 +100,7 @@ struct gen_estimator_head
 static struct gen_estimator_head elist[EST_MAX_INTERVAL+1];
 
 /* Estimator array lock */
-static rwlock_t est_lock = RW_LOCK_UNLOCKED;
+static DEFINE_RWLOCK(est_lock);
 
 static void est_timer(unsigned long arg)
 {
diff -puN net/core/link_watch.c~lock-initializer-cleanup-networking net/core/link_watch.c
--- 25/net/core/link_watch.c~lock-initializer-cleanup-networking	Wed Jan 12 16:54:32 2005
+++ 25-akpm/net/core/link_watch.c	Wed Jan 12 16:54:32 2005
@@ -38,7 +38,7 @@ static void linkwatch_event(void *dummy)
 static DECLARE_WORK(linkwatch_work, linkwatch_event, NULL);
 
 static LIST_HEAD(lweventlist);
-static spinlock_t lweventlist_lock = SPIN_LOCK_UNLOCKED;
+static DEFINE_SPINLOCK(lweventlist_lock);
 
 struct lw_event {
 	struct list_head list;
diff -puN net/core/neighbour.c~lock-initializer-cleanup-networking net/core/neighbour.c
--- 25/net/core/neighbour.c~lock-initializer-cleanup-networking	Wed Jan 12 16:54:32 2005
+++ 25-akpm/net/core/neighbour.c	Wed Jan 12 16:54:32 2005
@@ -93,7 +93,7 @@ static struct file_operations neigh_stat
    list of neighbour tables. This list is used only in process context,
  */
 
-static rwlock_t neigh_tbl_lock = RW_LOCK_UNLOCKED;
+static DEFINE_RWLOCK(neigh_tbl_lock);
 
 static int neigh_blackhole(struct sk_buff *skb)
 {
diff -puN net/core/netfilter.c~lock-initializer-cleanup-networking net/core/netfilter.c
--- 25/net/core/netfilter.c~lock-initializer-cleanup-networking	Wed Jan 12 16:54:32 2005
+++ 25-akpm/net/core/netfilter.c	Wed Jan 12 16:54:32 2005
@@ -47,7 +47,7 @@ static DECLARE_MUTEX(nf_sockopt_mutex);
 
 struct list_head nf_hooks[NPROTO][NF_MAX_HOOKS];
 static LIST_HEAD(nf_sockopts);
-static spinlock_t nf_hook_lock = SPIN_LOCK_UNLOCKED;
+static DEFINE_SPINLOCK(nf_hook_lock);
 
 /* 
  * A queue handler may be registered for each protocol.  Each is protected by
@@ -58,7 +58,7 @@ static struct nf_queue_handler_t {
 	nf_queue_outfn_t outfn;
 	void *data;
 } queue_handler[NPROTO];
-static rwlock_t queue_handler_lock = RW_LOCK_UNLOCKED;
+static DEFINE_RWLOCK(queue_handler_lock);
 
 int nf_register_hook(struct nf_hook_ops *reg)
 {
@@ -744,7 +744,7 @@ EXPORT_SYMBOL(skb_ip_make_writable);
 
 static nf_logfn *nf_logging[NPROTO]; /* = NULL */
 static int reported = 0;
-static spinlock_t nf_log_lock = SPIN_LOCK_UNLOCKED;
+static DEFINE_SPINLOCK(nf_log_lock);
 
 int nf_log_register(int pf, nf_logfn *logfn)
 {
diff -puN net/core/netpoll.c~lock-initializer-cleanup-networking net/core/netpoll.c
--- 25/net/core/netpoll.c~lock-initializer-cleanup-networking	Wed Jan 12 16:54:32 2005
+++ 25-akpm/net/core/netpoll.c	Wed Jan 12 16:54:32 2005
@@ -31,15 +31,15 @@
 #define MAX_SKBS 32
 #define MAX_UDP_CHUNK 1460
 
-static spinlock_t skb_list_lock = SPIN_LOCK_UNLOCKED;
+static DEFINE_SPINLOCK(skb_list_lock);
 static int nr_skbs;
 static struct sk_buff *skbs;
 
-static spinlock_t rx_list_lock = SPIN_LOCK_UNLOCKED;
+static DEFINE_SPINLOCK(rx_list_lock);
 static LIST_HEAD(rx_list);
 
 static atomic_t trapped;
-spinlock_t netpoll_poll_lock = SPIN_LOCK_UNLOCKED;
+DEFINE_SPINLOCK(netpoll_poll_lock);
 
 #define NETPOLL_RX_ENABLED  1
 #define NETPOLL_RX_DROP     2
diff -puN net/decnet/af_decnet.c~lock-initializer-cleanup-networking net/decnet/af_decnet.c
--- 25/net/decnet/af_decnet.c~lock-initializer-cleanup-networking	Wed Jan 12 16:54:32 2005
+++ 25-akpm/net/decnet/af_decnet.c	Wed Jan 12 16:54:32 2005
@@ -151,7 +151,7 @@ static void dn_keepalive(struct sock *sk
 
 static kmem_cache_t *dn_sk_cachep;
 static struct proto_ops dn_proto_ops;
-static rwlock_t dn_hash_lock = RW_LOCK_UNLOCKED;
+static DEFINE_RWLOCK(dn_hash_lock);
 static struct hlist_head dn_sk_hash[DN_SK_HASH_SIZE];
 static struct hlist_head dn_wild_sk;
 
diff -puN net/decnet/dn_dev.c~lock-initializer-cleanup-networking net/decnet/dn_dev.c
--- 25/net/decnet/dn_dev.c~lock-initializer-cleanup-networking	Wed Jan 12 16:54:32 2005
+++ 25-akpm/net/decnet/dn_dev.c	Wed Jan 12 16:54:32 2005
@@ -65,7 +65,7 @@ extern struct neigh_table dn_neigh_table
  */
 dn_address decnet_address = 0;
 
-static rwlock_t dndev_lock = RW_LOCK_UNLOCKED;
+static DEFINE_RWLOCK(dndev_lock);
 static struct net_device *decnet_default_device;
 static struct notifier_block *dnaddr_chain;
 
diff -puN net/decnet/dn_fib.c~lock-initializer-cleanup-networking net/decnet/dn_fib.c
--- 25/net/decnet/dn_fib.c~lock-initializer-cleanup-networking	Wed Jan 12 16:54:32 2005
+++ 25-akpm/net/decnet/dn_fib.c	Wed Jan 12 16:54:32 2005
@@ -57,9 +57,9 @@
 
 extern int dn_cache_dump(struct sk_buff *skb, struct netlink_callback *cb);
 
-static spinlock_t dn_fib_multipath_lock = SPIN_LOCK_UNLOCKED;
+static DEFINE_SPINLOCK(dn_fib_multipath_lock);
 static struct dn_fib_info *dn_fib_info_list;
-static rwlock_t dn_fib_info_lock = RW_LOCK_UNLOCKED;
+static DEFINE_RWLOCK(dn_fib_info_lock);
 int dn_fib_info_cnt;
 
 static struct
diff -puN net/decnet/dn_route.c~lock-initializer-cleanup-networking net/decnet/dn_route.c
--- 25/net/decnet/dn_route.c~lock-initializer-cleanup-networking	Wed Jan 12 16:54:32 2005
+++ 25-akpm/net/decnet/dn_route.c	Wed Jan 12 16:54:32 2005
@@ -336,7 +336,7 @@ nothing_to_declare:
 	}
 }
 
-static spinlock_t dn_rt_flush_lock = SPIN_LOCK_UNLOCKED;
+static DEFINE_SPINLOCK(dn_rt_flush_lock);
 
 void dn_rt_cache_flush(int delay)
 {
diff -puN net/decnet/dn_rules.c~lock-initializer-cleanup-networking net/decnet/dn_rules.c
--- 25/net/decnet/dn_rules.c~lock-initializer-cleanup-networking	Wed Jan 12 16:54:32 2005
+++ 25-akpm/net/decnet/dn_rules.c	Wed Jan 12 16:54:32 2005
@@ -68,7 +68,7 @@ static struct dn_fib_rule default_rule =
 };
 
 static struct dn_fib_rule *dn_fib_rules = &default_rule;
-static rwlock_t dn_fib_rules_lock = RW_LOCK_UNLOCKED;
+static DEFINE_RWLOCK(dn_fib_rules_lock);
 
 
 int dn_fib_rtm_delrule(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg)
diff -puN net/decnet/dn_table.c~lock-initializer-cleanup-networking net/decnet/dn_table.c
--- 25/net/decnet/dn_table.c~lock-initializer-cleanup-networking	Wed Jan 12 16:54:32 2005
+++ 25-akpm/net/decnet/dn_table.c	Wed Jan 12 16:54:32 2005
@@ -76,7 +76,7 @@ for( ; ((f) = *(fp)) != NULL && dn_key_e
 
 #define RT_TABLE_MIN 1
 
-static rwlock_t dn_fib_tables_lock = RW_LOCK_UNLOCKED;
+static DEFINE_RWLOCK(dn_fib_tables_lock);
 struct dn_fib_table *dn_fib_tables[RT_TABLE_MAX + 1];
 
 static kmem_cache_t *dn_hash_kmem;
diff -puN net/econet/af_econet.c~lock-initializer-cleanup-networking net/econet/af_econet.c
--- 25/net/econet/af_econet.c~lock-initializer-cleanup-networking	Wed Jan 12 16:54:32 2005
+++ 25-akpm/net/econet/af_econet.c	Wed Jan 12 16:54:32 2005
@@ -47,7 +47,7 @@
 
 static struct proto_ops econet_ops;
 static struct hlist_head econet_sklist;
-static rwlock_t econet_lock = RW_LOCK_UNLOCKED;
+static DEFINE_RWLOCK(econet_lock);
 
 /* Since there are only 256 possible network numbers (or fewer, depends
    how you count) it makes sense to use a simple lookup table. */
diff -puN net/ipv4/af_inet.c~lock-initializer-cleanup-networking net/ipv4/af_inet.c
--- 25/net/ipv4/af_inet.c~lock-initializer-cleanup-networking	Wed Jan 12 16:54:32 2005
+++ 25-akpm/net/ipv4/af_inet.c	Wed Jan 12 16:54:32 2005
@@ -125,7 +125,7 @@ extern void ip_mc_drop_socket(struct soc
  * build a new socket.
  */
 static struct list_head inetsw[SOCK_MAX];
-static spinlock_t inetsw_lock = SPIN_LOCK_UNLOCKED;
+static DEFINE_SPINLOCK(inetsw_lock);
 
 /* New destruction routine */
 
diff -puN net/ipv4/fib_hash.c~lock-initializer-cleanup-networking net/ipv4/fib_hash.c
--- 25/net/ipv4/fib_hash.c~lock-initializer-cleanup-networking	Wed Jan 12 16:54:32 2005
+++ 25-akpm/net/ipv4/fib_hash.c	Wed Jan 12 16:54:32 2005
@@ -92,7 +92,7 @@ static inline u32 fz_key(u32 dst, struct
 	return dst & FZ_MASK(fz);
 }
 
-static rwlock_t fib_hash_lock = RW_LOCK_UNLOCKED;
+static DEFINE_RWLOCK(fib_hash_lock);
 
 #define FZ_MAX_DIVISOR ((PAGE_SIZE<<MAX_ORDER) / sizeof(struct hlist_head))
 
diff -puN net/ipv4/fib_rules.c~lock-initializer-cleanup-networking net/ipv4/fib_rules.c
--- 25/net/ipv4/fib_rules.c~lock-initializer-cleanup-networking	Wed Jan 12 16:54:32 2005
+++ 25-akpm/net/ipv4/fib_rules.c	Wed Jan 12 16:54:32 2005
@@ -99,7 +99,7 @@ static struct fib_rule local_rule = {
 };
 
 static struct fib_rule *fib_rules = &local_rule;
-static rwlock_t fib_rules_lock = RW_LOCK_UNLOCKED;
+static DEFINE_RWLOCK(fib_rules_lock);
 
 int inet_rtm_delrule(struct sk_buff *skb, struct nlmsghdr* nlh, void *arg)
 {
diff -puN net/ipv4/fib_semantics.c~lock-initializer-cleanup-networking net/ipv4/fib_semantics.c
--- 25/net/ipv4/fib_semantics.c~lock-initializer-cleanup-networking	Wed Jan 12 16:54:32 2005
+++ 25-akpm/net/ipv4/fib_semantics.c	Wed Jan 12 16:54:32 2005
@@ -47,7 +47,7 @@
 
 #define FSprintk(a...)
 
-static rwlock_t fib_info_lock = RW_LOCK_UNLOCKED;
+static DEFINE_RWLOCK(fib_info_lock);
 static struct hlist_head *fib_info_hash;
 static struct hlist_head *fib_info_laddrhash;
 static unsigned int fib_hash_size;
@@ -59,7 +59,7 @@ static struct hlist_head fib_info_devhas
 
 #ifdef CONFIG_IP_ROUTE_MULTIPATH
 
-static spinlock_t fib_multipath_lock = SPIN_LOCK_UNLOCKED;
+static DEFINE_SPINLOCK(fib_multipath_lock);
 
 #define for_nexthops(fi) { int nhsel; const struct fib_nh * nh; \
 for (nhsel=0, nh = (fi)->fib_nh; nhsel < (fi)->fib_nhs; nh++, nhsel++)
diff -puN net/ipv4/inetpeer.c~lock-initializer-cleanup-networking net/ipv4/inetpeer.c
--- 25/net/ipv4/inetpeer.c~lock-initializer-cleanup-networking	Wed Jan 12 16:54:32 2005
+++ 25-akpm/net/ipv4/inetpeer.c	Wed Jan 12 16:54:32 2005
@@ -70,7 +70,7 @@
  */
 
 /* Exported for inet_getid inline function.  */
-spinlock_t inet_peer_idlock = SPIN_LOCK_UNLOCKED;
+DEFINE_SPINLOCK(inet_peer_idlock);
 
 static kmem_cache_t *peer_cachep;
 
@@ -82,7 +82,7 @@ static struct inet_peer peer_fake_node =
 };
 #define peer_avl_empty (&peer_fake_node)
 static struct inet_peer *peer_root = peer_avl_empty;
-static rwlock_t peer_pool_lock = RW_LOCK_UNLOCKED;
+static DEFINE_RWLOCK(peer_pool_lock);
 #define PEER_MAXDEPTH 40 /* sufficient for about 2^27 nodes */
 
 static volatile int peer_total;
@@ -95,7 +95,7 @@ int inet_peer_maxttl = 10 * 60 * HZ;	/* 
 /* Exported for inet_putpeer inline function.  */
 struct inet_peer *inet_peer_unused_head,
 		**inet_peer_unused_tailp = &inet_peer_unused_head;
-spinlock_t inet_peer_unused_lock = SPIN_LOCK_UNLOCKED;
+DEFINE_SPINLOCK(inet_peer_unused_lock);
 #define PEER_MAX_CLEANUP_WORK 30
 
 static void peer_check_expire(unsigned long dummy);
diff -puN net/ipv4/ipconfig.c~lock-initializer-cleanup-networking net/ipv4/ipconfig.c
--- 25/net/ipv4/ipconfig.c~lock-initializer-cleanup-networking	Wed Jan 12 16:54:32 2005
+++ 25-akpm/net/ipv4/ipconfig.c	Wed Jan 12 16:54:32 2005
@@ -152,7 +152,7 @@ static char user_dev_name[IFNAMSIZ] __in
 static int ic_proto_have_if __initdata = 0;
 
 #ifdef IPCONFIG_DYNAMIC
-static spinlock_t ic_recv_lock = SPIN_LOCK_UNLOCKED;
+static DEFINE_SPINLOCK(ic_recv_lock);
 static volatile int ic_got_reply __initdata = 0;    /* Proto(s) that replied */
 #endif
 #ifdef IPCONFIG_DHCP
diff -puN net/ipv4/ip_fragment.c~lock-initializer-cleanup-networking net/ipv4/ip_fragment.c
--- 25/net/ipv4/ip_fragment.c~lock-initializer-cleanup-networking	Wed Jan 12 16:54:32 2005
+++ 25-akpm/net/ipv4/ip_fragment.c	Wed Jan 12 16:54:32 2005
@@ -99,7 +99,7 @@ struct ipq {
 
 /* Per-bucket lock is easy to add now. */
 static struct ipq *ipq_hash[IPQ_HASHSZ];
-static rwlock_t ipfrag_lock = RW_LOCK_UNLOCKED;
+static DEFINE_RWLOCK(ipfrag_lock);
 static u32 ipfrag_hash_rnd;
 static LIST_HEAD(ipq_lru_list);
 int ip_frag_nqueues = 0;
diff -puN net/ipv4/ip_gre.c~lock-initializer-cleanup-networking net/ipv4/ip_gre.c
--- 25/net/ipv4/ip_gre.c~lock-initializer-cleanup-networking	Wed Jan 12 16:54:32 2005
+++ 25-akpm/net/ipv4/ip_gre.c	Wed Jan 12 16:54:32 2005
@@ -152,7 +152,7 @@ static struct ip_tunnel *tunnels[4][HASH
 #define tunnels_l	(tunnels[1])
 #define tunnels_wc	(tunnels[0])
 
-static rwlock_t ipgre_lock = RW_LOCK_UNLOCKED;
+static DEFINE_RWLOCK(ipgre_lock);
 
 /* Given src, dst and key, find appropriate for input tunnel. */
 
diff -puN net/ipv4/ipip.c~lock-initializer-cleanup-networking net/ipv4/ipip.c
--- 25/net/ipv4/ipip.c~lock-initializer-cleanup-networking	Wed Jan 12 16:54:32 2005
+++ 25-akpm/net/ipv4/ipip.c	Wed Jan 12 16:54:32 2005
@@ -132,7 +132,7 @@ static struct ip_tunnel *tunnels_l[HASH_
 static struct ip_tunnel *tunnels_wc[1];
 static struct ip_tunnel **tunnels[4] = { tunnels_wc, tunnels_l, tunnels_r, tunnels_r_l };
 
-static rwlock_t ipip_lock = RW_LOCK_UNLOCKED;
+static DEFINE_RWLOCK(ipip_lock);
 
 static struct ip_tunnel * ipip_tunnel_lookup(u32 remote, u32 local)
 {
diff -puN net/ipv4/ipmr.c~lock-initializer-cleanup-networking net/ipv4/ipmr.c
--- 25/net/ipv4/ipmr.c~lock-initializer-cleanup-networking	Wed Jan 12 16:54:32 2005
+++ 25-akpm/net/ipv4/ipmr.c	Wed Jan 12 16:54:32 2005
@@ -73,7 +73,7 @@ static struct sock *mroute_socket;
    Note that the changes are semaphored via rtnl_lock.
  */
 
-static rwlock_t mrt_lock = RW_LOCK_UNLOCKED;
+static DEFINE_RWLOCK(mrt_lock);
 
 /*
  *	Multicast router control variables
@@ -93,7 +93,7 @@ static struct mfc_cache *mfc_unres_queue
 static atomic_t cache_resolve_queue_len;		/* Size of unresolved	*/
 
 /* Special spinlock for queue of unresolved entries */
-static spinlock_t mfc_unres_lock = SPIN_LOCK_UNLOCKED;
+static DEFINE_SPINLOCK(mfc_unres_lock);
 
 /* We return to original Alan's scheme. Hash table of resolved
    entries is changed only in process context and protected
diff -puN net/ipv4/ip_sockglue.c~lock-initializer-cleanup-networking net/ipv4/ip_sockglue.c
--- 25/net/ipv4/ip_sockglue.c~lock-initializer-cleanup-networking	Wed Jan 12 16:54:32 2005
+++ 25-akpm/net/ipv4/ip_sockglue.c	Wed Jan 12 16:54:32 2005
@@ -186,7 +186,7 @@ int ip_cmsg_send(struct msghdr *msg, str
    sent to multicast group to reach destination designated router.
  */
 struct ip_ra_chain *ip_ra_chain;
-rwlock_t ip_ra_lock = RW_LOCK_UNLOCKED;
+DEFINE_RWLOCK(ip_ra_lock);
 
 int ip_ra_control(struct sock *sk, unsigned char on, void (*destructor)(struct sock *))
 {
diff -puN net/ipv4/ipvs/ip_vs_ctl.c~lock-initializer-cleanup-networking net/ipv4/ipvs/ip_vs_ctl.c
--- 25/net/ipv4/ipvs/ip_vs_ctl.c~lock-initializer-cleanup-networking	Wed Jan 12 16:54:32 2005
+++ 25-akpm/net/ipv4/ipvs/ip_vs_ctl.c	Wed Jan 12 16:54:32 2005
@@ -45,19 +45,19 @@
 static DECLARE_MUTEX(__ip_vs_mutex);
 
 /* lock for service table */
-static rwlock_t __ip_vs_svc_lock = RW_LOCK_UNLOCKED;
+static DEFINE_RWLOCK(__ip_vs_svc_lock);
 
 /* lock for table with the real services */
-static rwlock_t __ip_vs_rs_lock = RW_LOCK_UNLOCKED;
+static DEFINE_RWLOCK(__ip_vs_rs_lock);
 
 /* lock for state and timeout tables */
-static rwlock_t __ip_vs_securetcp_lock = RW_LOCK_UNLOCKED;
+static DEFINE_RWLOCK(__ip_vs_securetcp_lock);
 
 /* lock for drop entry handling */
-static spinlock_t __ip_vs_dropentry_lock = SPIN_LOCK_UNLOCKED;
+static DEFINE_SPINLOCK(__ip_vs_dropentry_lock);
 
 /* lock for drop packet handling */
-static spinlock_t __ip_vs_droppacket_lock = SPIN_LOCK_UNLOCKED;
+static DEFINE_SPINLOCK(__ip_vs_droppacket_lock);
 
 /* 1/rate drop and drop-entry variables */
 int ip_vs_drop_rate = 0;
diff -puN net/ipv4/ipvs/ip_vs_est.c~lock-initializer-cleanup-networking net/ipv4/ipvs/ip_vs_est.c
--- 25/net/ipv4/ipvs/ip_vs_est.c~lock-initializer-cleanup-networking	Wed Jan 12 16:54:32 2005
+++ 25-akpm/net/ipv4/ipvs/ip_vs_est.c	Wed Jan 12 16:54:32 2005
@@ -62,7 +62,7 @@ struct ip_vs_estimator
 
 
 static struct ip_vs_estimator *est_list = NULL;
-static rwlock_t est_lock = RW_LOCK_UNLOCKED;
+static DEFINE_RWLOCK(est_lock);
 static struct timer_list est_timer;
 
 static void estimation_timer(unsigned long arg)
diff -puN net/ipv4/ipvs/ip_vs_proto_tcp.c~lock-initializer-cleanup-networking net/ipv4/ipvs/ip_vs_proto_tcp.c
--- 25/net/ipv4/ipvs/ip_vs_proto_tcp.c~lock-initializer-cleanup-networking	Wed Jan 12 16:54:32 2005
+++ 25-akpm/net/ipv4/ipvs/ip_vs_proto_tcp.c	Wed Jan 12 16:54:32 2005
@@ -510,7 +510,7 @@ tcp_state_transition(struct ip_vs_conn *
 #define	TCP_APP_TAB_MASK	(TCP_APP_TAB_SIZE - 1)
 
 static struct list_head tcp_apps[TCP_APP_TAB_SIZE];
-static spinlock_t tcp_app_lock = SPIN_LOCK_UNLOCKED;
+static DEFINE_SPINLOCK(tcp_app_lock);
 
 static inline __u16 tcp_app_hashkey(__u16 port)
 {
diff -puN net/ipv4/ipvs/ip_vs_proto_udp.c~lock-initializer-cleanup-networking net/ipv4/ipvs/ip_vs_proto_udp.c
--- 25/net/ipv4/ipvs/ip_vs_proto_udp.c~lock-initializer-cleanup-networking	Wed Jan 12 16:54:32 2005
+++ 25-akpm/net/ipv4/ipvs/ip_vs_proto_udp.c	Wed Jan 12 16:54:32 2005
@@ -277,7 +277,7 @@ udp_csum_check(struct sk_buff *skb, stru
 #define	UDP_APP_TAB_MASK	(UDP_APP_TAB_SIZE - 1)
 
 static struct list_head udp_apps[UDP_APP_TAB_SIZE];
-static spinlock_t udp_app_lock = SPIN_LOCK_UNLOCKED;
+static DEFINE_SPINLOCK(udp_app_lock);
 
 static inline __u16 udp_app_hashkey(__u16 port)
 {
diff -puN net/ipv4/ipvs/ip_vs_sched.c~lock-initializer-cleanup-networking net/ipv4/ipvs/ip_vs_sched.c
--- 25/net/ipv4/ipvs/ip_vs_sched.c~lock-initializer-cleanup-networking	Wed Jan 12 16:54:32 2005
+++ 25-akpm/net/ipv4/ipvs/ip_vs_sched.c	Wed Jan 12 16:54:32 2005
@@ -33,7 +33,7 @@
 static LIST_HEAD(ip_vs_schedulers);
 
 /* lock for service table */
-static rwlock_t __ip_vs_sched_lock = RW_LOCK_UNLOCKED;
+static DEFINE_RWLOCK(__ip_vs_sched_lock);
 
 
 /*
diff -puN net/ipv4/ipvs/ip_vs_sync.c~lock-initializer-cleanup-networking net/ipv4/ipvs/ip_vs_sync.c
--- 25/net/ipv4/ipvs/ip_vs_sync.c~lock-initializer-cleanup-networking	Wed Jan 12 16:54:32 2005
+++ 25-akpm/net/ipv4/ipvs/ip_vs_sync.c	Wed Jan 12 16:54:32 2005
@@ -119,11 +119,11 @@ struct ip_vs_sync_buff {
 
 /* the sync_buff list head and the lock */
 static LIST_HEAD(ip_vs_sync_queue);
-static spinlock_t ip_vs_sync_lock = SPIN_LOCK_UNLOCKED;
+static DEFINE_SPINLOCK(ip_vs_sync_lock);
 
 /* current sync_buff for accepting new conn entries */
 static struct ip_vs_sync_buff   *curr_sb = NULL;
-static spinlock_t curr_sb_lock = SPIN_LOCK_UNLOCKED;
+static DEFINE_SPINLOCK(curr_sb_lock);
 
 /* ipvs sync daemon state */
 volatile int ip_vs_sync_state = IP_VS_STATE_NONE;
diff -puN net/ipv4/netfilter/ip_nat_snmp_basic.c~lock-initializer-cleanup-networking net/ipv4/netfilter/ip_nat_snmp_basic.c
--- 25/net/ipv4/netfilter/ip_nat_snmp_basic.c~lock-initializer-cleanup-networking	Wed Jan 12 16:54:32 2005
+++ 25-akpm/net/ipv4/netfilter/ip_nat_snmp_basic.c	Wed Jan 12 16:54:32 2005
@@ -65,7 +65,7 @@ MODULE_DESCRIPTION("Basic SNMP Applicati
 #define NOCT1(n) (u_int8_t )((n) & 0xff)
 
 static int debug;
-static spinlock_t snmp_lock = SPIN_LOCK_UNLOCKED;
+static DEFINE_SPINLOCK(snmp_lock);
 
 /* 
  * Application layer address mapping mimics the NAT mapping, but 
diff -puN net/ipv4/netfilter/ip_queue.c~lock-initializer-cleanup-networking net/ipv4/netfilter/ip_queue.c
--- 25/net/ipv4/netfilter/ip_queue.c~lock-initializer-cleanup-networking	Wed Jan 12 16:54:32 2005
+++ 25-akpm/net/ipv4/netfilter/ip_queue.c	Wed Jan 12 16:54:32 2005
@@ -55,7 +55,7 @@ typedef int (*ipq_cmpfn)(struct ipq_queu
 
 static unsigned char copy_mode = IPQ_COPY_NONE;
 static unsigned int queue_maxlen = IPQ_QMAX_DEFAULT;
-static rwlock_t queue_lock = RW_LOCK_UNLOCKED;
+static DEFINE_RWLOCK(queue_lock);
 static int peer_pid;
 static unsigned int copy_range;
 static unsigned int queue_total;
diff -puN net/ipv4/netfilter/ipt_limit.c~lock-initializer-cleanup-networking net/ipv4/netfilter/ipt_limit.c
--- 25/net/ipv4/netfilter/ipt_limit.c~lock-initializer-cleanup-networking	Wed Jan 12 16:54:32 2005
+++ 25-akpm/net/ipv4/netfilter/ipt_limit.c	Wed Jan 12 16:54:32 2005
@@ -29,7 +29,7 @@ MODULE_DESCRIPTION("iptables rate limit 
  * see net/sched/sch_tbf.c in the linux source tree
  */
 
-static spinlock_t limit_lock = SPIN_LOCK_UNLOCKED;
+static DEFINE_SPINLOCK(limit_lock);
 
 /* Rusty: This is my (non-mathematically-inclined) understanding of
    this algorithm.  The `average rate' in jiffies becomes your initial
diff -puN net/ipv4/netfilter/ipt_LOG.c~lock-initializer-cleanup-networking net/ipv4/netfilter/ipt_LOG.c
--- 25/net/ipv4/netfilter/ipt_LOG.c~lock-initializer-cleanup-networking	Wed Jan 12 16:54:32 2005
+++ 25-akpm/net/ipv4/netfilter/ipt_LOG.c	Wed Jan 12 16:54:32 2005
@@ -38,7 +38,7 @@ MODULE_PARM_DESC(nflog, "register as int
 #endif
 
 /* Use lock to serialize, so printks don't overlap */
-static spinlock_t log_lock = SPIN_LOCK_UNLOCKED;
+static DEFINE_SPINLOCK(log_lock);
 
 /* One level of recursion won't kill us */
 static void dump_packet(const struct ipt_log_info *info,
diff -puN net/ipv4/netfilter/ipt_recent.c~lock-initializer-cleanup-networking net/ipv4/netfilter/ipt_recent.c
--- 25/net/ipv4/netfilter/ipt_recent.c~lock-initializer-cleanup-networking	Wed Jan 12 16:54:32 2005
+++ 25-akpm/net/ipv4/netfilter/ipt_recent.c	Wed Jan 12 16:54:32 2005
@@ -90,7 +90,7 @@ static struct recent_ip_tables *r_tables
 /* We protect r_list with this spinlock so two processors are not modifying
  * the list at the same time. 
  */
-static spinlock_t recent_lock = SPIN_LOCK_UNLOCKED;
+static DEFINE_SPINLOCK(recent_lock);
 
 #ifdef CONFIG_PROC_FS
 /* Our /proc/net/ipt_recent entry */
diff -puN net/ipv4/protocol.c~lock-initializer-cleanup-networking net/ipv4/protocol.c
--- 25/net/ipv4/protocol.c~lock-initializer-cleanup-networking	Wed Jan 12 16:54:32 2005
+++ 25-akpm/net/ipv4/protocol.c	Wed Jan 12 16:54:32 2005
@@ -49,7 +49,7 @@
 #include <linux/igmp.h>
 
 struct net_protocol *inet_protos[MAX_INET_PROTOS];
-static spinlock_t inet_proto_lock = SPIN_LOCK_UNLOCKED;
+static DEFINE_SPINLOCK(inet_proto_lock);
 
 /*
  *	Add a protocol handler to the hash tables
diff -puN net/ipv4/raw.c~lock-initializer-cleanup-networking net/ipv4/raw.c
--- 25/net/ipv4/raw.c~lock-initializer-cleanup-networking	Wed Jan 12 16:54:32 2005
+++ 25-akpm/net/ipv4/raw.c	Wed Jan 12 16:54:32 2005
@@ -81,7 +81,7 @@
 #include <linux/netfilter_ipv4.h>
 
 struct hlist_head raw_v4_htable[RAWV4_HTABLE_SIZE];
-rwlock_t raw_v4_lock = RW_LOCK_UNLOCKED;
+DEFINE_RWLOCK(raw_v4_lock);
 
 static void raw_v4_hash(struct sock *sk)
 {
diff -puN net/ipv4/route.c~lock-initializer-cleanup-networking net/ipv4/route.c
--- 25/net/ipv4/route.c~lock-initializer-cleanup-networking	Wed Jan 12 16:54:32 2005
+++ 25-akpm/net/ipv4/route.c	Wed Jan 12 16:54:32 2005
@@ -582,7 +582,7 @@ static void rt_run_flush(unsigned long d
 	}
 }
 
-static spinlock_t rt_flush_lock = SPIN_LOCK_UNLOCKED;
+static DEFINE_SPINLOCK(rt_flush_lock);
 
 void rt_cache_flush(int delay)
 {
@@ -901,7 +901,7 @@ restart:
 
 void rt_bind_peer(struct rtable *rt, int create)
 {
-	static spinlock_t rt_peer_lock = SPIN_LOCK_UNLOCKED;
+	static DEFINE_SPINLOCK(rt_peer_lock);
 	struct inet_peer *peer;
 
 	peer = inet_getpeer(rt->rt_dst, create);
@@ -925,7 +925,7 @@ void rt_bind_peer(struct rtable *rt, int
  */
 static void ip_select_fb_ident(struct iphdr *iph)
 {
-	static spinlock_t ip_fb_id_lock = SPIN_LOCK_UNLOCKED;
+	static DEFINE_SPINLOCK(ip_fb_id_lock);
 	static u32 ip_fallback_id;
 	u32 salt;
 
diff -puN net/ipv4/tcp_minisocks.c~lock-initializer-cleanup-networking net/ipv4/tcp_minisocks.c
--- 25/net/ipv4/tcp_minisocks.c~lock-initializer-cleanup-networking	Wed Jan 12 16:54:32 2005
+++ 25-akpm/net/ipv4/tcp_minisocks.c	Wed Jan 12 16:54:32 2005
@@ -419,7 +419,7 @@ static void tcp_twkill(unsigned long);
 #define TCP_TWKILL_QUOTA	100
 
 static struct hlist_head tcp_tw_death_row[TCP_TWKILL_SLOTS];
-static spinlock_t tw_death_lock = SPIN_LOCK_UNLOCKED;
+static DEFINE_SPINLOCK(tw_death_lock);
 static struct timer_list tcp_tw_timer = TIMER_INITIALIZER(tcp_twkill, 0, 0);
 static void twkill_work(void *);
 static DECLARE_WORK(tcp_twkill_work, twkill_work, NULL);
diff -puN net/ipv4/udp.c~lock-initializer-cleanup-networking net/ipv4/udp.c
--- 25/net/ipv4/udp.c~lock-initializer-cleanup-networking	Wed Jan 12 16:54:32 2005
+++ 25-akpm/net/ipv4/udp.c	Wed Jan 12 16:54:32 2005
@@ -115,7 +115,7 @@
 DEFINE_SNMP_STAT(struct udp_mib, udp_statistics);
 
 struct hlist_head udp_hash[UDP_HTABLE_SIZE];
-rwlock_t udp_hash_lock = RW_LOCK_UNLOCKED;
+DEFINE_RWLOCK(udp_hash_lock);
 
 /* Shared by v4/v6 udp. */
 int udp_port_rover;
diff -puN net/ipv6/addrconf.c~lock-initializer-cleanup-networking net/ipv6/addrconf.c
--- 25/net/ipv6/addrconf.c~lock-initializer-cleanup-networking	Wed Jan 12 16:54:32 2005
+++ 25-akpm/net/ipv6/addrconf.c	Wed Jan 12 16:54:32 2005
@@ -109,7 +109,7 @@ static void ipv6_regen_rndid(unsigned lo
 
 static int desync_factor = MAX_DESYNC_FACTOR * HZ;
 static struct crypto_tfm *md5_tfm;
-static spinlock_t md5_tfm_lock = SPIN_LOCK_UNLOCKED;
+static DEFINE_SPINLOCK(md5_tfm_lock);
 #endif
 
 static int ipv6_count_addresses(struct inet6_dev *idev);
@@ -118,16 +118,16 @@ static int ipv6_count_addresses(struct i
  *	Configured unicast address hash table
  */
 static struct inet6_ifaddr		*inet6_addr_lst[IN6_ADDR_HSIZE];
-static rwlock_t	addrconf_hash_lock = RW_LOCK_UNLOCKED;
+static DEFINE_RWLOCK(addrconf_hash_lock);
 
 /* Protects inet6 devices */
-rwlock_t addrconf_lock = RW_LOCK_UNLOCKED;
+DEFINE_RWLOCK(addrconf_lock);
 
 static void addrconf_verify(unsigned long);
 
 static struct timer_list addr_chk_timer =
 			TIMER_INITIALIZER(addrconf_verify, 0, 0);
-static spinlock_t addrconf_verify_lock = SPIN_LOCK_UNLOCKED;
+static DEFINE_SPINLOCK(addrconf_verify_lock);
 
 static void addrconf_join_anycast(struct inet6_ifaddr *ifp);
 static void addrconf_leave_anycast(struct inet6_ifaddr *ifp);
diff -puN net/ipv6/af_inet6.c~lock-initializer-cleanup-networking net/ipv6/af_inet6.c
--- 25/net/ipv6/af_inet6.c~lock-initializer-cleanup-networking	Wed Jan 12 16:54:32 2005
+++ 25-akpm/net/ipv6/af_inet6.c	Wed Jan 12 16:54:32 2005
@@ -94,7 +94,7 @@ atomic_t inet6_sock_nr;
  * build a new socket.
  */
 static struct list_head inetsw6[SOCK_MAX];
-static spinlock_t inetsw6_lock = SPIN_LOCK_UNLOCKED;
+static DEFINE_SPINLOCK(inetsw6_lock);
 
 static void inet6_sock_destruct(struct sock *sk)
 {
diff -puN net/ipv6/anycast.c~lock-initializer-cleanup-networking net/ipv6/anycast.c
--- 25/net/ipv6/anycast.c~lock-initializer-cleanup-networking	Wed Jan 12 16:54:32 2005
+++ 25-akpm/net/ipv6/anycast.c	Wed Jan 12 16:54:32 2005
@@ -44,7 +44,7 @@
 #include <net/checksum.h>
 
 /* Big ac list lock for all the sockets */
-static rwlock_t ipv6_sk_ac_lock = RW_LOCK_UNLOCKED;
+static DEFINE_RWLOCK(ipv6_sk_ac_lock);
 
 /* XXX ip6_addr_match() and ip6_onlink() really belong in net/core.c */
 
diff -puN net/ipv6/ip6_fib.c~lock-initializer-cleanup-networking net/ipv6/ip6_fib.c
--- 25/net/ipv6/ip6_fib.c~lock-initializer-cleanup-networking	Wed Jan 12 16:54:32 2005
+++ 25-akpm/net/ipv6/ip6_fib.c	Wed Jan 12 16:54:32 2005
@@ -69,7 +69,7 @@ struct fib6_cleaner_t
 	void *arg;
 };
 
-rwlock_t fib6_walker_lock = RW_LOCK_UNLOCKED;
+DEFINE_RWLOCK(fib6_walker_lock);
 
 
 #ifdef CONFIG_IPV6_SUBTREES
@@ -1205,7 +1205,7 @@ static int fib6_age(struct rt6_info *rt,
 	return 0;
 }
 
-static spinlock_t fib6_gc_lock = SPIN_LOCK_UNLOCKED;
+static DEFINE_SPINLOCK(fib6_gc_lock);
 
 void fib6_run_gc(unsigned long dummy)
 {
diff -puN net/ipv6/ip6_flowlabel.c~lock-initializer-cleanup-networking net/ipv6/ip6_flowlabel.c
--- 25/net/ipv6/ip6_flowlabel.c~lock-initializer-cleanup-networking	Wed Jan 12 16:54:32 2005
+++ 25-akpm/net/ipv6/ip6_flowlabel.c	Wed Jan 12 16:54:32 2005
@@ -54,11 +54,11 @@ static struct timer_list ip6_fl_gc_timer
 
 /* FL hash table lock: it protects only of GC */
 
-static rwlock_t ip6_fl_lock = RW_LOCK_UNLOCKED;
+static DEFINE_RWLOCK(ip6_fl_lock);
 
 /* Big socket sock */
 
-static rwlock_t ip6_sk_fl_lock = RW_LOCK_UNLOCKED;
+static DEFINE_RWLOCK(ip6_sk_fl_lock);
 
 
 static __inline__ struct ip6_flowlabel * __fl_lookup(u32 label)
diff -puN net/ipv6/ip6_output.c~lock-initializer-cleanup-networking net/ipv6/ip6_output.c
--- 25/net/ipv6/ip6_output.c~lock-initializer-cleanup-networking	Wed Jan 12 16:54:32 2005
+++ 25-akpm/net/ipv6/ip6_output.c	Wed Jan 12 16:54:32 2005
@@ -61,7 +61,7 @@ static int ip6_fragment(struct sk_buff *
 static __inline__ void ipv6_select_ident(struct sk_buff *skb, struct frag_hdr *fhdr)
 {
 	static u32 ipv6_fragmentation_id = 1;
-	static spinlock_t ip6_id_lock = SPIN_LOCK_UNLOCKED;
+	static DEFINE_SPINLOCK(ip6_id_lock);
 
 	spin_lock_bh(&ip6_id_lock);
 	fhdr->identification = htonl(ipv6_fragmentation_id);
diff -puN net/ipv6/ip6_tunnel.c~lock-initializer-cleanup-networking net/ipv6/ip6_tunnel.c
--- 25/net/ipv6/ip6_tunnel.c~lock-initializer-cleanup-networking	Wed Jan 12 16:54:32 2005
+++ 25-akpm/net/ipv6/ip6_tunnel.c	Wed Jan 12 16:54:32 2005
@@ -85,7 +85,7 @@ static struct ip6_tnl *tnls_wc[1];
 static struct ip6_tnl **tnls[2] = { tnls_wc, tnls_r_l };
 
 /* lock for the tunnel lists */
-static rwlock_t ip6ip6_lock = RW_LOCK_UNLOCKED;
+static DEFINE_RWLOCK(ip6ip6_lock);
 
 static inline struct dst_entry *ip6_tnl_dst_check(struct ip6_tnl *t)
 {
diff -puN net/ipv6/ipv6_sockglue.c~lock-initializer-cleanup-networking net/ipv6/ipv6_sockglue.c
--- 25/net/ipv6/ipv6_sockglue.c~lock-initializer-cleanup-networking	Wed Jan 12 16:54:32 2005
+++ 25-akpm/net/ipv6/ipv6_sockglue.c	Wed Jan 12 16:54:32 2005
@@ -63,7 +63,7 @@ static struct packet_type ipv6_packet_ty
 };
 
 struct ip6_ra_chain *ip6_ra_chain;
-rwlock_t ip6_ra_lock = RW_LOCK_UNLOCKED;
+DEFINE_RWLOCK(ip6_ra_lock);
 
 int ip6_ra_control(struct sock *sk, int sel, void (*destructor)(struct sock *))
 {
diff -puN net/ipv6/mcast.c~lock-initializer-cleanup-networking net/ipv6/mcast.c
--- 25/net/ipv6/mcast.c~lock-initializer-cleanup-networking	Wed Jan 12 16:54:32 2005
+++ 25-akpm/net/ipv6/mcast.c	Wed Jan 12 16:54:32 2005
@@ -124,7 +124,7 @@ struct mld2_query {
 struct in6_addr mld2_all_mcr = MLD2_ALL_MCR_INIT;
 
 /* Big mc list lock for all the sockets */
-static rwlock_t ipv6_sk_mc_lock = RW_LOCK_UNLOCKED;
+static DEFINE_RWLOCK(ipv6_sk_mc_lock);
 
 static struct socket *igmp6_socket;
 
diff -puN net/ipv6/netfilter/ip6_queue.c~lock-initializer-cleanup-networking net/ipv6/netfilter/ip6_queue.c
--- 25/net/ipv6/netfilter/ip6_queue.c~lock-initializer-cleanup-networking	Wed Jan 12 16:54:32 2005
+++ 25-akpm/net/ipv6/netfilter/ip6_queue.c	Wed Jan 12 16:54:32 2005
@@ -60,7 +60,7 @@ typedef int (*ipq_cmpfn)(struct ipq_queu
 
 static unsigned char copy_mode = IPQ_COPY_NONE;
 static unsigned int queue_maxlen = IPQ_QMAX_DEFAULT;
-static rwlock_t queue_lock = RW_LOCK_UNLOCKED;
+static DEFINE_RWLOCK(queue_lock);
 static int peer_pid;
 static unsigned int copy_range;
 static unsigned int queue_total;
diff -puN net/ipv6/netfilter/ip6t_limit.c~lock-initializer-cleanup-networking net/ipv6/netfilter/ip6t_limit.c
--- 25/net/ipv6/netfilter/ip6t_limit.c~lock-initializer-cleanup-networking	Wed Jan 12 16:54:32 2005
+++ 25-akpm/net/ipv6/netfilter/ip6t_limit.c	Wed Jan 12 16:54:32 2005
@@ -29,7 +29,7 @@ MODULE_DESCRIPTION("rate limiting within
  * see net/sched/sch_tbf.c in the linux source tree
  */
 
-static spinlock_t limit_lock = SPIN_LOCK_UNLOCKED;
+static DEFINE_SPINLOCK(limit_lock);
 
 /* Rusty: This is my (non-mathematically-inclined) understanding of
    this algorithm.  The `average rate' in jiffies becomes your initial
diff -puN net/ipv6/netfilter/ip6t_LOG.c~lock-initializer-cleanup-networking net/ipv6/netfilter/ip6t_LOG.c
--- 25/net/ipv6/netfilter/ip6t_LOG.c~lock-initializer-cleanup-networking	Wed Jan 12 16:54:32 2005
+++ 25-akpm/net/ipv6/netfilter/ip6t_LOG.c	Wed Jan 12 16:54:32 2005
@@ -41,7 +41,7 @@ struct in_device;
 #endif
 
 /* Use lock to serialize, so printks don't overlap */
-static spinlock_t log_lock = SPIN_LOCK_UNLOCKED;
+static DEFINE_SPINLOCK(log_lock);
 
 /* One level of recursion won't kill us */
 static void dump_packet(const struct ip6t_log_info *info,
diff -puN net/ipv6/protocol.c~lock-initializer-cleanup-networking net/ipv6/protocol.c
--- 25/net/ipv6/protocol.c~lock-initializer-cleanup-networking	Wed Jan 12 16:54:32 2005
+++ 25-akpm/net/ipv6/protocol.c	Wed Jan 12 16:54:32 2005
@@ -40,7 +40,7 @@
 #include <net/protocol.h>
 
 struct inet6_protocol *inet6_protos[MAX_INET_PROTOS];
-static spinlock_t inet6_proto_lock = SPIN_LOCK_UNLOCKED;
+static DEFINE_SPINLOCK(inet6_proto_lock);
 
 
 int inet6_add_protocol(struct inet6_protocol *prot, unsigned char protocol)
diff -puN net/ipv6/raw.c~lock-initializer-cleanup-networking net/ipv6/raw.c
--- 25/net/ipv6/raw.c~lock-initializer-cleanup-networking	Wed Jan 12 16:54:32 2005
+++ 25-akpm/net/ipv6/raw.c	Wed Jan 12 16:54:32 2005
@@ -56,7 +56,7 @@
 #include <linux/seq_file.h>
 
 struct hlist_head raw_v6_htable[RAWV6_HTABLE_SIZE];
-rwlock_t raw_v6_lock = RW_LOCK_UNLOCKED;
+DEFINE_RWLOCK(raw_v6_lock);
 
 static void raw_v6_hash(struct sock *sk)
 {
diff -puN net/ipv6/reassembly.c~lock-initializer-cleanup-networking net/ipv6/reassembly.c
--- 25/net/ipv6/reassembly.c~lock-initializer-cleanup-networking	Wed Jan 12 16:54:32 2005
+++ 25-akpm/net/ipv6/reassembly.c	Wed Jan 12 16:54:32 2005
@@ -103,7 +103,7 @@ struct frag_queue
 #define IP6Q_HASHSZ	64
 
 static struct frag_queue *ip6_frag_hash[IP6Q_HASHSZ];
-static rwlock_t ip6_frag_lock = RW_LOCK_UNLOCKED;
+static DEFINE_RWLOCK(ip6_frag_lock);
 static u32 ip6_frag_hash_rnd;
 static LIST_HEAD(ip6_frag_lru_list);
 int ip6_frag_nqueues = 0;
diff -puN net/ipv6/route.c~lock-initializer-cleanup-networking net/ipv6/route.c
--- 25/net/ipv6/route.c~lock-initializer-cleanup-networking	Wed Jan 12 16:54:32 2005
+++ 25-akpm/net/ipv6/route.c	Wed Jan 12 16:54:32 2005
@@ -133,7 +133,7 @@ struct fib6_node ip6_routing_table = {
 
 /* Protects all the ip6 fib */
 
-rwlock_t rt6_lock = RW_LOCK_UNLOCKED;
+DEFINE_RWLOCK(rt6_lock);
 
 
 /* allocate dst with ip6_dst_ops */
@@ -209,7 +209,7 @@ static __inline__ struct rt6_info *rt6_d
  *	pointer to the last default router chosen. BH is disabled locally.
  */
 struct rt6_info *rt6_dflt_pointer;
-spinlock_t rt6_dflt_lock = SPIN_LOCK_UNLOCKED;
+DEFINE_SPINLOCK(rt6_dflt_lock);
 
 void rt6_reset_dflt_pointer(struct rt6_info *rt)
 {
diff -puN net/ipv6/sit.c~lock-initializer-cleanup-networking net/ipv6/sit.c
--- 25/net/ipv6/sit.c~lock-initializer-cleanup-networking	Wed Jan 12 16:54:32 2005
+++ 25-akpm/net/ipv6/sit.c	Wed Jan 12 16:54:32 2005
@@ -73,7 +73,7 @@ static struct ip_tunnel *tunnels_l[HASH_
 static struct ip_tunnel *tunnels_wc[1];
 static struct ip_tunnel **tunnels[4] = { tunnels_wc, tunnels_l, tunnels_r, tunnels_r_l };
 
-static rwlock_t ipip6_lock = RW_LOCK_UNLOCKED;
+static DEFINE_RWLOCK(ipip6_lock);
 
 static struct ip_tunnel * ipip6_tunnel_lookup(u32 remote, u32 local)
 {
diff -puN net/ipv6/xfrm6_tunnel.c~lock-initializer-cleanup-networking net/ipv6/xfrm6_tunnel.c
--- 25/net/ipv6/xfrm6_tunnel.c~lock-initializer-cleanup-networking	Wed Jan 12 16:54:32 2005
+++ 25-akpm/net/ipv6/xfrm6_tunnel.c	Wed Jan 12 16:54:32 2005
@@ -72,7 +72,7 @@ struct xfrm6_tunnel_spi {
 # define XFRM6_TUNNEL_SPI_MAGIC 0xdeadbeef
 #endif
 
-static rwlock_t xfrm6_tunnel_spi_lock = RW_LOCK_UNLOCKED;
+static DEFINE_RWLOCK(xfrm6_tunnel_spi_lock);
 
 static u32 xfrm6_tunnel_spi;
 
diff -puN net/ipx/af_ipx.c~lock-initializer-cleanup-networking net/ipx/af_ipx.c
--- 25/net/ipx/af_ipx.c~lock-initializer-cleanup-networking	Wed Jan 12 16:54:32 2005
+++ 25-akpm/net/ipx/af_ipx.c	Wed Jan 12 16:54:32 2005
@@ -78,7 +78,7 @@ static struct datalink_proto *pSNAP_data
 static struct proto_ops ipx_dgram_ops;
 
 LIST_HEAD(ipx_interfaces);
-spinlock_t ipx_interfaces_lock = SPIN_LOCK_UNLOCKED;
+DEFINE_SPINLOCK(ipx_interfaces_lock);
 
 struct ipx_interface *ipx_primary_net;
 struct ipx_interface *ipx_internal_net;
diff -puN net/ipx/ipx_route.c~lock-initializer-cleanup-networking net/ipx/ipx_route.c
--- 25/net/ipx/ipx_route.c~lock-initializer-cleanup-networking	Wed Jan 12 16:54:32 2005
+++ 25-akpm/net/ipx/ipx_route.c	Wed Jan 12 16:54:32 2005
@@ -16,7 +16,7 @@
 #include <net/sock.h>
 
 LIST_HEAD(ipx_routes);
-rwlock_t ipx_routes_lock = RW_LOCK_UNLOCKED;
+DEFINE_RWLOCK(ipx_routes_lock);
 
 extern struct ipx_interface *ipx_internal_net;
 
diff -puN net/key/af_key.c~lock-initializer-cleanup-networking net/key/af_key.c
--- 25/net/key/af_key.c~lock-initializer-cleanup-networking	Wed Jan 12 16:54:32 2005
+++ 25-akpm/net/key/af_key.c	Wed Jan 12 16:54:33 2005
@@ -37,7 +37,7 @@
 /* List of all pfkey sockets. */
 static HLIST_HEAD(pfkey_table);
 static DECLARE_WAIT_QUEUE_HEAD(pfkey_table_wait);
-static rwlock_t pfkey_table_lock = RW_LOCK_UNLOCKED;
+static DEFINE_RWLOCK(pfkey_table_lock);
 static atomic_t pfkey_table_users = ATOMIC_INIT(0);
 
 static atomic_t pfkey_socks_nr = ATOMIC_INIT(0);
@@ -2344,7 +2344,7 @@ static u32 get_acqseq(void)
 {
 	u32 res;
 	static u32 acqseq;
-	static spinlock_t acqseq_lock = SPIN_LOCK_UNLOCKED;
+	static DEFINE_SPINLOCK(acqseq_lock);
 
 	spin_lock_bh(&acqseq_lock);
 	res = (++acqseq ? : ++acqseq);
diff -puN net/lapb/lapb_iface.c~lock-initializer-cleanup-networking net/lapb/lapb_iface.c
--- 25/net/lapb/lapb_iface.c~lock-initializer-cleanup-networking	Wed Jan 12 16:54:32 2005
+++ 25-akpm/net/lapb/lapb_iface.c	Wed Jan 12 16:54:33 2005
@@ -40,7 +40,7 @@
 #include <net/lapb.h>
 
 static struct list_head lapb_list = LIST_HEAD_INIT(lapb_list);
-static rwlock_t lapb_list_lock = RW_LOCK_UNLOCKED;
+static DEFINE_RWLOCK(lapb_list_lock);
 
 /*
  *	Free an allocated lapb control block. 
diff -puN net/llc/llc_core.c~lock-initializer-cleanup-networking net/llc/llc_core.c
--- 25/net/llc/llc_core.c~lock-initializer-cleanup-networking	Wed Jan 12 16:54:32 2005
+++ 25-akpm/net/llc/llc_core.c	Wed Jan 12 16:54:33 2005
@@ -22,7 +22,7 @@
 #include <net/llc.h>
 
 LIST_HEAD(llc_sap_list);
-rwlock_t llc_sap_list_lock = RW_LOCK_UNLOCKED;
+DEFINE_RWLOCK(llc_sap_list_lock);
 
 unsigned char llc_station_mac_sa[ETH_ALEN];
 
diff -puN net/netlink/af_netlink.c~lock-initializer-cleanup-networking net/netlink/af_netlink.c
--- 25/net/netlink/af_netlink.c~lock-initializer-cleanup-networking	Wed Jan 12 16:54:32 2005
+++ 25-akpm/net/netlink/af_netlink.c	Wed Jan 12 16:54:33 2005
@@ -105,7 +105,7 @@ static struct socket *netlink_kernel[MAX
 static int netlink_dump(struct sock *sk);
 static void netlink_destroy_callback(struct netlink_callback *cb);
 
-static rwlock_t nl_table_lock = RW_LOCK_UNLOCKED;
+static DEFINE_RWLOCK(nl_table_lock);
 static atomic_t nl_table_users = ATOMIC_INIT(0);
 
 static struct notifier_block *netlink_chain;
@@ -1204,7 +1204,7 @@ void netlink_ack(struct sk_buff *in_skb,
 
 #ifdef NL_EMULATE_DEV
 
-static rwlock_t nl_emu_lock = RW_LOCK_UNLOCKED;
+static DEFINE_RWLOCK(nl_emu_lock);
 
 /*
  *	Backward compatibility.
diff -puN net/netrom/af_netrom.c~lock-initializer-cleanup-networking net/netrom/af_netrom.c
--- 25/net/netrom/af_netrom.c~lock-initializer-cleanup-networking	Wed Jan 12 16:54:32 2005
+++ 25-akpm/net/netrom/af_netrom.c	Wed Jan 12 16:54:33 2005
@@ -60,7 +60,7 @@ int sysctl_netrom_link_fails_count      
 static unsigned short circuit = 0x101;
 
 static HLIST_HEAD(nr_list);
-static spinlock_t nr_list_lock = SPIN_LOCK_UNLOCKED;
+static DEFINE_SPINLOCK(nr_list_lock);
 
 static struct proto_ops nr_proto_ops;
 void nr_init_timers(struct sock *sk);
diff -puN net/netrom/nr_route.c~lock-initializer-cleanup-networking net/netrom/nr_route.c
--- 25/net/netrom/nr_route.c~lock-initializer-cleanup-networking	Wed Jan 12 16:54:32 2005
+++ 25-akpm/net/netrom/nr_route.c	Wed Jan 12 16:54:33 2005
@@ -41,9 +41,9 @@
 static unsigned int nr_neigh_no = 1;
 
 static HLIST_HEAD(nr_node_list);
-static spinlock_t nr_node_list_lock = SPIN_LOCK_UNLOCKED;
+static DEFINE_SPINLOCK(nr_node_list_lock);
 static HLIST_HEAD(nr_neigh_list);
-static spinlock_t nr_neigh_list_lock = SPIN_LOCK_UNLOCKED;
+static DEFINE_SPINLOCK(nr_neigh_list_lock);
 
 static struct nr_node *nr_node_get(ax25_address *callsign)
 {
diff -puN net/packet/af_packet.c~lock-initializer-cleanup-networking net/packet/af_packet.c
--- 25/net/packet/af_packet.c~lock-initializer-cleanup-networking	Wed Jan 12 16:54:32 2005
+++ 25-akpm/net/packet/af_packet.c	Wed Jan 12 16:54:33 2005
@@ -146,7 +146,7 @@ dev->hard_header == NULL (ll header is a
 
 /* List of all packet sockets. */
 static HLIST_HEAD(packet_sklist);
-static rwlock_t packet_sklist_lock = RW_LOCK_UNLOCKED;
+static DEFINE_RWLOCK(packet_sklist_lock);
 
 static atomic_t packet_socks_nr;
 
diff -puN net/rose/af_rose.c~lock-initializer-cleanup-networking net/rose/af_rose.c
--- 25/net/rose/af_rose.c~lock-initializer-cleanup-networking	Wed Jan 12 16:54:32 2005
+++ 25-akpm/net/rose/af_rose.c	Wed Jan 12 16:54:33 2005
@@ -59,7 +59,7 @@ int sysctl_rose_maximum_vcs             
 int sysctl_rose_window_size             = ROSE_DEFAULT_WINDOW_SIZE;
 
 static HLIST_HEAD(rose_list);
-static spinlock_t rose_list_lock = SPIN_LOCK_UNLOCKED;
+static DEFINE_SPINLOCK(rose_list_lock);
 
 static struct proto_ops rose_proto_ops;
 
diff -puN net/rose/rose_route.c~lock-initializer-cleanup-networking net/rose/rose_route.c
--- 25/net/rose/rose_route.c~lock-initializer-cleanup-networking	Wed Jan 12 16:54:32 2005
+++ 25-akpm/net/rose/rose_route.c	Wed Jan 12 16:54:33 2005
@@ -40,11 +40,11 @@
 static unsigned int rose_neigh_no = 1;
 
 static struct rose_node  *rose_node_list;
-static spinlock_t rose_node_list_lock = SPIN_LOCK_UNLOCKED;
+static DEFINE_SPINLOCK(rose_node_list_lock);
 static struct rose_neigh *rose_neigh_list;
-static spinlock_t rose_neigh_list_lock = SPIN_LOCK_UNLOCKED;
+static DEFINE_SPINLOCK(rose_neigh_list_lock);
 static struct rose_route *rose_route_list;
-static spinlock_t rose_route_list_lock = SPIN_LOCK_UNLOCKED;
+static DEFINE_SPINLOCK(rose_route_list_lock);
 
 struct rose_neigh *rose_loopback_neigh;
 
diff -puN net/rxrpc/krxiod.c~lock-initializer-cleanup-networking net/rxrpc/krxiod.c
--- 25/net/rxrpc/krxiod.c~lock-initializer-cleanup-networking	Wed Jan 12 16:54:32 2005
+++ 25-akpm/net/rxrpc/krxiod.c	Wed Jan 12 16:54:33 2005
@@ -25,10 +25,10 @@ static DECLARE_COMPLETION(rxrpc_krxiod_d
 static atomic_t rxrpc_krxiod_qcount = ATOMIC_INIT(0);
 
 static LIST_HEAD(rxrpc_krxiod_transportq);
-static spinlock_t rxrpc_krxiod_transportq_lock = SPIN_LOCK_UNLOCKED;
+static DEFINE_SPINLOCK(rxrpc_krxiod_transportq_lock);
 
 static LIST_HEAD(rxrpc_krxiod_callq);
-static spinlock_t rxrpc_krxiod_callq_lock = SPIN_LOCK_UNLOCKED;
+static DEFINE_SPINLOCK(rxrpc_krxiod_callq_lock);
 
 static volatile int rxrpc_krxiod_die;
 
diff -puN net/rxrpc/krxsecd.c~lock-initializer-cleanup-networking net/rxrpc/krxsecd.c
--- 25/net/rxrpc/krxsecd.c~lock-initializer-cleanup-networking	Wed Jan 12 16:54:32 2005
+++ 25-akpm/net/rxrpc/krxsecd.c	Wed Jan 12 16:54:33 2005
@@ -39,7 +39,7 @@ static atomic_t rxrpc_krxsecd_qcount;
 /* queue of unprocessed inbound messages with seqno #1 and
  * RXRPC_CLIENT_INITIATED flag set */
 static LIST_HEAD(rxrpc_krxsecd_initmsgq);
-static spinlock_t rxrpc_krxsecd_initmsgq_lock = SPIN_LOCK_UNLOCKED;
+static DEFINE_SPINLOCK(rxrpc_krxsecd_initmsgq_lock);
 
 static void rxrpc_krxsecd_process_incoming_call(struct rxrpc_message *msg);
 
diff -puN net/rxrpc/krxtimod.c~lock-initializer-cleanup-networking net/rxrpc/krxtimod.c
--- 25/net/rxrpc/krxtimod.c~lock-initializer-cleanup-networking	Wed Jan 12 16:54:32 2005
+++ 25-akpm/net/rxrpc/krxtimod.c	Wed Jan 12 16:54:33 2005
@@ -24,7 +24,7 @@ static DECLARE_WAIT_QUEUE_HEAD(krxtimod_
 static int krxtimod_die;
 
 static LIST_HEAD(krxtimod_list);
-static spinlock_t krxtimod_lock = SPIN_LOCK_UNLOCKED;
+static DEFINE_SPINLOCK(krxtimod_lock);
 
 static int krxtimod(void *arg);
 
diff -puN net/rxrpc/transport.c~lock-initializer-cleanup-networking net/rxrpc/transport.c
--- 25/net/rxrpc/transport.c~lock-initializer-cleanup-networking	Wed Jan 12 16:54:32 2005
+++ 25-akpm/net/rxrpc/transport.c	Wed Jan 12 16:54:33 2005
@@ -39,7 +39,7 @@ struct errormsg {
 	struct sockaddr_in		icmp_src;	/* ICMP packet source address */
 };
 
-static spinlock_t rxrpc_transports_lock = SPIN_LOCK_UNLOCKED;
+static DEFINE_SPINLOCK(rxrpc_transports_lock);
 static struct list_head rxrpc_transports = LIST_HEAD_INIT(rxrpc_transports);
 
 __RXACCT_DECL(atomic_t rxrpc_transport_count);
diff -puN net/sched/act_api.c~lock-initializer-cleanup-networking net/sched/act_api.c
--- 25/net/sched/act_api.c~lock-initializer-cleanup-networking	Wed Jan 12 16:54:32 2005
+++ 25-akpm/net/sched/act_api.c	Wed Jan 12 16:54:33 2005
@@ -46,7 +46,7 @@
 #endif
 
 static struct tc_action_ops *act_base = NULL;
-static rwlock_t act_mod_lock = RW_LOCK_UNLOCKED;
+static DEFINE_RWLOCK(act_mod_lock);
 
 int tcf_register_action(struct tc_action_ops *act)
 {
diff -puN net/sched/cls_api.c~lock-initializer-cleanup-networking net/sched/cls_api.c
--- 25/net/sched/cls_api.c~lock-initializer-cleanup-networking	Wed Jan 12 16:54:32 2005
+++ 25-akpm/net/sched/cls_api.c	Wed Jan 12 16:54:33 2005
@@ -49,7 +49,7 @@
 static struct tcf_proto_ops *tcf_proto_base;
 
 /* Protects list of registered TC modules. It is pure SMP lock. */
-static rwlock_t cls_mod_lock = RW_LOCK_UNLOCKED;
+static DEFINE_RWLOCK(cls_mod_lock);
 
 /* Find classifier type by string name */
 
diff -puN net/sched/estimator.c~lock-initializer-cleanup-networking net/sched/estimator.c
--- 25/net/sched/estimator.c~lock-initializer-cleanup-networking	Wed Jan 12 16:54:32 2005
+++ 25-akpm/net/sched/estimator.c	Wed Jan 12 16:54:33 2005
@@ -95,7 +95,7 @@ struct qdisc_estimator_head
 static struct qdisc_estimator_head elist[EST_MAX_INTERVAL+1];
 
 /* Estimator array lock */
-static rwlock_t est_lock = RW_LOCK_UNLOCKED;
+static DEFINE_RWLOCK(est_lock);
 
 static void est_timer(unsigned long arg)
 {
diff -puN net/sched/gact.c~lock-initializer-cleanup-networking net/sched/gact.c
--- 25/net/sched/gact.c~lock-initializer-cleanup-networking	Wed Jan 12 16:54:32 2005
+++ 25-akpm/net/sched/gact.c	Wed Jan 12 16:54:33 2005
@@ -40,7 +40,7 @@
 #define MY_TAB_MASK	15
 static u32 idx_gen;
 static struct tcf_gact *tcf_gact_ht[MY_TAB_SIZE];
-static rwlock_t gact_lock = RW_LOCK_UNLOCKED;
+static DEFINE_RWLOCK(gact_lock);
 
 /* ovewrride the defaults */
 #define tcf_st  tcf_gact
diff -puN net/sched/ipt.c~lock-initializer-cleanup-networking net/sched/ipt.c
--- 25/net/sched/ipt.c~lock-initializer-cleanup-networking	Wed Jan 12 16:54:32 2005
+++ 25-akpm/net/sched/ipt.c	Wed Jan 12 16:54:33 2005
@@ -46,7 +46,7 @@
 static u32 idx_gen;
 static struct tcf_ipt *tcf_ipt_ht[MY_TAB_SIZE];
 /* ipt hash table lock */
-static rwlock_t ipt_lock = RW_LOCK_UNLOCKED;
+static DEFINE_RWLOCK(ipt_lock);
 
 /* ovewrride the defaults */
 #define tcf_st  tcf_ipt
diff -puN net/sched/mirred.c~lock-initializer-cleanup-networking net/sched/mirred.c
--- 25/net/sched/mirred.c~lock-initializer-cleanup-networking	Wed Jan 12 16:54:32 2005
+++ 25-akpm/net/sched/mirred.c	Wed Jan 12 16:54:33 2005
@@ -46,7 +46,7 @@
 #define MY_TAB_MASK     (MY_TAB_SIZE - 1)
 static u32 idx_gen;
 static struct tcf_mirred *tcf_mirred_ht[MY_TAB_SIZE];
-static rwlock_t mirred_lock = RW_LOCK_UNLOCKED;
+static DEFINE_RWLOCK(mirred_lock);
 
 /* ovewrride the defaults */
 #define tcf_st  tcf_mirred
diff -puN net/sched/pedit.c~lock-initializer-cleanup-networking net/sched/pedit.c
--- 25/net/sched/pedit.c~lock-initializer-cleanup-networking	Wed Jan 12 16:54:32 2005
+++ 25-akpm/net/sched/pedit.c	Wed Jan 12 16:54:33 2005
@@ -42,7 +42,7 @@
 #define MY_TAB_MASK     15
 static u32 idx_gen;
 static struct tcf_pedit *tcf_pedit_ht[MY_TAB_SIZE];
-static rwlock_t pedit_lock = RW_LOCK_UNLOCKED;
+static DEFINE_RWLOCK(pedit_lock);
 
 #define tcf_st  tcf_pedit
 #define tc_st  tc_pedit
diff -puN net/sched/police.c~lock-initializer-cleanup-networking net/sched/police.c
--- 25/net/sched/police.c~lock-initializer-cleanup-networking	Wed Jan 12 16:54:32 2005
+++ 25-akpm/net/sched/police.c	Wed Jan 12 16:54:33 2005
@@ -43,7 +43,7 @@
 static u32 idx_gen;
 static struct tcf_police *tcf_police_ht[MY_TAB_SIZE];
 /* Policer hash table lock */
-static rwlock_t police_lock = RW_LOCK_UNLOCKED;
+static DEFINE_RWLOCK(police_lock);
 
 /* Each policer is serialized by its individual spinlock */
 
diff -puN net/sched/sch_api.c~lock-initializer-cleanup-networking net/sched/sch_api.c
--- 25/net/sched/sch_api.c~lock-initializer-cleanup-networking	Wed Jan 12 16:54:32 2005
+++ 25-akpm/net/sched/sch_api.c	Wed Jan 12 16:54:33 2005
@@ -131,7 +131,7 @@ static int tclass_notify(struct sk_buff 
  */
 
 /* Protects list of registered TC modules. It is pure SMP lock. */
-static rwlock_t qdisc_mod_lock = RW_LOCK_UNLOCKED;
+static DEFINE_RWLOCK(qdisc_mod_lock);
 
 
 /************************************************
diff -puN net/sched/sch_generic.c~lock-initializer-cleanup-networking net/sched/sch_generic.c
--- 25/net/sched/sch_generic.c~lock-initializer-cleanup-networking	Wed Jan 12 16:54:32 2005
+++ 25-akpm/net/sched/sch_generic.c	Wed Jan 12 16:54:33 2005
@@ -54,7 +54,7 @@
 
    qdisc_tree_lock must be grabbed BEFORE dev->queue_lock!
  */
-rwlock_t qdisc_tree_lock = RW_LOCK_UNLOCKED;
+DEFINE_RWLOCK(qdisc_tree_lock);
 
 void qdisc_lock_tree(struct net_device *dev)
 {
diff -puN net/sctp/protocol.c~lock-initializer-cleanup-networking net/sctp/protocol.c
--- 25/net/sctp/protocol.c~lock-initializer-cleanup-networking	Wed Jan 12 16:54:32 2005
+++ 25-akpm/net/sctp/protocol.c	Wed Jan 12 16:54:33 2005
@@ -65,7 +65,7 @@ struct proc_dir_entry	*proc_net_sctp;
 DEFINE_SNMP_STAT(struct sctp_mib, sctp_statistics);
 
 struct idr sctp_assocs_id;
-spinlock_t sctp_assocs_id_lock = SPIN_LOCK_UNLOCKED;
+DEFINE_SPINLOCK(sctp_assocs_id_lock);
 
 /* This is the global socket data structure used for responding to
  * the Out-of-the-blue (OOTB) packets.  A control sock will be created
diff -puN net/socket.c~lock-initializer-cleanup-networking net/socket.c
--- 25/net/socket.c~lock-initializer-cleanup-networking	Wed Jan 12 16:54:32 2005
+++ 25-akpm/net/socket.c	Wed Jan 12 16:54:33 2005
@@ -144,7 +144,7 @@ static struct net_proto_family *net_fami
 
 #if defined(CONFIG_SMP) || defined(CONFIG_PREEMPT)
 static atomic_t net_family_lockct = ATOMIC_INIT(0);
-static spinlock_t net_family_lock = SPIN_LOCK_UNLOCKED;
+static DEFINE_SPINLOCK(net_family_lock);
 
 /* The strategy is: modifications net_family vector are short, do not
    sleep and veeery rare, but read access should be free of any exclusive
diff -puN net/sunrpc/auth.c~lock-initializer-cleanup-networking net/sunrpc/auth.c
--- 25/net/sunrpc/auth.c~lock-initializer-cleanup-networking	Wed Jan 12 16:54:32 2005
+++ 25-akpm/net/sunrpc/auth.c	Wed Jan 12 16:54:33 2005
@@ -89,7 +89,7 @@ rpcauth_destroy(struct rpc_auth *auth)
 	kfree(auth);
 }
 
-static spinlock_t rpc_credcache_lock = SPIN_LOCK_UNLOCKED;
+static DEFINE_SPINLOCK(rpc_credcache_lock);
 
 /*
  * Initialize RPC credential cache
diff -puN net/sunrpc/auth_gss/auth_gss.c~lock-initializer-cleanup-networking net/sunrpc/auth_gss/auth_gss.c
--- 25/net/sunrpc/auth_gss/auth_gss.c~lock-initializer-cleanup-networking	Wed Jan 12 16:54:32 2005
+++ 25-akpm/net/sunrpc/auth_gss/auth_gss.c	Wed Jan 12 16:54:33 2005
@@ -80,7 +80,7 @@ static struct rpc_credops gss_credops;
 /* dump the buffer in `emacs-hexl' style */
 #define isprint(c)      ((c > 0x1f) && (c < 0x7f))
 
-static rwlock_t gss_ctx_lock = RW_LOCK_UNLOCKED;
+static DEFINE_RWLOCK(gss_ctx_lock);
 
 struct gss_auth {
 	struct rpc_auth rpc_auth;
diff -puN net/sunrpc/auth_gss/gss_mech_switch.c~lock-initializer-cleanup-networking net/sunrpc/auth_gss/gss_mech_switch.c
--- 25/net/sunrpc/auth_gss/gss_mech_switch.c~lock-initializer-cleanup-networking	Wed Jan 12 16:54:32 2005
+++ 25-akpm/net/sunrpc/auth_gss/gss_mech_switch.c	Wed Jan 12 16:54:33 2005
@@ -51,7 +51,7 @@
 #endif
 
 static LIST_HEAD(registered_mechs);
-static spinlock_t registered_mechs_lock = SPIN_LOCK_UNLOCKED;
+static DEFINE_SPINLOCK(registered_mechs_lock);
 
 static void
 gss_mech_free(struct gss_api_mech *gm)
diff -puN net/sunrpc/cache.c~lock-initializer-cleanup-networking net/sunrpc/cache.c
--- 25/net/sunrpc/cache.c~lock-initializer-cleanup-networking	Wed Jan 12 16:54:32 2005
+++ 25-akpm/net/sunrpc/cache.c	Wed Jan 12 16:54:33 2005
@@ -161,7 +161,7 @@ void cache_fresh(struct cache_detail *de
  */
 
 static LIST_HEAD(cache_list);
-static spinlock_t cache_list_lock = SPIN_LOCK_UNLOCKED;
+static DEFINE_SPINLOCK(cache_list_lock);
 static struct cache_detail *current_detail;
 static int current_index;
 
@@ -405,7 +405,7 @@ void cache_purge(struct cache_detail *de
 
 #define	DFR_MAX	300	/* ??? */
 
-static spinlock_t cache_defer_lock = SPIN_LOCK_UNLOCKED;
+static DEFINE_SPINLOCK(cache_defer_lock);
 static LIST_HEAD(cache_defer_list);
 static struct list_head cache_defer_hash[DFR_HASHSIZE];
 static int cache_defer_cnt;
@@ -533,7 +533,7 @@ void cache_clean_deferred(void *owner)
  *
  */
 
-static spinlock_t queue_lock = SPIN_LOCK_UNLOCKED;
+static DEFINE_SPINLOCK(queue_lock);
 static DECLARE_MUTEX(queue_io_sem);
 
 struct cache_queue {
diff -puN net/sunrpc/pmap_clnt.c~lock-initializer-cleanup-networking net/sunrpc/pmap_clnt.c
--- 25/net/sunrpc/pmap_clnt.c~lock-initializer-cleanup-networking	Wed Jan 12 16:54:32 2005
+++ 25-akpm/net/sunrpc/pmap_clnt.c	Wed Jan 12 16:54:33 2005
@@ -32,7 +32,7 @@ static struct rpc_procinfo	pmap_procedur
 static struct rpc_clnt *	pmap_create(char *, struct sockaddr_in *, int);
 static void			pmap_getport_done(struct rpc_task *);
 static struct rpc_program	pmap_program;
-static spinlock_t		pmap_lock = SPIN_LOCK_UNLOCKED;
+static DEFINE_SPINLOCK(pmap_lock);
 
 /*
  * Obtain the port for a given RPC service on a given host. This one can
diff -puN net/sunrpc/sched.c~lock-initializer-cleanup-networking net/sunrpc/sched.c
--- 25/net/sunrpc/sched.c~lock-initializer-cleanup-networking	Wed Jan 12 16:54:32 2005
+++ 25-akpm/net/sunrpc/sched.c	Wed Jan 12 16:54:33 2005
@@ -71,7 +71,7 @@ static struct workqueue_struct *rpciod_w
 /*
  * Spinlock for other critical sections of code.
  */
-static spinlock_t rpc_sched_lock = SPIN_LOCK_UNLOCKED;
+static DEFINE_SPINLOCK(rpc_sched_lock);
 
 /*
  * Disable the timer for a given RPC task. Should be called with
diff -puN net/sunrpc/svcauth.c~lock-initializer-cleanup-networking net/sunrpc/svcauth.c
--- 25/net/sunrpc/svcauth.c~lock-initializer-cleanup-networking	Wed Jan 12 16:54:32 2005
+++ 25-akpm/net/sunrpc/svcauth.c	Wed Jan 12 16:54:33 2005
@@ -28,7 +28,7 @@
 extern struct auth_ops svcauth_null;
 extern struct auth_ops svcauth_unix;
 
-static spinlock_t authtab_lock = SPIN_LOCK_UNLOCKED;
+static DEFINE_SPINLOCK(authtab_lock);
 static struct auth_ops	*authtab[RPC_AUTH_MAXFLAVOR] = {
 	[0] = &svcauth_null,
 	[1] = &svcauth_unix,
diff -puN net/unix/af_unix.c~lock-initializer-cleanup-networking net/unix/af_unix.c
--- 25/net/unix/af_unix.c~lock-initializer-cleanup-networking	Wed Jan 12 16:54:32 2005
+++ 25-akpm/net/unix/af_unix.c	Wed Jan 12 16:54:33 2005
@@ -124,7 +124,7 @@ int sysctl_unix_max_dgram_qlen = 10;
 static kmem_cache_t *unix_sk_cachep;
 
 struct hlist_head unix_socket_table[UNIX_HASH_SIZE + 1];
-rwlock_t unix_table_lock = RW_LOCK_UNLOCKED;
+DEFINE_RWLOCK(unix_table_lock);
 static atomic_t unix_nr_socks = ATOMIC_INIT(0);
 
 #define unix_sockets_unbound	(&unix_socket_table[UNIX_HASH_SIZE])
diff -puN net/wanrouter/af_wanpipe.c~lock-initializer-cleanup-networking net/wanrouter/af_wanpipe.c
--- 25/net/wanrouter/af_wanpipe.c~lock-initializer-cleanup-networking	Wed Jan 12 16:54:32 2005
+++ 25-akpm/net/wanrouter/af_wanpipe.c	Wed Jan 12 16:54:33 2005
@@ -158,7 +158,7 @@ static void dbg_kfree(void * v, int line
 
 /* List of all wanpipe sockets. */
 HLIST_HEAD(wanpipe_sklist);
-static rwlock_t wanpipe_sklist_lock = RW_LOCK_UNLOCKED;
+static DEFINE_RWLOCK(wanpipe_sklist_lock);
 
 atomic_t wanpipe_socks_nr;
 static unsigned long wanpipe_tx_critical;
diff -puN net/x25/af_x25.c~lock-initializer-cleanup-networking net/x25/af_x25.c
--- 25/net/x25/af_x25.c~lock-initializer-cleanup-networking	Wed Jan 12 16:54:32 2005
+++ 25-akpm/net/x25/af_x25.c	Wed Jan 12 16:54:33 2005
@@ -58,7 +58,7 @@ int sysctl_x25_clear_request_timeout   =
 int sysctl_x25_ack_holdback_timeout    = X25_DEFAULT_T2;
 
 HLIST_HEAD(x25_list);
-rwlock_t x25_list_lock = RW_LOCK_UNLOCKED;
+DEFINE_RWLOCK(x25_list_lock);
 
 static struct proto_ops x25_proto_ops;
 
diff -puN net/x25/x25_link.c~lock-initializer-cleanup-networking net/x25/x25_link.c
--- 25/net/x25/x25_link.c~lock-initializer-cleanup-networking	Wed Jan 12 16:54:32 2005
+++ 25-akpm/net/x25/x25_link.c	Wed Jan 12 16:54:33 2005
@@ -31,7 +31,7 @@
 #include <net/x25.h>
 
 static struct list_head x25_neigh_list = LIST_HEAD_INIT(x25_neigh_list);
-static rwlock_t x25_neigh_list_lock = RW_LOCK_UNLOCKED;
+static DEFINE_RWLOCK(x25_neigh_list_lock);
 
 static void x25_t20timer_expiry(unsigned long);
 
diff -puN net/x25/x25_route.c~lock-initializer-cleanup-networking net/x25/x25_route.c
--- 25/net/x25/x25_route.c~lock-initializer-cleanup-networking	Wed Jan 12 16:54:32 2005
+++ 25-akpm/net/x25/x25_route.c	Wed Jan 12 16:54:33 2005
@@ -23,7 +23,7 @@
 #include <net/x25.h>
 
 struct list_head x25_route_list = LIST_HEAD_INIT(x25_route_list);
-rwlock_t x25_route_list_lock = RW_LOCK_UNLOCKED;
+DEFINE_RWLOCK(x25_route_list_lock);
 
 /*
  *	Add a new route.
diff -puN net/xfrm/xfrm_policy.c~lock-initializer-cleanup-networking net/xfrm/xfrm_policy.c
--- 25/net/xfrm/xfrm_policy.c~lock-initializer-cleanup-networking	Wed Jan 12 16:54:32 2005
+++ 25-akpm/net/xfrm/xfrm_policy.c	Wed Jan 12 16:54:33 2005
@@ -26,11 +26,11 @@
 
 DECLARE_MUTEX(xfrm_cfg_sem);
 
-static rwlock_t xfrm_policy_lock = RW_LOCK_UNLOCKED;
+static DEFINE_RWLOCK(xfrm_policy_lock);
 
 struct xfrm_policy *xfrm_policy_list[XFRM_POLICY_MAX*2];
 
-static rwlock_t xfrm_policy_afinfo_lock = RW_LOCK_UNLOCKED;
+static DEFINE_RWLOCK(xfrm_policy_afinfo_lock);
 static struct xfrm_policy_afinfo *xfrm_policy_afinfo[NPROTO];
 
 static kmem_cache_t *xfrm_dst_cache;
@@ -38,7 +38,7 @@ static kmem_cache_t *xfrm_dst_cache;
 static struct work_struct xfrm_policy_gc_work;
 static struct list_head xfrm_policy_gc_list =
 	LIST_HEAD_INIT(xfrm_policy_gc_list);
-static spinlock_t xfrm_policy_gc_lock = SPIN_LOCK_UNLOCKED;
+static DEFINE_SPINLOCK(xfrm_policy_gc_lock);
 
 static struct xfrm_policy_afinfo *xfrm_policy_get_afinfo(unsigned short family);
 static void xfrm_policy_put_afinfo(struct xfrm_policy_afinfo *afinfo);
diff -puN net/xfrm/xfrm_state.c~lock-initializer-cleanup-networking net/xfrm/xfrm_state.c
--- 25/net/xfrm/xfrm_state.c~lock-initializer-cleanup-networking	Wed Jan 12 16:54:32 2005
+++ 25-akpm/net/xfrm/xfrm_state.c	Wed Jan 12 16:54:33 2005
@@ -26,7 +26,7 @@
       destination/tunnel endpoint. (output)
  */
 
-static spinlock_t xfrm_state_lock = SPIN_LOCK_UNLOCKED;
+static DEFINE_SPINLOCK(xfrm_state_lock);
 
 /* Hash table to find appropriate SA towards given target (endpoint
  * of tunnel or destination of transport mode) allowed by selector.
@@ -39,12 +39,12 @@ static struct list_head xfrm_state_byspi
 
 DECLARE_WAIT_QUEUE_HEAD(km_waitq);
 
-static rwlock_t xfrm_state_afinfo_lock = RW_LOCK_UNLOCKED;
+static DEFINE_RWLOCK(xfrm_state_afinfo_lock);
 static struct xfrm_state_afinfo *xfrm_state_afinfo[NPROTO];
 
 static struct work_struct xfrm_state_gc_work;
 static struct list_head xfrm_state_gc_list = LIST_HEAD_INIT(xfrm_state_gc_list);
-static spinlock_t xfrm_state_gc_lock = SPIN_LOCK_UNLOCKED;
+static DEFINE_SPINLOCK(xfrm_state_gc_lock);
 
 static void __xfrm_state_delete(struct xfrm_state *x);
 
@@ -619,7 +619,7 @@ u32 xfrm_get_acqseq(void)
 {
 	u32 res;
 	static u32 acqseq;
-	static spinlock_t acqseq_lock = SPIN_LOCK_UNLOCKED;
+	static DEFINE_SPINLOCK(acqseq_lock);
 
 	spin_lock_bh(&acqseq_lock);
 	res = (++acqseq ? : ++acqseq);
@@ -747,7 +747,7 @@ void xfrm_replay_advance(struct xfrm_sta
 }
 
 static struct list_head xfrm_km_list = LIST_HEAD_INIT(xfrm_km_list);
-static rwlock_t		xfrm_km_lock = RW_LOCK_UNLOCKED;
+static DEFINE_RWLOCK(xfrm_km_lock);
 
 static void km_state_expired(struct xfrm_state *x, int hard)
 {
_
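
For readers unfamiliar with the two macros, the conversion performed throughout this patch is purely mechanical: the locks start out in exactly the same unlocked state, only the initializer form changes.  A minimal sketch of the before/after pattern at file scope follows; the identifiers demo_list, demo_list_lock, demo_rwlock and demo_add are invented for illustration and do not appear anywhere in the patch.

	#include <linux/spinlock.h>
	#include <linux/list.h>

	/* Old style: assign the unlocked constant explicitly. */
	static spinlock_t demo_list_lock_old = SPIN_LOCK_UNLOCKED;
	static rwlock_t   demo_rwlock_old    = RW_LOCK_UNLOCKED;

	/*
	 * New style, as used throughout this patch: declare and
	 * initialize the lock in a single macro invocation.
	 */
	static DEFINE_SPINLOCK(demo_list_lock);
	static DEFINE_RWLOCK(demo_rwlock);

	static LIST_HEAD(demo_list);

	/* Lock usage is unchanged by the conversion. */
	static void demo_add(struct list_head *entry)
	{
		spin_lock_bh(&demo_list_lock);
		list_add(entry, &demo_list);
		spin_unlock_bh(&demo_list_lock);
	}

A practical advantage of the macro form is that the lock structure's layout stays an implementation detail of the spinlock headers, so additional per-lock fields (for debugging variants and the like) can be introduced later without revisiting every static initializer in the tree.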
