[GFS2] Move glock hash table out of superblock

There are several reasons why we want to do this:
 - Firstly, it's large and thus we'll scale better with multiple
   GFS2 fs mounted at the same time
 - Secondly, it's easier to scale its size as required (that's a plan
   for later patches)
 - Thirdly, we can use kzalloc rather than vmalloc when allocating
   the superblock (it's now only 4888 bytes)
 - Fourthly, it's all part of my plan to eventually be able to use RCU
   with the glock hash.

Signed-off-by: Steven Whitehouse <swhiteho@redhat.com>
diff --git a/fs/gfs2/glock.c b/fs/gfs2/glock.c
index 0076967..5759f52 100644
--- a/fs/gfs2/glock.c
+++ b/fs/gfs2/glock.c
@@ -49,6 +49,8 @@
 static int gfs2_dump_lockstate(struct gfs2_sbd *sdp);
 static int dump_glock(struct gfs2_glock *gl);
 
+static struct gfs2_gl_hash_bucket gl_hash_table[GFS2_GL_HASH_SIZE];
+
 /**
  * relaxed_state_ok - is a requested lock compatible with the current lock mode?
  * @actual: the current state of the lock
@@ -231,10 +233,10 @@
  * Returns: NULL, or the struct gfs2_glock with the requested number
  */
 
-static struct gfs2_glock *gfs2_glock_find(struct gfs2_sbd *sdp,
+static struct gfs2_glock *gfs2_glock_find(const struct gfs2_sbd *sdp,
 					  const struct lm_lockname *name)
 {
-	struct gfs2_gl_hash_bucket *bucket = &sdp->sd_gl_hash[gl_hash(sdp, name)];
+	struct gfs2_gl_hash_bucket *bucket = &gl_hash_table[gl_hash(sdp, name)];
 	struct gfs2_glock *gl;
 
 	read_lock(&bucket->hb_lock);
@@ -268,7 +270,7 @@
 
 	name.ln_number = number;
 	name.ln_type = glops->go_type;
-	bucket = &sdp->sd_gl_hash[gl_hash(sdp, &name)];
+	bucket = &gl_hash_table[gl_hash(sdp, &name)];
 
 	read_lock(&bucket->hb_lock);
 	gl = search_bucket(bucket, sdp, &name);
@@ -648,9 +650,9 @@
 	set_bit(HIF_MUTEX, &gh.gh_iflags);
 
 	spin_lock(&gl->gl_spin);
-	if (test_and_set_bit(GLF_LOCK, &gl->gl_flags))
+	if (test_and_set_bit(GLF_LOCK, &gl->gl_flags)) {
 		list_add_tail(&gh.gh_list, &gl->gl_waiters1);
-	else {
+	} else {
 		gl->gl_owner = current;
 		gl->gl_ip = (unsigned long)__builtin_return_address(0);
 		complete(&gh.gh_wait);
@@ -673,9 +675,9 @@
 	int acquired = 1;
 
 	spin_lock(&gl->gl_spin);
-	if (test_and_set_bit(GLF_LOCK, &gl->gl_flags))
+	if (test_and_set_bit(GLF_LOCK, &gl->gl_flags)) {
 		acquired = 0;
-	else {
+	} else {
 		gl->gl_owner = current;
 		gl->gl_ip = (unsigned long)__builtin_return_address(0);
 	}
@@ -830,9 +832,9 @@
 		spin_lock(&gl->gl_spin);
 		list_del_init(&gh->gh_list);
 		if (gl->gl_state == gh->gh_state ||
-		    gl->gl_state == LM_ST_UNLOCKED)
+		    gl->gl_state == LM_ST_UNLOCKED) {
 			gh->gh_error = 0;
-		else {
+		} else {
 			if (gfs2_assert_warn(sdp, gh->gh_flags &
 					(LM_FLAG_TRY | LM_FLAG_TRY_1CB)) == -1)
 				fs_warn(sdp, "ret = 0x%.8X\n", ret);
@@ -1090,8 +1092,7 @@
 		return gh->gh_error;
 
 	gfs2_assert_withdraw(sdp, test_bit(HIF_HOLDER, &gh->gh_iflags));
-	gfs2_assert_withdraw(sdp, relaxed_state_ok(gl->gl_state,
-						   gh->gh_state,
+	gfs2_assert_withdraw(sdp, relaxed_state_ok(gl->gl_state, gh->gh_state,
 						   gh->gh_flags));
 
 	if (test_bit(HIF_FIRST, &gh->gh_iflags)) {
@@ -1901,6 +1902,8 @@
 
 			if (test_bit(GLF_PLUG, &gl->gl_flags))
 				continue;
+			if (gl->gl_sbd != sdp)
+				continue;
 
 			/* examiner() must glock_put() */
 			gfs2_glock_hold(gl);
@@ -1953,7 +1956,7 @@
 	unsigned int x;
 
 	for (x = 0; x < GFS2_GL_HASH_SIZE; x++) {
-		examine_bucket(scan_glock, sdp, &sdp->sd_gl_hash[x]);
+		examine_bucket(scan_glock, sdp, &gl_hash_table[x]);
 		cond_resched();
 	}
 }
@@ -2012,7 +2015,7 @@
 		cont = 0;
 
 		for (x = 0; x < GFS2_GL_HASH_SIZE; x++)
-			if (examine_bucket(clear_glock, sdp, &sdp->sd_gl_hash[x]))
+			if (examine_bucket(clear_glock, sdp, &gl_hash_table[x]))
 				cont = 1;
 
 		if (!wait || !cont)
@@ -2114,14 +2117,13 @@
 
 	spin_lock(&gl->gl_spin);
 
-	printk(KERN_INFO "Glock 0x%p (%u, %llu)\n",
-	       gl,
-	       gl->gl_name.ln_type,
+	printk(KERN_INFO "Glock 0x%p (%u, %llu)\n", gl, gl->gl_name.ln_type,
 	       (unsigned long long)gl->gl_name.ln_number);
 	printk(KERN_INFO "  gl_flags =");
-	for (x = 0; x < 32; x++)
+	for (x = 0; x < 32; x++) {
 		if (test_bit(x, &gl->gl_flags))
 			printk(" %u", x);
+	}
 	printk(" \n");
 	printk(KERN_INFO "  gl_ref = %d\n", atomic_read(&gl->gl_ref.refcount));
 	printk(KERN_INFO "  gl_state = %u\n", gl->gl_state);
@@ -2136,8 +2138,7 @@
 	printk(KERN_INFO "  reclaim = %s\n",
 		    (list_empty(&gl->gl_reclaim)) ? "no" : "yes");
 	if (gl->gl_aspace)
-		printk(KERN_INFO "  aspace = 0x%p nrpages = %lu\n",
-		       gl->gl_aspace,
+		printk(KERN_INFO "  aspace = 0x%p nrpages = %lu\n", gl->gl_aspace,
 		       gl->gl_aspace->i_mapping->nrpages);
 	else
 		printk(KERN_INFO "  aspace = no\n");
@@ -2203,13 +2204,15 @@
 	int error = 0;
 
 	for (x = 0; x < GFS2_GL_HASH_SIZE; x++) {
-		bucket = &sdp->sd_gl_hash[x];
+		bucket = &gl_hash_table[x];
 
 		read_lock(&bucket->hb_lock);
 
 		list_for_each_entry(gl, &bucket->hb_list, gl_list) {
 			if (test_bit(GLF_PLUG, &gl->gl_flags))
 				continue;
+			if (gl->gl_sbd != sdp)
+				continue;
 
 			error = dump_glock(gl);
 			if (error)
@@ -2226,3 +2229,14 @@
 	return error;
 }
 
+int __init gfs2_glock_init(void)
+{
+	unsigned i;
+	for(i = 0; i < GFS2_GL_HASH_SIZE; i++) {
+		struct gfs2_gl_hash_bucket *hb = &gl_hash_table[i];
+		rwlock_init(&hb->hb_lock);
+		INIT_LIST_HEAD(&hb->hb_list);
+	}
+	return 0;
+}
+
diff --git a/fs/gfs2/glock.h b/fs/gfs2/glock.h
index 2e1d328..0febca3 100644
--- a/fs/gfs2/glock.h
+++ b/fs/gfs2/glock.h
@@ -150,4 +150,6 @@
 void gfs2_scand_internal(struct gfs2_sbd *sdp);
 void gfs2_gl_hash_clear(struct gfs2_sbd *sdp, int wait);
 
+int __init gfs2_glock_init(void);
+
 #endif /* __GLOCK_DOT_H__ */
diff --git a/fs/gfs2/incore.h b/fs/gfs2/incore.h
index 225924c..6184960 100644
--- a/fs/gfs2/incore.h
+++ b/fs/gfs2/incore.h
@@ -30,7 +30,6 @@
 struct gfs2_trans;
 struct gfs2_ail;
 struct gfs2_jdesc;
-struct gfs2_gl_hash_bucket;
 struct gfs2_sbd;
 
 typedef void (*gfs2_glop_bh_t) (struct gfs2_glock *gl, unsigned int ret);
@@ -107,6 +106,11 @@
 	struct list_head bd_ail_gl_list;
 };
 
+struct gfs2_gl_hash_bucket {
+        rwlock_t hb_lock;
+        struct list_head hb_list;
+};
+
 struct gfs2_glock_operations {
 	void (*go_xmote_th) (struct gfs2_glock * gl, unsigned int state,
 			     int flags);
@@ -442,11 +446,6 @@
 	unsigned int gt_statfs_slow;
 };
 
-struct gfs2_gl_hash_bucket {
-	rwlock_t hb_lock;
-	struct list_head hb_list;
-};
-
 enum {
 	SDF_JOURNAL_CHECKED	= 0,
 	SDF_JOURNAL_LIVE	= 1,
@@ -489,7 +488,6 @@
 	/* Lock Stuff */
 
 	struct lm_lockstruct sd_lockstruct;
-	struct gfs2_gl_hash_bucket sd_gl_hash[GFS2_GL_HASH_SIZE];
 	struct list_head sd_reclaim_list;
 	spinlock_t sd_reclaim_lock;
 	wait_queue_head_t sd_reclaim_wq;
diff --git a/fs/gfs2/main.c b/fs/gfs2/main.c
index 143fda7..2bdf246 100644
--- a/fs/gfs2/main.c
+++ b/fs/gfs2/main.c
@@ -23,6 +23,7 @@
 #include "ops_fstype.h"
 #include "sys.h"
 #include "util.h"
+#include "glock.h"
 
 static void gfs2_init_inode_once(void *foo, kmem_cache_t *cachep, unsigned long flags)
 {
@@ -69,8 +70,11 @@
 	if (error)
 		return error;
 
-	error = -ENOMEM;
+	error = gfs2_glock_init();
+	if (error)
+		goto fail;
 
+	error = -ENOMEM;
 	gfs2_glock_cachep = kmem_cache_create("gfs2_glock",
 					      sizeof(struct gfs2_glock),
 					      0, 0, 
diff --git a/fs/gfs2/ops_fstype.c b/fs/gfs2/ops_fstype.c
index c94422b..f5140bd 100644
--- a/fs/gfs2/ops_fstype.c
+++ b/fs/gfs2/ops_fstype.c
@@ -45,23 +45,16 @@
 static struct gfs2_sbd *init_sbd(struct super_block *sb)
 {
 	struct gfs2_sbd *sdp;
-	unsigned int x;
 
-	sdp = vmalloc(sizeof(struct gfs2_sbd));
+	sdp = kzalloc(sizeof(struct gfs2_sbd), GFP_KERNEL);
 	if (!sdp)
 		return NULL;
 
-	memset(sdp, 0, sizeof(struct gfs2_sbd));
-
 	sb->s_fs_info = sdp;
 	sdp->sd_vfs = sb;
 
 	gfs2_tune_init(&sdp->sd_tune);
 
-	for (x = 0; x < GFS2_GL_HASH_SIZE; x++) {
-		rwlock_init(&sdp->sd_gl_hash[x].hb_lock);
-		INIT_LIST_HEAD(&sdp->sd_gl_hash[x].hb_list);
-	}
 	INIT_LIST_HEAD(&sdp->sd_reclaim_list);
 	spin_lock_init(&sdp->sd_reclaim_lock);
 	init_waitqueue_head(&sdp->sd_reclaim_wq);