[PATCH] md: merge raid5 and raid6 code

There is a lot of commonality between raid5.c and raid6main.c.  This patch
merges both into one module called raid456.  This saves a lot of code and
paves the way for online raid5->raid6 migrations.

There is still duplication, e.g.  between handle_stripe5 and handle_stripe6.
This will probably be cleaned up later.

Cc: "H. Peter Anvin" <hpa@zytor.com>
Signed-off-by: Neil Brown <neilb@suse.de>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
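---

Note (not for the changelog): below is a minimal, standalone userspace sketch
of the RAID-6 left-symmetric layout arithmetic that the merged
raid5_compute_sector() path uses for level 6, so the P/Q rotation can be
checked by eye.  It is not part of the patch; the 5-disk array size and the
stripe numbers printed are assumptions purely for illustration.

	/*
	 * Sketch: mirror the level-6 ALGORITHM_LEFT_SYMMETRIC case of
	 * raid5_compute_sector().  P rotates right-to-left per stripe,
	 * Q is the next disk after P, and data disks follow Q, wrapping.
	 */
	#include <stdio.h>

	static int raid6_next_disk(int disk, int raid_disks)
	{
		disk++;
		return (disk < raid_disks) ? disk : 0;
	}

	int main(void)
	{
		const int raid_disks = 5;		/* assumed array size */
		const int data_disks = raid_disks - 2;	/* two parity disks */
		long stripe;
		int dd;

		for (stripe = 0; stripe < 5; stripe++) {
			int pd_idx = raid_disks - 1 - (int)(stripe % raid_disks);
			int qd_idx = raid6_next_disk(pd_idx, raid_disks);

			printf("stripe %ld: P=%d Q=%d data:", stripe, pd_idx, qd_idx);
			for (dd = 0; dd < data_disks; dd++) {
				/* left-symmetric: data starts after Q and wraps */
				int disk = (pd_idx + 2 + dd) % raid_disks;
				printf(" %d", disk);
			}
			printf("\n");
		}
		return 0;
	}

Running this shows the "Q D D D P" / "D D P Q D" patterns referred to in the
comments of the level-6 switch in raid5_compute_sector() and compute_blocknr().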
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
index 122e64e..9ba7307 100644
--- a/drivers/md/raid5.c
+++ b/drivers/md/raid5.c
@@ -2,8 +2,11 @@
  * raid5.c : Multiple Devices driver for Linux
  *	   Copyright (C) 1996, 1997 Ingo Molnar, Miguel de Icaza, Gadi Oxman
  *	   Copyright (C) 1999, 2000 Ingo Molnar
+ *	   Copyright (C) 2002, 2003 H. Peter Anvin
  *
- * RAID-5 management functions.
+ * RAID-4/5/6 management functions.
+ * Thanks to Penguin Computing for making the RAID-6 development possible
+ * by donating a test server!
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License as published by
@@ -19,11 +22,11 @@
 #include <linux/config.h>
 #include <linux/module.h>
 #include <linux/slab.h>
-#include <linux/raid/raid5.h>
 #include <linux/highmem.h>
 #include <linux/bitops.h>
 #include <linux/kthread.h>
 #include <asm/atomic.h>
+#include "raid6.h"
 
 #include <linux/raid/bitmap.h>
 
@@ -68,6 +71,16 @@
 #define __inline__
 #endif
 
+#if !RAID6_USE_EMPTY_ZERO_PAGE
+/* In .bss so it's zeroed */
+const char raid6_empty_zero_page[PAGE_SIZE] __attribute__((aligned(256)));
+#endif
+
+static inline int raid6_next_disk(int disk, int raid_disks)
+{
+	disk++;
+	return (disk < raid_disks) ? disk : 0;
+}
 static void print_raid5_conf (raid5_conf_t *conf);
 
 static void __release_stripe(raid5_conf_t *conf, struct stripe_head *sh)
@@ -104,7 +117,7 @@
 {
 	raid5_conf_t *conf = sh->raid_conf;
 	unsigned long flags;
-	
+
 	spin_lock_irqsave(&conf->device_lock, flags);
 	__release_stripe(conf, sh);
 	spin_unlock_irqrestore(&conf->device_lock, flags);
@@ -117,7 +130,7 @@
 	hlist_del_init(&sh->hash);
 }
 
-static void insert_hash(raid5_conf_t *conf, struct stripe_head *sh)
+static inline void insert_hash(raid5_conf_t *conf, struct stripe_head *sh)
 {
 	struct hlist_head *hp = stripe_hash(conf, sh->sector);
 
@@ -190,7 +203,7 @@
 		(unsigned long long)sh->sector);
 
 	remove_hash(sh);
-	
+
 	sh->sector = sector;
 	sh->pd_idx = pd_idx;
 	sh->state = 0;
@@ -269,8 +282,9 @@
 			} else {
 				if (!test_bit(STRIPE_HANDLE, &sh->state))
 					atomic_inc(&conf->active_stripes);
-				if (!list_empty(&sh->lru))
-					list_del_init(&sh->lru);
+				if (list_empty(&sh->lru))
+					BUG();
+				list_del_init(&sh->lru);
 			}
 		}
 	} while (sh == NULL);
@@ -321,10 +335,9 @@
 		return 1;
 	conf->slab_cache = sc;
 	conf->pool_size = devs;
-	while (num--) {
+	while (num--)
 		if (!grow_one_stripe(conf))
 			return 1;
-	}
 	return 0;
 }
 
@@ -631,8 +644,7 @@
 	dev->req.bi_private = sh;
 
 	dev->flags = 0;
-	if (i != sh->pd_idx)
-		dev->sector = compute_blocknr(sh, i);
+	dev->sector = compute_blocknr(sh, i);
 }
 
 static void error(mddev_t *mddev, mdk_rdev_t *rdev)
@@ -659,7 +671,7 @@
 			" Operation continuing on %d devices\n",
 			bdevname(rdev->bdev,b), conf->working_disks);
 	}
-}	
+}
 
 /*
  * Input: a 'big' sector number,
@@ -697,9 +709,12 @@
 	/*
 	 * Select the parity disk based on the user selected algorithm.
 	 */
-	if (conf->level == 4)
+	switch(conf->level) {
+	case 4:
 		*pd_idx = data_disks;
-	else switch (conf->algorithm) {
+		break;
+	case 5:
+		switch (conf->algorithm) {
 		case ALGORITHM_LEFT_ASYMMETRIC:
 			*pd_idx = data_disks - stripe % raid_disks;
 			if (*dd_idx >= *pd_idx)
@@ -721,6 +736,39 @@
 		default:
 			printk(KERN_ERR "raid5: unsupported algorithm %d\n",
 				conf->algorithm);
+		}
+		break;
+	case 6:
+
+		/**** FIX THIS ****/
+		switch (conf->algorithm) {
+		case ALGORITHM_LEFT_ASYMMETRIC:
+			*pd_idx = raid_disks - 1 - (stripe % raid_disks);
+			if (*pd_idx == raid_disks-1)
+				(*dd_idx)++; 	/* Q D D D P */
+			else if (*dd_idx >= *pd_idx)
+				(*dd_idx) += 2; /* D D P Q D */
+			break;
+		case ALGORITHM_RIGHT_ASYMMETRIC:
+			*pd_idx = stripe % raid_disks;
+			if (*pd_idx == raid_disks-1)
+				(*dd_idx)++; 	/* Q D D D P */
+			else if (*dd_idx >= *pd_idx)
+				(*dd_idx) += 2; /* D D P Q D */
+			break;
+		case ALGORITHM_LEFT_SYMMETRIC:
+			*pd_idx = raid_disks - 1 - (stripe % raid_disks);
+			*dd_idx = (*pd_idx + 2 + *dd_idx) % raid_disks;
+			break;
+		case ALGORITHM_RIGHT_SYMMETRIC:
+			*pd_idx = stripe % raid_disks;
+			*dd_idx = (*pd_idx + 2 + *dd_idx) % raid_disks;
+			break;
+		default:
+			printk (KERN_CRIT "raid6: unsupported algorithm %d\n",
+				conf->algorithm);
+		}
+		break;
 	}
 
 	/*
@@ -742,12 +790,17 @@
 	int chunk_number, dummy1, dummy2, dd_idx = i;
 	sector_t r_sector;
 
+
 	chunk_offset = sector_div(new_sector, sectors_per_chunk);
 	stripe = new_sector;
 	BUG_ON(new_sector != stripe);
 
-	
-	switch (conf->algorithm) {
+	if (i == sh->pd_idx)
+		return 0;
+	switch(conf->level) {
+	case 4: break;
+	case 5:
+		switch (conf->algorithm) {
 		case ALGORITHM_LEFT_ASYMMETRIC:
 		case ALGORITHM_RIGHT_ASYMMETRIC:
 			if (i > sh->pd_idx)
@@ -761,7 +814,37 @@
 			break;
 		default:
 			printk(KERN_ERR "raid5: unsupported algorithm %d\n",
+			       conf->algorithm);
+		}
+		break;
+	case 6:
+		data_disks = raid_disks - 2;
+		if (i == raid6_next_disk(sh->pd_idx, raid_disks))
+			return 0; /* It is the Q disk */
+		switch (conf->algorithm) {
+		case ALGORITHM_LEFT_ASYMMETRIC:
+		case ALGORITHM_RIGHT_ASYMMETRIC:
+		  	if (sh->pd_idx == raid_disks-1)
+				i--; 	/* Q D D D P */
+			else if (i > sh->pd_idx)
+				i -= 2; /* D D P Q D */
+			break;
+		case ALGORITHM_LEFT_SYMMETRIC:
+		case ALGORITHM_RIGHT_SYMMETRIC:
+			if (sh->pd_idx == raid_disks-1)
+				i--; /* Q D D D P */
+			else {
+				/* D D P Q D */
+				if (i < sh->pd_idx)
+					i += raid_disks;
+				i -= (sh->pd_idx + 2);
+			}
+			break;
+		default:
+			printk (KERN_CRIT "raid6: unsupported algorithm %d\n",
 				conf->algorithm);
+		}
+		break;
 	}
 
 	chunk_number = stripe * data_disks + i;
@@ -778,10 +861,11 @@
 
 
 /*
- * Copy data between a page in the stripe cache, and a bio.
- * There are no alignment or size guarantees between the page or the
- * bio except that there is some overlap.
- * All iovecs in the bio must be considered.
+ * Copy data between a page in the stripe cache, and one or more bion
+ * The page could align with the middle of the bio, or there could be
+ * several bion, each with several bio_vecs, which cover part of the page
+ * Multiple bion are linked together on bi_next.  There may be extras
+ * at the end of this list.  We ignore them.
  */
 static void copy_data(int frombio, struct bio *bio,
 		     struct page *page,
@@ -810,7 +894,7 @@
 		if (len > 0 && page_offset + len > STRIPE_SIZE)
 			clen = STRIPE_SIZE - page_offset;
 		else clen = len;
-			
+
 		if (clen > 0) {
 			char *ba = __bio_kmap_atomic(bio, i, KM_USER0);
 			if (frombio)
@@ -862,14 +946,14 @@
 	set_bit(R5_UPTODATE, &sh->dev[dd_idx].flags);
 }
 
-static void compute_parity(struct stripe_head *sh, int method)
+static void compute_parity5(struct stripe_head *sh, int method)
 {
 	raid5_conf_t *conf = sh->raid_conf;
 	int i, pd_idx = sh->pd_idx, disks = sh->disks, count;
 	void *ptr[MAX_XOR_BLOCKS];
 	struct bio *chosen;
 
-	PRINTK("compute_parity, stripe %llu, method %d\n",
+	PRINTK("compute_parity5, stripe %llu, method %d\n",
 		(unsigned long long)sh->sector, method);
 
 	count = 1;
@@ -956,9 +1040,195 @@
 		clear_bit(R5_UPTODATE, &sh->dev[pd_idx].flags);
 }
 
+static void compute_parity6(struct stripe_head *sh, int method)
+{
+	raid6_conf_t *conf = sh->raid_conf;
+	int i, pd_idx = sh->pd_idx, qd_idx, d0_idx, disks = conf->raid_disks, count;
+	struct bio *chosen;
+	/**** FIX THIS: This could be very bad if disks is close to 256 ****/
+	void *ptrs[disks];
+
+	qd_idx = raid6_next_disk(pd_idx, disks);
+	d0_idx = raid6_next_disk(qd_idx, disks);
+
+	PRINTK("compute_parity, stripe %llu, method %d\n",
+		(unsigned long long)sh->sector, method);
+
+	switch(method) {
+	case READ_MODIFY_WRITE:
+		BUG();		/* READ_MODIFY_WRITE N/A for RAID-6 */
+	case RECONSTRUCT_WRITE:
+		for (i= disks; i-- ;)
+			if ( i != pd_idx && i != qd_idx && sh->dev[i].towrite ) {
+				chosen = sh->dev[i].towrite;
+				sh->dev[i].towrite = NULL;
+
+				if (test_and_clear_bit(R5_Overlap, &sh->dev[i].flags))
+					wake_up(&conf->wait_for_overlap);
+
+				if (sh->dev[i].written) BUG();
+				sh->dev[i].written = chosen;
+			}
+		break;
+	case CHECK_PARITY:
+		BUG();		/* Not implemented yet */
+	}
+
+	for (i = disks; i--;)
+		if (sh->dev[i].written) {
+			sector_t sector = sh->dev[i].sector;
+			struct bio *wbi = sh->dev[i].written;
+			while (wbi && wbi->bi_sector < sector + STRIPE_SECTORS) {
+				copy_data(1, wbi, sh->dev[i].page, sector);
+				wbi = r5_next_bio(wbi, sector);
+			}
+
+			set_bit(R5_LOCKED, &sh->dev[i].flags);
+			set_bit(R5_UPTODATE, &sh->dev[i].flags);
+		}
+
+//	switch(method) {
+//	case RECONSTRUCT_WRITE:
+//	case CHECK_PARITY:
+//	case UPDATE_PARITY:
+		/* Note that unlike RAID-5, the ordering of the disks matters greatly. */
+		/* FIX: Is this ordering of drives even remotely optimal? */
+		count = 0;
+		i = d0_idx;
+		do {
+			ptrs[count++] = page_address(sh->dev[i].page);
+			if (count <= disks-2 && !test_bit(R5_UPTODATE, &sh->dev[i].flags))
+				printk("block %d/%d not uptodate on parity calc\n", i,count);
+			i = raid6_next_disk(i, disks);
+		} while ( i != d0_idx );
+//		break;
+//	}
+
+	raid6_call.gen_syndrome(disks, STRIPE_SIZE, ptrs);
+
+	switch(method) {
+	case RECONSTRUCT_WRITE:
+		set_bit(R5_UPTODATE, &sh->dev[pd_idx].flags);
+		set_bit(R5_UPTODATE, &sh->dev[qd_idx].flags);
+		set_bit(R5_LOCKED,   &sh->dev[pd_idx].flags);
+		set_bit(R5_LOCKED,   &sh->dev[qd_idx].flags);
+		break;
+	case UPDATE_PARITY:
+		set_bit(R5_UPTODATE, &sh->dev[pd_idx].flags);
+		set_bit(R5_UPTODATE, &sh->dev[qd_idx].flags);
+		break;
+	}
+}
+
+
+/* Compute one missing block */
+static void compute_block_1(struct stripe_head *sh, int dd_idx, int nozero)
+{
+	raid6_conf_t *conf = sh->raid_conf;
+	int i, count, disks = conf->raid_disks;
+	void *ptr[MAX_XOR_BLOCKS], *p;
+	int pd_idx = sh->pd_idx;
+	int qd_idx = raid6_next_disk(pd_idx, disks);
+
+	PRINTK("compute_block_1, stripe %llu, idx %d\n",
+		(unsigned long long)sh->sector, dd_idx);
+
+	if ( dd_idx == qd_idx ) {
+		/* We're actually computing the Q drive */
+		compute_parity6(sh, UPDATE_PARITY);
+	} else {
+		ptr[0] = page_address(sh->dev[dd_idx].page);
+		if (!nozero) memset(ptr[0], 0, STRIPE_SIZE);
+		count = 1;
+		for (i = disks ; i--; ) {
+			if (i == dd_idx || i == qd_idx)
+				continue;
+			p = page_address(sh->dev[i].page);
+			if (test_bit(R5_UPTODATE, &sh->dev[i].flags))
+				ptr[count++] = p;
+			else
+				printk("compute_block() %d, stripe %llu, %d"
+				       " not present\n", dd_idx,
+				       (unsigned long long)sh->sector, i);
+
+			check_xor();
+		}
+		if (count != 1)
+			xor_block(count, STRIPE_SIZE, ptr);
+		if (!nozero) set_bit(R5_UPTODATE, &sh->dev[dd_idx].flags);
+		else clear_bit(R5_UPTODATE, &sh->dev[dd_idx].flags);
+	}
+}
+
+/* Compute two missing blocks */
+static void compute_block_2(struct stripe_head *sh, int dd_idx1, int dd_idx2)
+{
+	raid6_conf_t *conf = sh->raid_conf;
+	int i, count, disks = conf->raid_disks;
+	int pd_idx = sh->pd_idx;
+	int qd_idx = raid6_next_disk(pd_idx, disks);
+	int d0_idx = raid6_next_disk(qd_idx, disks);
+	int faila, failb;
+
+	/* faila and failb are disk numbers relative to d0_idx */
+	/* pd_idx become disks-2 and qd_idx become disks-1 */
+	faila = (dd_idx1 < d0_idx) ? dd_idx1+(disks-d0_idx) : dd_idx1-d0_idx;
+	failb = (dd_idx2 < d0_idx) ? dd_idx2+(disks-d0_idx) : dd_idx2-d0_idx;
+
+	BUG_ON(faila == failb);
+	if ( failb < faila ) { int tmp = faila; faila = failb; failb = tmp; }
+
+	PRINTK("compute_block_2, stripe %llu, idx %d,%d (%d,%d)\n",
+	       (unsigned long long)sh->sector, dd_idx1, dd_idx2, faila, failb);
+
+	if ( failb == disks-1 ) {
+		/* Q disk is one of the missing disks */
+		if ( faila == disks-2 ) {
+			/* Missing P+Q, just recompute */
+			compute_parity6(sh, UPDATE_PARITY);
+			return;
+		} else {
+			/* We're missing D+Q; recompute D from P */
+			compute_block_1(sh, (dd_idx1 == qd_idx) ? dd_idx2 : dd_idx1, 0);
+			compute_parity6(sh, UPDATE_PARITY); /* Is this necessary? */
+			return;
+		}
+	}
+
+	/* We're missing D+P or D+D; build pointer table */
+	{
+		/**** FIX THIS: This could be very bad if disks is close to 256 ****/
+		void *ptrs[disks];
+
+		count = 0;
+		i = d0_idx;
+		do {
+			ptrs[count++] = page_address(sh->dev[i].page);
+			i = raid6_next_disk(i, disks);
+			if (i != dd_idx1 && i != dd_idx2 &&
+			    !test_bit(R5_UPTODATE, &sh->dev[i].flags))
+				printk("compute_2 with missing block %d/%d\n", count, i);
+		} while ( i != d0_idx );
+
+		if ( failb == disks-2 ) {
+			/* We're missing D+P. */
+			raid6_datap_recov(disks, STRIPE_SIZE, faila, ptrs);
+		} else {
+			/* We're missing D+D. */
+			raid6_2data_recov(disks, STRIPE_SIZE, faila, failb, ptrs);
+		}
+
+		/* Both the above update both missing blocks */
+		set_bit(R5_UPTODATE, &sh->dev[dd_idx1].flags);
+		set_bit(R5_UPTODATE, &sh->dev[dd_idx2].flags);
+	}
+}
+
+
+
 /*
  * Each stripe/dev can have one or more bion attached.
- * toread/towrite point to the first in a chain. 
+ * toread/towrite point to the first in a chain.
  * The bi_next chain must be in order.
  */
 static int add_stripe_bio(struct stripe_head *sh, struct bio *bi, int dd_idx, int forwrite)
@@ -1031,6 +1301,13 @@
 
 static void end_reshape(raid5_conf_t *conf);
 
+static int page_is_zero(struct page *p)
+{
+	char *a = page_address(p);
+	return ((*(u32*)a) == 0 &&
+		memcmp(a, a+4, STRIPE_SIZE-4)==0);
+}
+
 static int stripe_to_pdidx(sector_t stripe, raid5_conf_t *conf, int disks)
 {
 	int sectors_per_chunk = conf->chunk_size >> 9;
@@ -1062,7 +1339,7 @@
  *
  */
  
-static void handle_stripe(struct stripe_head *sh)
+static void handle_stripe5(struct stripe_head *sh)
 {
 	raid5_conf_t *conf = sh->raid_conf;
 	int disks = sh->disks;
@@ -1394,7 +1671,7 @@
 		if (locked == 0 && (rcw == 0 ||rmw == 0) &&
 		    !test_bit(STRIPE_BIT_DELAY, &sh->state)) {
 			PRINTK("Computing parity...\n");
-			compute_parity(sh, rcw==0 ? RECONSTRUCT_WRITE : READ_MODIFY_WRITE);
+			compute_parity5(sh, rcw==0 ? RECONSTRUCT_WRITE : READ_MODIFY_WRITE);
 			/* now every locked buffer is ready to be written */
 			for (i=disks; i--;)
 				if (test_bit(R5_LOCKED, &sh->dev[i].flags)) {
@@ -1421,13 +1698,10 @@
 	    !test_bit(STRIPE_INSYNC, &sh->state)) {
 		set_bit(STRIPE_HANDLE, &sh->state);
 		if (failed == 0) {
-			char *pagea;
 			BUG_ON(uptodate != disks);
-			compute_parity(sh, CHECK_PARITY);
+			compute_parity5(sh, CHECK_PARITY);
 			uptodate--;
-			pagea = page_address(sh->dev[sh->pd_idx].page);
-			if ((*(u32*)pagea) == 0 &&
-			    !memcmp(pagea, pagea+4, STRIPE_SIZE-4)) {
+			if (page_is_zero(sh->dev[sh->pd_idx].page)) {
 				/* parity is correct (on disc, not in buffer any more) */
 				set_bit(STRIPE_INSYNC, &sh->state);
 			} else {
@@ -1487,7 +1761,7 @@
 		/* Need to write out all blocks after computing parity */
 		sh->disks = conf->raid_disks;
 		sh->pd_idx = stripe_to_pdidx(sh->sector, conf, conf->raid_disks);
-		compute_parity(sh, RECONSTRUCT_WRITE);
+		compute_parity5(sh, RECONSTRUCT_WRITE);
 		for (i= conf->raid_disks; i--;) {
 			set_bit(R5_LOCKED, &sh->dev[i].flags);
 			locked++;
@@ -1615,6 +1889,569 @@
 	}
 }
 
+static void handle_stripe6(struct stripe_head *sh, struct page *tmp_page)
+{
+	raid6_conf_t *conf = sh->raid_conf;
+	int disks = conf->raid_disks;
+	struct bio *return_bi= NULL;
+	struct bio *bi;
+	int i;
+	int syncing;
+	int locked=0, uptodate=0, to_read=0, to_write=0, failed=0, written=0;
+	int non_overwrite = 0;
+	int failed_num[2] = {0, 0};
+	struct r5dev *dev, *pdev, *qdev;
+	int pd_idx = sh->pd_idx;
+	int qd_idx = raid6_next_disk(pd_idx, disks);
+	int p_failed, q_failed;
+
+	PRINTK("handling stripe %llu, state=%#lx cnt=%d, pd_idx=%d, qd_idx=%d\n",
+	       (unsigned long long)sh->sector, sh->state, atomic_read(&sh->count),
+	       pd_idx, qd_idx);
+
+	spin_lock(&sh->lock);
+	clear_bit(STRIPE_HANDLE, &sh->state);
+	clear_bit(STRIPE_DELAYED, &sh->state);
+
+	syncing = test_bit(STRIPE_SYNCING, &sh->state);
+	/* Now to look around and see what can be done */
+
+	rcu_read_lock();
+	for (i=disks; i--; ) {
+		mdk_rdev_t *rdev;
+		dev = &sh->dev[i];
+		clear_bit(R5_Insync, &dev->flags);
+
+		PRINTK("check %d: state 0x%lx read %p write %p written %p\n",
+			i, dev->flags, dev->toread, dev->towrite, dev->written);
+		/* maybe we can reply to a read */
+		if (test_bit(R5_UPTODATE, &dev->flags) && dev->toread) {
+			struct bio *rbi, *rbi2;
+			PRINTK("Return read for disc %d\n", i);
+			spin_lock_irq(&conf->device_lock);
+			rbi = dev->toread;
+			dev->toread = NULL;
+			if (test_and_clear_bit(R5_Overlap, &dev->flags))
+				wake_up(&conf->wait_for_overlap);
+			spin_unlock_irq(&conf->device_lock);
+			while (rbi && rbi->bi_sector < dev->sector + STRIPE_SECTORS) {
+				copy_data(0, rbi, dev->page, dev->sector);
+				rbi2 = r5_next_bio(rbi, dev->sector);
+				spin_lock_irq(&conf->device_lock);
+				if (--rbi->bi_phys_segments == 0) {
+					rbi->bi_next = return_bi;
+					return_bi = rbi;
+				}
+				spin_unlock_irq(&conf->device_lock);
+				rbi = rbi2;
+			}
+		}
+
+		/* now count some things */
+		if (test_bit(R5_LOCKED, &dev->flags)) locked++;
+		if (test_bit(R5_UPTODATE, &dev->flags)) uptodate++;
+
+
+		if (dev->toread) to_read++;
+		if (dev->towrite) {
+			to_write++;
+			if (!test_bit(R5_OVERWRITE, &dev->flags))
+				non_overwrite++;
+		}
+		if (dev->written) written++;
+		rdev = rcu_dereference(conf->disks[i].rdev);
+		if (!rdev || !test_bit(In_sync, &rdev->flags)) {
+			/* The ReadError flag will just be confusing now */
+			clear_bit(R5_ReadError, &dev->flags);
+			clear_bit(R5_ReWrite, &dev->flags);
+		}
+		if (!rdev || !test_bit(In_sync, &rdev->flags)
+		    || test_bit(R5_ReadError, &dev->flags)) {
+			if ( failed < 2 )
+				failed_num[failed] = i;
+			failed++;
+		} else
+			set_bit(R5_Insync, &dev->flags);
+	}
+	rcu_read_unlock();
+	PRINTK("locked=%d uptodate=%d to_read=%d"
+	       " to_write=%d failed=%d failed_num=%d,%d\n",
+	       locked, uptodate, to_read, to_write, failed,
+	       failed_num[0], failed_num[1]);
+	/* check if the array has lost >2 devices and, if so, some requests might
+	 * need to be failed
+	 */
+	if (failed > 2 && to_read+to_write+written) {
+		for (i=disks; i--; ) {
+			int bitmap_end = 0;
+
+			if (test_bit(R5_ReadError, &sh->dev[i].flags)) {
+				mdk_rdev_t *rdev;
+				rcu_read_lock();
+				rdev = rcu_dereference(conf->disks[i].rdev);
+				if (rdev && test_bit(In_sync, &rdev->flags))
+					/* multiple read failures in one stripe */
+					md_error(conf->mddev, rdev);
+				rcu_read_unlock();
+			}
+
+			spin_lock_irq(&conf->device_lock);
+			/* fail all writes first */
+			bi = sh->dev[i].towrite;
+			sh->dev[i].towrite = NULL;
+			if (bi) { to_write--; bitmap_end = 1; }
+
+			if (test_and_clear_bit(R5_Overlap, &sh->dev[i].flags))
+				wake_up(&conf->wait_for_overlap);
+
+			while (bi && bi->bi_sector < sh->dev[i].sector + STRIPE_SECTORS){
+				struct bio *nextbi = r5_next_bio(bi, sh->dev[i].sector);
+				clear_bit(BIO_UPTODATE, &bi->bi_flags);
+				if (--bi->bi_phys_segments == 0) {
+					md_write_end(conf->mddev);
+					bi->bi_next = return_bi;
+					return_bi = bi;
+				}
+				bi = nextbi;
+			}
+			/* and fail all 'written' */
+			bi = sh->dev[i].written;
+			sh->dev[i].written = NULL;
+			if (bi) bitmap_end = 1;
+			while (bi && bi->bi_sector < sh->dev[i].sector + STRIPE_SECTORS) {
+				struct bio *bi2 = r5_next_bio(bi, sh->dev[i].sector);
+				clear_bit(BIO_UPTODATE, &bi->bi_flags);
+				if (--bi->bi_phys_segments == 0) {
+					md_write_end(conf->mddev);
+					bi->bi_next = return_bi;
+					return_bi = bi;
+				}
+				bi = bi2;
+			}
+
+			/* fail any reads if this device is non-operational */
+			if (!test_bit(R5_Insync, &sh->dev[i].flags) ||
+			    test_bit(R5_ReadError, &sh->dev[i].flags)) {
+				bi = sh->dev[i].toread;
+				sh->dev[i].toread = NULL;
+				if (test_and_clear_bit(R5_Overlap, &sh->dev[i].flags))
+					wake_up(&conf->wait_for_overlap);
+				if (bi) to_read--;
+				while (bi && bi->bi_sector < sh->dev[i].sector + STRIPE_SECTORS){
+					struct bio *nextbi = r5_next_bio(bi, sh->dev[i].sector);
+					clear_bit(BIO_UPTODATE, &bi->bi_flags);
+					if (--bi->bi_phys_segments == 0) {
+						bi->bi_next = return_bi;
+						return_bi = bi;
+					}
+					bi = nextbi;
+				}
+			}
+			spin_unlock_irq(&conf->device_lock);
+			if (bitmap_end)
+				bitmap_endwrite(conf->mddev->bitmap, sh->sector,
+						STRIPE_SECTORS, 0, 0);
+		}
+	}
+	if (failed > 2 && syncing) {
+		md_done_sync(conf->mddev, STRIPE_SECTORS,0);
+		clear_bit(STRIPE_SYNCING, &sh->state);
+		syncing = 0;
+	}
+
+	/*
+	 * might be able to return some write requests if the parity blocks
+	 * are safe, or on a failed drive
+	 */
+	pdev = &sh->dev[pd_idx];
+	p_failed = (failed >= 1 && failed_num[0] == pd_idx)
+		|| (failed >= 2 && failed_num[1] == pd_idx);
+	qdev = &sh->dev[qd_idx];
+	q_failed = (failed >= 1 && failed_num[0] == qd_idx)
+		|| (failed >= 2 && failed_num[1] == qd_idx);
+
+	if ( written &&
+	     ( p_failed || ((test_bit(R5_Insync, &pdev->flags)
+			     && !test_bit(R5_LOCKED, &pdev->flags)
+			     && test_bit(R5_UPTODATE, &pdev->flags))) ) &&
+	     ( q_failed || ((test_bit(R5_Insync, &qdev->flags)
+			     && !test_bit(R5_LOCKED, &qdev->flags)
+			     && test_bit(R5_UPTODATE, &qdev->flags))) ) ) {
+		/* any written block on an uptodate or failed drive can be
+		 * returned.  Note that if we 'wrote' to a failed drive,
+		 * it will be UPTODATE, but never LOCKED, so we don't need
+		 * to test 'failed' directly.
+		 */
+		for (i=disks; i--; )
+			if (sh->dev[i].written) {
+				dev = &sh->dev[i];
+				if (!test_bit(R5_LOCKED, &dev->flags) &&
+				    test_bit(R5_UPTODATE, &dev->flags) ) {
+					/* We can return any write requests */
+					int bitmap_end = 0;
+					struct bio *wbi, *wbi2;
+					PRINTK("Return write for stripe %llu disc %d\n",
+					       (unsigned long long)sh->sector, i);
+					spin_lock_irq(&conf->device_lock);
+					wbi = dev->written;
+					dev->written = NULL;
+					while (wbi && wbi->bi_sector < dev->sector + STRIPE_SECTORS) {
+						wbi2 = r5_next_bio(wbi, dev->sector);
+						if (--wbi->bi_phys_segments == 0) {
+							md_write_end(conf->mddev);
+							wbi->bi_next = return_bi;
+							return_bi = wbi;
+						}
+						wbi = wbi2;
+					}
+					if (dev->towrite == NULL)
+						bitmap_end = 1;
+					spin_unlock_irq(&conf->device_lock);
+					if (bitmap_end)
+						bitmap_endwrite(conf->mddev->bitmap, sh->sector,
+								STRIPE_SECTORS,
+								!test_bit(STRIPE_DEGRADED, &sh->state), 0);
+				}
+			}
+	}
+
+	/* Now we might consider reading some blocks, either to check/generate
+	 * parity, or to satisfy requests
+	 * or to load a block that is being partially written.
+	 */
+	if (to_read || non_overwrite || (to_write && failed) || (syncing && (uptodate < disks))) {
+		for (i=disks; i--;) {
+			dev = &sh->dev[i];
+			if (!test_bit(R5_LOCKED, &dev->flags) && !test_bit(R5_UPTODATE, &dev->flags) &&
+			    (dev->toread ||
+			     (dev->towrite && !test_bit(R5_OVERWRITE, &dev->flags)) ||
+			     syncing ||
+			     (failed >= 1 && (sh->dev[failed_num[0]].toread || to_write)) ||
+			     (failed >= 2 && (sh->dev[failed_num[1]].toread || to_write))
+				    )
+				) {
+				/* we would like to get this block, possibly
+				 * by computing it, but we might not be able to
+				 */
+				if (uptodate == disks-1) {
+					PRINTK("Computing stripe %llu block %d\n",
+					       (unsigned long long)sh->sector, i);
+					compute_block_1(sh, i, 0);
+					uptodate++;
+				} else if ( uptodate == disks-2 && failed >= 2 ) {
+					/* Computing 2-failure is *very* expensive; only do it if failed >= 2 */
+					int other;
+					for (other=disks; other--;) {
+						if ( other == i )
+							continue;
+						if ( !test_bit(R5_UPTODATE, &sh->dev[other].flags) )
+							break;
+					}
+					BUG_ON(other < 0);
+					PRINTK("Computing stripe %llu blocks %d,%d\n",
+					       (unsigned long long)sh->sector, i, other);
+					compute_block_2(sh, i, other);
+					uptodate += 2;
+				} else if (test_bit(R5_Insync, &dev->flags)) {
+					set_bit(R5_LOCKED, &dev->flags);
+					set_bit(R5_Wantread, &dev->flags);
+#if 0
+					/* if I am just reading this block and we don't have
+					   a failed drive, or any pending writes then sidestep the cache */
+					if (sh->bh_read[i] && !sh->bh_read[i]->b_reqnext &&
+					    ! syncing && !failed && !to_write) {
+						sh->bh_cache[i]->b_page =  sh->bh_read[i]->b_page;
+						sh->bh_cache[i]->b_data =  sh->bh_read[i]->b_data;
+					}
+#endif
+					locked++;
+					PRINTK("Reading block %d (sync=%d)\n",
+						i, syncing);
+				}
+			}
+		}
+		set_bit(STRIPE_HANDLE, &sh->state);
+	}
+
+	/* now to consider writing and what else, if anything should be read */
+	if (to_write) {
+		int rcw=0, must_compute=0;
+		for (i=disks ; i--;) {
+			dev = &sh->dev[i];
+			/* Would I have to read this buffer for reconstruct_write */
+			if (!test_bit(R5_OVERWRITE, &dev->flags)
+			    && i != pd_idx && i != qd_idx
+			    && (!test_bit(R5_LOCKED, &dev->flags)
+#if 0
+				|| sh->bh_page[i] != bh->b_page
+#endif
+				    ) &&
+			    !test_bit(R5_UPTODATE, &dev->flags)) {
+				if (test_bit(R5_Insync, &dev->flags)) rcw++;
+				else {
+					PRINTK("raid6: must_compute: disk %d flags=%#lx\n", i, dev->flags);
+					must_compute++;
+				}
+			}
+		}
+		PRINTK("for sector %llu, rcw=%d, must_compute=%d\n",
+		       (unsigned long long)sh->sector, rcw, must_compute);
+		set_bit(STRIPE_HANDLE, &sh->state);
+
+		if (rcw > 0)
+			/* want reconstruct write, but need to get some data */
+			for (i=disks; i--;) {
+				dev = &sh->dev[i];
+				if (!test_bit(R5_OVERWRITE, &dev->flags)
+				    && !(failed == 0 && (i == pd_idx || i == qd_idx))
+				    && !test_bit(R5_LOCKED, &dev->flags) && !test_bit(R5_UPTODATE, &dev->flags) &&
+				    test_bit(R5_Insync, &dev->flags)) {
+					if (test_bit(STRIPE_PREREAD_ACTIVE, &sh->state))
+					{
+						PRINTK("Read_old stripe %llu block %d for Reconstruct\n",
+						       (unsigned long long)sh->sector, i);
+						set_bit(R5_LOCKED, &dev->flags);
+						set_bit(R5_Wantread, &dev->flags);
+						locked++;
+					} else {
+						PRINTK("Request delayed stripe %llu block %d for Reconstruct\n",
+						       (unsigned long long)sh->sector, i);
+						set_bit(STRIPE_DELAYED, &sh->state);
+						set_bit(STRIPE_HANDLE, &sh->state);
+					}
+				}
+			}
+		/* now if nothing is locked, and if we have enough data, we can start a write request */
+		if (locked == 0 && rcw == 0 &&
+		    !test_bit(STRIPE_BIT_DELAY, &sh->state)) {
+			if ( must_compute > 0 ) {
+				/* We have failed blocks and need to compute them */
+				switch ( failed ) {
+				case 0:	BUG();
+				case 1: compute_block_1(sh, failed_num[0], 0); break;
+				case 2: compute_block_2(sh, failed_num[0], failed_num[1]); break;
+				default: BUG();	/* This request should have been failed? */
+				}
+			}
+
+			PRINTK("Computing parity for stripe %llu\n", (unsigned long long)sh->sector);
+			compute_parity6(sh, RECONSTRUCT_WRITE);
+			/* now every locked buffer is ready to be written */
+			for (i=disks; i--;)
+				if (test_bit(R5_LOCKED, &sh->dev[i].flags)) {
+					PRINTK("Writing stripe %llu block %d\n",
+					       (unsigned long long)sh->sector, i);
+					locked++;
+					set_bit(R5_Wantwrite, &sh->dev[i].flags);
+				}
+			/* after a RECONSTRUCT_WRITE, the stripe MUST be in-sync */
+			set_bit(STRIPE_INSYNC, &sh->state);
+
+			if (test_and_clear_bit(STRIPE_PREREAD_ACTIVE, &sh->state)) {
+				atomic_dec(&conf->preread_active_stripes);
+				if (atomic_read(&conf->preread_active_stripes) < IO_THRESHOLD)
+					md_wakeup_thread(conf->mddev->thread);
+			}
+		}
+	}
+
+	/* maybe we need to check and possibly fix the parity for this stripe
+	 * Any reads will already have been scheduled, so we just see if enough data
+	 * is available
+	 */
+	if (syncing && locked == 0 && !test_bit(STRIPE_INSYNC, &sh->state)) {
+		int update_p = 0, update_q = 0;
+		struct r5dev *dev;
+
+		set_bit(STRIPE_HANDLE, &sh->state);
+
+		BUG_ON(failed>2);
+		BUG_ON(uptodate < disks);
+		/* Want to check and possibly repair P and Q.
+		 * However there could be one 'failed' device, in which
+		 * case we can only check one of them, possibly using the
+		 * other to generate missing data
+		 */
+
+		/* If !tmp_page, we cannot do the calculations,
+		 * but as we have set STRIPE_HANDLE, we will soon be called
+		 * by stripe_handle with a tmp_page - just wait until then.
+		 */
+		if (tmp_page) {
+			if (failed == q_failed) {
+				/* The only possible failed device holds 'Q', so it makes
+				 * sense to check P (If anything else were failed, we would
+				 * have used P to recreate it).
+				 */
+				compute_block_1(sh, pd_idx, 1);
+				if (!page_is_zero(sh->dev[pd_idx].page)) {
+					compute_block_1(sh,pd_idx,0);
+					update_p = 1;
+				}
+			}
+			if (!q_failed && failed < 2) {
+				/* q is not failed, and we didn't use it to generate
+				 * anything, so it makes sense to check it
+				 */
+				memcpy(page_address(tmp_page),
+				       page_address(sh->dev[qd_idx].page),
+				       STRIPE_SIZE);
+				compute_parity6(sh, UPDATE_PARITY);
+				if (memcmp(page_address(tmp_page),
+					   page_address(sh->dev[qd_idx].page),
+					   STRIPE_SIZE)!= 0) {
+					clear_bit(STRIPE_INSYNC, &sh->state);
+					update_q = 1;
+				}
+			}
+			if (update_p || update_q) {
+				conf->mddev->resync_mismatches += STRIPE_SECTORS;
+				if (test_bit(MD_RECOVERY_CHECK, &conf->mddev->recovery))
+					/* don't try to repair!! */
+					update_p = update_q = 0;
+			}
+
+			/* now write out any block on a failed drive,
+			 * or P or Q if they need it
+			 */
+
+			if (failed == 2) {
+				dev = &sh->dev[failed_num[1]];
+				locked++;
+				set_bit(R5_LOCKED, &dev->flags);
+				set_bit(R5_Wantwrite, &dev->flags);
+			}
+			if (failed >= 1) {
+				dev = &sh->dev[failed_num[0]];
+				locked++;
+				set_bit(R5_LOCKED, &dev->flags);
+				set_bit(R5_Wantwrite, &dev->flags);
+			}
+
+			if (update_p) {
+				dev = &sh->dev[pd_idx];
+				locked ++;
+				set_bit(R5_LOCKED, &dev->flags);
+				set_bit(R5_Wantwrite, &dev->flags);
+			}
+			if (update_q) {
+				dev = &sh->dev[qd_idx];
+				locked++;
+				set_bit(R5_LOCKED, &dev->flags);
+				set_bit(R5_Wantwrite, &dev->flags);
+			}
+			clear_bit(STRIPE_DEGRADED, &sh->state);
+
+			set_bit(STRIPE_INSYNC, &sh->state);
+		}
+	}
+
+	if (syncing && locked == 0 && test_bit(STRIPE_INSYNC, &sh->state)) {
+		md_done_sync(conf->mddev, STRIPE_SECTORS,1);
+		clear_bit(STRIPE_SYNCING, &sh->state);
+	}
+
+	/* If the failed drives are just a ReadError, then we might need
+	 * to progress the repair/check process
+	 */
+	if (failed <= 2 && ! conf->mddev->ro)
+		for (i=0; i<failed;i++) {
+			dev = &sh->dev[failed_num[i]];
+			if (test_bit(R5_ReadError, &dev->flags)
+			    && !test_bit(R5_LOCKED, &dev->flags)
+			    && test_bit(R5_UPTODATE, &dev->flags)
+				) {
+				if (!test_bit(R5_ReWrite, &dev->flags)) {
+					set_bit(R5_Wantwrite, &dev->flags);
+					set_bit(R5_ReWrite, &dev->flags);
+					set_bit(R5_LOCKED, &dev->flags);
+				} else {
+					/* let's read it back */
+					set_bit(R5_Wantread, &dev->flags);
+					set_bit(R5_LOCKED, &dev->flags);
+				}
+			}
+		}
+	spin_unlock(&sh->lock);
+
+	while ((bi=return_bi)) {
+		int bytes = bi->bi_size;
+
+		return_bi = bi->bi_next;
+		bi->bi_next = NULL;
+		bi->bi_size = 0;
+		bi->bi_end_io(bi, bytes, 0);
+	}
+	for (i=disks; i-- ;) {
+		int rw;
+		struct bio *bi;
+		mdk_rdev_t *rdev;
+		if (test_and_clear_bit(R5_Wantwrite, &sh->dev[i].flags))
+			rw = 1;
+		else if (test_and_clear_bit(R5_Wantread, &sh->dev[i].flags))
+			rw = 0;
+		else
+			continue;
+
+		bi = &sh->dev[i].req;
+
+		bi->bi_rw = rw;
+		if (rw)
+			bi->bi_end_io = raid5_end_write_request;
+		else
+			bi->bi_end_io = raid5_end_read_request;
+
+		rcu_read_lock();
+		rdev = rcu_dereference(conf->disks[i].rdev);
+		if (rdev && test_bit(Faulty, &rdev->flags))
+			rdev = NULL;
+		if (rdev)
+			atomic_inc(&rdev->nr_pending);
+		rcu_read_unlock();
+
+		if (rdev) {
+			if (syncing)
+				md_sync_acct(rdev->bdev, STRIPE_SECTORS);
+
+			bi->bi_bdev = rdev->bdev;
+			PRINTK("for %llu schedule op %ld on disc %d\n",
+				(unsigned long long)sh->sector, bi->bi_rw, i);
+			atomic_inc(&sh->count);
+			bi->bi_sector = sh->sector + rdev->data_offset;
+			bi->bi_flags = 1 << BIO_UPTODATE;
+			bi->bi_vcnt = 1;
+			bi->bi_max_vecs = 1;
+			bi->bi_idx = 0;
+			bi->bi_io_vec = &sh->dev[i].vec;
+			bi->bi_io_vec[0].bv_len = STRIPE_SIZE;
+			bi->bi_io_vec[0].bv_offset = 0;
+			bi->bi_size = STRIPE_SIZE;
+			bi->bi_next = NULL;
+			if (rw == WRITE &&
+			    test_bit(R5_ReWrite, &sh->dev[i].flags))
+				atomic_add(STRIPE_SECTORS, &rdev->corrected_errors);
+			generic_make_request(bi);
+		} else {
+			if (rw == 1)
+				set_bit(STRIPE_DEGRADED, &sh->state);
+			PRINTK("skip op %ld on disc %d for sector %llu\n",
+				bi->bi_rw, i, (unsigned long long)sh->sector);
+			clear_bit(R5_LOCKED, &sh->dev[i].flags);
+			set_bit(STRIPE_HANDLE, &sh->state);
+		}
+	}
+}
+
+static void handle_stripe(struct stripe_head *sh, struct page *tmp_page)
+{
+	if (sh->raid_conf->level == 6)
+		handle_stripe6(sh, tmp_page);
+	else
+		handle_stripe5(sh);
+}
+
+
+
 static void raid5_activate_delayed(raid5_conf_t *conf)
 {
 	if (atomic_read(&conf->preread_active_stripes) < IO_THRESHOLD) {
@@ -1753,7 +2590,7 @@
 
 	for (;logical_sector < last_sector; logical_sector += STRIPE_SECTORS) {
 		DEFINE_WAIT(w);
-		int disks;
+		int disks, data_disks;
 
 	retry:
 		prepare_to_wait(&conf->wait_for_overlap, &w, TASK_UNINTERRUPTIBLE);
@@ -1781,7 +2618,9 @@
 			}
 			spin_unlock_irq(&conf->device_lock);
 		}
- 		new_sector = raid5_compute_sector(logical_sector, disks, disks - 1,
+		data_disks = disks - conf->max_degraded;
+
+ 		new_sector = raid5_compute_sector(logical_sector, disks, data_disks,
 						  &dd_idx, &pd_idx, conf);
 		PRINTK("raid5: make_request, sector %llu logical %llu\n",
 			(unsigned long long)new_sector, 
@@ -1833,7 +2672,7 @@
 			}
 			finish_wait(&conf->wait_for_overlap, &w);
 			raid5_plug_device(conf);
-			handle_stripe(sh);
+			handle_stripe(sh, NULL);
 			release_stripe(sh);
 		} else {
 			/* cannot get stripe for read-ahead, just give-up */
@@ -1849,7 +2688,7 @@
 	if (remaining == 0) {
 		int bytes = bi->bi_size;
 
-		if ( bio_data_dir(bi) == WRITE )
+		if ( rw == WRITE )
 			md_write_end(mddev);
 		bi->bi_size = 0;
 		bi->bi_end_io(bi, bytes, 0);
@@ -1865,9 +2704,11 @@
 	int pd_idx;
 	sector_t first_sector, last_sector;
 	int raid_disks = conf->raid_disks;
-	int data_disks = raid_disks-1;
+	int data_disks = raid_disks - conf->max_degraded;
 	sector_t max_sector = mddev->size << 1;
 	int sync_blocks;
+	int still_degraded = 0;
+	int i;
 
 	if (sector_nr >= max_sector) {
 		/* just being told to finish up .. nothing much to do */
@@ -1880,7 +2721,7 @@
 		if (mddev->curr_resync < max_sector) /* aborted */
 			bitmap_end_sync(mddev->bitmap, mddev->curr_resync,
 					&sync_blocks, 1);
-		else /* compelted sync */
+		else /* completed sync */
 			conf->fullsync = 0;
 		bitmap_close_sync(mddev->bitmap);
 
@@ -2003,11 +2844,12 @@
 		}
 		return conf->chunk_size>>9;
 	}
-	/* if there is 1 or more failed drives and we are trying
+	/* if there is too many failed drives and we are trying
 	 * to resync, then assert that we are finished, because there is
 	 * nothing we can do.
 	 */
-	if (mddev->degraded >= 1 && test_bit(MD_RECOVERY_SYNC, &mddev->recovery)) {
+	if (mddev->degraded >= (data_disks - raid_disks) &&
+	    test_bit(MD_RECOVERY_SYNC, &mddev->recovery)) {
 		sector_t rv = (mddev->size << 1) - sector_nr;
 		*skipped = 1;
 		return rv;
@@ -2026,17 +2868,26 @@
 	if (sh == NULL) {
 		sh = get_active_stripe(conf, sector_nr, raid_disks, pd_idx, 0);
 		/* make sure we don't swamp the stripe cache if someone else
-		 * is trying to get access 
+		 * is trying to get access
 		 */
 		schedule_timeout_uninterruptible(1);
 	}
-	bitmap_start_sync(mddev->bitmap, sector_nr, &sync_blocks, 0);
-	spin_lock(&sh->lock);	
+	/* Need to check if array will still be degraded after recovery/resync
+	 * We don't need to check the 'failed' flag as when that gets set,
+	 * recovery aborts.
+	 */
+	for (i=0; i<mddev->raid_disks; i++)
+		if (conf->disks[i].rdev == NULL)
+			still_degraded = 1;
+
+	bitmap_start_sync(mddev->bitmap, sector_nr, &sync_blocks, still_degraded);
+
+	spin_lock(&sh->lock);
 	set_bit(STRIPE_SYNCING, &sh->state);
 	clear_bit(STRIPE_INSYNC, &sh->state);
 	spin_unlock(&sh->lock);
 
-	handle_stripe(sh);
+	handle_stripe(sh, NULL);
 	release_stripe(sh);
 
 	return STRIPE_SECTORS;
@@ -2091,7 +2942,7 @@
 		spin_unlock_irq(&conf->device_lock);
 		
 		handled++;
-		handle_stripe(sh);
+		handle_stripe(sh, conf->spare_page);
 		release_stripe(sh);
 
 		spin_lock_irq(&conf->device_lock);
@@ -2181,8 +3032,8 @@
 	struct disk_info *disk;
 	struct list_head *tmp;
 
-	if (mddev->level != 5 && mddev->level != 4) {
-		printk(KERN_ERR "raid5: %s: raid level not set to 4/5 (%d)\n",
+	if (mddev->level != 5 && mddev->level != 4 && mddev->level != 6) {
+		printk(KERN_ERR "raid5: %s: raid level not set to 4/5/6 (%d)\n",
 		       mdname(mddev), mddev->level);
 		return -EIO;
 	}
@@ -2251,6 +3102,11 @@
 	if ((conf->stripe_hashtbl = kzalloc(PAGE_SIZE, GFP_KERNEL)) == NULL)
 		goto abort;
 
+	if (mddev->level == 6) {
+		conf->spare_page = alloc_page(GFP_KERNEL);
+		if (!conf->spare_page)
+			goto abort;
+	}
 	spin_lock_init(&conf->device_lock);
 	init_waitqueue_head(&conf->wait_for_stripe);
 	init_waitqueue_head(&conf->wait_for_overlap);
@@ -2282,12 +3138,16 @@
 	}
 
 	/*
-	 * 0 for a fully functional array, 1 for a degraded array.
+	 * 0 for a fully functional array, 1 or 2 for a degraded array.
 	 */
 	mddev->degraded = conf->failed_disks = conf->raid_disks - conf->working_disks;
 	conf->mddev = mddev;
 	conf->chunk_size = mddev->chunk_size;
 	conf->level = mddev->level;
+	if (conf->level == 6)
+		conf->max_degraded = 2;
+	else
+		conf->max_degraded = 1;
 	conf->algorithm = mddev->layout;
 	conf->max_nr_stripes = NR_STRIPES;
 	conf->expand_progress = mddev->reshape_position;
@@ -2296,6 +3156,11 @@
 	mddev->size &= ~(mddev->chunk_size/1024 -1);
 	mddev->resync_max_sectors = mddev->size << 1;
 
+	if (conf->level == 6 && conf->raid_disks < 4) {
+		printk(KERN_ERR "raid6: not enough configured devices for %s (%d, minimum 4)\n",
+		       mdname(mddev), conf->raid_disks);
+		goto abort;
+	}
 	if (!conf->chunk_size || conf->chunk_size % 4) {
 		printk(KERN_ERR "raid5: invalid chunk size %d for %s\n",
 			conf->chunk_size, mdname(mddev));
@@ -2307,14 +3172,14 @@
 			conf->algorithm, mdname(mddev));
 		goto abort;
 	}
-	if (mddev->degraded > 1) {
+	if (mddev->degraded > conf->max_degraded) {
 		printk(KERN_ERR "raid5: not enough operational devices for %s"
 			" (%d/%d failed)\n",
 			mdname(mddev), conf->failed_disks, conf->raid_disks);
 		goto abort;
 	}
 
-	if (mddev->degraded == 1 &&
+	if (mddev->degraded > 0 &&
 	    mddev->recovery_cp != MaxSector) {
 		if (mddev->ok_start_degraded)
 			printk(KERN_WARNING
@@ -2379,10 +3244,11 @@
 	}
 
 	/* read-ahead size must cover two whole stripes, which is
-	 * 2 * (n-1) * chunksize where 'n' is the number of raid devices
+	 * 2 * (datadisks) * chunksize where 'n' is the number of raid devices
 	 */
 	{
-		int stripe = (mddev->raid_disks-1) *
+		int data_disks = conf->previous_raid_disks - conf->max_degraded;
+		int stripe = data_disks *
 			(mddev->chunk_size / PAGE_SIZE);
 		if (mddev->queue->backing_dev_info.ra_pages < 2 * stripe)
 			mddev->queue->backing_dev_info.ra_pages = 2 * stripe;
@@ -2393,12 +3259,14 @@
 
 	mddev->queue->unplug_fn = raid5_unplug_device;
 	mddev->queue->issue_flush_fn = raid5_issue_flush;
-	mddev->array_size =  mddev->size * (conf->previous_raid_disks - 1);
+	mddev->array_size =  mddev->size * (conf->previous_raid_disks -
+					    conf->max_degraded);
 
 	return 0;
 abort:
 	if (conf) {
 		print_raid5_conf(conf);
+		safe_put_page(conf->spare_page);
 		kfree(conf->disks);
 		kfree(conf->stripe_hashtbl);
 		kfree(conf);
@@ -2427,23 +3295,23 @@
 }
 
 #if RAID5_DEBUG
-static void print_sh (struct stripe_head *sh)
+static void print_sh (struct seq_file *seq, struct stripe_head *sh)
 {
 	int i;
 
-	printk("sh %llu, pd_idx %d, state %ld.\n",
-		(unsigned long long)sh->sector, sh->pd_idx, sh->state);
-	printk("sh %llu,  count %d.\n",
-		(unsigned long long)sh->sector, atomic_read(&sh->count));
-	printk("sh %llu, ", (unsigned long long)sh->sector);
+	seq_printf(seq, "sh %llu, pd_idx %d, state %ld.\n",
+		   (unsigned long long)sh->sector, sh->pd_idx, sh->state);
+	seq_printf(seq, "sh %llu,  count %d.\n",
+		   (unsigned long long)sh->sector, atomic_read(&sh->count));
+	seq_printf(seq, "sh %llu, ", (unsigned long long)sh->sector);
 	for (i = 0; i < sh->disks; i++) {
-		printk("(cache%d: %p %ld) ", 
-			i, sh->dev[i].page, sh->dev[i].flags);
+		seq_printf(seq, "(cache%d: %p %ld) ",
+			   i, sh->dev[i].page, sh->dev[i].flags);
 	}
-	printk("\n");
+	seq_printf(seq, "\n");
 }
 
-static void printall (raid5_conf_t *conf)
+static void printall (struct seq_file *seq, raid5_conf_t *conf)
 {
 	struct stripe_head *sh;
 	struct hlist_node *hn;
@@ -2454,7 +3322,7 @@
 		hlist_for_each_entry(sh, hn, &conf->stripe_hashtbl[i], hash) {
 			if (sh->raid_conf != conf)
 				continue;
-			print_sh(sh);
+			print_sh(seq, sh);
 		}
 	}
 	spin_unlock_irq(&conf->device_lock);
@@ -2474,9 +3342,8 @@
 			       test_bit(In_sync, &conf->disks[i].rdev->flags) ? "U" : "_");
 	seq_printf (seq, "]");
 #if RAID5_DEBUG
-#define D(x) \
-	seq_printf (seq, "<"#x":%d>", atomic_read(&conf->x))
-	printall(conf);
+	seq_printf (seq, "\n");
+	printall(seq, conf);
 #endif
 }
 
@@ -2560,14 +3427,20 @@
 	int disk;
 	struct disk_info *p;
 
-	if (mddev->degraded > 1)
+	if (mddev->degraded > conf->max_degraded)
 		/* no point adding a device */
 		return 0;
 
 	/*
-	 * find the disk ...
+	 * find the disk ... but prefer rdev->saved_raid_disk
+	 * if possible.
 	 */
-	for (disk=0; disk < conf->raid_disks; disk++)
+	if (rdev->saved_raid_disk >= 0 &&
+	    conf->disks[rdev->saved_raid_disk].rdev == NULL)
+		disk = rdev->saved_raid_disk;
+	else
+		disk = 0;
+	for ( ; disk < conf->raid_disks; disk++)
 		if ((p=conf->disks + disk)->rdev == NULL) {
 			clear_bit(In_sync, &rdev->flags);
 			rdev->raid_disk = disk;
@@ -2590,8 +3463,10 @@
 	 * any io in the removed space completes, but it hardly seems
 	 * worth it.
 	 */
+	raid5_conf_t *conf = mddev_to_conf(mddev);
+
 	sectors &= ~((sector_t)mddev->chunk_size/512 - 1);
-	mddev->array_size = (sectors * (mddev->raid_disks-1))>>1;
+	mddev->array_size = (sectors * (mddev->raid_disks-conf->max_degraded))>>1;
 	set_capacity(mddev->gendisk, mddev->array_size << 1);
 	mddev->changed = 1;
 	if (sectors/2  > mddev->size && mddev->recovery_cp == MaxSector) {
@@ -2731,6 +3606,17 @@
 		conf->expand_progress = MaxSector;
 		spin_unlock_irq(&conf->device_lock);
 		conf->mddev->reshape_position = MaxSector;
+
+		/* read-ahead size must cover two whole stripes, which is
+		 * 2 * (datadisks) * chunksize where 'n' is the number of raid devices
+		 */
+		{
+			int data_disks = conf->previous_raid_disks - conf->max_degraded;
+			int stripe = data_disks *
+				(conf->mddev->chunk_size / PAGE_SIZE);
+			if (conf->mddev->queue->backing_dev_info.ra_pages < 2 * stripe)
+				conf->mddev->queue->backing_dev_info.ra_pages = 2 * stripe;
+		}
 	}
 }
 
@@ -2762,6 +3648,23 @@
 	}
 }
 
+static struct mdk_personality raid6_personality =
+{
+	.name		= "raid6",
+	.level		= 6,
+	.owner		= THIS_MODULE,
+	.make_request	= make_request,
+	.run		= run,
+	.stop		= stop,
+	.status		= status,
+	.error_handler	= error,
+	.hot_add_disk	= raid5_add_disk,
+	.hot_remove_disk= raid5_remove_disk,
+	.spare_active	= raid5_spare_active,
+	.sync_request	= sync_request,
+	.resize		= raid5_resize,
+	.quiesce	= raid5_quiesce,
+};
 static struct mdk_personality raid5_personality =
 {
 	.name		= "raid5",
@@ -2804,6 +3707,12 @@
 
 static int __init raid5_init(void)
 {
+	int e;
+
+	e = raid6_select_algo();
+	if ( e )
+		return e;
+	register_md_personality(&raid6_personality);
 	register_md_personality(&raid5_personality);
 	register_md_personality(&raid4_personality);
 	return 0;
@@ -2811,6 +3720,7 @@
 
 static void raid5_exit(void)
 {
+	unregister_md_personality(&raid6_personality);
 	unregister_md_personality(&raid5_personality);
 	unregister_md_personality(&raid4_personality);
 }
@@ -2823,3 +3733,10 @@
 MODULE_ALIAS("md-raid4");
 MODULE_ALIAS("md-level-5");
 MODULE_ALIAS("md-level-4");
+MODULE_ALIAS("md-personality-8"); /* RAID6 */
+MODULE_ALIAS("md-raid6");
+MODULE_ALIAS("md-level-6");
+
+/* This used to be two separate modules, they were: */
+MODULE_ALIAS("raid5");
+MODULE_ALIAS("raid6");