Linux-2.6.12-rc2

Initial git repository build. I'm not bothering with the full history,
even though we have it. We can create a separate "historical" git
archive of that later if we want to, and in the meantime it's about
3.2GB when imported into git - space that would just make the early
git days unnecessarily complicated, when we don't have a lot of good
infrastructure for it.

Let it rip!
diff --git a/lib/Kconfig b/lib/Kconfig
new file mode 100644
index 0000000..eeb4522
--- /dev/null
+++ b/lib/Kconfig
@@ -0,0 +1,61 @@
+#
+# Library configuration
+#
+
+menu "Library routines"
+
+config CRC_CCITT
+	tristate "CRC-CCITT functions"
+	help
+	  This option is provided for the case where no in-kernel-tree
+	  modules require CRC-CCITT functions, but a module built outside
+	  the kernel tree does. Such modules that use library CRC-CCITT
+	  functions require M here.
+
+config CRC32
+	tristate "CRC32 functions"
+	default y
+	help
+	  This option is provided for the case where no in-kernel-tree
+	  modules require CRC32 functions, but a module built outside the
+	  kernel tree does. Such modules that use library CRC32 functions
+	  require M here.
+
+config LIBCRC32C
+	tristate "CRC32c (Castagnoli, et al) Cyclic Redundancy-Check"
+	help
+	  This option is provided for the case where no in-kernel-tree
+	  modules require CRC32c functions, but a module built outside the
+	  kernel tree does. Such modules that use library CRC32c functions
+	  require M here.  See Castagnoli93.
+	  Module will be libcrc32c.
+
+#
+# compression support is select'ed if needed
+#
+config ZLIB_INFLATE
+	tristate
+
+config ZLIB_DEFLATE
+	tristate
+
+#
+# reed solomon support is select'ed if needed
+#
+config REED_SOLOMON
+	tristate
+	
+config REED_SOLOMON_ENC8
+	boolean
+
+config REED_SOLOMON_DEC8
+	boolean
+
+config REED_SOLOMON_ENC16
+	boolean
+
+config REED_SOLOMON_DEC16
+	boolean
+
+endmenu
+
diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
new file mode 100644
index 0000000..426a0cf
--- /dev/null
+++ b/lib/Kconfig.debug
@@ -0,0 +1,159 @@
+
+config PRINTK_TIME
+	bool "Show timing information on printks"
+	help
+	  Selecting this option causes timing information to be
+	  included in printk output.  This allows you to measure
+	  the interval between kernel operations, including bootup
+	  operations.  This is useful for identifying long delays
+	  in kernel startup.
+
+
+config DEBUG_KERNEL
+	bool "Kernel debugging"
+	help
+	  Say Y here if you are developing drivers or trying to debug and
+	  identify kernel problems.
+
+config MAGIC_SYSRQ
+	bool "Magic SysRq key"
+	depends on DEBUG_KERNEL && !UML
+	help
+	  If you say Y here, you will have some control over the system even
+	  if the system crashes for example during kernel debugging (e.g., you
+	  will be able to flush the buffer cache to disk, reboot the system
+	  immediately or dump some status information). This is accomplished
+	  by pressing various keys while holding SysRq (Alt+PrintScreen). It
+	  also works on a serial console (on PC hardware at least), if you
+	  send a BREAK and then within 5 seconds a command keypress. The
+	  keys are documented in <file:Documentation/sysrq.txt>. Don't say Y
+	  unless you really know what this hack does.
+
+config LOG_BUF_SHIFT
+	int "Kernel log buffer size (16 => 64KB, 17 => 128KB)" if DEBUG_KERNEL
+	range 12 21
+	default 17 if ARCH_S390
+	default 16 if X86_NUMAQ || IA64
+	default 15 if SMP
+	default 14
+	help
+	  Select kernel log buffer size as a power of 2.
+	  Defaults and Examples:
+	  	     17 => 128 KB for S/390
+		     16 => 64 KB for x86 NUMAQ or IA-64
+	             15 => 32 KB for SMP
+	             14 => 16 KB for uniprocessor
+		     13 =>  8 KB
+		     12 =>  4 KB
+
+config SCHEDSTATS
+	bool "Collect scheduler statistics"
+	depends on DEBUG_KERNEL && PROC_FS
+	help
+	  If you say Y here, additional code will be inserted into the
+	  scheduler and related routines to collect statistics about
+	  scheduler behavior and provide them in /proc/schedstat.  These
+	  stats may be useful for both tuning and debugging the scheduler.
+	  If you aren't debugging the scheduler or trying to tune a specific
+	  application, you can say N to avoid the very slight overhead
+	  this adds.
+
+config DEBUG_SLAB
+	bool "Debug memory allocations"
+	depends on DEBUG_KERNEL
+	help
+	  Say Y here to have the kernel do limited verification on memory
+	  allocation as well as poisoning memory on free to catch use of freed
+	  memory. This can make kmalloc/kfree-intensive workloads much slower.
+
+config DEBUG_PREEMPT
+	bool "Debug preemptible kernel"
+	depends on DEBUG_KERNEL && PREEMPT
+	default y
+	help
+	  If you say Y here then the kernel will use a debug variant of the
+	  commonly used smp_processor_id() function and will print warnings
+	  if kernel code uses it in a preemption-unsafe way. Also, the kernel
+	  will detect preemption count underflows.
+
+config DEBUG_SPINLOCK
+	bool "Spinlock debugging"
+	depends on DEBUG_KERNEL
+	help
+	  Say Y here and build SMP to catch missing spinlock initialization
+	  and certain other kinds of spinlock errors commonly made.  This is
+	  best used in conjunction with the NMI watchdog so that spinlock
+	  deadlocks are also debuggable.
+
+config DEBUG_SPINLOCK_SLEEP
+	bool "Sleep-inside-spinlock checking"
+	depends on DEBUG_KERNEL
+	help
+	  If you say Y here, various routines which may sleep will become very
+	  noisy if they are called with a spinlock held.
+
+config DEBUG_KOBJECT
+	bool "kobject debugging"
+	depends on DEBUG_KERNEL
+	help
+	  If you say Y here, some extra kobject debugging messages will be sent
+	  to the syslog. 
+
+config DEBUG_HIGHMEM
+	bool "Highmem debugging"
+	depends on DEBUG_KERNEL && HIGHMEM
+	help
+	  This option enables additional error checking for high memory systems.
+	  Disable for production systems.
+
+config DEBUG_BUGVERBOSE
+	bool "Verbose BUG() reporting (adds 70K)" if DEBUG_KERNEL && EMBEDDED
+	depends on ARM || ARM26 || M32R || M68K || SPARC32 || SPARC64 || (X86 && !X86_64) || FRV
+	default !EMBEDDED
+	help
+	  Say Y here to make BUG() panics output the file name and line number
+	  of the BUG call as well as the EIP and oops trace.  This aids
+	  debugging but costs about 70-100K of memory.
+
+config DEBUG_INFO
+	bool "Compile the kernel with debug info"
+	depends on DEBUG_KERNEL
+	help
+	  If you say Y here, the resulting kernel image will include
+	  debugging info resulting in a larger kernel image.
+	  Say Y here only if you plan to debug the kernel.
+
+	  If unsure, say N.
+
+config DEBUG_IOREMAP
+	bool "Enable ioremap() debugging"
+	depends on DEBUG_KERNEL && PARISC
+	help
+	  Enabling this option will cause the kernel to distinguish between
+	  ioremapped and physical addresses.  It will print a backtrace (at
+	  most one every 10 seconds), hopefully allowing you to see which
+	  drivers need work.  Fixing all these problems is a prerequisite
+	  for turning on USE_HPPA_IOREMAP.  The warnings are harmless;
+	  the kernel has enough information to fix the broken drivers
+	  automatically, but we'd like to make it more efficient by not
+	  having to do that.
+
+config DEBUG_FS
+	bool "Debug Filesystem"
+	depends on DEBUG_KERNEL
+	help
+	  debugfs is a virtual file system that kernel developers use to put
+	  debugging files into.  Enable this option to be able to read and
+	  write to these files.
+
+	  If unsure, say N.
+
+config FRAME_POINTER
+	bool "Compile the kernel with frame pointers"
+	depends on DEBUG_KERNEL && ((X86 && !X86_64) || CRIS || M68K || M68KNOMMU || FRV)
+	help
+	  If you say Y here the resulting kernel image will be slightly larger
+	  and slower, but it will give very useful debugging information.
+	  If you don't debug the kernel, you can say N, but we may not be able
+	  to solve problems without frame pointers.
+
diff --git a/lib/Makefile b/lib/Makefile
new file mode 100644
index 0000000..7c70db7
--- /dev/null
+++ b/lib/Makefile
@@ -0,0 +1,45 @@
+#
+# Makefile for some libs needed in the kernel.
+#
+
+lib-y := errno.o ctype.o string.o vsprintf.o cmdline.o \
+	 bust_spinlocks.o rbtree.o radix-tree.o dump_stack.o \
+	 kobject.o kref.o idr.o div64.o int_sqrt.o \
+	 bitmap.o extable.o kobject_uevent.o prio_tree.o sha1.o \
+	 halfmd4.o
+
+obj-y += sort.o parser.o
+
+ifeq ($(CONFIG_DEBUG_KOBJECT),y)
+CFLAGS_kobject.o += -DDEBUG
+CFLAGS_kobject_uevent.o += -DDEBUG
+endif
+
+lib-$(CONFIG_RWSEM_GENERIC_SPINLOCK) += rwsem-spinlock.o
+lib-$(CONFIG_RWSEM_XCHGADD_ALGORITHM) += rwsem.o
+lib-$(CONFIG_GENERIC_FIND_NEXT_BIT) += find_next_bit.o
+obj-$(CONFIG_LOCK_KERNEL) += kernel_lock.o
+
+ifneq ($(CONFIG_HAVE_DEC_LOCK),y) 
+  lib-y += dec_and_lock.o
+endif
+
+obj-$(CONFIG_CRC_CCITT)	+= crc-ccitt.o
+obj-$(CONFIG_CRC32)	+= crc32.o
+obj-$(CONFIG_LIBCRC32C)	+= libcrc32c.o
+obj-$(CONFIG_GENERIC_IOMAP) += iomap.o
+
+obj-$(CONFIG_ZLIB_INFLATE) += zlib_inflate/
+obj-$(CONFIG_ZLIB_DEFLATE) += zlib_deflate/
+obj-$(CONFIG_REED_SOLOMON) += reed_solomon/
+
+hostprogs-y	:= gen_crc32table
+clean-files	:= crc32table.h
+
+$(obj)/crc32.o: $(obj)/crc32table.h
+
+quiet_cmd_crc32 = GEN     $@
+      cmd_crc32 = $< > $@
+
+$(obj)/crc32table.h: $(obj)/gen_crc32table
+	$(call cmd,crc32)
diff --git a/lib/bitmap.c b/lib/bitmap.c
new file mode 100644
index 0000000..d1388a5
--- /dev/null
+++ b/lib/bitmap.c
@@ -0,0 +1,595 @@
+/*
+ * lib/bitmap.c
+ * Helper functions for bitmap.h.
+ *
+ * This source code is licensed under the GNU General Public License,
+ * Version 2.  See the file COPYING for more details.
+ */
+#include <linux/module.h>
+#include <linux/ctype.h>
+#include <linux/errno.h>
+#include <linux/bitmap.h>
+#include <linux/bitops.h>
+#include <asm/uaccess.h>
+
+/*
+ * bitmaps provide an array of bits, implemented using an
+ * array of unsigned longs.  The number of valid bits in a
+ * given bitmap does _not_ need to be an exact multiple of
+ * BITS_PER_LONG.
+ *
+ * The possible unused bits in the last, partially used word
+ * of a bitmap are 'don't care'.  The implementation makes
+ * no particular effort to keep them zero.  It ensures that
+ * their value will not affect the results of any operation.
+ * The bitmap operations that return Boolean (bitmap_empty,
+ * for example) or scalar (bitmap_weight, for example) results
+ * carefully filter out these unused bits from impacting their
+ * results.
+ *
+ * These operations actually hold to a slightly stronger rule:
+ * if you don't input any bitmaps to these ops that have some
+ * unused bits set, then they won't output any set unused bits
+ * in output bitmaps.
+ *
+ * The byte ordering of bitmaps is more natural on little
+ * endian architectures.  See the big-endian headers
+ * include/asm-ppc64/bitops.h and include/asm-s390/bitops.h
+ * for the best explanations of this ordering.
+ */
+
+int __bitmap_empty(const unsigned long *bitmap, int bits)
+{
+	int k, lim = bits/BITS_PER_LONG;
+	for (k = 0; k < lim; ++k)
+		if (bitmap[k])
+			return 0;
+
+	if (bits % BITS_PER_LONG)
+		if (bitmap[k] & BITMAP_LAST_WORD_MASK(bits))
+			return 0;
+
+	return 1;
+}
+EXPORT_SYMBOL(__bitmap_empty);
+
+int __bitmap_full(const unsigned long *bitmap, int bits)
+{
+	int k, lim = bits/BITS_PER_LONG;
+	for (k = 0; k < lim; ++k)
+		if (~bitmap[k])
+			return 0;
+
+	if (bits % BITS_PER_LONG)
+		if (~bitmap[k] & BITMAP_LAST_WORD_MASK(bits))
+			return 0;
+
+	return 1;
+}
+EXPORT_SYMBOL(__bitmap_full);
+
+int __bitmap_equal(const unsigned long *bitmap1,
+		const unsigned long *bitmap2, int bits)
+{
+	int k, lim = bits/BITS_PER_LONG;
+	for (k = 0; k < lim; ++k)
+		if (bitmap1[k] != bitmap2[k])
+			return 0;
+
+	if (bits % BITS_PER_LONG)
+		if ((bitmap1[k] ^ bitmap2[k]) & BITMAP_LAST_WORD_MASK(bits))
+			return 0;
+
+	return 1;
+}
+EXPORT_SYMBOL(__bitmap_equal);
+
+void __bitmap_complement(unsigned long *dst, const unsigned long *src, int bits)
+{
+	int k, lim = bits/BITS_PER_LONG;
+	for (k = 0; k < lim; ++k)
+		dst[k] = ~src[k];
+
+	if (bits % BITS_PER_LONG)
+		dst[k] = ~src[k] & BITMAP_LAST_WORD_MASK(bits);
+}
+EXPORT_SYMBOL(__bitmap_complement);
+
+/*
+ * __bitmap_shift_right - logical right shift of the bits in a bitmap
+ *   @dst - destination bitmap
+ *   @src - source bitmap
+ *   @shift - shift by this many bits
+ *   @bits - bitmap size, in bits
+ *
+ * Shifting right (dividing) means moving bits in the MS -> LS bit
+ * direction.  Zeros are fed into the vacated MS positions and the
+ * LS bits shifted off the bottom are lost.
+ */
+void __bitmap_shift_right(unsigned long *dst,
+			const unsigned long *src, int shift, int bits)
+{
+	int k, lim = BITS_TO_LONGS(bits), left = bits % BITS_PER_LONG;
+	int off = shift/BITS_PER_LONG, rem = shift % BITS_PER_LONG;
+	unsigned long mask = (1UL << left) - 1;
+	for (k = 0; off + k < lim; ++k) {
+		unsigned long upper, lower;
+
+		/*
+		 * If shift is not word aligned, take lower rem bits of
+		 * word above and make them the top rem bits of result.
+		 */
+		if (!rem || off + k + 1 >= lim)
+			upper = 0;
+		else {
+			upper = src[off + k + 1];
+			if (off + k + 1 == lim - 1 && left)
+				upper &= mask;
+		}
+		lower = src[off + k];
+		if (left && off + k == lim - 1)
+			lower &= mask;
+		dst[k] = upper << (BITS_PER_LONG - rem) | lower >> rem;
+		if (left && k == lim - 1)
+			dst[k] &= mask;
+	}
+	if (off)
+		memset(&dst[lim - off], 0, off*sizeof(unsigned long));
+}
+EXPORT_SYMBOL(__bitmap_shift_right);
+
+
+/*
+ * __bitmap_shift_left - logical left shift of the bits in a bitmap
+ *   @dst - destination bitmap
+ *   @src - source bitmap
+ *   @shift - shift by this many bits
+ *   @bits - bitmap size, in bits
+ *
+ * Shifting left (multiplying) means moving bits in the LS -> MS
+ * direction.  Zeros are fed into the vacated LS bit positions
+ * and those MS bits shifted off the top are lost.
+ */
+
+void __bitmap_shift_left(unsigned long *dst,
+			const unsigned long *src, int shift, int bits)
+{
+	int k, lim = BITS_TO_LONGS(bits), left = bits % BITS_PER_LONG;
+	int off = shift/BITS_PER_LONG, rem = shift % BITS_PER_LONG;
+	for (k = lim - off - 1; k >= 0; --k) {
+		unsigned long upper, lower;
+
+		/*
+		 * If shift is not word aligned, take upper rem bits of
+		 * word below and make them the bottom rem bits of result.
+		 */
+		if (rem && k > 0)
+			lower = src[k - 1];
+		else
+			lower = 0;
+		upper = src[k];
+		if (left && k == lim - 1)
+			upper &= (1UL << left) - 1;
+		dst[k + off] = lower  >> (BITS_PER_LONG - rem) | upper << rem;
+		if (left && k + off == lim - 1)
+			dst[k + off] &= (1UL << left) - 1;
+	}
+	if (off)
+		memset(dst, 0, off*sizeof(unsigned long));
+}
+EXPORT_SYMBOL(__bitmap_shift_left);
+
+void __bitmap_and(unsigned long *dst, const unsigned long *bitmap1,
+				const unsigned long *bitmap2, int bits)
+{
+	int k;
+	int nr = BITS_TO_LONGS(bits);
+
+	for (k = 0; k < nr; k++)
+		dst[k] = bitmap1[k] & bitmap2[k];
+}
+EXPORT_SYMBOL(__bitmap_and);
+
+void __bitmap_or(unsigned long *dst, const unsigned long *bitmap1,
+				const unsigned long *bitmap2, int bits)
+{
+	int k;
+	int nr = BITS_TO_LONGS(bits);
+
+	for (k = 0; k < nr; k++)
+		dst[k] = bitmap1[k] | bitmap2[k];
+}
+EXPORT_SYMBOL(__bitmap_or);
+
+void __bitmap_xor(unsigned long *dst, const unsigned long *bitmap1,
+				const unsigned long *bitmap2, int bits)
+{
+	int k;
+	int nr = BITS_TO_LONGS(bits);
+
+	for (k = 0; k < nr; k++)
+		dst[k] = bitmap1[k] ^ bitmap2[k];
+}
+EXPORT_SYMBOL(__bitmap_xor);
+
+void __bitmap_andnot(unsigned long *dst, const unsigned long *bitmap1,
+				const unsigned long *bitmap2, int bits)
+{
+	int k;
+	int nr = BITS_TO_LONGS(bits);
+
+	for (k = 0; k < nr; k++)
+		dst[k] = bitmap1[k] & ~bitmap2[k];
+}
+EXPORT_SYMBOL(__bitmap_andnot);
+
+int __bitmap_intersects(const unsigned long *bitmap1,
+				const unsigned long *bitmap2, int bits)
+{
+	int k, lim = bits/BITS_PER_LONG;
+	for (k = 0; k < lim; ++k)
+		if (bitmap1[k] & bitmap2[k])
+			return 1;
+
+	if (bits % BITS_PER_LONG)
+		if ((bitmap1[k] & bitmap2[k]) & BITMAP_LAST_WORD_MASK(bits))
+			return 1;
+	return 0;
+}
+EXPORT_SYMBOL(__bitmap_intersects);
+
+int __bitmap_subset(const unsigned long *bitmap1,
+				const unsigned long *bitmap2, int bits)
+{
+	int k, lim = bits/BITS_PER_LONG;
+	for (k = 0; k < lim; ++k)
+		if (bitmap1[k] & ~bitmap2[k])
+			return 0;
+
+	if (bits % BITS_PER_LONG)
+		if ((bitmap1[k] & ~bitmap2[k]) & BITMAP_LAST_WORD_MASK(bits))
+			return 0;
+	return 1;
+}
+EXPORT_SYMBOL(__bitmap_subset);
+
+#if BITS_PER_LONG == 32
+int __bitmap_weight(const unsigned long *bitmap, int bits)
+{
+	int k, w = 0, lim = bits/BITS_PER_LONG;
+
+	for (k = 0; k < lim; k++)
+		w += hweight32(bitmap[k]);
+
+	if (bits % BITS_PER_LONG)
+		w += hweight32(bitmap[k] & BITMAP_LAST_WORD_MASK(bits));
+
+	return w;
+}
+#else
+int __bitmap_weight(const unsigned long *bitmap, int bits)
+{
+	int k, w = 0, lim = bits/BITS_PER_LONG;
+
+	for (k = 0; k < lim; k++)
+		w += hweight64(bitmap[k]);
+
+	if (bits % BITS_PER_LONG)
+		w += hweight64(bitmap[k] & BITMAP_LAST_WORD_MASK(bits));
+
+	return w;
+}
+#endif
+EXPORT_SYMBOL(__bitmap_weight);
+
+/*
+ * Bitmap printing & parsing functions: first version by Bill Irwin,
+ * second version by Paul Jackson, third by Joe Korty.
+ */
+
+#define CHUNKSZ				32
+#define nbits_to_hold_value(val)	fls(val)
+#define roundup_power2(val,modulus)	(((val) + (modulus) - 1) & ~((modulus) - 1))
+#define unhex(c)			(isdigit(c) ? (c - '0') : (toupper(c) - 'A' + 10))
+#define BASEDEC 10		/* fancier cpuset lists input in decimal */
+
+/**
+ * bitmap_scnprintf - convert bitmap to an ASCII hex string.
+ * @buf: byte buffer into which string is placed
+ * @buflen: reserved size of @buf, in bytes
+ * @maskp: pointer to bitmap to convert
+ * @nmaskbits: size of bitmap, in bits
+ *
+ * Exactly @nmaskbits bits are displayed.  Hex digits are grouped into
+ * comma-separated sets of eight digits per set.
+ */
+int bitmap_scnprintf(char *buf, unsigned int buflen,
+	const unsigned long *maskp, int nmaskbits)
+{
+	int i, word, bit, len = 0;
+	unsigned long val;
+	const char *sep = "";
+	int chunksz;
+	u32 chunkmask;
+
+	chunksz = nmaskbits & (CHUNKSZ - 1);
+	if (chunksz == 0)
+		chunksz = CHUNKSZ;
+
+	i = roundup_power2(nmaskbits, CHUNKSZ) - CHUNKSZ;
+	for (; i >= 0; i -= CHUNKSZ) {
+		chunkmask = ((1ULL << chunksz) - 1);
+		word = i / BITS_PER_LONG;
+		bit = i % BITS_PER_LONG;
+		val = (maskp[word] >> bit) & chunkmask;
+		len += scnprintf(buf+len, buflen-len, "%s%0*lx", sep,
+			(chunksz+3)/4, val);
+		chunksz = CHUNKSZ;
+		sep = ",";
+	}
+	return len;
+}
+EXPORT_SYMBOL(bitmap_scnprintf);
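+
+/*
+ * Worked example (illustrative only): with nmaskbits == 40 and bits
+ * 0-3 and 32 set, the output is "01,0000000f".  The leading chunk
+ * carries the odd nmaskbits % CHUNKSZ bits and every chunk is padded
+ * to (chunksz + 3) / 4 hex digits, so feeding the string back through
+ * bitmap_parse() recovers the same mask.
+ */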
+
+/**
+ * bitmap_parse - convert an ASCII hex string into a bitmap.
+ * @ubuf: pointer to buffer in user space containing string.
+ * @ubuflen: buffer size in bytes.  If string is smaller than this
+ *    then it must be terminated with a \0.
+ * @maskp: pointer to bitmap array that will contain result.
+ * @nmaskbits: size of bitmap, in bits.
+ *
+ * Commas group hex digits into chunks.  Each chunk defines exactly 32
+ * bits of the resultant bitmask.  No chunk may specify a value larger
+ * than 32 bits (-EOVERFLOW), and if a chunk specifies a smaller value
+ * then leading 0-bits are prepended.  -EINVAL is returned for illegal
+ * characters and for grouping errors such as "1,,5", ",44", "," and "".
+ * Leading and trailing whitespace accepted, but not embedded whitespace.
+ */
+int bitmap_parse(const char __user *ubuf, unsigned int ubuflen,
+        unsigned long *maskp, int nmaskbits)
+{
+	int c, old_c, totaldigits, ndigits, nchunks, nbits;
+	u32 chunk;
+
+	bitmap_zero(maskp, nmaskbits);
+
+	nchunks = nbits = totaldigits = c = 0;
+	do {
+		chunk = ndigits = 0;
+
+		/* Get the next chunk of the bitmap */
+		while (ubuflen) {
+			old_c = c;
+			if (get_user(c, ubuf++))
+				return -EFAULT;
+			ubuflen--;
+			if (isspace(c))
+				continue;
+
+			/*
+			 * If the last character was a space and the current
+			 * character isn't '\0', we've got embedded whitespace.
+			 * This is a no-no, so throw an error.
+			 */
+			if (totaldigits && c && isspace(old_c))
+				return -EINVAL;
+
+			/* A '\0' or a ',' signal the end of the chunk */
+			if (c == '\0' || c == ',')
+				break;
+
+			if (!isxdigit(c))
+				return -EINVAL;
+
+			/*
+			 * Make sure there are at least 4 free bits in 'chunk'.
+			 * If not, this hexdigit will overflow 'chunk', so
+			 * throw an error.
+			 */
+			if (chunk & ~((1UL << (CHUNKSZ - 4)) - 1))
+				return -EOVERFLOW;
+
+			chunk = (chunk << 4) | unhex(c);
+			ndigits++; totaldigits++;
+		}
+		if (ndigits == 0)
+			return -EINVAL;
+		if (nchunks == 0 && chunk == 0)
+			continue;
+
+		__bitmap_shift_left(maskp, maskp, CHUNKSZ, nmaskbits);
+		*maskp |= chunk;
+		nchunks++;
+		nbits += (nchunks == 1) ? nbits_to_hold_value(chunk) : CHUNKSZ;
+		if (nbits > nmaskbits)
+			return -EOVERFLOW;
+	} while (ubuflen && c == ',');
+
+	return 0;
+}
+EXPORT_SYMBOL(bitmap_parse);
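+
+/*
+ * Worked example (illustrative only): parsing the string "3,fffffffc"
+ * into a mask of at least 34 bits sets bits 2-33.  Earlier chunks end
+ * up in more significant positions, since each ',' shifts the partial
+ * result left by CHUNKSZ before the next chunk is OR-ed in.
+ */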
+
+/*
+ * bscnl_emit(buf, buflen, rbot, rtop, len)
+ *
+ * Helper routine for bitmap_scnlistprintf().  Write decimal number
+ * or range to buf, suppressing output past buf+buflen, with optional
+ * comma-prefix.  Return len of what would be written to buf, if it
+ * all fit.
+ */
+static inline int bscnl_emit(char *buf, int buflen, int rbot, int rtop, int len)
+{
+	if (len > 0)
+		len += scnprintf(buf + len, buflen - len, ",");
+	if (rbot == rtop)
+		len += scnprintf(buf + len, buflen - len, "%d", rbot);
+	else
+		len += scnprintf(buf + len, buflen - len, "%d-%d", rbot, rtop);
+	return len;
+}
+
+/**
+ * bitmap_scnlistprintf - convert bitmap to list format ASCII string
+ * @buf: byte buffer into which string is placed
+ * @buflen: reserved size of @buf, in bytes
+ * @maskp: pointer to bitmap to convert
+ * @nmaskbits: size of bitmap, in bits
+ *
+ * Output format is a comma-separated list of decimal numbers and
+ * ranges.  Consecutively set bits are shown as two hyphen-separated
+ * decimal numbers, the smallest and largest bit numbers set in
+ * the range.  Output format is compatible with the format
+ * accepted as input by bitmap_parselist().
+ *
+ * The return value is the number of characters which would be
+ * generated for the given input, excluding the trailing '\0', as
+ * per ISO C99.
+ */
+int bitmap_scnlistprintf(char *buf, unsigned int buflen,
+	const unsigned long *maskp, int nmaskbits)
+{
+	int len = 0;
+	/* current bit is 'cur', most recently seen range is [rbot, rtop] */
+	int cur, rbot, rtop;
+
+	rbot = cur = find_first_bit(maskp, nmaskbits);
+	while (cur < nmaskbits) {
+		rtop = cur;
+		cur = find_next_bit(maskp, nmaskbits, cur+1);
+		if (cur >= nmaskbits || cur > rtop + 1) {
+			len = bscnl_emit(buf, buflen, rbot, rtop, len);
+			rbot = cur;
+		}
+	}
+	return len;
+}
+EXPORT_SYMBOL(bitmap_scnlistprintf);
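+
+/*
+ * Worked example (illustrative only): a mask with bits 0, 1, 2, 3 and 7
+ * set is printed as "0-3,7"; bitmap_parselist() below accepts the same
+ * string and reproduces the mask.
+ */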
+
+/**
+ * bitmap_parselist - convert list format ASCII string to bitmap
+ * @bp: read nul-terminated user string from this buffer
+ * @maskp: write resulting mask here
+ * @nmaskbits: number of bits in mask to be written
+ *
+ * Input format is a comma-separated list of decimal numbers and
+ * ranges.  Consecutively set bits are shown as two hyphen-separated
+ * decimal numbers, the smallest and largest bit numbers set in
+ * the range.
+ *
+ * Returns 0 on success, -errno on invalid input strings:
+ *    -EINVAL:   second number in range smaller than first
+ *    -EINVAL:   invalid character in string
+ *    -ERANGE:   bit number specified too large for mask
+ */
+int bitmap_parselist(const char *bp, unsigned long *maskp, int nmaskbits)
+{
+	unsigned a, b;
+
+	bitmap_zero(maskp, nmaskbits);
+	do {
+		if (!isdigit(*bp))
+			return -EINVAL;
+		b = a = simple_strtoul(bp, (char **)&bp, BASEDEC);
+		if (*bp == '-') {
+			bp++;
+			if (!isdigit(*bp))
+				return -EINVAL;
+			b = simple_strtoul(bp, (char **)&bp, BASEDEC);
+		}
+		if (!(a <= b))
+			return -EINVAL;
+		if (b >= nmaskbits)
+			return -ERANGE;
+		while (a <= b) {
+			set_bit(a, maskp);
+			a++;
+		}
+		if (*bp == ',')
+			bp++;
+	} while (*bp != '\0' && *bp != '\n');
+	return 0;
+}
+EXPORT_SYMBOL(bitmap_parselist);
+
+/**
+ *	bitmap_find_free_region - find a contiguous aligned mem region
+ *	@bitmap: an array of unsigned longs corresponding to the bitmap
+ *	@bits: number of bits in the bitmap
+ *	@order: region size to find (size is actually 1<<order)
+ *
+ * This is used to allocate a memory region from a bitmap.  The idea is
+ * that the region has to be 1<<order sized and 1<<order aligned (this
+ * makes the search algorithm much faster).
+ *
+ * The region is marked as set bits in the bitmap if a free one is
+ * found.
+ *
+ * Returns either beginning of region or negative error
+ */
+int bitmap_find_free_region(unsigned long *bitmap, int bits, int order)
+{
+	unsigned long mask;
+	int pages = 1 << order;
+	int i;
+
+	if(pages > BITS_PER_LONG)
+		return -EINVAL;
+
+	/* make a mask of the order */
+	mask = (1ul << (pages - 1));
+	mask += mask - 1;
+
+	/* run up the bitmap pages bits at a time */
+	for (i = 0; i < bits; i += pages) {
+		int index = i/BITS_PER_LONG;
+		int offset = i - (index * BITS_PER_LONG);
+		if((bitmap[index] & (mask << offset)) == 0) {
+			/* set region in bitmap */
+			bitmap[index] |= (mask << offset);
+			return i;
+		}
+	}
+	return -ENOMEM;
+}
+EXPORT_SYMBOL(bitmap_find_free_region);
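+
+/*
+ * Usage sketch (hypothetical caller; pool_map and POOL_BITS are
+ * illustrative): a call such as
+ *
+ *	pos = bitmap_find_free_region(pool_map, POOL_BITS, 2);
+ *
+ * either returns the first free 4-bit-aligned run of 4 bits (now marked
+ * as allocated) or -ENOMEM; the region is returned to the pool with
+ * bitmap_release_region(pool_map, pos, 2).
+ */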
+
+/**
+ *	bitmap_release_region - release allocated bitmap region
+ *	@bitmap: a pointer to the bitmap
+ *	@pos: the beginning of the region
+ *	@order: the order of the bits to release (number is 1<<order)
+ *
+ * This is the complement to bitmap_find_free_region() and releases
+ * the found region (by clearing it in the bitmap).
+ */
+void bitmap_release_region(unsigned long *bitmap, int pos, int order)
+{
+	int pages = 1 << order;
+	unsigned long mask = (1ul << (pages - 1));
+	int index = pos/BITS_PER_LONG;
+	int offset = pos - (index * BITS_PER_LONG);
+	mask += mask - 1;
+	bitmap[index] &= ~(mask << offset);
+}
+EXPORT_SYMBOL(bitmap_release_region);
+
+int bitmap_allocate_region(unsigned long *bitmap, int pos, int order)
+{
+	int pages = 1 << order;
+	unsigned long mask = (1ul << (pages - 1));
+	int index = pos/BITS_PER_LONG;
+	int offset = pos - (index * BITS_PER_LONG);
+
+	/* We don't do regions of pages > BITS_PER_LONG.  The
+	 * algorithm would be a simple look for multiple zeros in the
+	 * array, but there's no driver today that needs this.  If you
+	 * trip this BUG(), you get to code it... */
+	BUG_ON(pages > BITS_PER_LONG);
+	mask += mask - 1;
+	if (bitmap[index] & (mask << offset))
+		return -EBUSY;
+	bitmap[index] |= (mask << offset);
+	return 0;
+}
+EXPORT_SYMBOL(bitmap_allocate_region);
diff --git a/lib/bust_spinlocks.c b/lib/bust_spinlocks.c
new file mode 100644
index 0000000..6bb7319
--- /dev/null
+++ b/lib/bust_spinlocks.c
@@ -0,0 +1,39 @@
+/*
+ * lib/bust_spinlocks.c
+ *
+ * Provides a minimal bust_spinlocks for architectures which don't have one of their own.
+ *
+ * bust_spinlocks() clears any spinlocks which would prevent oops, die(), BUG()
+ * and panic() information from reaching the user.
+ */
+
+#include <linux/config.h>
+#include <linux/kernel.h>
+#include <linux/spinlock.h>
+#include <linux/tty.h>
+#include <linux/wait.h>
+#include <linux/vt_kern.h>
+
+
+void bust_spinlocks(int yes)
+{
+	if (yes) {
+		oops_in_progress = 1;
+	} else {
+		int loglevel_save = console_loglevel;
+#ifdef CONFIG_VT
+		unblank_screen();
+#endif
+		oops_in_progress = 0;
+		/*
+		 * OK, the message is on the console.  Now we call printk()
+		 * without oops_in_progress set so that printk() will give klogd
+		 * and the blanked console a poke.  Hold onto your hats...
+		 */
+		console_loglevel = 15;		/* NMI oopser may have shut the console up */
+		printk(" ");
+		console_loglevel = loglevel_save;
+	}
+}
+
+
diff --git a/lib/cmdline.c b/lib/cmdline.c
new file mode 100644
index 0000000..0331ed82
--- /dev/null
+++ b/lib/cmdline.c
@@ -0,0 +1,120 @@
+/*
+ * linux/lib/cmdline.c
+ * Helper functions generally used for parsing kernel command line
+ * and module options.
+ *
+ * Code and copyrights come from init/main.c and arch/i386/kernel/setup.c.
+ *
+ * This source code is licensed under the GNU General Public License,
+ * Version 2.  See the file COPYING for more details.
+ *
+ * GNU Indent formatting options for this file: -kr -i8 -npsl -pcs
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/string.h>
+
+
+/**
+ *	get_option - Parse integer from an option string
+ *	@str: option string
+ *	@pint: (output) integer value parsed from @str
+ *
+ *	Read an int from an option string; if available accept a subsequent
+ *	comma as well.
+ *
+ *	Return values:
+ *	0 : no int in string
+ *	1 : int found, no subsequent comma
+ *	2 : int found including a subsequent comma
+ */
+
+int get_option (char **str, int *pint)
+{
+	char *cur = *str;
+
+	if (!cur || !(*cur))
+		return 0;
+	*pint = simple_strtol (cur, str, 0);
+	if (cur == *str)
+		return 0;
+	if (**str == ',') {
+		(*str)++;
+		return 2;
+	}
+
+	return 1;
+}
+
+/**
+ *	get_options - Parse a string into a list of integers
+ *	@str: String to be parsed
+ *	@nints: size of integer array
+ *	@ints: integer array
+ *
+ *	This function parses a string containing a comma-separated
+ *	list of integers.  The parse halts when the array is
+ *	full, or when no more numbers can be retrieved from the
+ *	string.
+ *
+ *	Return value is the character in the string which caused
+ *	the parse to end (typically a null terminator, if @str is
+ *	completely parseable).
+ */
+ 
+char *get_options(const char *str, int nints, int *ints)
+{
+	int res, i = 1;
+
+	while (i < nints) {
+		res = get_option ((char **)&str, ints + i);
+		if (res == 0)
+			break;
+		i++;
+		if (res == 1)
+			break;
+	}
+	ints[0] = i - 1;
+	return (char *)str;
+}
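+
+/*
+ * Worked example (illustrative only): get_options("64,128,256", 8, ints)
+ * leaves ints[0] == 3 and ints[1..3] == 64, 128, 256, and returns a
+ * pointer to the terminating '\0'.
+ */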
+
+/**
+ *	memparse - parse a string with mem suffixes into a number
+ *	@ptr: Where parse begins
+ *	@retptr: (output) Pointer to next char after parse completes
+ *
+ *	Parses a string into a number.  The number stored at @ptr is
+ *	potentially suffixed with %K (for kilobytes, or 1024 bytes),
+ *	%M (for megabytes, or 1048576 bytes), or %G (for gigabytes, or
+ *	1073741824).  If the number is suffixed with K, M, or G, then
+ *	the return value is the number multiplied by one kilobyte, one
+ *	megabyte, or one gigabyte, respectively.
+ */
+
+unsigned long long memparse (char *ptr, char **retptr)
+{
+	unsigned long long ret = simple_strtoull (ptr, retptr, 0);
+
+	switch (**retptr) {
+	case 'G':
+	case 'g':
+		ret <<= 10;
+	case 'M':
+	case 'm':
+		ret <<= 10;
+	case 'K':
+	case 'k':
+		ret <<= 10;
+		(*retptr)++;
+	default:
+		break;
+	}
+	return ret;
+}
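+
+/*
+ * Worked examples (illustrative only): memparse("128K", &p) returns
+ * 131072 and memparse("16M", &p) returns 16777216; in both cases *p is
+ * left pointing just past the size suffix.
+ */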
+
+
+EXPORT_SYMBOL(memparse);
+EXPORT_SYMBOL(get_option);
+EXPORT_SYMBOL(get_options);
diff --git a/lib/crc-ccitt.c b/lib/crc-ccitt.c
new file mode 100644
index 0000000..115d149
--- /dev/null
+++ b/lib/crc-ccitt.c
@@ -0,0 +1,69 @@
+/*
+ *	linux/lib/crc-ccitt.c
+ *
+ *	This source code is licensed under the GNU General Public License,
+ *	Version 2. See the file COPYING for more details.
+ */
+
+#include <linux/types.h>
+#include <linux/module.h>
+#include <linux/crc-ccitt.h>
+
+/*
+ * This mysterious table is just the CRC of each possible byte. It can be
+ * computed using the standard bit-at-a-time methods. The polynomial can
+ * be seen in entry 128, 0x8408. This corresponds to x^0 + x^5 + x^12.
+ * Add the implicit x^16, and you have the standard CRC-CCITT.
+ */
+u16 const crc_ccitt_table[256] = {
+	0x0000, 0x1189, 0x2312, 0x329b, 0x4624, 0x57ad, 0x6536, 0x74bf,
+	0x8c48, 0x9dc1, 0xaf5a, 0xbed3, 0xca6c, 0xdbe5, 0xe97e, 0xf8f7,
+	0x1081, 0x0108, 0x3393, 0x221a, 0x56a5, 0x472c, 0x75b7, 0x643e,
+	0x9cc9, 0x8d40, 0xbfdb, 0xae52, 0xdaed, 0xcb64, 0xf9ff, 0xe876,
+	0x2102, 0x308b, 0x0210, 0x1399, 0x6726, 0x76af, 0x4434, 0x55bd,
+	0xad4a, 0xbcc3, 0x8e58, 0x9fd1, 0xeb6e, 0xfae7, 0xc87c, 0xd9f5,
+	0x3183, 0x200a, 0x1291, 0x0318, 0x77a7, 0x662e, 0x54b5, 0x453c,
+	0xbdcb, 0xac42, 0x9ed9, 0x8f50, 0xfbef, 0xea66, 0xd8fd, 0xc974,
+	0x4204, 0x538d, 0x6116, 0x709f, 0x0420, 0x15a9, 0x2732, 0x36bb,
+	0xce4c, 0xdfc5, 0xed5e, 0xfcd7, 0x8868, 0x99e1, 0xab7a, 0xbaf3,
+	0x5285, 0x430c, 0x7197, 0x601e, 0x14a1, 0x0528, 0x37b3, 0x263a,
+	0xdecd, 0xcf44, 0xfddf, 0xec56, 0x98e9, 0x8960, 0xbbfb, 0xaa72,
+	0x6306, 0x728f, 0x4014, 0x519d, 0x2522, 0x34ab, 0x0630, 0x17b9,
+	0xef4e, 0xfec7, 0xcc5c, 0xddd5, 0xa96a, 0xb8e3, 0x8a78, 0x9bf1,
+	0x7387, 0x620e, 0x5095, 0x411c, 0x35a3, 0x242a, 0x16b1, 0x0738,
+	0xffcf, 0xee46, 0xdcdd, 0xcd54, 0xb9eb, 0xa862, 0x9af9, 0x8b70,
+	0x8408, 0x9581, 0xa71a, 0xb693, 0xc22c, 0xd3a5, 0xe13e, 0xf0b7,
+	0x0840, 0x19c9, 0x2b52, 0x3adb, 0x4e64, 0x5fed, 0x6d76, 0x7cff,
+	0x9489, 0x8500, 0xb79b, 0xa612, 0xd2ad, 0xc324, 0xf1bf, 0xe036,
+	0x18c1, 0x0948, 0x3bd3, 0x2a5a, 0x5ee5, 0x4f6c, 0x7df7, 0x6c7e,
+	0xa50a, 0xb483, 0x8618, 0x9791, 0xe32e, 0xf2a7, 0xc03c, 0xd1b5,
+	0x2942, 0x38cb, 0x0a50, 0x1bd9, 0x6f66, 0x7eef, 0x4c74, 0x5dfd,
+	0xb58b, 0xa402, 0x9699, 0x8710, 0xf3af, 0xe226, 0xd0bd, 0xc134,
+	0x39c3, 0x284a, 0x1ad1, 0x0b58, 0x7fe7, 0x6e6e, 0x5cf5, 0x4d7c,
+	0xc60c, 0xd785, 0xe51e, 0xf497, 0x8028, 0x91a1, 0xa33a, 0xb2b3,
+	0x4a44, 0x5bcd, 0x6956, 0x78df, 0x0c60, 0x1de9, 0x2f72, 0x3efb,
+	0xd68d, 0xc704, 0xf59f, 0xe416, 0x90a9, 0x8120, 0xb3bb, 0xa232,
+	0x5ac5, 0x4b4c, 0x79d7, 0x685e, 0x1ce1, 0x0d68, 0x3ff3, 0x2e7a,
+	0xe70e, 0xf687, 0xc41c, 0xd595, 0xa12a, 0xb0a3, 0x8238, 0x93b1,
+	0x6b46, 0x7acf, 0x4854, 0x59dd, 0x2d62, 0x3ceb, 0x0e70, 0x1ff9,
+	0xf78f, 0xe606, 0xd49d, 0xc514, 0xb1ab, 0xa022, 0x92b9, 0x8330,
+	0x7bc7, 0x6a4e, 0x58d5, 0x495c, 0x3de3, 0x2c6a, 0x1ef1, 0x0f78
+};
+EXPORT_SYMBOL(crc_ccitt_table);
+
+/**
+ *	crc_ccitt - recompute the CRC for the data buffer
+ *	@crc - previous CRC value
+ *	@buffer - data pointer
+ *	@len - number of bytes in the buffer
+ */
+u16 crc_ccitt(u16 crc, u8 const *buffer, size_t len)
+{
+	while (len--)
+		crc = crc_ccitt_byte(crc, *buffer++);
+	return crc;
+}
+EXPORT_SYMBOL(crc_ccitt);
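+
+/*
+ * Usage sketch (illustrative): HDLC-style framing typically seeds the
+ * FCS with 0xffff, e.g.
+ *
+ *	u16 fcs = crc_ccitt(0xffff, buf, len);
+ *
+ * while other users pass 0 or a previously returned value to continue
+ * an incremental computation.
+ */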
+
+MODULE_DESCRIPTION("CRC-CCITT calculations");
+MODULE_LICENSE("GPL");
diff --git a/lib/crc32.c b/lib/crc32.c
new file mode 100644
index 0000000..58b2227
--- /dev/null
+++ b/lib/crc32.c
@@ -0,0 +1,529 @@
+/*
+ * Oct 15, 2000 Matt Domsch <Matt_Domsch@dell.com>
+ * Nicer crc32 functions/docs submitted by linux@horizon.com.  Thanks!
+ * Code was from the public domain, copyright abandoned.  Code was
+ * subsequently included in the kernel, thus was re-licensed under the
+ * GNU GPL v2.
+ *
+ * Oct 12, 2000 Matt Domsch <Matt_Domsch@dell.com>
+ * Same crc32 function was used in 5 other places in the kernel.
+ * I made one version, and deleted the others.
+ * There are various incantations of crc32().  Some use a seed of 0 or ~0.
+ * Some xor at the end with ~0.  The generic crc32() function takes
+ * seed as an argument, and doesn't xor at the end.  Then individual
+ * users can do whatever they need.
+ *   drivers/net/smc9194.c uses seed ~0, doesn't xor with ~0.
+ *   fs/jffs2 uses seed 0, doesn't xor with ~0.
+ *   fs/partitions/efi.c uses seed ~0, xor's with ~0.
+ *
+ * This source code is licensed under the GNU General Public License,
+ * Version 2.  See the file COPYING for more details.
+ */
+
+#include <linux/crc32.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/compiler.h>
+#include <linux/types.h>
+#include <linux/slab.h>
+#include <linux/init.h>
+#include <asm/atomic.h>
+#include "crc32defs.h"
+#if CRC_LE_BITS == 8
+#define tole(x) __constant_cpu_to_le32(x)
+#define tobe(x) __constant_cpu_to_be32(x)
+#else
+#define tole(x) (x)
+#define tobe(x) (x)
+#endif
+#include "crc32table.h"
+
+MODULE_AUTHOR("Matt Domsch <Matt_Domsch@dell.com>");
+MODULE_DESCRIPTION("Ethernet CRC32 calculations");
+MODULE_LICENSE("GPL");
+
+#if CRC_LE_BITS == 1
+/*
+ * In fact, the table-based code will work in this case, but it can be
+ * simplified by inlining the table in ?: form.
+ */
+
+/**
+ * crc32_le() - Calculate bitwise little-endian Ethernet AUTODIN II CRC32
+ * @crc - seed value for computation.  ~0 for Ethernet, sometimes 0 for
+ *        other uses, or the previous crc32 value if computing incrementally.
+ * @p   - pointer to buffer over which CRC is run
+ * @len - length of buffer @p
+ * 
+ */
+u32 __attribute_pure__ crc32_le(u32 crc, unsigned char const *p, size_t len)
+{
+	int i;
+	while (len--) {
+		crc ^= *p++;
+		for (i = 0; i < 8; i++)
+			crc = (crc >> 1) ^ ((crc & 1) ? CRCPOLY_LE : 0);
+	}
+	return crc;
+}
+#else				/* Table-based approach */
+
+/**
+ * crc32_le() - Calculate bitwise little-endian Ethernet AUTODIN II CRC32
+ * @crc - seed value for computation.  ~0 for Ethernet, sometimes 0 for
+ *        other uses, or the previous crc32 value if computing incrementally.
+ * @p   - pointer to buffer over which CRC is run
+ * @len - length of buffer @p
+ * 
+ */
+u32 __attribute_pure__ crc32_le(u32 crc, unsigned char const *p, size_t len)
+{
+# if CRC_LE_BITS == 8
+	const u32      *b =(u32 *)p;
+	const u32      *tab = crc32table_le;
+
+# ifdef __LITTLE_ENDIAN
+#  define DO_CRC(x) crc = tab[ (crc ^ (x)) & 255 ] ^ (crc>>8)
+# else
+#  define DO_CRC(x) crc = tab[ ((crc >> 24) ^ (x)) & 255] ^ (crc<<8)
+# endif
+
+	crc = __cpu_to_le32(crc);
+	/* Align it */
+	if(unlikely(((long)b)&3 && len)){
+		do {
+			u8 *p = (u8 *)b;
+			DO_CRC(*p++);
+			b = (void *)p;
+		} while ((--len) && ((long)b)&3 );
+	}
+	if(likely(len >= 4)){
+		/* load data 32 bits wide, xor data 32 bits wide. */
+		size_t save_len = len & 3;
+	        len = len >> 2;
+		--b; /* use pre increment below(*++b) for speed */
+		do {
+			crc ^= *++b;
+			DO_CRC(0);
+			DO_CRC(0);
+			DO_CRC(0);
+			DO_CRC(0);
+		} while (--len);
+		b++; /* point to next byte(s) */
+		len = save_len;
+	}
+	/* And the last few bytes */
+	if(len){
+		do {
+			u8 *p = (u8 *)b;
+			DO_CRC(*p++);
+			b = (void *)p;
+		} while (--len);
+	}
+
+	return __le32_to_cpu(crc);
+#undef ENDIAN_SHIFT
+#undef DO_CRC
+
+# elif CRC_LE_BITS == 4
+	while (len--) {
+		crc ^= *p++;
+		crc = (crc >> 4) ^ crc32table_le[crc & 15];
+		crc = (crc >> 4) ^ crc32table_le[crc & 15];
+	}
+	return crc;
+# elif CRC_LE_BITS == 2
+	while (len--) {
+		crc ^= *p++;
+		crc = (crc >> 2) ^ crc32table_le[crc & 3];
+		crc = (crc >> 2) ^ crc32table_le[crc & 3];
+		crc = (crc >> 2) ^ crc32table_le[crc & 3];
+		crc = (crc >> 2) ^ crc32table_le[crc & 3];
+	}
+	return crc;
+# endif
+}
+#endif
+
+#if CRC_BE_BITS == 1
+/*
+ * In fact, the table-based code will work in this case, but it can be
+ * simplified by inlining the table in ?: form.
+ */
+
+/**
+ * crc32_be() - Calculate bitwise big-endian Ethernet AUTODIN II CRC32
+ * @crc - seed value for computation.  ~0 for Ethernet, sometimes 0 for
+ *        other uses, or the previous crc32 value if computing incrementally.
+ * @p   - pointer to buffer over which CRC is run
+ * @len - length of buffer @p
+ * 
+ */
+u32 __attribute_pure__ crc32_be(u32 crc, unsigned char const *p, size_t len)
+{
+	int i;
+	while (len--) {
+		crc ^= *p++ << 24;
+		for (i = 0; i < 8; i++)
+			crc =
+			    (crc << 1) ^ ((crc & 0x80000000) ? CRCPOLY_BE :
+					  0);
+	}
+	return crc;
+}
+
+#else				/* Table-based approach */
+/**
+ * crc32_be() - Calculate bitwise big-endian Ethernet AUTODIN II CRC32
+ * @crc - seed value for computation.  ~0 for Ethernet, sometimes 0 for
+ *        other uses, or the previous crc32 value if computing incrementally.
+ * @p   - pointer to buffer over which CRC is run
+ * @len - length of buffer @p
+ * 
+ */
+u32 __attribute_pure__ crc32_be(u32 crc, unsigned char const *p, size_t len)
+{
+# if CRC_BE_BITS == 8
+	const u32      *b =(u32 *)p;
+	const u32      *tab = crc32table_be;
+
+# ifdef __LITTLE_ENDIAN
+#  define DO_CRC(x) crc = tab[ (crc ^ (x)) & 255 ] ^ (crc>>8)
+# else
+#  define DO_CRC(x) crc = tab[ ((crc >> 24) ^ (x)) & 255] ^ (crc<<8)
+# endif
+
+	crc = __cpu_to_be32(crc);
+	/* Align it */
+	if(unlikely(((long)b)&3 && len)){
+		do {
+			u8 *p = (u8 *)b;
+			DO_CRC(*p++);
+			b = (u32 *)p;
+		} while ((--len) && ((long)b)&3 );
+	}
+	if(likely(len >= 4)){
+		/* load data 32 bits wide, xor data 32 bits wide. */
+		size_t save_len = len & 3;
+	        len = len >> 2;
+		--b; /* use pre increment below(*++b) for speed */
+		do {
+			crc ^= *++b;
+			DO_CRC(0);
+			DO_CRC(0);
+			DO_CRC(0);
+			DO_CRC(0);
+		} while (--len);
+		b++; /* point to next byte(s) */
+		len = save_len;
+	}
+	/* And the last few bytes */
+	if(len){
+		do {
+			u8 *p = (u8 *)b;
+			DO_CRC(*p++);
+			b = (void *)p;
+		} while (--len);
+	}
+	return __be32_to_cpu(crc);
+#undef ENDIAN_SHIFT
+#undef DO_CRC
+
+# elif CRC_BE_BITS == 4
+	while (len--) {
+		crc ^= *p++ << 24;
+		crc = (crc << 4) ^ crc32table_be[crc >> 28];
+		crc = (crc << 4) ^ crc32table_be[crc >> 28];
+	}
+	return crc;
+# elif CRC_BE_BITS == 2
+	while (len--) {
+		crc ^= *p++ << 24;
+		crc = (crc << 2) ^ crc32table_be[crc >> 30];
+		crc = (crc << 2) ^ crc32table_be[crc >> 30];
+		crc = (crc << 2) ^ crc32table_be[crc >> 30];
+		crc = (crc << 2) ^ crc32table_be[crc >> 30];
+	}
+	return crc;
+# endif
+}
+#endif
+
+u32 bitreverse(u32 x)
+{
+	x = (x >> 16) | (x << 16);
+	x = (x >> 8 & 0x00ff00ff) | (x << 8 & 0xff00ff00);
+	x = (x >> 4 & 0x0f0f0f0f) | (x << 4 & 0xf0f0f0f0);
+	x = (x >> 2 & 0x33333333) | (x << 2 & 0xcccccccc);
+	x = (x >> 1 & 0x55555555) | (x << 1 & 0xaaaaaaaa);
+	return x;
+}
+
+EXPORT_SYMBOL(crc32_le);
+EXPORT_SYMBOL(crc32_be);
+EXPORT_SYMBOL(bitreverse);
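+
+/*
+ * Usage note (illustrative): the conventional Ethernet/zlib CRC-32 of a
+ * buffer corresponds to seeding with all ones and inverting the result,
+ * roughly
+ *
+ *	u32 crc = ~crc32_le(~0, buf, len);
+ *
+ * while incremental users simply feed the previous return value back in
+ * as the seed.
+ */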
+
+/*
+ * A brief CRC tutorial.
+ *
+ * A CRC is a long-division remainder.  You add the CRC to the message,
+ * and the whole thing (message+CRC) is a multiple of the given
+ * CRC polynomial.  To check the CRC, you can either check that the
+ * CRC matches the recomputed value, *or* you can check that the
+ * remainder computed on the message+CRC is 0.  This latter approach
+ * is used by a lot of hardware implementations, and is why so many
+ * protocols put the end-of-frame flag after the CRC.
+ *
+ * It's actually the same long division you learned in school, except that
+ * - We're working in binary, so the digits are only 0 and 1, and
+ * - When dividing polynomials, there are no carries.  Rather than add and
+ *   subtract, we just xor.  Thus, we tend to get a bit sloppy about
+ *   the difference between adding and subtracting.
+ *
+ * A 32-bit CRC polynomial is actually 33 bits long.  But since it's
+ * 33 bits long, bit 32 is always going to be set, so usually the CRC
+ * is written in hex with the most significant bit omitted.  (If you're
+ * familiar with the IEEE 754 floating-point format, it's the same idea.)
+ *
+ * Note that a CRC is computed over a string of *bits*, so you have
+ * to decide on the endianness of the bits within each byte.  To get
+ * the best error-detecting properties, this should correspond to the
+ * order they're actually sent.  For example, standard RS-232 serial is
+ * little-endian; the most significant bit (sometimes used for parity)
+ * is sent last.  And when appending a CRC word to a message, you should
+ * do it in the right order, matching the endianness.
+ *
+ * Just like with ordinary division, the remainder is always smaller than
+ * the divisor (the CRC polynomial) you're dividing by.  Each step of the
+ * division, you take one more digit (bit) of the dividend and append it
+ * to the current remainder.  Then you figure out the appropriate multiple
+ * of the divisor to subtract to bring the remainder back into range.
+ * In binary, it's easy - it has to be either 0 or 1, and to make the
+ * XOR cancel, it's just a copy of bit 32 of the remainder.
+ *
+ * When computing a CRC, we don't care about the quotient, so we can
+ * throw the quotient bit away, but subtract the appropriate multiple of
+ * the polynomial from the remainder and we're back to where we started,
+ * ready to process the next bit.
+ *
+ * A big-endian CRC written this way would be coded like:
+ * for (i = 0; i < input_bits; i++) {
+ * 	multiple = remainder & 0x80000000 ? CRCPOLY : 0;
+ * 	remainder = (remainder << 1 | next_input_bit()) ^ multiple;
+ * }
+ * Notice how, to get at bit 32 of the shifted remainder, we look
+ * at bit 31 of the remainder *before* shifting it.
+ *
+ * But also notice how the next_input_bit() bits we're shifting into
+ * the remainder don't actually affect any decision-making until
+ * 32 bits later.  Thus, the first 32 cycles of this are pretty boring.
+ * Also, to add the CRC to a message, we need a 32-bit-long hole for it at
+ * the end, so we have to add 32 extra cycles shifting in zeros at the
+ * end of every message.
+ *
+ * So the standard trick is to rearrange merging in the next_input_bit()
+ * until the moment it's needed.  Then the first 32 cycles can be precomputed,
+ * and merging in the final 32 zero bits to make room for the CRC can be
+ * skipped entirely.
+ * This changes the code to:
+ * for (i = 0; i < input_bits; i++) {
+ *      remainder ^= next_input_bit() << 31;
+ * 	multiple = (remainder & 0x80000000) ? CRCPOLY : 0;
+ * 	remainder = (remainder << 1) ^ multiple;
+ * }
+ * With this optimization, the little-endian code is simpler:
+ * for (i = 0; i < input_bits; i++) {
+ *      remainder ^= next_input_bit();
+ * 	multiple = (remainder & 1) ? CRCPOLY : 0;
+ * 	remainder = (remainder >> 1) ^ multiple;
+ * }
+ *
+ * Note that the other details of endianness have been hidden in CRCPOLY
+ * (which must be bit-reversed) and next_input_bit().
+ *
+ * However, as long as next_input_bit is returning the bits in a sensible
+ * order, we can actually do the merging 8 or more bits at a time rather
+ * than one bit at a time:
+ * for (i = 0; i < input_bytes; i++) {
+ * 	remainder ^= next_input_byte() << 24;
+ * 	for (j = 0; j < 8; j++) {
+ * 		multiple = (remainder & 0x80000000) ? CRCPOLY : 0;
+ * 		remainder = (remainder << 1) ^ multiple;
+ * 	}
+ * }
+ * Or in little-endian:
+ * for (i = 0; i < input_bytes; i++) {
+ * 	remainder ^= next_input_byte();
+ * 	for (j = 0; j < 8; j++) {
+ * 		multiple = (remainder & 1) ? CRCPOLY : 0;
+ * 		remainder = (remainder >> 1) ^ multiple;
+ * 	}
+ * }
+ * If the input is a multiple of 32 bits, you can even XOR in a 32-bit
+ * word at a time and increase the inner loop count to 32.
+ *
+ * You can also mix and match the two loop styles, for example doing the
+ * bulk of a message byte-at-a-time and adding bit-at-a-time processing
+ * for any fractional bytes at the end.
+ *
+ * The only remaining optimization is to the byte-at-a-time table method.
+ * Here, rather than just shifting one bit of the remainder to decide
+ * on the correct multiple to subtract, we can shift a byte at a time.
+ * This produces a 40-bit (rather than a 33-bit) intermediate remainder,
+ * but again the multiple of the polynomial to subtract depends only on
+ * the high bits, the high 8 bits in this case.  
+ *
+ * The multiple we need in that case is the low 32 bits of a 40-bit
+ * value whose high 8 bits are given, and which is a multiple of the
+ * generator polynomial.  This is simply the CRC-32 of the given
+ * one-byte message.
+ *
+ * Two more details: normally, appending zero bits to a message which
+ * is already a multiple of a polynomial produces a larger multiple of that
+ * polynomial.  To enable a CRC to detect this condition, it's common to
+ * invert the CRC before appending it.  This makes the remainder of the
+ * message+crc come out not as zero, but some fixed non-zero value.
+ *
+ * The same problem applies to zero bits prepended to the message, and
+ * a similar solution is used.  Instead of starting with a remainder of
+ * 0, an initial remainder of all ones is used.  As long as you start
+ * the same way on decoding, it doesn't make a difference.
+ */
+
+#ifdef UNITTEST
+
+#include <stdlib.h>
+#include <stdio.h>
+
+#if 0				/*Not used at present */
+static void
+buf_dump(char const *prefix, unsigned char const *buf, size_t len)
+{
+	fputs(prefix, stdout);
+	while (len--)
+		printf(" %02x", *buf++);
+	putchar('\n');
+
+}
+#endif
+
+static void bytereverse(unsigned char *buf, size_t len)
+{
+	while (len--) {
+		unsigned char x = *buf;
+		x = (x >> 4) | (x << 4);
+		x = (x >> 2 & 0x33) | (x << 2 & 0xcc);
+		x = (x >> 1 & 0x55) | (x << 1 & 0xaa);
+		*buf++ = x;
+	}
+}
+
+static void random_garbage(unsigned char *buf, size_t len)
+{
+	while (len--)
+		*buf++ = (unsigned char) random();
+}
+
+#if 0				/* Not used at present */
+static void store_le(u32 x, unsigned char *buf)
+{
+	buf[0] = (unsigned char) x;
+	buf[1] = (unsigned char) (x >> 8);
+	buf[2] = (unsigned char) (x >> 16);
+	buf[3] = (unsigned char) (x >> 24);
+}
+#endif
+
+static void store_be(u32 x, unsigned char *buf)
+{
+	buf[0] = (unsigned char) (x >> 24);
+	buf[1] = (unsigned char) (x >> 16);
+	buf[2] = (unsigned char) (x >> 8);
+	buf[3] = (unsigned char) x;
+}
+
+/*
+ * This checks that CRC(buf + CRC(buf)) = 0, and that
+ * CRC commutes with bit-reversal.  This has the side effect
+ * of bytewise bit-reversing the input buffer, and returns
+ * the CRC of the reversed buffer.
+ */
+static u32 test_step(u32 init, unsigned char *buf, size_t len)
+{
+	u32 crc1, crc2;
+	size_t i;
+
+	crc1 = crc32_be(init, buf, len);
+	store_be(crc1, buf + len);
+	crc2 = crc32_be(init, buf, len + 4);
+	if (crc2)
+		printf("\nCRC cancellation fail: 0x%08x should be 0\n",
+		       crc2);
+
+	for (i = 0; i <= len + 4; i++) {
+		crc2 = crc32_be(init, buf, i);
+		crc2 = crc32_be(crc2, buf + i, len + 4 - i);
+		if (crc2)
+			printf("\nCRC split fail: 0x%08x\n", crc2);
+	}
+
+	/* Now swap it around for the other test */
+
+	bytereverse(buf, len + 4);
+	init = bitreverse(init);
+	crc2 = bitreverse(crc1);
+	if (crc1 != bitreverse(crc2))
+		printf("\nBit reversal fail: 0x%08x -> 0x%08x -> 0x%08x\n",
+		       crc1, crc2, bitreverse(crc2));
+	crc1 = crc32_le(init, buf, len);
+	if (crc1 != crc2)
+		printf("\nCRC endianness fail: 0x%08x != 0x%08x\n", crc1,
+		       crc2);
+	crc2 = crc32_le(init, buf, len + 4);
+	if (crc2)
+		printf("\nCRC cancellation fail: 0x%08x should be 0\n",
+		       crc2);
+
+	for (i = 0; i <= len + 4; i++) {
+		crc2 = crc32_le(init, buf, i);
+		crc2 = crc32_le(crc2, buf + i, len + 4 - i);
+		if (crc2)
+			printf("\nCRC split fail: 0x%08x\n", crc2);
+	}
+
+	return crc1;
+}
+
+#define SIZE 64
+#define INIT1 0
+#define INIT2 0
+
+int main(void)
+{
+	unsigned char buf1[SIZE + 4];
+	unsigned char buf2[SIZE + 4];
+	unsigned char buf3[SIZE + 4];
+	int i, j;
+	u32 crc1, crc2, crc3;
+
+	for (i = 0; i <= SIZE; i++) {
+		printf("\rTesting length %d...", i);
+		fflush(stdout);
+		random_garbage(buf1, i);
+		random_garbage(buf2, i);
+		for (j = 0; j < i; j++)
+			buf3[j] = buf1[j] ^ buf2[j];
+
+		crc1 = test_step(INIT1, buf1, i);
+		crc2 = test_step(INIT2, buf2, i);
+		/* Now check that CRC(buf1 ^ buf2) = CRC(buf1) ^ CRC(buf2) */
+		crc3 = test_step(INIT1 ^ INIT2, buf3, i);
+		if (crc3 != (crc1 ^ crc2))
+			printf("CRC XOR fail: 0x%08x != 0x%08x ^ 0x%08x\n",
+			       crc3, crc1, crc2);
+	}
+	printf("\nAll tests complete.  No failures expected.\n");
+	return 0;
+}
+
+#endif				/* UNITTEST */
diff --git a/lib/crc32defs.h b/lib/crc32defs.h
new file mode 100644
index 0000000..9b6773d
--- /dev/null
+++ b/lib/crc32defs.h
@@ -0,0 +1,32 @@
+/*
+ * There are multiple 16-bit CRC polynomials in common use, but this is
+ * *the* standard CRC-32 polynomial, first popularized by Ethernet.
+ * x^32+x^26+x^23+x^22+x^16+x^12+x^11+x^10+x^8+x^7+x^5+x^4+x^2+x^1+x^0
+ */
+#define CRCPOLY_LE 0xedb88320
+#define CRCPOLY_BE 0x04c11db7
+
+/* How many bits at a time to use.  Requires a table of 4<<CRC_xx_BITS bytes. */
+/* For less performance-sensitive code, use 4 */
+#ifndef CRC_LE_BITS 
+# define CRC_LE_BITS 8
+#endif
+#ifndef CRC_BE_BITS
+# define CRC_BE_BITS 8
+#endif
+
+/*
+ * Little-endian CRC computation.  Used with serial bit streams sent
+ * lsbit-first.  Be sure to use cpu_to_le32() to append the computed CRC.
+ */
+#if CRC_LE_BITS > 8 || CRC_LE_BITS < 1 || CRC_LE_BITS & CRC_LE_BITS-1
+# error CRC_LE_BITS must be a power of 2 between 1 and 8
+#endif
+
+/*
+ * Big-endian CRC computation.  Used with serial bit streams sent
+ * msbit-first.  Be sure to use cpu_to_be32() to append the computed CRC.
+ */
+#if CRC_BE_BITS > 8 || CRC_BE_BITS < 1 || CRC_BE_BITS & CRC_BE_BITS-1
+# error CRC_BE_BITS must be a power of 2 between 1 and 8
+#endif
diff --git a/lib/ctype.c b/lib/ctype.c
new file mode 100644
index 0000000..d02ace1
--- /dev/null
+++ b/lib/ctype.c
@@ -0,0 +1,36 @@
+/*
+ *  linux/lib/ctype.c
+ *
+ *  Copyright (C) 1991, 1992  Linus Torvalds
+ */
+
+#include <linux/ctype.h>
+#include <linux/module.h>
+
+unsigned char _ctype[] = {
+_C,_C,_C,_C,_C,_C,_C,_C,			/* 0-7 */
+_C,_C|_S,_C|_S,_C|_S,_C|_S,_C|_S,_C,_C,		/* 8-15 */
+_C,_C,_C,_C,_C,_C,_C,_C,			/* 16-23 */
+_C,_C,_C,_C,_C,_C,_C,_C,			/* 24-31 */
+_S|_SP,_P,_P,_P,_P,_P,_P,_P,			/* 32-39 */
+_P,_P,_P,_P,_P,_P,_P,_P,			/* 40-47 */
+_D,_D,_D,_D,_D,_D,_D,_D,			/* 48-55 */
+_D,_D,_P,_P,_P,_P,_P,_P,			/* 56-63 */
+_P,_U|_X,_U|_X,_U|_X,_U|_X,_U|_X,_U|_X,_U,	/* 64-71 */
+_U,_U,_U,_U,_U,_U,_U,_U,			/* 72-79 */
+_U,_U,_U,_U,_U,_U,_U,_U,			/* 80-87 */
+_U,_U,_U,_P,_P,_P,_P,_P,			/* 88-95 */
+_P,_L|_X,_L|_X,_L|_X,_L|_X,_L|_X,_L|_X,_L,	/* 96-103 */
+_L,_L,_L,_L,_L,_L,_L,_L,			/* 104-111 */
+_L,_L,_L,_L,_L,_L,_L,_L,			/* 112-119 */
+_L,_L,_L,_P,_P,_P,_P,_C,			/* 120-127 */
+0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,		/* 128-143 */
+0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,		/* 144-159 */
+_S|_SP,_P,_P,_P,_P,_P,_P,_P,_P,_P,_P,_P,_P,_P,_P,_P,   /* 160-175 */
+_P,_P,_P,_P,_P,_P,_P,_P,_P,_P,_P,_P,_P,_P,_P,_P,       /* 176-191 */
+_U,_U,_U,_U,_U,_U,_U,_U,_U,_U,_U,_U,_U,_U,_U,_U,       /* 192-207 */
+_U,_U,_U,_U,_U,_U,_U,_P,_U,_U,_U,_U,_U,_U,_U,_L,       /* 208-223 */
+_L,_L,_L,_L,_L,_L,_L,_L,_L,_L,_L,_L,_L,_L,_L,_L,       /* 224-239 */
+_L,_L,_L,_L,_L,_L,_L,_P,_L,_L,_L,_L,_L,_L,_L,_L};      /* 240-255 */
+
+EXPORT_SYMBOL(_ctype);
diff --git a/lib/dec_and_lock.c b/lib/dec_and_lock.c
new file mode 100644
index 0000000..6658d81
--- /dev/null
+++ b/lib/dec_and_lock.c
@@ -0,0 +1,40 @@
+#include <linux/module.h>
+#include <linux/spinlock.h>
+#include <asm/atomic.h>
+
+/*
+ * This is an architecture-neutral, but slow,
+ * implementation of the notion of "decrement
+ * a reference count, and return locked if it
+ * decremented to zero".
+ *
+ * NOTE NOTE NOTE! This is _not_ equivalent to
+ *
+ *	if (atomic_dec_and_test(&atomic)) {
+ *		spin_lock(&lock);
+ *		return 1;
+ *	}
+ *	return 0;
+ *
+ * because the spin-lock and the decrement must be
+ * "atomic".
+ *
+ * This slow version gets the spinlock unconditionally,
+ * and releases it if it isn't needed. Architectures
+ * are encouraged to come up with better approaches,
+ * this is trivially done efficiently using a load-locked
+ * store-conditional approach, for example.
+ */
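+
+/*
+ * Typical caller pattern (sketch; the object and lock names are
+ * illustrative):
+ *
+ *	if (atomic_dec_and_lock(&obj->refcount, &obj_list_lock)) {
+ *		list_del(&obj->list);
+ *		spin_unlock(&obj_list_lock);
+ *		kfree(obj);
+ *	}
+ */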
+
+#ifndef ATOMIC_DEC_AND_LOCK
+int _atomic_dec_and_lock(atomic_t *atomic, spinlock_t *lock)
+{
+	spin_lock(lock);
+	if (atomic_dec_and_test(atomic))
+		return 1;
+	spin_unlock(lock);
+	return 0;
+}
+
+EXPORT_SYMBOL(_atomic_dec_and_lock);
+#endif
diff --git a/lib/div64.c b/lib/div64.c
new file mode 100644
index 0000000..365719f
--- /dev/null
+++ b/lib/div64.c
@@ -0,0 +1,61 @@
+/*
+ * Copyright (C) 2003 Bernardo Innocenti <bernie@develer.com>
+ *
+ * Based on former do_div() implementation from asm-parisc/div64.h:
+ *	Copyright (C) 1999 Hewlett-Packard Co
+ *	Copyright (C) 1999 David Mosberger-Tang <davidm@hpl.hp.com>
+ *
+ *
+ * Generic C version of 64bit/32bit division and modulo, with
+ * 64bit result and 32bit remainder.
+ *
+ * The fast case for (n>>32 == 0) is handled inline by do_div(). 
+ *
+ * Code generated for this function might be very inefficient
+ * for some CPUs. __div64_32() can be overridden by linking arch-specific
+ * assembly versions such as arch/ppc/lib/div64.S and arch/sh/lib/div64.S.
+ */
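+
+/*
+ * Usage sketch (illustrative): callers normally go through the do_div()
+ * macro from asm/div64.h, which divides in place and returns the
+ * remainder:
+ *
+ *	u64 ns = 1000000123;
+ *	u32 rem = do_div(ns, 1000000);	ns is now 1000, rem is 123
+ */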
+
+#include <linux/types.h>
+#include <linux/module.h>
+#include <asm/div64.h>
+
+/* Not needed on 64bit architectures */
+#if BITS_PER_LONG == 32
+
+uint32_t __div64_32(uint64_t *n, uint32_t base)
+{
+	uint64_t rem = *n;
+	uint64_t b = base;
+	uint64_t res, d = 1;
+	uint32_t high = rem >> 32;
+
+	/* Reduce the thing a bit first */
+	res = 0;
+	if (high >= base) {
+		high /= base;
+		res = (uint64_t) high << 32;
+		rem -= (uint64_t) (high*base) << 32;
+	}
+
+	while ((int64_t)b > 0 && b < rem) {
+		b = b+b;
+		d = d+d;
+	}
+
+	do {
+		if (rem >= b) {
+			rem -= b;
+			res += d;
+		}
+		b >>= 1;
+		d >>= 1;
+	} while (d);
+
+	*n = res;
+	return rem;
+}
+
+EXPORT_SYMBOL(__div64_32);
+
+#endif /* BITS_PER_LONG == 32 */
diff --git a/lib/dump_stack.c b/lib/dump_stack.c
new file mode 100644
index 0000000..53bff4c
--- /dev/null
+++ b/lib/dump_stack.c
@@ -0,0 +1,15 @@
+/*
+ * Provide a default dump_stack() function for architectures
+ * which don't implement their own.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+
+void dump_stack(void)
+{
+	printk(KERN_NOTICE
+		"This architecture does not implement dump_stack()\n");
+}
+
+EXPORT_SYMBOL(dump_stack);
diff --git a/lib/errno.c b/lib/errno.c
new file mode 100644
index 0000000..41cb9d7
--- /dev/null
+++ b/lib/errno.c
@@ -0,0 +1,7 @@
+/*
+ *  linux/lib/errno.c
+ *
+ *  Copyright (C) 1991, 1992  Linus Torvalds
+ */
+
+int errno;
diff --git a/lib/extable.c b/lib/extable.c
new file mode 100644
index 0000000..3f677a8
--- /dev/null
+++ b/lib/extable.c
@@ -0,0 +1,79 @@
+/*
+ * lib/extable.c
+ * Derived from arch/ppc/mm/extable.c and arch/i386/mm/extable.c.
+ *
+ * Copyright (C) 2004 Paul Mackerras, IBM Corp.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#include <linux/config.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/sort.h>
+#include <asm/uaccess.h>
+
+extern struct exception_table_entry __start___ex_table[];
+extern struct exception_table_entry __stop___ex_table[];
+
+#ifndef ARCH_HAS_SORT_EXTABLE
+/*
+ * The exception table needs to be sorted so that the binary
+ * search that we use to find entries in it works properly.
+ * This is used both for the kernel exception table and for
+ * the exception tables of modules that get loaded.
+ */
+static int cmp_ex(const void *a, const void *b)
+{
+	const struct exception_table_entry *x = a, *y = b;
+
+	/* avoid overflow */
+	if (x->insn > y->insn)
+		return 1;
+	if (x->insn < y->insn)
+		return -1;
+	return 0;
+}
+
+void sort_extable(struct exception_table_entry *start,
+		  struct exception_table_entry *finish)
+{
+	sort(start, finish - start, sizeof(struct exception_table_entry),
+	     cmp_ex, NULL);
+}
+#endif
+
+#ifndef ARCH_HAS_SEARCH_EXTABLE
+/*
+ * Search one exception table for an entry corresponding to the
+ * given instruction address, and return the address of the entry,
+ * or NULL if none is found.
+ * We use a binary search, and thus we assume that the table is
+ * already sorted.
+ */
+const struct exception_table_entry *
+search_extable(const struct exception_table_entry *first,
+	       const struct exception_table_entry *last,
+	       unsigned long value)
+{
+	while (first <= last) {
+		const struct exception_table_entry *mid;
+
+		mid = (last - first) / 2 + first;
+		/*
+		 * careful, the distance between entries can be
+		 * larger than 2GB:
+		 */
+		if (mid->insn < value)
+			first = mid + 1;
+		else if (mid->insn > value)
+			last = mid - 1;
+		else
+			return mid;
+	}
+	return NULL;
+}
+#endif
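
A hedged sketch of how an architecture's page-fault path typically consumes these helpers, loosely following the i386 code this file was derived from; the entry field names (insn, fixup) and the use of regs->eip are arch-specific assumptions, not something defined here:

#include <linux/module.h>
#include <asm/ptrace.h>

extern struct exception_table_entry __start___ex_table[];
extern struct exception_table_entry __stop___ex_table[];

/* Roughly what fixup_exception() does on i386: if the faulting
 * instruction has a table entry, resume at its fixup stub. */
static int fixup_kernel_fault(struct pt_regs *regs)
{
	const struct exception_table_entry *e;

	e = search_extable(__start___ex_table, __stop___ex_table - 1,
			   regs->eip);
	if (!e)
		return 0;		/* genuine fault */
	regs->eip = e->fixup;		/* skip to the recovery code */
	return 1;
}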
diff --git a/lib/find_next_bit.c b/lib/find_next_bit.c
new file mode 100644
index 0000000..d08302d
--- /dev/null
+++ b/lib/find_next_bit.c
@@ -0,0 +1,55 @@
+/* find_next_bit.c: fallback find next bit implementation
+ *
+ * Copyright (C) 2004 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#include <linux/bitops.h>
+
+int find_next_bit(const unsigned long *addr, int size, int offset)
+{
+	const unsigned long *base;
+	const int NBITS = sizeof(*addr) * 8;
+	unsigned long tmp;
+
+	base = addr;
+	if (offset) {
+		int suboffset;
+
+		addr += offset / NBITS;
+
+		suboffset = offset % NBITS;
+		if (suboffset) {
+			tmp = *addr;
+			tmp >>= suboffset;
+			if (tmp)
+				goto finish;
+		}
+
+		addr++;
+	}
+
+	while ((tmp = *addr) == 0)
+		addr++;
+
+	offset = (addr - base) * NBITS;
+
+ finish:
+	/* count the remaining bits without using __ffs() since that takes a 32-bit arg */
+	while (!(tmp & 0xff)) {
+		offset += 8;
+		tmp >>= 8;
+	}
+
+	while (!(tmp & 1)) {
+		offset++;
+		tmp >>= 1;
+	}
+
+	return offset;
+}
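
A small usage sketch, assuming the usual contract that the return value is >= size once no further set bit exists; the bitmap name and size below are illustrative:

#include <linux/kernel.h>
#include <linux/bitops.h>

#define MY_NBITS 128

static void walk_set_bits(const unsigned long *bitmap)
{
	int bit;

	/* step from one set bit to the next until we run off the end */
	for (bit = find_next_bit(bitmap, MY_NBITS, 0);
	     bit < MY_NBITS;
	     bit = find_next_bit(bitmap, MY_NBITS, bit + 1))
		printk(KERN_DEBUG "bit %d is set\n", bit);
}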
diff --git a/lib/gen_crc32table.c b/lib/gen_crc32table.c
new file mode 100644
index 0000000..bea5d97
--- /dev/null
+++ b/lib/gen_crc32table.c
@@ -0,0 +1,82 @@
+#include <stdio.h>
+#include "crc32defs.h"
+#include <inttypes.h>
+
+#define ENTRIES_PER_LINE 4
+
+#define LE_TABLE_SIZE (1 << CRC_LE_BITS)
+#define BE_TABLE_SIZE (1 << CRC_BE_BITS)
+
+static uint32_t crc32table_le[LE_TABLE_SIZE];
+static uint32_t crc32table_be[BE_TABLE_SIZE];
+
+/**
+ * crc32init_le() - allocate and initialize LE table data
+ *
+ * crc is the crc of the byte i; other entries are filled in based on the
+ * fact that crctable[i^j] = crctable[i] ^ crctable[j].
+ *
+ */
+static void crc32init_le(void)
+{
+	unsigned i, j;
+	uint32_t crc = 1;
+
+	crc32table_le[0] = 0;
+
+	for (i = 1 << (CRC_LE_BITS - 1); i; i >>= 1) {
+		crc = (crc >> 1) ^ ((crc & 1) ? CRCPOLY_LE : 0);
+		for (j = 0; j < LE_TABLE_SIZE; j += 2 * i)
+			crc32table_le[i + j] = crc ^ crc32table_le[j];
+	}
+}
+
+/**
+ * crc32init_be() - allocate and initialize BE table data
+ */
+static void crc32init_be(void)
+{
+	unsigned i, j;
+	uint32_t crc = 0x80000000;
+
+	crc32table_be[0] = 0;
+
+	for (i = 1; i < BE_TABLE_SIZE; i <<= 1) {
+		crc = (crc << 1) ^ ((crc & 0x80000000) ? CRCPOLY_BE : 0);
+		for (j = 0; j < i; j++)
+			crc32table_be[i + j] = crc ^ crc32table_be[j];
+	}
+}
+
+static void output_table(uint32_t table[], int len, char *trans)
+{
+	int i;
+
+	for (i = 0; i < len - 1; i++) {
+		if (i % ENTRIES_PER_LINE == 0)
+			printf("\n");
+		printf("%s(0x%8.8xL), ", trans, table[i]);
+	}
+	printf("%s(0x%8.8xL)\n", trans, table[len - 1]);
+}
+
+int main(int argc, char** argv)
+{
+	printf("/* this file is generated - do not edit */\n\n");
+
+	if (CRC_LE_BITS > 1) {
+		crc32init_le();
+		printf("static const u32 crc32table_le[] = {");
+		output_table(crc32table_le, LE_TABLE_SIZE, "tole");
+		printf("};\n");
+	}
+
+	if (CRC_BE_BITS > 1) {
+		crc32init_be();
+		printf("static const u32 crc32table_be[] = {");
+		output_table(crc32table_be, BE_TABLE_SIZE, "tobe");
+		printf("};\n");
+	}
+
+	return 0;
+}
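
For context, a sketch of how a table generated this way is consumed at runtime, assuming the full 8-bit case (CRC_LE_BITS == 8) with the generated crc32table_le[] and tole() being a no-op on little-endian hosts; this is the standard byte-at-a-time little-endian update, not code from this file:

#include <stdint.h>
#include <stddef.h>

/* assumed to come from the generated crc32table.h */
extern const uint32_t crc32table_le[256];

/* Fold each byte into the running CRC using the 256-entry LE table. */
static uint32_t crc32_le_update(uint32_t crc, const uint8_t *p, size_t len)
{
	while (len--)
		crc = crc32table_le[(crc ^ *p++) & 0xff] ^ (crc >> 8);
	return crc;
}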
diff --git a/lib/halfmd4.c b/lib/halfmd4.c
new file mode 100644
index 0000000..e11db26
--- /dev/null
+++ b/lib/halfmd4.c
@@ -0,0 +1,66 @@
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/cryptohash.h>
+
+/* F, G and H are basic MD4 functions: selection, majority, parity */
+#define F(x, y, z) ((z) ^ ((x) & ((y) ^ (z))))
+#define G(x, y, z) (((x) & (y)) + (((x) ^ (y)) & (z)))
+#define H(x, y, z) ((x) ^ (y) ^ (z))
+
+/*
+ * The generic round function.  The application is so specific that
+ * we don't bother protecting all the arguments with parens, as is generally
+ * good macro practice, in favor of extra legibility.
+ * Rotation is separate from addition to prevent recomputation
+ */
+#define ROUND(f, a, b, c, d, x, s)	\
+	(a += f(b, c, d) + x, a = (a << s) | (a >> (32 - s)))
+#define K1 0
+#define K2 013240474631UL
+#define K3 015666365641UL
+
+/*
+ * Basic cut-down MD4 transform.  Returns only 32 bits of result.
+ */
+__u32 half_md4_transform(__u32 buf[4], __u32 const in[8])
+{
+	__u32 a = buf[0], b = buf[1], c = buf[2], d = buf[3];
+
+	/* Round 1 */
+	ROUND(F, a, b, c, d, in[0] + K1,  3);
+	ROUND(F, d, a, b, c, in[1] + K1,  7);
+	ROUND(F, c, d, a, b, in[2] + K1, 11);
+	ROUND(F, b, c, d, a, in[3] + K1, 19);
+	ROUND(F, a, b, c, d, in[4] + K1,  3);
+	ROUND(F, d, a, b, c, in[5] + K1,  7);
+	ROUND(F, c, d, a, b, in[6] + K1, 11);
+	ROUND(F, b, c, d, a, in[7] + K1, 19);
+
+	/* Round 2 */
+	ROUND(G, a, b, c, d, in[1] + K2,  3);
+	ROUND(G, d, a, b, c, in[3] + K2,  5);
+	ROUND(G, c, d, a, b, in[5] + K2,  9);
+	ROUND(G, b, c, d, a, in[7] + K2, 13);
+	ROUND(G, a, b, c, d, in[0] + K2,  3);
+	ROUND(G, d, a, b, c, in[2] + K2,  5);
+	ROUND(G, c, d, a, b, in[4] + K2,  9);
+	ROUND(G, b, c, d, a, in[6] + K2, 13);
+
+	/* Round 3 */
+	ROUND(H, a, b, c, d, in[3] + K3,  3);
+	ROUND(H, d, a, b, c, in[7] + K3,  9);
+	ROUND(H, c, d, a, b, in[2] + K3, 11);
+	ROUND(H, b, c, d, a, in[6] + K3, 15);
+	ROUND(H, a, b, c, d, in[1] + K3,  3);
+	ROUND(H, d, a, b, c, in[5] + K3,  9);
+	ROUND(H, c, d, a, b, in[0] + K3, 11);
+	ROUND(H, b, c, d, a, in[4] + K3, 15);
+
+	buf[0] += a;
+	buf[1] += b;
+	buf[2] += c;
+	buf[3] += d;
+
+	return buf[1]; /* "most hashed" word */
+}
+EXPORT_SYMBOL(half_md4_transform);
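
A usage sketch: the caller keeps a 4-word state (buf) and feeds eight 32-bit words at a time, much as the ext3 htree hashing does; the seed constants below are the standard MD4 initial values and the helper name is illustrative, not mandated by this file:

#include <linux/types.h>
#include <linux/string.h>
#include <linux/cryptohash.h>

/* Hash exactly 32 bytes of data down to a 32-bit value. */
static __u32 hash32(const void *data)
{
	__u32 buf[4] = { 0x67452301, 0xefcdab89, 0x98badcfe, 0x10325476 };
	__u32 in[8];

	memcpy(in, data, sizeof(in));	/* 8 x 32-bit words, host order */
	return half_md4_transform(buf, in);
}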
diff --git a/lib/idr.c b/lib/idr.c
new file mode 100644
index 0000000..81fc430
--- /dev/null
+++ b/lib/idr.c
@@ -0,0 +1,408 @@
+/*
+ * 2002-10-18  written by Jim Houston jim.houston@ccur.com
+ *	Copyright (C) 2002 by Concurrent Computer Corporation
+ *	Distributed under the GNU GPL license version 2.
+ *
+ * Modified by George Anzinger to reuse immediately and to use
+ * find bit instructions.  Also removed _irq on spinlocks.
+ *
+ * Small id to pointer translation service.  
+ *
+ * It uses a radix tree like structure as a sparse array indexed 
+ * by the id to obtain the pointer.  The bitmap makes allocating
+ * a new id quick.  
+ *
+ * You call it to allocate an id (an int) and associate with that id a
+ * pointer or whatever; we treat it as a (void *).  You can pass this
+ * id to a user for him to pass back at a later time.  You then pass
+ * that id to this code and it returns your pointer.
+ *
+ * You can release ids at any time.  When all ids are released, most of
+ * the memory is returned (we keep up to IDR_FREE_MAX layers) in a local
+ * pool so we don't need to go to the memory "store" during an id
+ * allocation, and so you don't need to be too concerned about locking
+ * and conflicts with the slab allocator.
+ */
+
+#ifndef TEST                        // to test in user space...
+#include <linux/slab.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#endif
+#include <linux/string.h>
+#include <linux/idr.h>
+
+static kmem_cache_t *idr_layer_cache;
+
+static struct idr_layer *alloc_layer(struct idr *idp)
+{
+	struct idr_layer *p;
+
+	spin_lock(&idp->lock);
+	if ((p = idp->id_free)) {
+		idp->id_free = p->ary[0];
+		idp->id_free_cnt--;
+		p->ary[0] = NULL;
+	}
+	spin_unlock(&idp->lock);
+	return(p);
+}
+
+static void free_layer(struct idr *idp, struct idr_layer *p)
+{
+	/*
+	 * Depends on the return element being zeroed.
+	 */
+	spin_lock(&idp->lock);
+	p->ary[0] = idp->id_free;
+	idp->id_free = p;
+	idp->id_free_cnt++;
+	spin_unlock(&idp->lock);
+}
+
+/**
+ * idr_pre_get - reserve resources for idr allocation
+ * @idp:	idr handle
+ * @gfp_mask:	memory allocation flags
+ *
+ * This function should be called prior to locking and calling the
+ * following function.  It preallocates enough memory to satisfy
+ * the worst possible allocation.
+ *
+ * If the system is REALLY out of memory this function returns 0,
+ * otherwise 1.
+ */
+int idr_pre_get(struct idr *idp, unsigned gfp_mask)
+{
+	while (idp->id_free_cnt < IDR_FREE_MAX) {
+		struct idr_layer *new;
+		new = kmem_cache_alloc(idr_layer_cache, gfp_mask);
+		if(new == NULL)
+			return (0);
+		free_layer(idp, new);
+	}
+	return 1;
+}
+EXPORT_SYMBOL(idr_pre_get);
+
+static int sub_alloc(struct idr *idp, void *ptr, int *starting_id)
+{
+	int n, m, sh;
+	struct idr_layer *p, *new;
+	struct idr_layer *pa[MAX_LEVEL];
+	int l, id;
+	long bm;
+
+	id = *starting_id;
+	p = idp->top;
+	l = idp->layers;
+	pa[l--] = NULL;
+	while (1) {
+		/*
+		 * We run around this while until we reach the leaf node...
+		 */
+		n = (id >> (IDR_BITS*l)) & IDR_MASK;
+		bm = ~p->bitmap;
+		m = find_next_bit(&bm, IDR_SIZE, n);
+		if (m == IDR_SIZE) {
+			/* no space available, go back to the previous layer. */
+			l++;
+			id = (id | ((1 << (IDR_BITS*l))-1)) + 1;
+			if (!(p = pa[l])) {
+				*starting_id = id;
+				return -2;
+			}
+			continue;
+		}
+		if (m != n) {
+			sh = IDR_BITS*l;
+			id = ((id >> sh) ^ n ^ m) << sh;
+		}
+		if ((id >= MAX_ID_BIT) || (id < 0))
+			return -3;
+		if (l == 0)
+			break;
+		/*
+		 * Create the layer below if it is missing.
+		 */
+		if (!p->ary[m]) {
+			if (!(new = alloc_layer(idp)))
+				return -1;
+			p->ary[m] = new;
+			p->count++;
+		}
+		pa[l--] = p;
+		p = p->ary[m];
+	}
+	/*
+	 * We have reached the leaf node, plant the
+	 * user's pointer and return the raw id.
+	 */
+	p->ary[m] = (struct idr_layer *)ptr;
+	__set_bit(m, &p->bitmap);
+	p->count++;
+	/*
+	 * If this layer is full mark the bit in the layer above
+	 * to show that this part of the radix tree is full.
+	 * This may complete the layer above and require walking
+	 * up the radix tree.
+	 */
+	n = id;
+	while (p->bitmap == IDR_FULL) {
+		if (!(p = pa[++l]))
+			break;
+		n = n >> IDR_BITS;
+		__set_bit((n & IDR_MASK), &p->bitmap);
+	}
+	return(id);
+}
+
+static int idr_get_new_above_int(struct idr *idp, void *ptr, int starting_id)
+{
+	struct idr_layer *p, *new;
+	int layers, v, id;
+	
+	id = starting_id;
+build_up:
+	p = idp->top;
+	layers = idp->layers;
+	if (unlikely(!p)) {
+		if (!(p = alloc_layer(idp)))
+			return -1;
+		layers = 1;
+	}
+	/*
+	 * Add a new layer to the top of the tree if the requested
+	 * id is larger than the currently allocated space.
+	 */
+	while ((layers < MAX_LEVEL) && (id >= (1 << (layers*IDR_BITS)))) {
+		layers++;
+		if (!p->count)
+			continue;
+		if (!(new = alloc_layer(idp))) {
+			/*
+			 * The allocation failed.  If we built part of
+			 * the structure tear it down.
+			 */
+			for (new = p; p && p != idp->top; new = p) {
+				p = p->ary[0];
+				new->ary[0] = NULL;
+				new->bitmap = new->count = 0;
+				free_layer(idp, new);
+			}
+			return -1;
+		}
+		new->ary[0] = p;
+		new->count = 1;
+		if (p->bitmap == IDR_FULL)
+			__set_bit(0, &new->bitmap);
+		p = new;
+	}
+	idp->top = p;
+	idp->layers = layers;
+	v = sub_alloc(idp, ptr, &id);
+	if (v == -2)
+		goto build_up;
+	return(v);
+}
+
+/**
+ * idr_get_new_above - allocate new idr entry above a start id
+ * @idp: idr handle
+ * @ptr: pointer you want associated with the id
+ * @starting_id: id to start the search at
+ * @id: pointer to the allocated handle
+ *
+ * This is the allocate id function.  It should be called with any
+ * required locks.
+ *
+ * If memory is required, it will return -EAGAIN, you should unlock
+ * and go back to the idr_pre_get() call.  If the idr is full, it will
+ * return -ENOSPC.
+ *
+ * @id returns a value in the range 0 ... 0x7fffffff
+ */
+int idr_get_new_above(struct idr *idp, void *ptr, int starting_id, int *id)
+{
+	int rv;
+	rv = idr_get_new_above_int(idp, ptr, starting_id);
+	/*
+	 * This is a cheap hack until the IDR code can be fixed to
+	 * return proper error values.
+	 */
+	if (rv < 0) {
+		if (rv == -1)
+			return -EAGAIN;
+		else /* Will be -3 */
+			return -ENOSPC;
+	}
+	*id = rv;
+	return 0;
+}
+EXPORT_SYMBOL(idr_get_new_above);
+
+/**
+ * idr_get_new - allocate new idr entry
+ * @idp: idr handle
+ * @ptr: pointer you want associated with the id
+ * @id: pointer to the allocated handle
+ *
+ * This is the allocate id function.  It should be called with any
+ * required locks.
+ *
+ * If memory is required, it will return -EAGAIN, you should unlock
+ * and go back to the idr_pre_get() call.  If the idr is full, it will
+ * return -ENOSPC.
+ *
+ * @id returns a value in the range 0 ... 0x7fffffff
+ */
+int idr_get_new(struct idr *idp, void *ptr, int *id)
+{
+	int rv;
+	rv = idr_get_new_above_int(idp, ptr, 0);
+	/*
+	 * This is a cheap hack until the IDR code can be fixed to
+	 * return proper error values.
+	 */
+	if (rv < 0) {
+		if (rv == -1)
+			return -EAGAIN;
+		else /* Will be -3 */
+			return -ENOSPC;
+	}
+	*id = rv;
+	return 0;
+}
+EXPORT_SYMBOL(idr_get_new);
+
+static void idr_remove_warning(int id)
+{
+	printk("idr_remove called for id=%d which is not allocated.\n", id);
+	dump_stack();
+}
+
+static void sub_remove(struct idr *idp, int shift, int id)
+{
+	struct idr_layer *p = idp->top;
+	struct idr_layer **pa[MAX_LEVEL];
+	struct idr_layer ***paa = &pa[0];
+	int n;
+
+	*paa = NULL;
+	*++paa = &idp->top;
+
+	while ((shift > 0) && p) {
+		n = (id >> shift) & IDR_MASK;
+		__clear_bit(n, &p->bitmap);
+		*++paa = &p->ary[n];
+		p = p->ary[n];
+		shift -= IDR_BITS;
+	}
+	n = id & IDR_MASK;
+	if (likely(p != NULL && test_bit(n, &p->bitmap))){
+		__clear_bit(n, &p->bitmap);
+		p->ary[n] = NULL;
+		while(*paa && ! --((**paa)->count)){
+			free_layer(idp, **paa);
+			**paa-- = NULL;
+		}
+		if ( ! *paa )
+			idp->layers = 0;
+	} else {
+		idr_remove_warning(id);
+	}
+}
+
+/**
+ * idr_remove - remove the given id and free its slot
+ * @idp: idr handle
+ * @id: unique key
+ */
+void idr_remove(struct idr *idp, int id)
+{
+	struct idr_layer *p;
+
+	/* Mask off upper bits we don't use for the search. */
+	id &= MAX_ID_MASK;
+
+	sub_remove(idp, (idp->layers - 1) * IDR_BITS, id);
+	if ( idp->top && idp->top->count == 1 && 
+	     (idp->layers > 1) &&
+	     idp->top->ary[0]){  // We can drop a layer
+
+		p = idp->top->ary[0];
+		idp->top->bitmap = idp->top->count = 0;
+		free_layer(idp, idp->top);
+		idp->top = p;
+		--idp->layers;
+	}
+	while (idp->id_free_cnt >= IDR_FREE_MAX) {
+		
+		p = alloc_layer(idp);
+		kmem_cache_free(idr_layer_cache, p);
+		return;
+	}
+}
+EXPORT_SYMBOL(idr_remove);
+
+/**
+ * idr_find - return pointer for given id
+ * @idp: idr handle
+ * @id: lookup key
+ *
+ * Return the pointer given the id it has been registered with.  A %NULL
+ * return indicates that @id is not valid or you passed %NULL in
+ * idr_get_new().
+ *
+ * The caller must serialize idr_find() vs idr_get_new() and idr_remove().
+ */
+void *idr_find(struct idr *idp, int id)
+{
+	int n;
+	struct idr_layer *p;
+
+	n = idp->layers * IDR_BITS;
+	p = idp->top;
+
+	/* Mask off upper bits we don't use for the search. */
+	id &= MAX_ID_MASK;
+
+	if (id >= (1 << n))
+		return NULL;
+
+	while (n > 0 && p) {
+		n -= IDR_BITS;
+		p = p->ary[(id >> n) & IDR_MASK];
+	}
+	return((void *)p);
+}
+EXPORT_SYMBOL(idr_find);
+
+static void idr_cache_ctor(void * idr_layer, 
+			   kmem_cache_t *idr_layer_cache, unsigned long flags)
+{
+	memset(idr_layer, 0, sizeof(struct idr_layer));
+}
+
+static  int init_id_cache(void)
+{
+	if (!idr_layer_cache)
+		idr_layer_cache = kmem_cache_create("idr_layer_cache", 
+			sizeof(struct idr_layer), 0, 0, idr_cache_ctor, NULL);
+	return 0;
+}
+
+/**
+ * idr_init - initialize idr handle
+ * @idp:	idr handle
+ *
+ * This function is used to set up the handle (@idp) that you will pass
+ * to the rest of the functions.
+ */
+void idr_init(struct idr *idp)
+{
+	init_id_cache();
+	memset(idp, 0, sizeof(struct idr));
+	spin_lock_init(&idp->lock);
+}
+EXPORT_SYMBOL(idr_init);
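
Putting the pieces above together, a hedged sketch of the intended call sequence: preallocate with idr_pre_get() outside your lock, allocate under it, and retry on -EAGAIN if the preallocation was consumed by someone else; my_idr, my_lock and the helper names are illustrative only:

#include <linux/idr.h>
#include <linux/spinlock.h>
#include <linux/errno.h>
#include <linux/gfp.h>

static struct idr my_idr;
static spinlock_t my_lock = SPIN_LOCK_UNLOCKED;

static void my_table_init(void)
{
	idr_init(&my_idr);
}

static int my_attach(void *object, int *out_id)
{
	int err;

	do {
		if (!idr_pre_get(&my_idr, GFP_KERNEL))
			return -ENOMEM;		/* really out of memory */
		spin_lock(&my_lock);
		err = idr_get_new(&my_idr, object, out_id);
		spin_unlock(&my_lock);
	} while (err == -EAGAIN);		/* preallocation was used up, retry */

	return err;
}

static void *my_lookup(int id)
{
	void *obj;

	spin_lock(&my_lock);
	obj = idr_find(&my_idr, id);
	spin_unlock(&my_lock);
	return obj;
}

static void my_detach(int id)
{
	spin_lock(&my_lock);
	idr_remove(&my_idr, id);
	spin_unlock(&my_lock);
}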
diff --git a/lib/inflate.c b/lib/inflate.c
new file mode 100644
index 0000000..75e7d30
--- /dev/null
+++ b/lib/inflate.c
@@ -0,0 +1,1210 @@
+#define DEBG(x)
+#define DEBG1(x)
+/* inflate.c -- Not copyrighted 1992 by Mark Adler
+   version c10p1, 10 January 1993 */
+
+/* 
+ * Adapted for booting Linux by Hannu Savolainen 1993
+ * based on gzip-1.0.3 
+ *
+ * Nicolas Pitre <nico@cam.org>, 1999/04/14 :
+ *   Little mods for all variables to reside either in rodata or bss segments
+ *   by marking constant variables with 'const' and initializing all the others
+ *   at run-time only.  This allows for the kernel uncompressor to run
+ *   directly from Flash or ROM memory on embedded systems.
+ */
+
+/*
+   Inflate deflated (PKZIP's method 8 compressed) data.  The compression
+   method searches for as much of the current string of bytes (up to a
+   length of 258) in the previous 32 K bytes.  If it doesn't find any
+   matches (of at least length 3), it codes the next byte.  Otherwise, it
+   codes the length of the matched string and its distance backwards from
+   the current position.  There is a single Huffman code that codes both
+   single bytes (called "literals") and match lengths.  A second Huffman
+   code codes the distance information, which follows a length code.  Each
+   length or distance code actually represents a base value and a number
+   of "extra" (sometimes zero) bits to add to the base value.  At
+   the end of each deflated block is a special end-of-block (EOB) literal/
+   length code.  The decoding process is basically: get a literal/length
+   code; if EOB then done; if a literal, emit the decoded byte; if a
+   length then get the distance and emit the referred-to bytes from the
+   sliding window of previously emitted data.
+
+   There are (currently) three kinds of inflate blocks: stored, fixed, and
+   dynamic.  The compressor deals with some chunk of data at a time, and
+   decides which method to use on a chunk-by-chunk basis.  A chunk might
+   typically be 32 K or 64 K.  If the chunk is incompressible, then the
+   "stored" method is used.  In this case, the bytes are simply stored as
+   is, eight bits per byte, with none of the above coding.  The bytes are
+   preceded by a count, since there is no longer an EOB code.
+
+   If the data is compressible, then either the fixed or dynamic methods
+   are used.  In the dynamic method, the compressed data is preceded by
+   an encoding of the literal/length and distance Huffman codes that are
+   to be used to decode this block.  The representation is itself Huffman
+   coded, and so is preceded by a description of that code.  These code
+   descriptions take up a little space, and so for small blocks, there is
+   a predefined set of codes, called the fixed codes.  The fixed method is
+   used if the block codes up smaller that way (usually for quite small
+   chunks), otherwise the dynamic method is used.  In the latter case, the
+   codes are customized to the probabilities in the current block, and so
+   can code it much better than the pre-determined fixed codes.
+ 
+   The Huffman codes themselves are decoded using a multi-level table
+   lookup, in order to maximize the speed of decoding plus the speed of
+   building the decoding tables.  See the comments below that precede the
+   lbits and dbits tuning parameters.
+ */
+
+
+/*
+   Notes beyond the 1.93a appnote.txt:
+
+   1. Distance pointers never point before the beginning of the output
+      stream.
+   2. Distance pointers can point back across blocks, up to 32k away.
+   3. There is an implied maximum of 7 bits for the bit length table and
+      15 bits for the actual data.
+   4. If only one code exists, then it is encoded using one bit.  (Zero
+      would be more efficient, but perhaps a little confusing.)  If two
+      codes exist, they are coded using one bit each (0 and 1).
+   5. There is no way of sending zero distance codes--a dummy must be
+      sent if there are none.  (History: a pre 2.0 version of PKZIP would
+      store blocks with no distance codes, but this was discovered to be
+      too harsh a criterion.)  Valid only for 1.93a.  2.04c does allow
+      zero distance codes, which is sent as one code of zero bits in
+      length.
+   6. There are up to 286 literal/length codes.  Code 256 represents the
+      end-of-block.  Note however that the static length tree defines
+      288 codes just to fill out the Huffman codes.  Codes 286 and 287
+      cannot be used though, since there is no length base or extra bits
+      defined for them.  Similarly, there are up to 30 distance codes.
+      However, static trees define 32 codes (all 5 bits) to fill out the
+      Huffman codes, but the last two had better not show up in the data.
+   7. Unzip can check dynamic Huffman blocks for complete code sets.
+      The exception is that a single code would not be complete (see #4).
+   8. The five bits following the block type is really the number of
+      literal codes sent minus 257.
+   9. Length codes 8,16,16 are interpreted as 13 length codes of 8 bits
+      (1+6+6).  Therefore, to output three times the length, you output
+      three codes (1+1+1), whereas to output four times the same length,
+      you only need two codes (1+3).  Hmm.
+  10. In the tree reconstruction algorithm, Code = Code + Increment
+      only if BitLength(i) is not zero.  (Pretty obvious.)
+  11. Correction: 4 Bits: # of Bit Length codes - 4     (4 - 19)
+  12. Note: length code 284 can represent 227-258, but length code 285
+      really is 258.  The last length deserves its own, short code
+      since it gets used a lot in very redundant files.  The length
+      258 is special since 258 - 3 (the min match length) is 255.
+  13. The literal/length and distance code bit lengths are read as a
+      single stream of lengths.  It is possible (and advantageous) for
+      a repeat code (16, 17, or 18) to go across the boundary between
+      the two sets of lengths.
+ */
+#include <linux/compiler.h>
+
+#ifdef RCSID
+static char rcsid[] = "#Id: inflate.c,v 0.14 1993/06/10 13:27:04 jloup Exp #";
+#endif
+
+#ifndef STATIC
+
+#if defined(STDC_HEADERS) || defined(HAVE_STDLIB_H)
+#  include <sys/types.h>
+#  include <stdlib.h>
+#endif
+
+#include "gzip.h"
+#define STATIC
+#endif /* !STATIC */
+
+#ifndef INIT
+#define INIT
+#endif
+	
+#define slide window
+
+/* Huffman code lookup table entry--this entry is four bytes for machines
+   that have 16-bit pointers (e.g. PC's in the small or medium model).
+   Valid extra bits are 0..13.  e == 15 is EOB (end of block), e == 16
+   means that v is a literal, 16 < e < 32 means that v is a pointer to
+   the next table, which codes e - 16 bits, and lastly e == 99 indicates
+   an unused code.  If a code with e == 99 is looked up, this implies an
+   error in the data. */
+struct huft {
+  uch e;                /* number of extra bits or operation */
+  uch b;                /* number of bits in this code or subcode */
+  union {
+    ush n;              /* literal, length base, or distance base */
+    struct huft *t;     /* pointer to next level of table */
+  } v;
+};
+
+
+/* Function prototypes */
+STATIC int INIT huft_build OF((unsigned *, unsigned, unsigned, 
+		const ush *, const ush *, struct huft **, int *));
+STATIC int INIT huft_free OF((struct huft *));
+STATIC int INIT inflate_codes OF((struct huft *, struct huft *, int, int));
+STATIC int INIT inflate_stored OF((void));
+STATIC int INIT inflate_fixed OF((void));
+STATIC int INIT inflate_dynamic OF((void));
+STATIC int INIT inflate_block OF((int *));
+STATIC int INIT inflate OF((void));
+
+
+/* The inflate algorithm uses a sliding 32 K byte window on the uncompressed
+   stream to find repeated byte strings.  This is implemented here as a
+   circular buffer.  The index is updated simply by incrementing and then
+   ANDing with 0x7fff (32K-1). */
+/* It is left to other modules to supply the 32 K area.  It is assumed
+   to be usable as if it were declared "uch slide[32768];" or as just
+   "uch *slide;" and then malloc'ed in the latter case.  The definition
+   must be in gzip.h, included above. */
+/* unsigned wp;             current position in slide */
+#define wp outcnt
+#define flush_output(w) (wp=(w),flush_window())
+
+/* Tables for deflate from PKZIP's appnote.txt. */
+static const unsigned border[] = {    /* Order of the bit length code lengths */
+        16, 17, 18, 0, 8, 7, 9, 6, 10, 5, 11, 4, 12, 3, 13, 2, 14, 1, 15};
+static const ush cplens[] = {         /* Copy lengths for literal codes 257..285 */
+        3, 4, 5, 6, 7, 8, 9, 10, 11, 13, 15, 17, 19, 23, 27, 31,
+        35, 43, 51, 59, 67, 83, 99, 115, 131, 163, 195, 227, 258, 0, 0};
+        /* note: see note #13 above about the 258 in this list. */
+static const ush cplext[] = {         /* Extra bits for literal codes 257..285 */
+        0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 2, 2, 2, 2,
+        3, 3, 3, 3, 4, 4, 4, 4, 5, 5, 5, 5, 0, 99, 99}; /* 99==invalid */
+static const ush cpdist[] = {         /* Copy offsets for distance codes 0..29 */
+        1, 2, 3, 4, 5, 7, 9, 13, 17, 25, 33, 49, 65, 97, 129, 193,
+        257, 385, 513, 769, 1025, 1537, 2049, 3073, 4097, 6145,
+        8193, 12289, 16385, 24577};
+static const ush cpdext[] = {         /* Extra bits for distance codes */
+        0, 0, 0, 0, 1, 1, 2, 2, 3, 3, 4, 4, 5, 5, 6, 6,
+        7, 7, 8, 8, 9, 9, 10, 10, 11, 11,
+        12, 12, 13, 13};
+
+
+
+/* Macros for inflate() bit peeking and grabbing.
+   The usage is:
+   
+        NEEDBITS(j)
+        x = b & mask_bits[j];
+        DUMPBITS(j)
+
+   where NEEDBITS makes sure that b has at least j bits in it, and
+   DUMPBITS removes the bits from b.  The macros use the variable k
+   for the number of bits in b.  Normally, b and k are register
+   variables for speed, and are initialized at the beginning of a
+   routine that uses these macros from a global bit buffer and count.
+
+   If we assume that EOB will be the longest code, then we will never
+   ask for bits with NEEDBITS that are beyond the end of the stream.
+   So, NEEDBITS should not read any more bytes than are needed to
+   meet the request.  Then no bytes need to be "returned" to the buffer
+   at the end of the last block.
+
+   However, this assumption is not true for fixed blocks--the EOB code
+   is 7 bits, but the other literal/length codes can be 8 or 9 bits.
+   (The EOB code is shorter than other codes because fixed blocks are
+   generally short.  So, while a block always has an EOB, many other
+   literal/length codes have a significantly lower probability of
+   showing up at all.)  However, by making the first table have a
+   lookup of seven bits, the EOB code will be found in that first
+   lookup, and so will not require that too many bits be pulled from
+   the stream.
+ */
+
+STATIC ulg bb;                         /* bit buffer */
+STATIC unsigned bk;                    /* bits in bit buffer */
+
+STATIC const ush mask_bits[] = {
+    0x0000,
+    0x0001, 0x0003, 0x0007, 0x000f, 0x001f, 0x003f, 0x007f, 0x00ff,
+    0x01ff, 0x03ff, 0x07ff, 0x0fff, 0x1fff, 0x3fff, 0x7fff, 0xffff
+};
+
+#define NEXTBYTE()  ({ int v = get_byte(); if (v < 0) goto underrun; (uch)v; })
+#define NEEDBITS(n) {while(k<(n)){b|=((ulg)NEXTBYTE())<<k;k+=8;}}
+#define DUMPBITS(n) {b>>=(n);k-=(n);}
+
+
+/*
+   Huffman code decoding is performed using a multi-level table lookup.
+   The fastest way to decode is to simply build a lookup table whose
+   size is determined by the longest code.  However, the time it takes
+   to build this table can also be a factor if the data being decoded
+   is not very long.  The most common codes are necessarily the
+   shortest codes, so those codes dominate the decoding time, and hence
+   the speed.  The idea is you can have a shorter table that decodes the
+   shorter, more probable codes, and then point to subsidiary tables for
+   the longer codes.  The time it costs to decode the longer codes is
+   then traded against the time it takes to make longer tables.
+
+   The results of this tradeoff are in the variables lbits and dbits
+   below.  lbits is the number of bits the first level table for literal/
+   length codes can decode in one step, and dbits is the same thing for
+   the distance codes.  Subsequent tables are also less than or equal to
+   those sizes.  These values may be adjusted either when all of the
+   codes are shorter than that, in which case the longest code length in
+   bits is used, or when the shortest code is *longer* than the requested
+   table size, in which case the length of the shortest code in bits is
+   used.
+
+   There are two different values for the two tables, since they code a
+   different number of possibilities each.  The literal/length table
+   codes 286 possible values, or in a flat code, a little over eight
+   bits.  The distance table codes 30 possible values, or a little less
+   than five bits, flat.  The optimum values for speed end up being
+   about one bit more than those, so lbits is 8+1 and dbits is 5+1.
+   The optimum values may differ though from machine to machine, and
+   possibly even between compilers.  Your mileage may vary.
+ */
+
+
+STATIC const int lbits = 9;          /* bits in base literal/length lookup table */
+STATIC const int dbits = 6;          /* bits in base distance lookup table */
+
+
+/* If BMAX needs to be larger than 16, then h and x[] should be ulg. */
+#define BMAX 16         /* maximum bit length of any code (16 for explode) */
+#define N_MAX 288       /* maximum number of codes in any set */
+
+
+STATIC unsigned hufts;         /* track memory usage */
+
+
+STATIC int INIT huft_build(
+	unsigned *b,            /* code lengths in bits (all assumed <= BMAX) */
+	unsigned n,             /* number of codes (assumed <= N_MAX) */
+	unsigned s,             /* number of simple-valued codes (0..s-1) */
+	const ush *d,           /* list of base values for non-simple codes */
+	const ush *e,           /* list of extra bits for non-simple codes */
+	struct huft **t,        /* result: starting table */
+	int *m                  /* maximum lookup bits, returns actual */
+	)
+/* Given a list of code lengths and a maximum table size, make a set of
+   tables to decode that set of codes.  Return zero on success, one if
+   the given code set is incomplete (the tables are still built in this
+   case), two if the input is invalid (all zero length codes or an
+   oversubscribed set of lengths), and three if not enough memory. */
+{
+  unsigned a;                   /* counter for codes of length k */
+  unsigned c[BMAX+1];           /* bit length count table */
+  unsigned f;                   /* i repeats in table every f entries */
+  int g;                        /* maximum code length */
+  int h;                        /* table level */
+  register unsigned i;          /* counter, current code */
+  register unsigned j;          /* counter */
+  register int k;               /* number of bits in current code */
+  int l;                        /* bits per table (returned in m) */
+  register unsigned *p;         /* pointer into c[], b[], or v[] */
+  register struct huft *q;      /* points to current table */
+  struct huft r;                /* table entry for structure assignment */
+  struct huft *u[BMAX];         /* table stack */
+  unsigned v[N_MAX];            /* values in order of bit length */
+  register int w;               /* bits before this table == (l * h) */
+  unsigned x[BMAX+1];           /* bit offsets, then code stack */
+  unsigned *xp;                 /* pointer into x */
+  int y;                        /* number of dummy codes added */
+  unsigned z;                   /* number of entries in current table */
+
+DEBG("huft1 ");
+
+  /* Generate counts for each bit length */
+  memzero(c, sizeof(c));
+  p = b;  i = n;
+  do {
+    Tracecv(*p, (stderr, (n-i >= ' ' && n-i <= '~' ? "%c %d\n" : "0x%x %d\n"), 
+	    n-i, *p));
+    c[*p]++;                    /* assume all entries <= BMAX */
+    p++;                      /* Can't combine with above line (Solaris bug) */
+  } while (--i);
+  if (c[0] == n)                /* null input--all zero length codes */
+  {
+    *t = (struct huft *)NULL;
+    *m = 0;
+    return 0;
+  }
+
+DEBG("huft2 ");
+
+  /* Find minimum and maximum length, bound *m by those */
+  l = *m;
+  for (j = 1; j <= BMAX; j++)
+    if (c[j])
+      break;
+  k = j;                        /* minimum code length */
+  if ((unsigned)l < j)
+    l = j;
+  for (i = BMAX; i; i--)
+    if (c[i])
+      break;
+  g = i;                        /* maximum code length */
+  if ((unsigned)l > i)
+    l = i;
+  *m = l;
+
+DEBG("huft3 ");
+
+  /* Adjust last length count to fill out codes, if needed */
+  for (y = 1 << j; j < i; j++, y <<= 1)
+    if ((y -= c[j]) < 0)
+      return 2;                 /* bad input: more codes than bits */
+  if ((y -= c[i]) < 0)
+    return 2;
+  c[i] += y;
+
+DEBG("huft4 ");
+
+  /* Generate starting offsets into the value table for each length */
+  x[1] = j = 0;
+  p = c + 1;  xp = x + 2;
+  while (--i) {                 /* note that i == g from above */
+    *xp++ = (j += *p++);
+  }
+
+DEBG("huft5 ");
+
+  /* Make a table of values in order of bit lengths */
+  p = b;  i = 0;
+  do {
+    if ((j = *p++) != 0)
+      v[x[j]++] = i;
+  } while (++i < n);
+
+DEBG("h6 ");
+
+  /* Generate the Huffman codes and for each, make the table entries */
+  x[0] = i = 0;                 /* first Huffman code is zero */
+  p = v;                        /* grab values in bit order */
+  h = -1;                       /* no tables yet--level -1 */
+  w = -l;                       /* bits decoded == (l * h) */
+  u[0] = (struct huft *)NULL;   /* just to keep compilers happy */
+  q = (struct huft *)NULL;      /* ditto */
+  z = 0;                        /* ditto */
+DEBG("h6a ");
+
+  /* go through the bit lengths (k already is bits in shortest code) */
+  for (; k <= g; k++)
+  {
+DEBG("h6b ");
+    a = c[k];
+    while (a--)
+    {
+DEBG("h6b1 ");
+      /* here i is the Huffman code of length k bits for value *p */
+      /* make tables up to required level */
+      while (k > w + l)
+      {
+DEBG1("1 ");
+        h++;
+        w += l;                 /* previous table always l bits */
+
+        /* compute minimum size table less than or equal to l bits */
+        z = (z = g - w) > (unsigned)l ? l : z;  /* upper limit on table size */
+        if ((f = 1 << (j = k - w)) > a + 1)     /* try a k-w bit table */
+        {                       /* too few codes for k-w bit table */
+DEBG1("2 ");
+          f -= a + 1;           /* deduct codes from patterns left */
+          xp = c + k;
+          while (++j < z)       /* try smaller tables up to z bits */
+          {
+            if ((f <<= 1) <= *++xp)
+              break;            /* enough codes to use up j bits */
+            f -= *xp;           /* else deduct codes from patterns */
+          }
+        }
+DEBG1("3 ");
+        z = 1 << j;             /* table entries for j-bit table */
+
+        /* allocate and link in new table */
+        if ((q = (struct huft *)malloc((z + 1)*sizeof(struct huft))) ==
+            (struct huft *)NULL)
+        {
+          if (h)
+            huft_free(u[0]);
+          return 3;             /* not enough memory */
+        }
+DEBG1("4 ");
+        hufts += z + 1;         /* track memory usage */
+        *t = q + 1;             /* link to list for huft_free() */
+        *(t = &(q->v.t)) = (struct huft *)NULL;
+        u[h] = ++q;             /* table starts after link */
+
+DEBG1("5 ");
+        /* connect to last table, if there is one */
+        if (h)
+        {
+          x[h] = i;             /* save pattern for backing up */
+          r.b = (uch)l;         /* bits to dump before this table */
+          r.e = (uch)(16 + j);  /* bits in this table */
+          r.v.t = q;            /* pointer to this table */
+          j = i >> (w - l);     /* (get around Turbo C bug) */
+          u[h-1][j] = r;        /* connect to last table */
+        }
+DEBG1("6 ");
+      }
+DEBG("h6c ");
+
+      /* set up table entry in r */
+      r.b = (uch)(k - w);
+      if (p >= v + n)
+        r.e = 99;               /* out of values--invalid code */
+      else if (*p < s)
+      {
+        r.e = (uch)(*p < 256 ? 16 : 15);    /* 256 is end-of-block code */
+        r.v.n = (ush)(*p);             /* simple code is just the value */
+	p++;                           /* one compiler does not like *p++ */
+      }
+      else
+      {
+        r.e = (uch)e[*p - s];   /* non-simple--look up in lists */
+        r.v.n = d[*p++ - s];
+      }
+DEBG("h6d ");
+
+      /* fill code-like entries with r */
+      f = 1 << (k - w);
+      for (j = i >> w; j < z; j += f)
+        q[j] = r;
+
+      /* backwards increment the k-bit code i */
+      for (j = 1 << (k - 1); i & j; j >>= 1)
+        i ^= j;
+      i ^= j;
+
+      /* backup over finished tables */
+      while ((i & ((1 << w) - 1)) != x[h])
+      {
+        h--;                    /* don't need to update q */
+        w -= l;
+      }
+DEBG("h6e ");
+    }
+DEBG("h6f ");
+  }
+
+DEBG("huft7 ");
+
+  /* Return true (1) if we were given an incomplete table */
+  return y != 0 && g != 1;
+}
+
+
+
+STATIC int INIT huft_free(
+	struct huft *t         /* table to free */
+	)
+/* Free the malloc'ed tables built by huft_build(), which makes a linked
+   list of the tables it made, with the links in a dummy first entry of
+   each table. */
+{
+  register struct huft *p, *q;
+
+
+  /* Go through linked list, freeing from the malloced (t[-1]) address. */
+  p = t;
+  while (p != (struct huft *)NULL)
+  {
+    q = (--p)->v.t;
+    free((char*)p);
+    p = q;
+  } 
+  return 0;
+}
+
+
+STATIC int INIT inflate_codes(
+	struct huft *tl,    /* literal/length decoder tables */
+	struct huft *td,    /* distance decoder tables */
+	int bl,             /* number of bits decoded by tl[] */
+	int bd              /* number of bits decoded by td[] */
+	)
+/* inflate (decompress) the codes in a deflated (compressed) block.
+   Return an error code or zero if it all goes ok. */
+{
+  register unsigned e;  /* table entry flag/number of extra bits */
+  unsigned n, d;        /* length and index for copy */
+  unsigned w;           /* current window position */
+  struct huft *t;       /* pointer to table entry */
+  unsigned ml, md;      /* masks for bl and bd bits */
+  register ulg b;       /* bit buffer */
+  register unsigned k;  /* number of bits in bit buffer */
+
+
+  /* make local copies of globals */
+  b = bb;                       /* initialize bit buffer */
+  k = bk;
+  w = wp;                       /* initialize window position */
+
+  /* inflate the coded data */
+  ml = mask_bits[bl];           /* precompute masks for speed */
+  md = mask_bits[bd];
+  for (;;)                      /* do until end of block */
+  {
+    NEEDBITS((unsigned)bl)
+    if ((e = (t = tl + ((unsigned)b & ml))->e) > 16)
+      do {
+        if (e == 99)
+          return 1;
+        DUMPBITS(t->b)
+        e -= 16;
+        NEEDBITS(e)
+      } while ((e = (t = t->v.t + ((unsigned)b & mask_bits[e]))->e) > 16);
+    DUMPBITS(t->b)
+    if (e == 16)                /* then it's a literal */
+    {
+      slide[w++] = (uch)t->v.n;
+      Tracevv((stderr, "%c", slide[w-1]));
+      if (w == WSIZE)
+      {
+        flush_output(w);
+        w = 0;
+      }
+    }
+    else                        /* it's an EOB or a length */
+    {
+      /* exit if end of block */
+      if (e == 15)
+        break;
+
+      /* get length of block to copy */
+      NEEDBITS(e)
+      n = t->v.n + ((unsigned)b & mask_bits[e]);
+      DUMPBITS(e);
+
+      /* decode distance of block to copy */
+      NEEDBITS((unsigned)bd)
+      if ((e = (t = td + ((unsigned)b & md))->e) > 16)
+        do {
+          if (e == 99)
+            return 1;
+          DUMPBITS(t->b)
+          e -= 16;
+          NEEDBITS(e)
+        } while ((e = (t = t->v.t + ((unsigned)b & mask_bits[e]))->e) > 16);
+      DUMPBITS(t->b)
+      NEEDBITS(e)
+      d = w - t->v.n - ((unsigned)b & mask_bits[e]);
+      DUMPBITS(e)
+      Tracevv((stderr,"\\[%d,%d]", w-d, n));
+
+      /* do the copy */
+      do {
+        n -= (e = (e = WSIZE - ((d &= WSIZE-1) > w ? d : w)) > n ? n : e);
+#if !defined(NOMEMCPY) && !defined(DEBUG)
+        if (w - d >= e)         /* (this test assumes unsigned comparison) */
+        {
+          memcpy(slide + w, slide + d, e);
+          w += e;
+          d += e;
+        }
+        else                      /* do it slow to avoid memcpy() overlap */
+#endif /* !NOMEMCPY */
+          do {
+            slide[w++] = slide[d++];
+	    Tracevv((stderr, "%c", slide[w-1]));
+          } while (--e);
+        if (w == WSIZE)
+        {
+          flush_output(w);
+          w = 0;
+        }
+      } while (n);
+    }
+  }
+
+
+  /* restore the globals from the locals */
+  wp = w;                       /* restore global window pointer */
+  bb = b;                       /* restore global bit buffer */
+  bk = k;
+
+  /* done */
+  return 0;
+
+ underrun:
+  return 4;			/* Input underrun */
+}
+
+
+
+STATIC int INIT inflate_stored(void)
+/* "decompress" an inflated type 0 (stored) block. */
+{
+  unsigned n;           /* number of bytes in block */
+  unsigned w;           /* current window position */
+  register ulg b;       /* bit buffer */
+  register unsigned k;  /* number of bits in bit buffer */
+
+DEBG("<stor");
+
+  /* make local copies of globals */
+  b = bb;                       /* initialize bit buffer */
+  k = bk;
+  w = wp;                       /* initialize window position */
+
+
+  /* go to byte boundary */
+  n = k & 7;
+  DUMPBITS(n);
+
+
+  /* get the length and its complement */
+  NEEDBITS(16)
+  n = ((unsigned)b & 0xffff);
+  DUMPBITS(16)
+  NEEDBITS(16)
+  if (n != (unsigned)((~b) & 0xffff))
+    return 1;                   /* error in compressed data */
+  DUMPBITS(16)
+
+
+  /* read and output the compressed data */
+  while (n--)
+  {
+    NEEDBITS(8)
+    slide[w++] = (uch)b;
+    if (w == WSIZE)
+    {
+      flush_output(w);
+      w = 0;
+    }
+    DUMPBITS(8)
+  }
+
+
+  /* restore the globals from the locals */
+  wp = w;                       /* restore global window pointer */
+  bb = b;                       /* restore global bit buffer */
+  bk = k;
+
+  DEBG(">");
+  return 0;
+
+ underrun:
+  return 4;			/* Input underrun */
+}
+
+
+/*
+ * We use `noinline' here to prevent gcc-3.5 from using too much stack space
+ */
+STATIC int noinline INIT inflate_fixed(void)
+/* decompress an inflated type 1 (fixed Huffman codes) block.  We should
+   either replace this with a custom decoder, or at least precompute the
+   Huffman tables. */
+{
+  int i;                /* temporary variable */
+  struct huft *tl;      /* literal/length code table */
+  struct huft *td;      /* distance code table */
+  int bl;               /* lookup bits for tl */
+  int bd;               /* lookup bits for td */
+  unsigned l[288];      /* length list for huft_build */
+
+DEBG("<fix");
+
+  /* set up literal table */
+  for (i = 0; i < 144; i++)
+    l[i] = 8;
+  for (; i < 256; i++)
+    l[i] = 9;
+  for (; i < 280; i++)
+    l[i] = 7;
+  for (; i < 288; i++)          /* make a complete, but wrong code set */
+    l[i] = 8;
+  bl = 7;
+  if ((i = huft_build(l, 288, 257, cplens, cplext, &tl, &bl)) != 0)
+    return i;
+
+
+  /* set up distance table */
+  for (i = 0; i < 30; i++)      /* make an incomplete code set */
+    l[i] = 5;
+  bd = 5;
+  if ((i = huft_build(l, 30, 0, cpdist, cpdext, &td, &bd)) > 1)
+  {
+    huft_free(tl);
+
+    DEBG(">");
+    return i;
+  }
+
+
+  /* decompress until an end-of-block code */
+  if (inflate_codes(tl, td, bl, bd))
+    return 1;
+
+
+  /* free the decoding tables, return */
+  huft_free(tl);
+  huft_free(td);
+  return 0;
+}
+
+
+/*
+ * We use `noinline' here to prevent gcc-3.5 from using too much stack space
+ */
+STATIC int noinline INIT inflate_dynamic(void)
+/* decompress an inflated type 2 (dynamic Huffman codes) block. */
+{
+  int i;                /* temporary variables */
+  unsigned j;
+  unsigned l;           /* last length */
+  unsigned m;           /* mask for bit lengths table */
+  unsigned n;           /* number of lengths to get */
+  struct huft *tl;      /* literal/length code table */
+  struct huft *td;      /* distance code table */
+  int bl;               /* lookup bits for tl */
+  int bd;               /* lookup bits for td */
+  unsigned nb;          /* number of bit length codes */
+  unsigned nl;          /* number of literal/length codes */
+  unsigned nd;          /* number of distance codes */
+#ifdef PKZIP_BUG_WORKAROUND
+  unsigned ll[288+32];  /* literal/length and distance code lengths */
+#else
+  unsigned ll[286+30];  /* literal/length and distance code lengths */
+#endif
+  register ulg b;       /* bit buffer */
+  register unsigned k;  /* number of bits in bit buffer */
+
+DEBG("<dyn");
+
+  /* make local bit buffer */
+  b = bb;
+  k = bk;
+
+
+  /* read in table lengths */
+  NEEDBITS(5)
+  nl = 257 + ((unsigned)b & 0x1f);      /* number of literal/length codes */
+  DUMPBITS(5)
+  NEEDBITS(5)
+  nd = 1 + ((unsigned)b & 0x1f);        /* number of distance codes */
+  DUMPBITS(5)
+  NEEDBITS(4)
+  nb = 4 + ((unsigned)b & 0xf);         /* number of bit length codes */
+  DUMPBITS(4)
+#ifdef PKZIP_BUG_WORKAROUND
+  if (nl > 288 || nd > 32)
+#else
+  if (nl > 286 || nd > 30)
+#endif
+    return 1;                   /* bad lengths */
+
+DEBG("dyn1 ");
+
+  /* read in bit-length-code lengths */
+  for (j = 0; j < nb; j++)
+  {
+    NEEDBITS(3)
+    ll[border[j]] = (unsigned)b & 7;
+    DUMPBITS(3)
+  }
+  for (; j < 19; j++)
+    ll[border[j]] = 0;
+
+DEBG("dyn2 ");
+
+  /* build decoding table for trees--single level, 7 bit lookup */
+  bl = 7;
+  if ((i = huft_build(ll, 19, 19, NULL, NULL, &tl, &bl)) != 0)
+  {
+    if (i == 1)
+      huft_free(tl);
+    return i;                   /* incomplete code set */
+  }
+
+DEBG("dyn3 ");
+
+  /* read in literal and distance code lengths */
+  n = nl + nd;
+  m = mask_bits[bl];
+  i = l = 0;
+  while ((unsigned)i < n)
+  {
+    NEEDBITS((unsigned)bl)
+    j = (td = tl + ((unsigned)b & m))->b;
+    DUMPBITS(j)
+    j = td->v.n;
+    if (j < 16)                 /* length of code in bits (0..15) */
+      ll[i++] = l = j;          /* save last length in l */
+    else if (j == 16)           /* repeat last length 3 to 6 times */
+    {
+      NEEDBITS(2)
+      j = 3 + ((unsigned)b & 3);
+      DUMPBITS(2)
+      if ((unsigned)i + j > n)
+        return 1;
+      while (j--)
+        ll[i++] = l;
+    }
+    else if (j == 17)           /* 3 to 10 zero length codes */
+    {
+      NEEDBITS(3)
+      j = 3 + ((unsigned)b & 7);
+      DUMPBITS(3)
+      if ((unsigned)i + j > n)
+        return 1;
+      while (j--)
+        ll[i++] = 0;
+      l = 0;
+    }
+    else                        /* j == 18: 11 to 138 zero length codes */
+    {
+      NEEDBITS(7)
+      j = 11 + ((unsigned)b & 0x7f);
+      DUMPBITS(7)
+      if ((unsigned)i + j > n)
+        return 1;
+      while (j--)
+        ll[i++] = 0;
+      l = 0;
+    }
+  }
+
+DEBG("dyn4 ");
+
+  /* free decoding table for trees */
+  huft_free(tl);
+
+DEBG("dyn5 ");
+
+  /* restore the global bit buffer */
+  bb = b;
+  bk = k;
+
+DEBG("dyn5a ");
+
+  /* build the decoding tables for literal/length and distance codes */
+  bl = lbits;
+  if ((i = huft_build(ll, nl, 257, cplens, cplext, &tl, &bl)) != 0)
+  {
+DEBG("dyn5b ");
+    if (i == 1) {
+      error("incomplete literal tree");
+      huft_free(tl);
+    }
+    return i;                   /* incomplete code set */
+  }
+DEBG("dyn5c ");
+  bd = dbits;
+  if ((i = huft_build(ll + nl, nd, 0, cpdist, cpdext, &td, &bd)) != 0)
+  {
+DEBG("dyn5d ");
+    if (i == 1) {
+      error("incomplete distance tree");
+#ifdef PKZIP_BUG_WORKAROUND
+      i = 0;
+    }
+#else
+      huft_free(td);
+    }
+    huft_free(tl);
+    return i;                   /* incomplete code set */
+#endif
+  }
+
+DEBG("dyn6 ");
+
+  /* decompress until an end-of-block code */
+  if (inflate_codes(tl, td, bl, bd))
+    return 1;
+
+DEBG("dyn7 ");
+
+  /* free the decoding tables, return */
+  huft_free(tl);
+  huft_free(td);
+
+  DEBG(">");
+  return 0;
+
+ underrun:
+  return 4;			/* Input underrun */
+}
+
+
+
+STATIC int INIT inflate_block(
+	int *e                  /* last block flag */
+	)
+/* decompress an inflated block */
+{
+  unsigned t;           /* block type */
+  register ulg b;       /* bit buffer */
+  register unsigned k;  /* number of bits in bit buffer */
+
+  DEBG("<blk");
+
+  /* make local bit buffer */
+  b = bb;
+  k = bk;
+
+
+  /* read in last block bit */
+  NEEDBITS(1)
+  *e = (int)b & 1;
+  DUMPBITS(1)
+
+
+  /* read in block type */
+  NEEDBITS(2)
+  t = (unsigned)b & 3;
+  DUMPBITS(2)
+
+
+  /* restore the global bit buffer */
+  bb = b;
+  bk = k;
+
+  /* inflate that block type */
+  if (t == 2)
+    return inflate_dynamic();
+  if (t == 0)
+    return inflate_stored();
+  if (t == 1)
+    return inflate_fixed();
+
+  DEBG(">");
+
+  /* bad block type */
+  return 2;
+
+ underrun:
+  return 4;			/* Input underrun */
+}
+
+
+
+STATIC int INIT inflate(void)
+/* decompress an inflated entry */
+{
+  int e;                /* last block flag */
+  int r;                /* result code */
+  unsigned h;           /* maximum struct huft's malloc'ed */
+  void *ptr;
+
+  /* initialize window, bit buffer */
+  wp = 0;
+  bk = 0;
+  bb = 0;
+
+
+  /* decompress until the last block */
+  h = 0;
+  do {
+    hufts = 0;
+    gzip_mark(&ptr);
+    if ((r = inflate_block(&e)) != 0) {
+      gzip_release(&ptr);	    
+      return r;
+    }
+    gzip_release(&ptr);
+    if (hufts > h)
+      h = hufts;
+  } while (!e);
+
+  /* Undo too much lookahead. The next read will be byte aligned so we
+   * can discard unused bits in the last meaningful byte.
+   */
+  while (bk >= 8) {
+    bk -= 8;
+    inptr--;
+  }
+
+  /* flush out slide */
+  flush_output(wp);
+
+
+  /* return success */
+#ifdef DEBUG
+  fprintf(stderr, "<%u> ", h);
+#endif /* DEBUG */
+  return 0;
+}
+
+/**********************************************************************
+ *
+ * The following are support routines for inflate.c
+ *
+ **********************************************************************/
+
+static ulg crc_32_tab[256];
+static ulg crc;		/* initialized in makecrc() so it'll reside in bss */
+#define CRC_VALUE (crc ^ 0xffffffffUL)
+
+/*
+ * Code to compute the CRC-32 table. Borrowed from 
+ * gzip-1.0.3/makecrc.c.
+ */
+
+static void INIT
+makecrc(void)
+{
+/* Not copyrighted 1990 Mark Adler	*/
+
+  unsigned long c;      /* crc shift register */
+  unsigned long e;      /* polynomial exclusive-or pattern */
+  int i;                /* counter for all possible eight bit values */
+  int k;                /* byte being shifted into crc apparatus */
+
+  /* terms of polynomial defining this crc (except x^32): */
+  static const int p[] = {0,1,2,4,5,7,8,10,11,12,16,22,23,26};
+
+  /* Make exclusive-or pattern from polynomial */
+  e = 0;
+  for (i = 0; i < sizeof(p)/sizeof(int); i++)
+    e |= 1L << (31 - p[i]);
+
+  crc_32_tab[0] = 0;
+
+  for (i = 1; i < 256; i++)
+  {
+    c = 0;
+    for (k = i | 256; k != 1; k >>= 1)
+    {
+      c = c & 1 ? (c >> 1) ^ e : c >> 1;
+      if (k & 1)
+        c ^= e;
+    }
+    crc_32_tab[i] = c;
+  }
+
+  /* this is initialized here so this code could reside in ROM */
+  crc = (ulg)0xffffffffUL; /* shift register contents */
+}
+
+/* gzip flag byte */
+#define ASCII_FLAG   0x01 /* bit 0 set: file probably ASCII text */
+#define CONTINUATION 0x02 /* bit 1 set: continuation of multi-part gzip file */
+#define EXTRA_FIELD  0x04 /* bit 2 set: extra field present */
+#define ORIG_NAME    0x08 /* bit 3 set: original file name present */
+#define COMMENT      0x10 /* bit 4 set: file comment present */
+#define ENCRYPTED    0x20 /* bit 5 set: file is encrypted */
+#define RESERVED     0xC0 /* bit 6,7:   reserved */
+
+/*
+ * Do the uncompression!
+ */
+static int INIT gunzip(void)
+{
+    uch flags;
+    unsigned char magic[2]; /* magic header */
+    char method;
+    ulg orig_crc = 0;       /* original crc */
+    ulg orig_len = 0;       /* original uncompressed length */
+    int res;
+
+    magic[0] = NEXTBYTE();
+    magic[1] = NEXTBYTE();
+    method   = NEXTBYTE();
+
+    if (magic[0] != 037 ||
+	((magic[1] != 0213) && (magic[1] != 0236))) {
+	    error("bad gzip magic numbers");
+	    return -1;
+    }
+
+    /* We only support method #8, DEFLATED */
+    if (method != 8)  {
+	    error("internal error, invalid method");
+	    return -1;
+    }
+
+    flags  = (uch)get_byte();
+    if ((flags & ENCRYPTED) != 0) {
+	    error("Input is encrypted");
+	    return -1;
+    }
+    if ((flags & CONTINUATION) != 0) {
+	    error("Multi part input");
+	    return -1;
+    }
+    if ((flags & RESERVED) != 0) {
+	    error("Input has invalid flags");
+	    return -1;
+    }
+    NEXTBYTE();	/* Get timestamp */
+    NEXTBYTE();
+    NEXTBYTE();
+    NEXTBYTE();
+
+    (void)NEXTBYTE();  /* Ignore extra flags for the moment */
+    (void)NEXTBYTE();  /* Ignore OS type for the moment */
+
+    if ((flags & EXTRA_FIELD) != 0) {
+	    unsigned len = (unsigned)NEXTBYTE();
+	    len |= ((unsigned)NEXTBYTE())<<8;
+	    while (len--) (void)NEXTBYTE();
+    }
+
+    /* Get original file name if it was truncated */
+    if ((flags & ORIG_NAME) != 0) {
+	    /* Discard the old name */
+	    while (NEXTBYTE() != 0) /* null */ ;
+    } 
+
+    /* Discard file comment if any */
+    if ((flags & COMMENT) != 0) {
+	    while (NEXTBYTE() != 0) /* null */ ;
+    }
+
+    /* Decompress */
+    if ((res = inflate())) {
+	    switch (res) {
+	    case 0:
+		    break;
+	    case 1:
+		    error("invalid compressed format (err=1)");
+		    break;
+	    case 2:
+		    error("invalid compressed format (err=2)");
+		    break;
+	    case 3:
+		    error("out of memory");
+		    break;
+	    case 4:
+		    error("out of input data");
+		    break;
+	    default:
+		    error("invalid compressed format (other)");
+	    }
+	    return -1;
+    }
+	    
+    /* Get the crc and original length */
+    /* crc32  (see algorithm.doc)
+     * uncompressed input size modulo 2^32
+     */
+    orig_crc = (ulg) NEXTBYTE();
+    orig_crc |= (ulg) NEXTBYTE() << 8;
+    orig_crc |= (ulg) NEXTBYTE() << 16;
+    orig_crc |= (ulg) NEXTBYTE() << 24;
+    
+    orig_len = (ulg) NEXTBYTE();
+    orig_len |= (ulg) NEXTBYTE() << 8;
+    orig_len |= (ulg) NEXTBYTE() << 16;
+    orig_len |= (ulg) NEXTBYTE() << 24;
+    
+    /* Validate decompression */
+    if (orig_crc != CRC_VALUE) {
+	    error("crc error");
+	    return -1;
+    }
+    if (orig_len != bytes_out) {
+	    error("length error");
+	    return -1;
+    }
+    return 0;
+
+ underrun:			/* NEXTBYTE() jumps here on truncated input */
+    error("out of input data");
+    return -1;
+}
+
+
diff --git a/lib/int_sqrt.c b/lib/int_sqrt.c
new file mode 100644
index 0000000..a5d2cdc
--- /dev/null
+++ b/lib/int_sqrt.c
@@ -0,0 +1,32 @@
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+
+/**
+ * int_sqrt - integer square root
+ * @x: integer of which to calculate the sqrt
+ *
+ * Computes the integer part of the square root of @x using the
+ * classic digit-by-digit method (exact for values that fit in
+ * 32 bits, since the starting bit is 1 << 30).
+ */
+unsigned long int_sqrt(unsigned long x)
+{
+	unsigned long op, res, one;
+
+	op = x;
+	res = 0;
+
+	one = 1 << 30;
+	while (one > op)
+		one >>= 2;
+
+	while (one != 0) {
+		if (op >= res + one) {
+			op = op - (res + one);
+			res = res +  2 * one;
+		}
+		res /= 2;
+		one /= 4;
+	}
+	return res;
+}
+EXPORT_SYMBOL(int_sqrt);
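+
+/*
+ * For example (illustrative values, not from any specific caller):
+ * int_sqrt(0) = 0, int_sqrt(15) = 3, int_sqrt(16) = 4 and
+ * int_sqrt(1000000) = 1000, i.e. the result is the largest integer
+ * whose square does not exceed the argument (for 32-bit inputs).
+ */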
diff --git a/lib/iomap.c b/lib/iomap.c
new file mode 100644
index 0000000..5e74390
--- /dev/null
+++ b/lib/iomap.c
@@ -0,0 +1,212 @@
+/*
+ * Implement the default iomap interfaces
+ *
+ * (C) Copyright 2004 Linus Torvalds
+ */
+#include <linux/pci.h>
+#include <linux/module.h>
+#include <asm/io.h>
+
+/*
+ * Read/write from/to an (offsettable) iomem cookie. It might be a PIO
+ * access or a MMIO access, these functions don't care. The info is
+ * encoded in the hardware mapping set up by the mapping functions
+ * (or the cookie itself, depending on implementation and hw).
+ *
+ * The generic routines don't assume any hardware mappings, and just
+ * encode the PIO/MMIO as part of the cookie. They coldly assume that
+ * the MMIO IO mappings are not in the low address range.
+ *
+ * Architectures for which this is not true can't use this generic
+ * implementation and should do their own copy.
+ */
+
+#ifndef HAVE_ARCH_PIO_SIZE
+/*
+ * We encode the physical PIO addresses (0-0xffff) into the
+ * pointer by offsetting them with a constant (0x10000) and
+ * assuming that all the low addresses are always PIO. That means
+ * we can do some sanity checks on the low bits, and don't
+ * need to just take things for granted.
+ */
+#define PIO_OFFSET	0x10000UL
+#define PIO_MASK	0x0ffffUL
+#define PIO_RESERVED	0x40000UL
+#endif
+
+/*
+ * Ugly macros are a way of life.
+ */
+#define VERIFY_PIO(port) BUG_ON((port & ~PIO_MASK) != PIO_OFFSET)
+
+#define IO_COND(addr, is_pio, is_mmio) do {			\
+	unsigned long port = (unsigned long __force)addr;	\
+	if (port < PIO_RESERVED) {				\
+		VERIFY_PIO(port);				\
+		port &= PIO_MASK;				\
+		is_pio;						\
+	} else {						\
+		is_mmio;					\
+	}							\
+} while (0)
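+
+/*
+ * As an illustration of the encoding (assuming the generic cookie
+ * scheme above, i.e. no arch override): ioport_map(0x3f8, 8) returns
+ * the cookie (void __iomem *)0x103f8, which is below PIO_RESERVED,
+ * so ioread8() on it ends up doing inb(0x3f8).  A pointer obtained
+ * from ioremap()/pci_iomap() of a memory BAR is a kernel virtual
+ * address far above PIO_RESERVED and therefore takes the MMIO branch
+ * (readb() and friends).
+ */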
+
+unsigned int fastcall ioread8(void __iomem *addr)
+{
+	IO_COND(addr, return inb(port), return readb(addr));
+}
+unsigned int fastcall ioread16(void __iomem *addr)
+{
+	IO_COND(addr, return inw(port), return readw(addr));
+}
+unsigned int fastcall ioread32(void __iomem *addr)
+{
+	IO_COND(addr, return inl(port), return readl(addr));
+}
+EXPORT_SYMBOL(ioread8);
+EXPORT_SYMBOL(ioread16);
+EXPORT_SYMBOL(ioread32);
+
+void fastcall iowrite8(u8 val, void __iomem *addr)
+{
+	IO_COND(addr, outb(val,port), writeb(val, addr));
+}
+void fastcall iowrite16(u16 val, void __iomem *addr)
+{
+	IO_COND(addr, outw(val,port), writew(val, addr));
+}
+void fastcall iowrite32(u32 val, void __iomem *addr)
+{
+	IO_COND(addr, outl(val,port), writel(val, addr));
+}
+EXPORT_SYMBOL(iowrite8);
+EXPORT_SYMBOL(iowrite16);
+EXPORT_SYMBOL(iowrite32);
+
+/*
+ * These are the "repeat MMIO read/write" functions.
+ * Note the "__raw" accesses, since we don't want to
+ * convert to CPU byte order. We write in "IO byte
+ * order" (we also don't have IO barriers).
+ */
+static inline void mmio_insb(void __iomem *addr, u8 *dst, int count)
+{
+	while (--count >= 0) {
+		u8 data = __raw_readb(addr);
+		*dst = data;
+		dst++;
+	}
+}
+static inline void mmio_insw(void __iomem *addr, u16 *dst, int count)
+{
+	while (--count >= 0) {
+		u16 data = __raw_readw(addr);
+		*dst = data;
+		dst++;
+	}
+}
+static inline void mmio_insl(void __iomem *addr, u32 *dst, int count)
+{
+	while (--count >= 0) {
+		u32 data = __raw_readl(addr);
+		*dst = data;
+		dst++;
+	}
+}
+
+static inline void mmio_outsb(void __iomem *addr, const u8 *src, int count)
+{
+	while (--count >= 0) {
+		__raw_writeb(*src, addr);
+		src++;
+	}
+}
+static inline void mmio_outsw(void __iomem *addr, const u16 *src, int count)
+{
+	while (--count >= 0) {
+		__raw_writew(*src, addr);
+		src++;
+	}
+}
+static inline void mmio_outsl(void __iomem *addr, const u32 *src, int count)
+{
+	while (--count >= 0) {
+		__raw_writel(*src, addr);
+		src++;
+	}
+}
+
+void fastcall ioread8_rep(void __iomem *addr, void *dst, unsigned long count)
+{
+	IO_COND(addr, insb(port,dst,count), mmio_insb(addr, dst, count));
+}
+void fastcall ioread16_rep(void __iomem *addr, void *dst, unsigned long count)
+{
+	IO_COND(addr, insw(port,dst,count), mmio_insw(addr, dst, count));
+}
+void fastcall ioread32_rep(void __iomem *addr, void *dst, unsigned long count)
+{
+	IO_COND(addr, insl(port,dst,count), mmio_insl(addr, dst, count));
+}
+EXPORT_SYMBOL(ioread8_rep);
+EXPORT_SYMBOL(ioread16_rep);
+EXPORT_SYMBOL(ioread32_rep);
+
+void fastcall iowrite8_rep(void __iomem *addr, const void *src, unsigned long count)
+{
+	IO_COND(addr, outsb(port, src, count), mmio_outsb(addr, src, count));
+}
+void fastcall iowrite16_rep(void __iomem *addr, const void *src, unsigned long count)
+{
+	IO_COND(addr, outsw(port, src, count), mmio_outsw(addr, src, count));
+}
+void fastcall iowrite32_rep(void __iomem *addr, const void *src, unsigned long count)
+{
+	IO_COND(addr, outsl(port, src,count), mmio_outsl(addr, src, count));
+}
+EXPORT_SYMBOL(iowrite8_rep);
+EXPORT_SYMBOL(iowrite16_rep);
+EXPORT_SYMBOL(iowrite32_rep);
+
+/* Create a virtual mapping cookie for an IO port range */
+void __iomem *ioport_map(unsigned long port, unsigned int nr)
+{
+	if (port > PIO_MASK)
+		return NULL;
+	return (void __iomem *) (unsigned long) (port + PIO_OFFSET);
+}
+
+void ioport_unmap(void __iomem *addr)
+{
+	/* Nothing to do */
+}
+EXPORT_SYMBOL(ioport_map);
+EXPORT_SYMBOL(ioport_unmap);
+
+/* Create a virtual mapping cookie for a PCI BAR (memory or IO) */
+void __iomem *pci_iomap(struct pci_dev *dev, int bar, unsigned long maxlen)
+{
+	unsigned long start = pci_resource_start(dev, bar);
+	unsigned long len = pci_resource_len(dev, bar);
+	unsigned long flags = pci_resource_flags(dev, bar);
+
+	if (!len || !start)
+		return NULL;
+	if (maxlen && len > maxlen)
+		len = maxlen;
+	if (flags & IORESOURCE_IO)
+		return ioport_map(start, len);
+	if (flags & IORESOURCE_MEM) {
+		if (flags & IORESOURCE_CACHEABLE)
+			return ioremap(start, len);
+		return ioremap_nocache(start, len);
+	}
+	/* What? */
+	return NULL;
+}
+
+void pci_iounmap(struct pci_dev *dev, void __iomem * addr)
+{
+	IO_COND(addr, /* nothing */, iounmap(addr));
+}
+EXPORT_SYMBOL(pci_iomap);
+EXPORT_SYMBOL(pci_iounmap);
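+
+/*
+ * Typical (hypothetical) driver usage of the cookie interface; the
+ * pci_dev, BAR number and register offset below are made up purely
+ * for illustration:
+ *
+ *	void __iomem *regs;
+ *	u32 status;
+ *
+ *	regs = pci_iomap(pdev, 0, 0);	(maxlen 0 maps the whole BAR)
+ *	if (!regs)
+ *		return -ENOMEM;
+ *	status = ioread32(regs + 0x10);
+ *	iowrite32(status | 1, regs + 0x10);
+ *	pci_iounmap(pdev, regs);
+ *
+ * The same sequence works whether BAR 0 decodes as I/O ports or as
+ * memory, which is the whole point of the iomap cookies.
+ */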
diff --git a/lib/kernel_lock.c b/lib/kernel_lock.c
new file mode 100644
index 0000000..99b0ae3
--- /dev/null
+++ b/lib/kernel_lock.c
@@ -0,0 +1,264 @@
+/*
+ * lib/kernel_lock.c
+ *
+ * This is the traditional BKL - big kernel lock. Largely
+ * relegated to obsolescence, but used by various less
+ * important (or lazy) subsystems.
+ */
+#include <linux/smp_lock.h>
+#include <linux/module.h>
+#include <linux/kallsyms.h>
+
+#if defined(CONFIG_PREEMPT) && defined(__smp_processor_id) && \
+		defined(CONFIG_DEBUG_PREEMPT)
+
+/*
+ * Debugging check.
+ */
+unsigned int smp_processor_id(void)
+{
+	unsigned long preempt_count = preempt_count();
+	int this_cpu = __smp_processor_id();
+	cpumask_t this_mask;
+
+	if (likely(preempt_count))
+		goto out;
+
+	if (irqs_disabled())
+		goto out;
+
+	/*
+	 * Kernel threads bound to a single CPU can safely use
+	 * smp_processor_id():
+	 */
+	this_mask = cpumask_of_cpu(this_cpu);
+
+	if (cpus_equal(current->cpus_allowed, this_mask))
+		goto out;
+
+	/*
+	 * It is valid to assume CPU-locality during early bootup:
+	 */
+	if (system_state != SYSTEM_RUNNING)
+		goto out;
+
+	/*
+	 * Avoid recursion:
+	 */
+	preempt_disable();
+
+	if (!printk_ratelimit())
+		goto out_enable;
+
+	printk(KERN_ERR "BUG: using smp_processor_id() in preemptible [%08x] code: %s/%d\n", preempt_count(), current->comm, current->pid);
+	print_symbol("caller is %s\n", (long)__builtin_return_address(0));
+	dump_stack();
+
+out_enable:
+	preempt_enable_no_resched();
+out:
+	return this_cpu;
+}
+
+EXPORT_SYMBOL(smp_processor_id);
+
+#endif /* PREEMPT && __smp_processor_id && DEBUG_PREEMPT */
+
+#ifdef CONFIG_PREEMPT_BKL
+/*
+ * The 'big kernel semaphore'
+ *
+ * This mutex is taken and released recursively by lock_kernel()
+ * and unlock_kernel().  It is transparently dropped and reacquired
+ * over schedule().  It is used to protect legacy code that hasn't
+ * been migrated to a proper locking design yet.
+ *
+ * Note: code locked by this semaphore will only be serialized against
+ * other code using the same locking facility. The code guarantees that
+ * the task remains on the same CPU.
+ *
+ * Don't use in new code.
+ */
+static DECLARE_MUTEX(kernel_sem);
+
+/*
+ * Re-acquire the kernel semaphore.
+ *
+ * This function is called with preemption off.
+ *
+ * We are executing in schedule() so the code must be extremely careful
+ * about recursion, both due to the down() and due to the enabling of
+ * preemption. schedule() will re-check the preemption flag after
+ * reacquiring the semaphore.
+ */
+int __lockfunc __reacquire_kernel_lock(void)
+{
+	struct task_struct *task = current;
+	int saved_lock_depth = task->lock_depth;
+
+	BUG_ON(saved_lock_depth < 0);
+
+	task->lock_depth = -1;
+	preempt_enable_no_resched();
+
+	down(&kernel_sem);
+
+	preempt_disable();
+	task->lock_depth = saved_lock_depth;
+
+	return 0;
+}
+
+void __lockfunc __release_kernel_lock(void)
+{
+	up(&kernel_sem);
+}
+
+/*
+ * Getting the big kernel semaphore.
+ */
+void __lockfunc lock_kernel(void)
+{
+	struct task_struct *task = current;
+	int depth = task->lock_depth + 1;
+
+	if (likely(!depth))
+		/*
+		 * No recursion worries - we set up lock_depth _after_
+		 * the semaphore has been acquired.
+		 */
+		down(&kernel_sem);
+
+	task->lock_depth = depth;
+}
+
+void __lockfunc unlock_kernel(void)
+{
+	struct task_struct *task = current;
+
+	BUG_ON(task->lock_depth < 0);
+
+	if (likely(--task->lock_depth < 0))
+		up(&kernel_sem);
+}
+
+#else
+
+/*
+ * The 'big kernel lock'
+ *
+ * This spinlock is taken and released recursively by lock_kernel()
+ * and unlock_kernel().  It is transparently dropped and reacquired
+ * over schedule().  It is used to protect legacy code that hasn't
+ * been migrated to a proper locking design yet.
+ *
+ * Don't use in new code.
+ */
+static  __cacheline_aligned_in_smp DEFINE_SPINLOCK(kernel_flag);
+
+
+/*
+ * Acquire/release the underlying lock from the scheduler.
+ *
+ * This is called with preemption disabled, and should
+ * return an error value if it cannot get the lock and
+ * TIF_NEED_RESCHED gets set.
+ *
+ * If it successfully gets the lock, it should increment
+ * the preemption count like any spinlock does.
+ *
+ * (This works on UP too - _raw_spin_trylock will never
+ * return false in that case)
+ */
+int __lockfunc __reacquire_kernel_lock(void)
+{
+	while (!_raw_spin_trylock(&kernel_flag)) {
+		if (test_thread_flag(TIF_NEED_RESCHED))
+			return -EAGAIN;
+		cpu_relax();
+	}
+	preempt_disable();
+	return 0;
+}
+
+void __lockfunc __release_kernel_lock(void)
+{
+	_raw_spin_unlock(&kernel_flag);
+	preempt_enable_no_resched();
+}
+
+/*
+ * These are the BKL spinlocks - we try to be polite about preemption. 
+ * If SMP is not on (ie UP preemption), this all goes away because the
+ * _raw_spin_trylock() will always succeed.
+ */
+#ifdef CONFIG_PREEMPT
+static inline void __lock_kernel(void)
+{
+	preempt_disable();
+	if (unlikely(!_raw_spin_trylock(&kernel_flag))) {
+		/*
+		 * If preemption was disabled even before this
+		 * was called, there's nothing we can be polite
+		 * about - just spin.
+		 */
+		if (preempt_count() > 1) {
+			_raw_spin_lock(&kernel_flag);
+			return;
+		}
+
+		/*
+		 * Otherwise, let's wait for the kernel lock
+		 * with preemption enabled..
+		 */
+		do {
+			preempt_enable();
+			while (spin_is_locked(&kernel_flag))
+				cpu_relax();
+			preempt_disable();
+		} while (!_raw_spin_trylock(&kernel_flag));
+	}
+}
+
+#else
+
+/*
+ * Non-preemption case - just get the spinlock
+ */
+static inline void __lock_kernel(void)
+{
+	_raw_spin_lock(&kernel_flag);
+}
+#endif
+
+static inline void __unlock_kernel(void)
+{
+	_raw_spin_unlock(&kernel_flag);
+	preempt_enable();
+}
+
+/*
+ * Getting the big kernel lock.
+ *
+ * This cannot happen asynchronously, so we only need to
+ * worry about other CPUs.
+ */
+void __lockfunc lock_kernel(void)
+{
+	int depth = current->lock_depth+1;
+	if (likely(!depth))
+		__lock_kernel();
+	current->lock_depth = depth;
+}
+
+void __lockfunc unlock_kernel(void)
+{
+	BUG_ON(current->lock_depth < 0);
+	if (likely(--current->lock_depth < 0))
+		__unlock_kernel();
+}
+
+#endif
+
+EXPORT_SYMBOL(lock_kernel);
+EXPORT_SYMBOL(unlock_kernel);
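+
+/*
+ * Both variants share the same nesting rule, driven by
+ * current->lock_depth (which starts out at -1 for every task).
+ * A sketch of what a task that nests the BKL twice goes through:
+ *
+ *	lock_kernel();		lock_depth -1 -> 0, lock really taken
+ *	  lock_kernel();	lock_depth  0 -> 1, already held, no-op
+ *	  unlock_kernel();	lock_depth  1 -> 0, still held
+ *	unlock_kernel();	lock_depth  0 -> -1, lock really released
+ *
+ * schedule() uses __release_kernel_lock() and __reacquire_kernel_lock()
+ * to transparently drop and retake the lock around a context switch,
+ * which is what allows a task to sleep while logically holding the BKL.
+ */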
+
diff --git a/lib/kobject.c b/lib/kobject.c
new file mode 100644
index 0000000..ff94919
--- /dev/null
+++ b/lib/kobject.c
@@ -0,0 +1,544 @@
+/*
+ * kobject.c - library routines for handling generic kernel objects
+ *
+ * Copyright (c) 2002-2003 Patrick Mochel <mochel@osdl.org>
+ *
+ * This file is released under the GPLv2.
+ *
+ *
+ * Please see the file Documentation/kobject.txt for critical information
+ * about using the kobject interface.
+ */
+
+#include <linux/kobject.h>
+#include <linux/string.h>
+#include <linux/module.h>
+#include <linux/stat.h>
+
+/**
+ *	populate_dir - populate directory with attributes.
+ *	@kobj:	object we're working on.
+ *
+ *	Most subsystems have a set of default attributes that 
+ *	are associated with an object that registers with them.
+ *	This is a helper called during object registration that 
+ *	loops through the default attributes of the subsystem 
+ *	and creates attribute files for them in sysfs.
+ *
+ */
+
+static int populate_dir(struct kobject * kobj)
+{
+	struct kobj_type * t = get_ktype(kobj);
+	struct attribute * attr;
+	int error = 0;
+	int i;
+	
+	if (t && t->default_attrs) {
+		for (i = 0; (attr = t->default_attrs[i]) != NULL; i++) {
+			if ((error = sysfs_create_file(kobj,attr)))
+				break;
+		}
+	}
+	return error;
+}
+
+static int create_dir(struct kobject * kobj)
+{
+	int error = 0;
+	if (kobject_name(kobj)) {
+		error = sysfs_create_dir(kobj);
+		if (!error) {
+			if ((error = populate_dir(kobj)))
+				sysfs_remove_dir(kobj);
+		}
+	}
+	return error;
+}
+
+static inline struct kobject * to_kobj(struct list_head * entry)
+{
+	return container_of(entry,struct kobject,entry);
+}
+
+static int get_kobj_path_length(struct kobject *kobj)
+{
+	int length = 1;
+	struct kobject * parent = kobj;
+
+	/* walk up the ancestors until we hit the one pointing to the 
+	 * root.
+	 * Add 1 to strlen for leading '/' of each level.
+	 */
+	do {
+		length += strlen(kobject_name(parent)) + 1;
+		parent = parent->parent;
+	} while (parent);
+	return length;
+}
+
+static void fill_kobj_path(struct kobject *kobj, char *path, int length)
+{
+	struct kobject * parent;
+
+	--length;
+	for (parent = kobj; parent; parent = parent->parent) {
+		int cur = strlen(kobject_name(parent));
+		/* back up enough to print this name with '/' */
+		length -= cur;
+		strncpy (path + length, kobject_name(parent), cur);
+		*(path + --length) = '/';
+	}
+
+	pr_debug("%s: path = '%s'\n",__FUNCTION__,path);
+}
+
+/**
+ * kobject_get_path - generate and return the path associated with a given kobj
+ * @kobj:	kobject in question, with which to build the path
+ * @gfp_mask:	the allocation type used to allocate the path
+ *
+ * The result must be freed by the caller with kfree().
+ */
+char *kobject_get_path(struct kobject *kobj, int gfp_mask)
+{
+	char *path;
+	int len;
+
+	len = get_kobj_path_length(kobj);
+	path = kmalloc(len, gfp_mask);
+	if (!path)
+		return NULL;
+	memset(path, 0x00, len);
+	fill_kobj_path(kobj, path, len);
+
+	return path;
+}
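+
+/*
+ * For example (hypothetical object names): for a kobject named "obj"
+ * whose parent is "parent", itself parented by a kset kobject named
+ * "bus", kobject_get_path() returns the string "/bus/parent/obj",
+ * which the caller must eventually kfree().
+ */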
+
+/**
+ *	kobject_init - initialize object.
+ *	@kobj:	object in question.
+ */
+void kobject_init(struct kobject * kobj)
+{
+	kref_init(&kobj->kref);
+	INIT_LIST_HEAD(&kobj->entry);
+	kobj->kset = kset_get(kobj->kset);
+}
+
+
+/**
+ *	unlink - remove kobject from kset list.
+ *	@kobj:	kobject.
+ *
+ *	Remove the kobject from the kset list and decrement
+ *	its parent's refcount.
+ *	This is separated out, so we can use it in both 
+ *	kobject_del() and kobject_add() on error.
+ */
+
+static void unlink(struct kobject * kobj)
+{
+	if (kobj->kset) {
+		spin_lock(&kobj->kset->list_lock);
+		list_del_init(&kobj->entry);
+		spin_unlock(&kobj->kset->list_lock);
+	}
+	kobject_put(kobj);
+}
+
+/**
+ *	kobject_add - add an object to the hierarchy.
+ *	@kobj:	object.
+ */
+
+int kobject_add(struct kobject * kobj)
+{
+	int error = 0;
+	struct kobject * parent;
+
+	if (!(kobj = kobject_get(kobj)))
+		return -ENOENT;
+	if (!kobj->k_name)
+		kobj->k_name = kobj->name;
+	parent = kobject_get(kobj->parent);
+
+	pr_debug("kobject %s: registering. parent: %s, set: %s\n",
+		 kobject_name(kobj), parent ? kobject_name(parent) : "<NULL>", 
+		 kobj->kset ? kobj->kset->kobj.name : "<NULL>" );
+
+	if (kobj->kset) {
+		spin_lock(&kobj->kset->list_lock);
+
+		if (!parent)
+			parent = kobject_get(&kobj->kset->kobj);
+
+		list_add_tail(&kobj->entry,&kobj->kset->list);
+		spin_unlock(&kobj->kset->list_lock);
+	}
+	kobj->parent = parent;
+
+	error = create_dir(kobj);
+	if (error) {
+		/* unlink does the kobject_put() for us */
+		unlink(kobj);
+		if (parent)
+			kobject_put(parent);
+	} else {
+		kobject_hotplug(kobj, KOBJ_ADD);
+	}
+
+	return error;
+}
+
+
+/**
+ *	kobject_register - initialize and add an object.
+ *	@kobj:	object in question.
+ */
+
+int kobject_register(struct kobject * kobj)
+{
+	int error = 0;
+	if (kobj) {
+		kobject_init(kobj);
+		error = kobject_add(kobj);
+		if (error) {
+			printk("kobject_register failed for %s (%d)\n",
+			       kobject_name(kobj),error);
+			dump_stack();
+		}
+	} else
+		error = -EINVAL;
+	return error;
+}
+
+
+/**
+ *	kobject_set_name - Set the name of an object
+ *	@kobj:	object.
+ *	@fmt:	format string used to build the name.
+ *
+ *	If strlen(name) >= KOBJ_NAME_LEN, then use a dynamically allocated
+ *	string that @kobj->k_name points to. Otherwise, use the static 
+ *	@kobj->name array.
+ */
+
+int kobject_set_name(struct kobject * kobj, const char * fmt, ...)
+{
+	int error = 0;
+	int limit = KOBJ_NAME_LEN;
+	int need;
+	va_list args;
+	char * name;
+
+	/* 
+	 * First, try the static array 
+	 */
+	va_start(args,fmt);
+	need = vsnprintf(kobj->name,limit,fmt,args);
+	va_end(args);
+	if (need < limit) 
+		name = kobj->name;
+	else {
+		/* 
+		 * Need more space? Allocate it and try again 
+		 */
+		limit = need + 1;
+		name = kmalloc(limit,GFP_KERNEL);
+		if (!name) {
+			error = -ENOMEM;
+			goto Done;
+		}
+		va_start(args,fmt);
+		need = vsnprintf(name,limit,fmt,args);
+		va_end(args);
+
+		/* Still? Give up. */
+		if (need >= limit) {
+			kfree(name);
+			error = -EFAULT;
+			goto Done;
+		}
+	}
+
+	/* Free the old name, if necessary. */
+	if (kobj->k_name && kobj->k_name != kobj->name)
+		kfree(kobj->k_name);
+
+	/* Now, set the new name */
+	kobj->k_name = name;
+ Done:
+	return error;
+}
+
+EXPORT_SYMBOL(kobject_set_name);
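+
+/*
+ * A typical (hypothetical) call looks like
+ *
+ *	kobject_set_name(&foo->kobj, "port%d", index);
+ *
+ * Short results live in the embedded kobj->name[] array; anything of
+ * KOBJ_NAME_LEN characters or more is kmalloc'ed and hung off
+ * kobj->k_name, as described above.
+ */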
+
+
+/**
+ *	kobject_rename - change the name of an object
+ *	@kobj:	object in question.
+ *	@new_name: object's new name
+ */
+
+int kobject_rename(struct kobject * kobj, char *new_name)
+{
+	int error = 0;
+
+	kobj = kobject_get(kobj);
+	if (!kobj)
+		return -EINVAL;
+	error = sysfs_rename_dir(kobj, new_name);
+	kobject_put(kobj);
+
+	return error;
+}
+
+/**
+ *	kobject_del - unlink kobject from hierarchy.
+ * 	@kobj:	object.
+ */
+
+void kobject_del(struct kobject * kobj)
+{
+	kobject_hotplug(kobj, KOBJ_REMOVE);
+	sysfs_remove_dir(kobj);
+	unlink(kobj);
+}
+
+/**
+ *	kobject_unregister - remove object from hierarchy and decrement refcount.
+ *	@kobj:	object going away.
+ */
+
+void kobject_unregister(struct kobject * kobj)
+{
+	pr_debug("kobject %s: unregistering\n",kobject_name(kobj));
+	kobject_del(kobj);
+	kobject_put(kobj);
+}
+
+/**
+ *	kobject_get - increment refcount for object.
+ *	@kobj:	object.
+ */
+
+struct kobject * kobject_get(struct kobject * kobj)
+{
+	if (kobj)
+		kref_get(&kobj->kref);
+	return kobj;
+}
+
+/**
+ *	kobject_cleanup - free kobject resources. 
+ *	@kobj:	object.
+ */
+
+void kobject_cleanup(struct kobject * kobj)
+{
+	struct kobj_type * t = get_ktype(kobj);
+	struct kset * s = kobj->kset;
+	struct kobject * parent = kobj->parent;
+
+	pr_debug("kobject %s: cleaning up\n",kobject_name(kobj));
+	if (kobj->k_name != kobj->name)
+		kfree(kobj->k_name);
+	kobj->k_name = NULL;
+	if (t && t->release)
+		t->release(kobj);
+	if (s)
+		kset_put(s);
+	if (parent) 
+		kobject_put(parent);
+}
+
+static void kobject_release(struct kref *kref)
+{
+	kobject_cleanup(container_of(kref, struct kobject, kref));
+}
+
+/**
+ *	kobject_put - decrement refcount for object.
+ *	@kobj:	object.
+ *
+ *	Decrement the refcount, and if 0, call kobject_cleanup().
+ */
+void kobject_put(struct kobject * kobj)
+{
+	if (kobj)
+		kref_put(&kobj->kref, kobject_release);
+}
+
+
+/**
+ *	kset_init - initialize a kset for use
+ *	@k:	kset 
+ */
+
+void kset_init(struct kset * k)
+{
+	kobject_init(&k->kobj);
+	INIT_LIST_HEAD(&k->list);
+	spin_lock_init(&k->list_lock);
+}
+
+
+/**
+ *	kset_add - add a kset object to the hierarchy.
+ *	@k:	kset.
+ *
+ *	Simply, this adds the kset's embedded kobject to the 
+ *	hierarchy. 
+ *	We also try to make sure that the kset's embedded kobject
+ *	has a parent before it is added. We only care if the embedded
+ *	kobject is not part of a kset itself, since kobject_add()
+ *	assigns a parent in that case. 
+ *	If that is the case, and the kset has a controlling subsystem,
+ *	then we set the kset's parent to be said subsystem. 
+ */
+
+int kset_add(struct kset * k)
+{
+	if (!k->kobj.parent && !k->kobj.kset && k->subsys)
+		k->kobj.parent = &k->subsys->kset.kobj;
+
+	return kobject_add(&k->kobj);
+}
+
+
+/**
+ *	kset_register - initialize and add a kset.
+ *	@k:	kset.
+ */
+
+int kset_register(struct kset * k)
+{
+	kset_init(k);
+	return kset_add(k);
+}
+
+
+/**
+ *	kset_unregister - remove a kset.
+ *	@k:	kset.
+ */
+
+void kset_unregister(struct kset * k)
+{
+	kobject_unregister(&k->kobj);
+}
+
+
+/**
+ *	kset_find_obj - search for object in kset.
+ *	@kset:	kset we're looking in.
+ *	@name:	object's name.
+ *
+ *	Lock kset via @kset->subsys, and iterate over @kset->list,
+ *	looking for a matching kobject. If matching object is found
+ *	take a reference and return the object.
+ */
+
+struct kobject * kset_find_obj(struct kset * kset, const char * name)
+{
+	struct list_head * entry;
+	struct kobject * ret = NULL;
+
+	spin_lock(&kset->list_lock);
+	list_for_each(entry,&kset->list) {
+		struct kobject * k = to_kobj(entry);
+		if (kobject_name(k) && !strcmp(kobject_name(k),name)) {
+			ret = kobject_get(k);
+			break;
+		}
+	}
+	spin_unlock(&kset->list_lock);
+	return ret;
+}
+
+
+void subsystem_init(struct subsystem * s)
+{
+	init_rwsem(&s->rwsem);
+	kset_init(&s->kset);
+}
+
+/**
+ *	subsystem_register - register a subsystem.
+ *	@s:	the subsystem we're registering.
+ *
+ *	Once we register the subsystem, we want to make sure that 
+ *	the kset points back to this subsystem for correct usage of 
+ *	the rwsem. 
+ */
+
+int subsystem_register(struct subsystem * s)
+{
+	int error;
+
+	subsystem_init(s);
+	pr_debug("subsystem %s: registering\n",s->kset.kobj.name);
+
+	if (!(error = kset_add(&s->kset))) {
+		if (!s->kset.subsys)
+			s->kset.subsys = s;
+	}
+	return error;
+}
+
+void subsystem_unregister(struct subsystem * s)
+{
+	pr_debug("subsystem %s: unregistering\n",s->kset.kobj.name);
+	kset_unregister(&s->kset);
+}
+
+
+/**
+ *	subsys_create_file - export sysfs attribute file.
+ *	@s:	subsystem.
+ *	@a:	subsystem attribute descriptor.
+ */
+
+int subsys_create_file(struct subsystem * s, struct subsys_attribute * a)
+{
+	int error = 0;
+	if (subsys_get(s)) {
+		error = sysfs_create_file(&s->kset.kobj,&a->attr);
+		subsys_put(s);
+	}
+	return error;
+}
+
+
+/**
+ *	subsys_remove_file - remove sysfs attribute file.
+ *	@s:	subsystem.
+ *	@a:	attribute descriptor.
+ */
+
+void subsys_remove_file(struct subsystem * s, struct subsys_attribute * a)
+{
+	if (subsys_get(s)) {
+		sysfs_remove_file(&s->kset.kobj,&a->attr);
+		subsys_put(s);
+	}
+}
+
+EXPORT_SYMBOL(kobject_init);
+EXPORT_SYMBOL(kobject_register);
+EXPORT_SYMBOL(kobject_unregister);
+EXPORT_SYMBOL(kobject_get);
+EXPORT_SYMBOL(kobject_put);
+EXPORT_SYMBOL(kobject_add);
+EXPORT_SYMBOL(kobject_del);
+
+EXPORT_SYMBOL(kset_register);
+EXPORT_SYMBOL(kset_unregister);
+EXPORT_SYMBOL(kset_find_obj);
+
+EXPORT_SYMBOL(subsystem_init);
+EXPORT_SYMBOL(subsystem_register);
+EXPORT_SYMBOL(subsystem_unregister);
+EXPORT_SYMBOL(subsys_create_file);
+EXPORT_SYMBOL(subsys_remove_file);
diff --git a/lib/kobject_uevent.c b/lib/kobject_uevent.c
new file mode 100644
index 0000000..2a4e767
--- /dev/null
+++ b/lib/kobject_uevent.c
@@ -0,0 +1,369 @@
+/*
+ * kernel userspace event delivery
+ *
+ * Copyright (C) 2004 Red Hat, Inc.  All rights reserved.
+ * Copyright (C) 2004 Novell, Inc.  All rights reserved.
+ * Copyright (C) 2004 IBM, Inc. All rights reserved.
+ *
+ * Licensed under the GNU GPL v2.
+ *
+ * Authors:
+ *	Robert Love		<rml@novell.com>
+ *	Kay Sievers		<kay.sievers@vrfy.org>
+ *	Arjan van de Ven	<arjanv@redhat.com>
+ *	Greg Kroah-Hartman	<greg@kroah.com>
+ */
+
+#include <linux/spinlock.h>
+#include <linux/socket.h>
+#include <linux/skbuff.h>
+#include <linux/netlink.h>
+#include <linux/string.h>
+#include <linux/kobject_uevent.h>
+#include <linux/kobject.h>
+#include <net/sock.h>
+
+#define BUFFER_SIZE	1024	/* buffer for the hotplug env */
+#define NUM_ENVP	32	/* number of env pointers */
+
+#if defined(CONFIG_KOBJECT_UEVENT) || defined(CONFIG_HOTPLUG)
+static char *action_to_string(enum kobject_action action)
+{
+	switch (action) {
+	case KOBJ_ADD:
+		return "add";
+	case KOBJ_REMOVE:
+		return "remove";
+	case KOBJ_CHANGE:
+		return "change";
+	case KOBJ_MOUNT:
+		return "mount";
+	case KOBJ_UMOUNT:
+		return "umount";
+	case KOBJ_OFFLINE:
+		return "offline";
+	case KOBJ_ONLINE:
+		return "online";
+	default:
+		return NULL;
+	}
+}
+#endif
+
+#ifdef CONFIG_KOBJECT_UEVENT
+static struct sock *uevent_sock;
+
+/**
+ * send_uevent - notify userspace by sending an event through a netlink socket
+ *
+ * @signal: signal name
+ * @obj: object path (kobject)
+ * @envp: possible hotplug environment to pass with the message
+ * @gfp_mask: allocation flags used for the netlink message buffer
+ */
+static int send_uevent(const char *signal, const char *obj,
+		       char **envp, int gfp_mask)
+{
+	struct sk_buff *skb;
+	char *pos;
+	int len;
+
+	if (!uevent_sock)
+		return -EIO;
+
+	len = strlen(signal) + 1;
+	len += strlen(obj) + 1;
+
+	/* allocate buffer with the maximum possible message size */
+	skb = alloc_skb(len + BUFFER_SIZE, gfp_mask);
+	if (!skb)
+		return -ENOMEM;
+
+	pos = skb_put(skb, len);
+	sprintf(pos, "%s@%s", signal, obj);
+
+	/* copy the environment key by key to our continuous buffer */
+	if (envp) {
+		int i;
+
+		for (i = 2; envp[i]; i++) {
+			len = strlen(envp[i]) + 1;
+			pos = skb_put(skb, len);
+			strcpy(pos, envp[i]);
+		}
+	}
+
+	return netlink_broadcast(uevent_sock, skb, 0, 1, gfp_mask);
+}
+
+static int do_kobject_uevent(struct kobject *kobj, enum kobject_action action, 
+			     struct attribute *attr, int gfp_mask)
+{
+	char *path;
+	char *attrpath;
+	char *signal;
+	int len;
+	int rc = -ENOMEM;
+
+	path = kobject_get_path(kobj, gfp_mask);
+	if (!path)
+		return -ENOMEM;
+
+	signal = action_to_string(action);
+	if (!signal) {
+		/* don't leak the path buffer allocated above */
+		rc = -EINVAL;
+		goto exit;
+	}
+
+	if (attr) {
+		len = strlen(path);
+		len += strlen(attr->name) + 2;
+		attrpath = kmalloc(len, gfp_mask);
+		if (!attrpath)
+			goto exit;
+		sprintf(attrpath, "%s/%s", path, attr->name);
+		rc = send_uevent(signal, attrpath, NULL, gfp_mask);
+		kfree(attrpath);
+	} else
+		rc = send_uevent(signal, path, NULL, gfp_mask);
+
+exit:
+	kfree(path);
+	return rc;
+}
+
+/**
+ * kobject_uevent - notify userspace by sending event through netlink socket
+ * 
+ * @kobj: struct kobject that the event is happening to
+ * @action: action that is happening to the kobject
+ * @attr: optional struct attribute the event belongs to
+ */
+int kobject_uevent(struct kobject *kobj, enum kobject_action action,
+		   struct attribute *attr)
+{
+	return do_kobject_uevent(kobj, action, attr, GFP_KERNEL);
+}
+EXPORT_SYMBOL_GPL(kobject_uevent);
+
+int kobject_uevent_atomic(struct kobject *kobj, enum kobject_action action,
+			  struct attribute *attr)
+{
+	return do_kobject_uevent(kobj, action, attr, GFP_ATOMIC);
+}
+EXPORT_SYMBOL_GPL(kobject_uevent_atomic);
+
+static int __init kobject_uevent_init(void)
+{
+	uevent_sock = netlink_kernel_create(NETLINK_KOBJECT_UEVENT, NULL);
+
+	if (!uevent_sock) {
+		printk(KERN_ERR
+		       "kobject_uevent: unable to create netlink socket!\n");
+		return -ENODEV;
+	}
+
+	return 0;
+}
+
+postcore_initcall(kobject_uevent_init);
+
+#else
+static inline int send_uevent(const char *signal, const char *obj,
+			      char **envp, int gfp_mask)
+{
+	return 0;
+}
+
+#endif /* CONFIG_KOBJECT_UEVENT */
+
+
+#ifdef CONFIG_HOTPLUG
+char hotplug_path[HOTPLUG_PATH_LEN] = "/sbin/hotplug";
+u64 hotplug_seqnum;
+static DEFINE_SPINLOCK(sequence_lock);
+
+/**
+ * kobject_hotplug - notify userspace by executing /sbin/hotplug
+ *
+ * @action: action that is happening (usually KOBJ_ADD or KOBJ_REMOVE)
+ * @kobj: struct kobject that the action is happening to
+ */
+void kobject_hotplug(struct kobject *kobj, enum kobject_action action)
+{
+	char *argv [3];
+	char **envp = NULL;
+	char *buffer = NULL;
+	char *seq_buff;
+	char *scratch;
+	int i = 0;
+	int retval;
+	char *kobj_path = NULL;
+	char *name = NULL;
+	char *action_string;
+	u64 seq;
+	struct kobject *top_kobj = kobj;
+	struct kset *kset;
+	static struct kset_hotplug_ops null_hotplug_ops;
+	struct kset_hotplug_ops *hotplug_ops = &null_hotplug_ops;
+
+	/* If this kobj does not belong to a kset,
+	   try to find a parent that does. */
+	if (!top_kobj->kset && top_kobj->parent) {
+		do {
+			top_kobj = top_kobj->parent;
+		} while (!top_kobj->kset && top_kobj->parent);
+	}
+
+	if (top_kobj->kset)
+		kset = top_kobj->kset;
+	else
+		return;
+
+	if (kset->hotplug_ops)
+		hotplug_ops = kset->hotplug_ops;
+
+	/* If the kset has a filter operation, call it.
+	   Skip the event, if the filter returns zero. */
+	if (hotplug_ops->filter) {
+		if (!hotplug_ops->filter(kset, kobj))
+			return;
+	}
+
+	pr_debug ("%s\n", __FUNCTION__);
+
+	action_string = action_to_string(action);
+	if (!action_string)
+		return;
+
+	envp = kmalloc(NUM_ENVP * sizeof (char *), GFP_KERNEL);
+	if (!envp)
+		return;
+	memset (envp, 0x00, NUM_ENVP * sizeof (char *));
+
+	buffer = kmalloc(BUFFER_SIZE, GFP_KERNEL);
+	if (!buffer)
+		goto exit;
+
+	if (hotplug_ops->name)
+		name = hotplug_ops->name(kset, kobj);
+	if (name == NULL)
+		name = kset->kobj.name;
+
+	argv [0] = hotplug_path;
+	argv [1] = name;
+	argv [2] = NULL;
+
+	/* minimal command environment */
+	envp [i++] = "HOME=/";
+	envp [i++] = "PATH=/sbin:/bin:/usr/sbin:/usr/bin";
+
+	scratch = buffer;
+
+	envp [i++] = scratch;
+	scratch += sprintf(scratch, "ACTION=%s", action_string) + 1;
+
+	kobj_path = kobject_get_path(kobj, GFP_KERNEL);
+	if (!kobj_path)
+		goto exit;
+
+	envp [i++] = scratch;
+	scratch += sprintf (scratch, "DEVPATH=%s", kobj_path) + 1;
+
+	envp [i++] = scratch;
+	scratch += sprintf(scratch, "SUBSYSTEM=%s", name) + 1;
+
+	/* reserve space for the sequence,
+	 * put the real one in after the hotplug call */
+	envp[i++] = seq_buff = scratch;
+	scratch += strlen("SEQNUM=18446744073709551616") + 1;
+
+	if (hotplug_ops->hotplug) {
+		/* have the kset specific function add its stuff */
+		retval = hotplug_ops->hotplug (kset, kobj,
+				  &envp[i], NUM_ENVP - i, scratch,
+				  BUFFER_SIZE - (scratch - buffer));
+		if (retval) {
+			pr_debug ("%s - hotplug() returned %d\n",
+				  __FUNCTION__, retval);
+			goto exit;
+		}
+	}
+
+	spin_lock(&sequence_lock);
+	seq = ++hotplug_seqnum;
+	spin_unlock(&sequence_lock);
+	sprintf(seq_buff, "SEQNUM=%llu", (unsigned long long)seq);
+
+	pr_debug ("%s: %s %s seq=%llu %s %s %s %s %s\n",
+		  __FUNCTION__, argv[0], argv[1], (unsigned long long)seq,
+		  envp[0], envp[1], envp[2], envp[3], envp[4]);
+
+	send_uevent(action_string, kobj_path, envp, GFP_KERNEL);
+
+	if (!hotplug_path[0])
+		goto exit;
+
+	retval = call_usermodehelper (argv[0], argv, envp, 0);
+	if (retval)
+		pr_debug ("%s - call_usermodehelper returned %d\n",
+			  __FUNCTION__, retval);
+
+exit:
+	kfree(kobj_path);
+	kfree(buffer);
+	kfree(envp);
+	return;
+}
+EXPORT_SYMBOL(kobject_hotplug);
+
+/**
+ * add_hotplug_env_var - helper for creating hotplug environment variables
+ * @envp: Pointer to table of environment variables, as passed into
+ * hotplug() method.
+ * @num_envp: Number of environment variable slots available, as
+ * passed into hotplug() method.
+ * @cur_index: Pointer to current index into @envp.  It should be
+ * initialized to 0 before the first call to add_hotplug_env_var(),
+ * and will be incremented on success.
+ * @buffer: Pointer to buffer for environment variables, as passed
+ * into hotplug() method.
+ * @buffer_size: Length of @buffer, as passed into hotplug() method.
+ * @cur_len: Pointer to current length of space used in @buffer.
+ * Should be initialized to 0 before the first call to
+ * add_hotplug_env_var(), and will be incremented on success.
+ * @format: Format for creating environment variable (of the form
+ * "XXX=%x") for snprintf().
+ *
+ * Returns 0 if environment variable was added successfully or -ENOMEM
+ * if no space was available.
+ */
+int add_hotplug_env_var(char **envp, int num_envp, int *cur_index,
+			char *buffer, int buffer_size, int *cur_len,
+			const char *format, ...)
+{
+	va_list args;
+
+	/*
+	 * We check against num_envp - 1 to make sure there is at
+	 * least one slot left after we return, since the hotplug
+	 * method needs to set the last slot to NULL.
+	 */
+	if (*cur_index >= num_envp - 1)
+		return -ENOMEM;
+
+	envp[*cur_index] = buffer + *cur_len;
+
+	va_start(args, format);
+	*cur_len += vsnprintf(envp[*cur_index],
+			      max(buffer_size - *cur_len, 0),
+			      format, args) + 1;
+	va_end(args);
+
+	if (*cur_len > buffer_size)
+		return -ENOMEM;
+
+	(*cur_index)++;
+	return 0;
+}
+EXPORT_SYMBOL(add_hotplug_env_var);
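+
+/*
+ * A sketch of how a kset's hotplug() method might use this helper;
+ * the foo_hotplug name, the FOO_ID variable and its value are
+ * invented for the example:
+ *
+ *	static int foo_hotplug(struct kset *kset, struct kobject *kobj,
+ *			       char **envp, int num_envp,
+ *			       char *buffer, int buffer_size)
+ *	{
+ *		int i = 0;
+ *		int len = 0;
+ *
+ *		if (add_hotplug_env_var(envp, num_envp, &i,
+ *					buffer, buffer_size, &len,
+ *					"FOO_ID=%d", 42))
+ *			return -ENOMEM;
+ *		envp[i] = NULL;
+ *		return 0;
+ *	}
+ *
+ * kobject_hotplug() calls this with the remaining envp slots and
+ * buffer space, then hands the whole environment to send_uevent()
+ * and /sbin/hotplug.
+ */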
+
+#endif /* CONFIG_HOTPLUG */
diff --git a/lib/kref.c b/lib/kref.c
new file mode 100644
index 0000000..0d07cc3
--- /dev/null
+++ b/lib/kref.c
@@ -0,0 +1,64 @@
+/*
+ * kref.c - library routines for handling generic reference counted objects
+ *
+ * Copyright (C) 2004 Greg Kroah-Hartman <greg@kroah.com>
+ * Copyright (C) 2004 IBM Corp.
+ *
+ * based on lib/kobject.c which was:
+ * Copyright (C) 2002-2003 Patrick Mochel <mochel@osdl.org>
+ *
+ * This file is released under the GPLv2.
+ *
+ */
+
+#include <linux/kref.h>
+#include <linux/module.h>
+
+/**
+ * kref_init - initialize object.
+ * @kref: object in question.
+ */
+void kref_init(struct kref *kref)
+{
+	atomic_set(&kref->refcount,1);
+}
+
+/**
+ * kref_get - increment refcount for object.
+ * @kref: object.
+ */
+void kref_get(struct kref *kref)
+{
+	WARN_ON(!atomic_read(&kref->refcount));
+	atomic_inc(&kref->refcount);
+}
+
+/**
+ * kref_put - decrement refcount for object.
+ * @kref: object.
+ * @release: pointer to the function that will clean up the object when the
+ *	     last reference to the object is released.
+ *	     This pointer is required, and it is not acceptable to pass kfree
+ *	     in as this function.
+ *
+ * Decrement the refcount, and if 0, call release().
+ * Return 1 if the object was removed, otherwise return 0.  Beware: if this
+ * function returns 0, you still cannot count on the kref remaining in
+ * memory.  Only use the return value if you want to know that the kref is
+ * definitely gone.
+ */
+int kref_put(struct kref *kref, void (*release)(struct kref *kref))
+{
+	WARN_ON(release == NULL);
+	WARN_ON(release == (void (*)(struct kref *))kfree);
+
+	if (atomic_dec_and_test(&kref->refcount)) {
+		release(kref);
+		return 1;
+	}
+	return 0;
+}
+
+EXPORT_SYMBOL(kref_init);
+EXPORT_SYMBOL(kref_get);
+EXPORT_SYMBOL(kref_put);
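+
+/*
+ * A minimal usage sketch, with a made-up structure name:
+ *
+ *	struct foo {
+ *		struct kref kref;
+ *		...
+ *	};
+ *
+ *	static void foo_release(struct kref *kref)
+ *	{
+ *		struct foo *foo = container_of(kref, struct foo, kref);
+ *		kfree(foo);
+ *	}
+ *
+ * The object starts life with kref_init(&foo->kref) (refcount 1),
+ * every additional user calls kref_get(), and every user drops its
+ * reference with kref_put(&foo->kref, foo_release); the last put
+ * invokes foo_release() exactly once.
+ */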
diff --git a/lib/libcrc32c.c b/lib/libcrc32c.c
new file mode 100644
index 0000000..52b6dc1
--- /dev/null
+++ b/lib/libcrc32c.c
@@ -0,0 +1,200 @@
+/* 
+ * CRC32C
+ *@Article{castagnoli-crc,
+ * author =       { Guy Castagnoli and Stefan Braeuer and Martin Herrman},
+ * title =        {{Optimization of Cyclic Redundancy-Check Codes with 24
+ *                 and 32 Parity Bits}},
+ * journal =      IEEE Transactions on Communication,
+ * year =         {1993},
+ * volume =       {41},
+ * number =       {6},
+ * pages =        {},
+ * month =        {June},
+ *}
+ * Used by the iSCSI driver, possibly others, and derived from
+ * the iscsi-crc.c module of the linux-iscsi driver at
+ * http://linux-iscsi.sourceforge.net.
+ *
+ * Following the example of lib/crc32, this function is intended to be
+ * flexible and useful for all users.  Modules that currently have their
+ * own crc32c, but hopefully may be able to use this one are:
+ *  net/sctp (please add your documentation here if you switch to
+ *            using this one!)
+ *  <endoflist>
+ *
+ * Copyright (c) 2004 Cisco Systems, Inc.
+ * 
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the Free
+ * Software Foundation; either version 2 of the License, or (at your option) 
+ * any later version.
+ *
+ */
+#include <linux/crc32c.h>
+#include <linux/compiler.h>
+#include <linux/module.h>
+#include <asm/byteorder.h>
+
+MODULE_AUTHOR("Clay Haapala <chaapala@cisco.com>");
+MODULE_DESCRIPTION("CRC32c (Castagnoli) calculations");
+MODULE_LICENSE("GPL");
+
+#define CRC32C_POLY_BE 0x1EDC6F41
+#define CRC32C_POLY_LE 0x82F63B78
+
+#ifndef CRC_LE_BITS 
+# define CRC_LE_BITS 8
+#endif
+
+
+/*
+ * Haven't generated a big-endian table yet, but the bit-wise version
+ * should at least work.
+ */
+#if defined CRC_BE_BITS && CRC_BE_BITS != 1
+#undef CRC_BE_BITS
+#endif
+#ifndef CRC_BE_BITS
+# define CRC_BE_BITS 1
+#endif
+
+EXPORT_SYMBOL(crc32c_le);
+
+#if CRC_LE_BITS == 1
+/*
+ * Compute things bit-wise, as done in crc32.c.  We could share the tight 
+ * loop below with crc32 and vary the POLY if we don't find value in terms
+ * of space and maintainability in keeping the two modules separate.
+ */
+u32 __attribute_pure__
+crc32c_le(u32 crc, unsigned char const *p, size_t len)
+{
+	int i;
+	while (len--) {
+		crc ^= *p++;
+		for (i = 0; i < 8; i++)
+			crc = (crc >> 1) ^ ((crc & 1) ? CRC32C_POLY_LE : 0);
+	}
+	return crc;
+}
+#else
+
+/*
+ * This is the CRC-32C table
+ * Generated with:
+ * width = 32 bits
+ * poly = 0x1EDC6F41
+ * reflect input bytes = true
+ * reflect output bytes = true
+ */
+
+static u32 crc32c_table[256] = {
+	0x00000000L, 0xF26B8303L, 0xE13B70F7L, 0x1350F3F4L,
+	0xC79A971FL, 0x35F1141CL, 0x26A1E7E8L, 0xD4CA64EBL,
+	0x8AD958CFL, 0x78B2DBCCL, 0x6BE22838L, 0x9989AB3BL,
+	0x4D43CFD0L, 0xBF284CD3L, 0xAC78BF27L, 0x5E133C24L,
+	0x105EC76FL, 0xE235446CL, 0xF165B798L, 0x030E349BL,
+	0xD7C45070L, 0x25AFD373L, 0x36FF2087L, 0xC494A384L,
+	0x9A879FA0L, 0x68EC1CA3L, 0x7BBCEF57L, 0x89D76C54L,
+	0x5D1D08BFL, 0xAF768BBCL, 0xBC267848L, 0x4E4DFB4BL,
+	0x20BD8EDEL, 0xD2D60DDDL, 0xC186FE29L, 0x33ED7D2AL,
+	0xE72719C1L, 0x154C9AC2L, 0x061C6936L, 0xF477EA35L,
+	0xAA64D611L, 0x580F5512L, 0x4B5FA6E6L, 0xB93425E5L,
+	0x6DFE410EL, 0x9F95C20DL, 0x8CC531F9L, 0x7EAEB2FAL,
+	0x30E349B1L, 0xC288CAB2L, 0xD1D83946L, 0x23B3BA45L,
+	0xF779DEAEL, 0x05125DADL, 0x1642AE59L, 0xE4292D5AL,
+	0xBA3A117EL, 0x4851927DL, 0x5B016189L, 0xA96AE28AL,
+	0x7DA08661L, 0x8FCB0562L, 0x9C9BF696L, 0x6EF07595L,
+	0x417B1DBCL, 0xB3109EBFL, 0xA0406D4BL, 0x522BEE48L,
+	0x86E18AA3L, 0x748A09A0L, 0x67DAFA54L, 0x95B17957L,
+	0xCBA24573L, 0x39C9C670L, 0x2A993584L, 0xD8F2B687L,
+	0x0C38D26CL, 0xFE53516FL, 0xED03A29BL, 0x1F682198L,
+	0x5125DAD3L, 0xA34E59D0L, 0xB01EAA24L, 0x42752927L,
+	0x96BF4DCCL, 0x64D4CECFL, 0x77843D3BL, 0x85EFBE38L,
+	0xDBFC821CL, 0x2997011FL, 0x3AC7F2EBL, 0xC8AC71E8L,
+	0x1C661503L, 0xEE0D9600L, 0xFD5D65F4L, 0x0F36E6F7L,
+	0x61C69362L, 0x93AD1061L, 0x80FDE395L, 0x72966096L,
+	0xA65C047DL, 0x5437877EL, 0x4767748AL, 0xB50CF789L,
+	0xEB1FCBADL, 0x197448AEL, 0x0A24BB5AL, 0xF84F3859L,
+	0x2C855CB2L, 0xDEEEDFB1L, 0xCDBE2C45L, 0x3FD5AF46L,
+	0x7198540DL, 0x83F3D70EL, 0x90A324FAL, 0x62C8A7F9L,
+	0xB602C312L, 0x44694011L, 0x5739B3E5L, 0xA55230E6L,
+	0xFB410CC2L, 0x092A8FC1L, 0x1A7A7C35L, 0xE811FF36L,
+	0x3CDB9BDDL, 0xCEB018DEL, 0xDDE0EB2AL, 0x2F8B6829L,
+	0x82F63B78L, 0x709DB87BL, 0x63CD4B8FL, 0x91A6C88CL,
+	0x456CAC67L, 0xB7072F64L, 0xA457DC90L, 0x563C5F93L,
+	0x082F63B7L, 0xFA44E0B4L, 0xE9141340L, 0x1B7F9043L,
+	0xCFB5F4A8L, 0x3DDE77ABL, 0x2E8E845FL, 0xDCE5075CL,
+	0x92A8FC17L, 0x60C37F14L, 0x73938CE0L, 0x81F80FE3L,
+	0x55326B08L, 0xA759E80BL, 0xB4091BFFL, 0x466298FCL,
+	0x1871A4D8L, 0xEA1A27DBL, 0xF94AD42FL, 0x0B21572CL,
+	0xDFEB33C7L, 0x2D80B0C4L, 0x3ED04330L, 0xCCBBC033L,
+	0xA24BB5A6L, 0x502036A5L, 0x4370C551L, 0xB11B4652L,
+	0x65D122B9L, 0x97BAA1BAL, 0x84EA524EL, 0x7681D14DL,
+	0x2892ED69L, 0xDAF96E6AL, 0xC9A99D9EL, 0x3BC21E9DL,
+	0xEF087A76L, 0x1D63F975L, 0x0E330A81L, 0xFC588982L,
+	0xB21572C9L, 0x407EF1CAL, 0x532E023EL, 0xA145813DL,
+	0x758FE5D6L, 0x87E466D5L, 0x94B49521L, 0x66DF1622L,
+	0x38CC2A06L, 0xCAA7A905L, 0xD9F75AF1L, 0x2B9CD9F2L,
+	0xFF56BD19L, 0x0D3D3E1AL, 0x1E6DCDEEL, 0xEC064EEDL,
+	0xC38D26C4L, 0x31E6A5C7L, 0x22B65633L, 0xD0DDD530L,
+	0x0417B1DBL, 0xF67C32D8L, 0xE52CC12CL, 0x1747422FL,
+	0x49547E0BL, 0xBB3FFD08L, 0xA86F0EFCL, 0x5A048DFFL,
+	0x8ECEE914L, 0x7CA56A17L, 0x6FF599E3L, 0x9D9E1AE0L,
+	0xD3D3E1ABL, 0x21B862A8L, 0x32E8915CL, 0xC083125FL,
+	0x144976B4L, 0xE622F5B7L, 0xF5720643L, 0x07198540L,
+	0x590AB964L, 0xAB613A67L, 0xB831C993L, 0x4A5A4A90L,
+	0x9E902E7BL, 0x6CFBAD78L, 0x7FAB5E8CL, 0x8DC0DD8FL,
+	0xE330A81AL, 0x115B2B19L, 0x020BD8EDL, 0xF0605BEEL,
+	0x24AA3F05L, 0xD6C1BC06L, 0xC5914FF2L, 0x37FACCF1L,
+	0x69E9F0D5L, 0x9B8273D6L, 0x88D28022L, 0x7AB90321L,
+	0xAE7367CAL, 0x5C18E4C9L, 0x4F48173DL, 0xBD23943EL,
+	0xF36E6F75L, 0x0105EC76L, 0x12551F82L, 0xE03E9C81L,
+	0x34F4F86AL, 0xC69F7B69L, 0xD5CF889DL, 0x27A40B9EL,
+	0x79B737BAL, 0x8BDCB4B9L, 0x988C474DL, 0x6AE7C44EL,
+	0xBE2DA0A5L, 0x4C4623A6L, 0x5F16D052L, 0xAD7D5351L
+};
+
+/*
+ * Steps through the buffer one byte at a time, calculating the reflected
+ * crc using table.
+ */
+
+u32 __attribute_pure__
+crc32c_le(u32 seed, unsigned char const *data, size_t length)
+{
+	u32 crc = __cpu_to_le32(seed);
+	
+	while (length--)
+		crc =
+		    crc32c_table[(crc ^ *data++) & 0xFFL] ^ (crc >> 8);
+
+	return __le32_to_cpu(crc);
+}
+
+#endif	/* CRC_LE_BITS == 1 */
+
+EXPORT_SYMBOL(crc32c_be);
+
+#if CRC_BE_BITS == 1
+u32 __attribute_pure__
+crc32c_be(u32 crc, unsigned char const *p, size_t len)
+{
+	int i;
+	while (len--) {
+		crc ^= *p++ << 24;
+		for (i = 0; i < 8; i++)
+			crc =
+			    (crc << 1) ^ ((crc & 0x80000000) ? CRC32C_POLY_BE :
+					  0);
+	}
+	return crc;
+}
+#endif
+
+/*
+ * Unit test
+ *
+ * A small unit test suite is implemented as part of the crypto suite.
+ * Select CRYPTO_CRC32C and use the tcrypt module to run the tests.
+ */
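+
+/*
+ * Note on usage: crc32c_le() only runs the shift register over the
+ * buffer; the common CRC-32C convention (used by e.g. iSCSI) is to
+ * seed with all ones and complement the final value, roughly:
+ *
+ *	u32 crc = ~crc32c_le(~0, data, length);
+ *
+ * Callers that process their data in chunks feed the running crc back
+ * in as the seed for the next call and do the final complement once.
+ */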
diff --git a/lib/parser.c b/lib/parser.c
new file mode 100644
index 0000000..7ad2a48
--- /dev/null
+++ b/lib/parser.c
@@ -0,0 +1,220 @@
+/*
+ * lib/parser.c - simple parser for mount, etc. options.
+ *
+ * This source code is licensed under the GNU General Public License,
+ * Version 2.  See the file COPYING for more details.
+ */
+
+#include <linux/ctype.h>
+#include <linux/module.h>
+#include <linux/parser.h>
+#include <linux/slab.h>
+#include <linux/string.h>
+
+/**
+ * match_one: - Determines if a string matches a simple pattern
+ * @s: the string to examine for presence of the pattern
+ * @p: the string containing the pattern
+ * @args: array of %MAX_OPT_ARGS &substring_t elements. Used to return match
+ * locations.
+ *
+ * Description: Determines if the pattern @p is present in string @s. Can only
+ * match extremely simple token=arg style patterns. If the pattern is found,
+ * the location(s) of the arguments will be returned in the @args array.
+ */
+static int match_one(char *s, char *p, substring_t args[])
+{
+	char *meta;
+	int argc = 0;
+
+	if (!p)
+		return 1;
+
+	while (1) {
+		int len = -1;
+		meta = strchr(p, '%');
+		if (!meta)
+			return strcmp(p, s) == 0;
+
+		if (strncmp(p, s, meta-p))
+			return 0;
+
+		s += meta - p;
+		p = meta + 1;
+
+		if (isdigit(*p))
+			len = simple_strtoul(p, &p, 10);
+		else if (*p == '%') {
+			if (*s++ != '%')
+				return 0;
+			p++;
+			continue;
+		}
+
+		if (argc >= MAX_OPT_ARGS)
+			return 0;
+
+		args[argc].from = s;
+		switch (*p++) {
+		case 's':
+			if (strlen(s) == 0)
+				return 0;
+			else if (len == -1 || len > strlen(s))
+				len = strlen(s);
+			args[argc].to = s + len;
+			break;
+		case 'd':
+			simple_strtol(s, &args[argc].to, 0);
+			goto num;
+		case 'u':
+			simple_strtoul(s, &args[argc].to, 0);
+			goto num;
+		case 'o':
+			simple_strtoul(s, &args[argc].to, 8);
+			goto num;
+		case 'x':
+			simple_strtoul(s, &args[argc].to, 16);
+		num:
+			if (args[argc].to == args[argc].from)
+				return 0;
+			break;
+		default:
+			return 0;
+		}
+		s = args[argc].to;
+		argc++;
+	}
+}
+
+/**
+ * match_token: - Find a token (and optional args) in a string
+ * @s: the string to examine for token/argument pairs
+ * @table: match_table_t describing the set of allowed option tokens and the
+ * arguments that may be associated with them. Must be terminated with a
+ * &struct match_token whose pattern is set to the NULL pointer.
+ * @args: array of %MAX_OPT_ARGS &substring_t elements. Used to return match
+ * locations.
+ *
+ * Description: Detects which if any of a set of token strings has been passed
+ * to it. Tokens can include up to MAX_OPT_ARGS instances of basic c-style
+ * format identifiers which will be taken into account when matching the
+ * tokens, and whose locations will be returned in the @args array.
+ */
+int match_token(char *s, match_table_t table, substring_t args[])
+{
+	struct match_token *p;
+
+	for (p = table; !match_one(s, p->pattern, args) ; p++)
+		;
+
+	return p->token;
+}
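+
+/*
+ * A sketch of the intended usage, with invented option names: a
+ * filesystem parsing "uid=<n>,ro" style mount options might do
+ *
+ *	enum { Opt_uid, Opt_ro, Opt_err };
+ *
+ *	static match_table_t tokens = {
+ *		{Opt_uid, "uid=%u"},
+ *		{Opt_ro,  "ro"},
+ *		{Opt_err, NULL}
+ *	};
+ *
+ *	substring_t args[MAX_OPT_ARGS];
+ *	int token = match_token(p, tokens, args);
+ *
+ *	switch (token) {
+ *	case Opt_uid:
+ *		if (match_int(&args[0], &uid))
+ *			return -EINVAL;
+ *		break;
+ *	case Opt_ro:
+ *		...
+ *	}
+ *
+ * The table must end with a NULL pattern; its token (Opt_err here) is
+ * what match_token() returns when nothing else matches.
+ */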
+
+/**
+ * match_number: scan a number in the given base from a substring_t
+ * @s: substring to be scanned
+ * @result: resulting integer on success
+ * @base: base to use when converting string
+ *
+ * Description: Given a &substring_t and a base, attempts to parse the substring
+ * as a number in that base. On success, sets @result to the integer represented
+ * by the string and returns 0. Returns either -ENOMEM or -EINVAL on failure.
+ */
+static int match_number(substring_t *s, int *result, int base)
+{
+	char *endp;
+	char *buf;
+	int ret;
+
+	buf = kmalloc(s->to - s->from + 1, GFP_KERNEL);
+	if (!buf)
+		return -ENOMEM;
+	memcpy(buf, s->from, s->to - s->from);
+	buf[s->to - s->from] = '\0';
+	*result = simple_strtol(buf, &endp, base);
+	ret = 0;
+	if (endp == buf)
+		ret = -EINVAL;
+	kfree(buf);
+	return ret;
+}
+
+/**
+ * match_int: - scan a decimal representation of an integer from a substring_t
+ * @s: substring_t to be scanned
+ * @result: resulting integer on success
+ *
+ * Description: Attempts to parse the &substring_t @s as a decimal integer. On
+ * success, sets @result to the integer represented by the string and returns 0.
+ * Returns either -ENOMEM or -EINVAL on failure.
+ */
+int match_int(substring_t *s, int *result)
+{
+	return match_number(s, result, 0);
+}
+
+/**
+ * match_octal: - scan an octal representation of an integer from a substring_t
+ * @s: substring_t to be scanned
+ * @result: resulting integer on success
+ *
+ * Description: Attempts to parse the &substring_t @s as an octal integer. On
+ * success, sets @result to the integer represented by the string and returns
+ * 0. Returns either -ENOMEM or -EINVAL on failure.
+ */
+int match_octal(substring_t *s, int *result)
+{
+	return match_number(s, result, 8);
+}
+
+/**
+ * match_hex: - scan a hex representation of an integer from a substring_t
+ * @s: substring_t to be scanned
+ * @result: resulting integer on success
+ *
+ * Description: Attempts to parse the &substring_t @s as a hexadecimal integer.
+ * On success, sets @result to the integer represented by the string and
+ * returns 0. Returns either -ENOMEM or -EINVAL on failure.
+ */
+int match_hex(substring_t *s, int *result)
+{
+	return match_number(s, result, 16);
+}
+
+/**
+ * match_strcpy: - copies the characters from a substring_t to a string
+ * @to: string to copy characters to.
+ * @s: &substring_t to copy
+ *
+ * Description: Copies the set of characters represented by the given
+ * &substring_t @s to the c-style string @to. Caller guarantees that @to is
+ * large enough to hold the characters of @s.
+ */
+void match_strcpy(char *to, substring_t *s)
+{
+	memcpy(to, s->from, s->to - s->from);
+	to[s->to - s->from] = '\0';
+}
+
+/**
+ * match_strdup: - allocate a new string with the contents of a substring_t
+ * @s: &substring_t to copy
+ *
+ * Description: Allocates and returns a string filled with the contents of
+ * the &substring_t @s. The caller is responsible for freeing the returned
+ * string with kfree().
+ */
+char *match_strdup(substring_t *s)
+{
+	char *p = kmalloc(s->to - s->from + 1, GFP_KERNEL);
+	if (p)
+		match_strcpy(p, s);
+	return p;
+}
+
+EXPORT_SYMBOL(match_token);
+EXPORT_SYMBOL(match_int);
+EXPORT_SYMBOL(match_octal);
+EXPORT_SYMBOL(match_hex);
+EXPORT_SYMBOL(match_strcpy);
+EXPORT_SYMBOL(match_strdup);
diff --git a/lib/prio_tree.c b/lib/prio_tree.c
new file mode 100644
index 0000000..ccfd850
--- /dev/null
+++ b/lib/prio_tree.c
@@ -0,0 +1,484 @@
+/*
+ * lib/prio_tree.c - priority search tree
+ *
+ * Copyright (C) 2004, Rajesh Venkatasubramanian <vrajesh@umich.edu>
+ *
+ * This file is released under the GPL v2.
+ *
+ * Based on the radix priority search tree proposed by Edward M. McCreight
+ * SIAM Journal of Computing, vol. 14, no.2, pages 257-276, May 1985
+ *
+ * 02Feb2004	Initial version
+ */
+
+#include <linux/init.h>
+#include <linux/mm.h>
+#include <linux/prio_tree.h>
+
+/*
+ * A clever mix of heap and radix trees forms a radix priority search tree (PST)
+ * which is useful for storing intervals, e.g., we can consider a vma as a closed
+ * interval of file pages [offset_begin, offset_end], and store all vmas that
+ * map a file in a PST. Then, using the PST, we can answer a stabbing query,
+ * i.e., selecting a set of stored intervals (vmas) that overlap with (map) a
+ * given input interval X (a set of consecutive file pages), in "O(log n + m)"
+ * time where 'log n' is the height of the PST, and 'm' is the number of stored
+ * intervals (vmas) that overlap (map) with the input interval X (the set of
+ * consecutive file pages).
+ *
+ * In our implementation, we store closed intervals of the form [radix_index,
+ * heap_index]. We assume that always radix_index <= heap_index. McCreight's PST
+ * is designed for storing intervals with unique radix indices, i.e., each
+ * interval has a different radix_index. However, this limitation can be easily
+ * overcome by using the size, i.e., heap_index - radix_index, as part of the
+ * index, so we index the tree using [(radix_index,size), heap_index].
+ *
+ * When the above-mentioned indexing scheme is used, theoretically, in a 32 bit
+ * machine, the maximum height of a PST can be 64. We can use a balanced version
+ * of the priority search tree to optimize the tree height, but the balanced
+ * tree proposed by McCreight is too complex and memory-hungry for our purpose.
+ */
+
+/*
+ * The following macros are used for implementing prio_tree for i_mmap
+ */
+
+#define RADIX_INDEX(vma)  ((vma)->vm_pgoff)
+#define VMA_SIZE(vma)	  (((vma)->vm_end - (vma)->vm_start) >> PAGE_SHIFT)
+/* avoid overflow */
+#define HEAP_INDEX(vma)	  ((vma)->vm_pgoff + (VMA_SIZE(vma) - 1))
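+
+/*
+ * For example (made-up numbers): a vma with vm_pgoff == 4 that maps
+ * three pages of its file has RADIX_INDEX == 4, VMA_SIZE == 3 and
+ * HEAP_INDEX == 6, i.e. it is stored as the closed interval [4, 6]
+ * of file pages.
+ */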
+
+
+static void get_index(const struct prio_tree_root *root,
+    const struct prio_tree_node *node,
+    unsigned long *radix, unsigned long *heap)
+{
+	if (root->raw) {
+		struct vm_area_struct *vma = prio_tree_entry(
+		    node, struct vm_area_struct, shared.prio_tree_node);
+
+		*radix = RADIX_INDEX(vma);
+		*heap = HEAP_INDEX(vma);
+	}
+	else {
+		*radix = node->start;
+		*heap = node->last;
+	}
+}
+
+static unsigned long index_bits_to_maxindex[BITS_PER_LONG];
+
+void __init prio_tree_init(void)
+{
+	unsigned int i;
+
+	for (i = 0; i < ARRAY_SIZE(index_bits_to_maxindex) - 1; i++)
+		index_bits_to_maxindex[i] = (1UL << (i + 1)) - 1;
+	index_bits_to_maxindex[ARRAY_SIZE(index_bits_to_maxindex) - 1] = ~0UL;
+}
+
+/*
+ * Maximum heap_index that can be stored in a PST with index_bits bits
+ */
+static inline unsigned long prio_tree_maxindex(unsigned int bits)
+{
+	return index_bits_to_maxindex[bits - 1];
+}
+
+/*
+ * Extend a priority search tree so that it can store a node with heap_index
+ * max_heap_index. In the worst case, this algorithm takes O((log n)^2).
+ * However, this function is used rarely and the common case performance is
+ * not bad.
+ */
+static struct prio_tree_node *prio_tree_expand(struct prio_tree_root *root,
+		struct prio_tree_node *node, unsigned long max_heap_index)
+{
+	struct prio_tree_node *first = NULL, *prev, *last = NULL;
+
+	if (max_heap_index > prio_tree_maxindex(root->index_bits))
+		root->index_bits++;
+
+	while (max_heap_index > prio_tree_maxindex(root->index_bits)) {
+		root->index_bits++;
+
+		if (prio_tree_empty(root))
+			continue;
+
+		if (first == NULL) {
+			first = root->prio_tree_node;
+			prio_tree_remove(root, root->prio_tree_node);
+			INIT_PRIO_TREE_NODE(first);
+			last = first;
+		} else {
+			prev = last;
+			last = root->prio_tree_node;
+			prio_tree_remove(root, root->prio_tree_node);
+			INIT_PRIO_TREE_NODE(last);
+			prev->left = last;
+			last->parent = prev;
+		}
+	}
+
+	INIT_PRIO_TREE_NODE(node);
+
+	if (first) {
+		node->left = first;
+		first->parent = node;
+	} else
+		last = node;
+
+	if (!prio_tree_empty(root)) {
+		last->left = root->prio_tree_node;
+		last->left->parent = last;
+	}
+
+	root->prio_tree_node = node;
+	return node;
+}
+
+/*
+ * Replace a prio_tree_node with a new node and return the old node
+ */
+struct prio_tree_node *prio_tree_replace(struct prio_tree_root *root,
+		struct prio_tree_node *old, struct prio_tree_node *node)
+{
+	INIT_PRIO_TREE_NODE(node);
+
+	if (prio_tree_root(old)) {
+		BUG_ON(root->prio_tree_node != old);
+		/*
+		 * We can reduce root->index_bits here. However, it is complex
+		 * and does not help much to improve performance (IMO).
+		 */
+		node->parent = node;
+		root->prio_tree_node = node;
+	} else {
+		node->parent = old->parent;
+		if (old->parent->left == old)
+			old->parent->left = node;
+		else
+			old->parent->right = node;
+	}
+
+	if (!prio_tree_left_empty(old)) {
+		node->left = old->left;
+		old->left->parent = node;
+	}
+
+	if (!prio_tree_right_empty(old)) {
+		node->right = old->right;
+		old->right->parent = node;
+	}
+
+	return old;
+}
+
+/*
+ * Insert a prio_tree_node @node into a radix priority search tree @root. The
+ * algorithm typically takes O(log n) time where 'log n' is the number of bits
+ * required to represent the maximum heap_index. In the worst case, the algo
+ * can take O((log n)^2) - check prio_tree_expand.
+ *
+ * If a node with the same radix_index and heap_index is already present in
+ * the tree, the address of that existing node is returned. Otherwise, @node
+ * is inserted into the tree and @node is returned.
+ */
+struct prio_tree_node *prio_tree_insert(struct prio_tree_root *root,
+		struct prio_tree_node *node)
+{
+	struct prio_tree_node *cur, *res = node;
+	unsigned long radix_index, heap_index;
+	unsigned long r_index, h_index, index, mask;
+	int size_flag = 0;
+
+	get_index(root, node, &radix_index, &heap_index);
+
+	if (prio_tree_empty(root) ||
+			heap_index > prio_tree_maxindex(root->index_bits))
+		return prio_tree_expand(root, node, heap_index);
+
+	cur = root->prio_tree_node;
+	mask = 1UL << (root->index_bits - 1);
+
+	while (mask) {
+		get_index(root, cur, &r_index, &h_index);
+
+		if (r_index == radix_index && h_index == heap_index)
+			return cur;
+
+		if (h_index < heap_index ||
+		    (h_index == heap_index && r_index > radix_index)) {
+			struct prio_tree_node *tmp = node;
+			node = prio_tree_replace(root, cur, node);
+			cur = tmp;
+			/* swap indices */
+			index = r_index;
+			r_index = radix_index;
+			radix_index = index;
+			index = h_index;
+			h_index = heap_index;
+			heap_index = index;
+		}
+
+		if (size_flag)
+			index = heap_index - radix_index;
+		else
+			index = radix_index;
+
+		if (index & mask) {
+			if (prio_tree_right_empty(cur)) {
+				INIT_PRIO_TREE_NODE(node);
+				cur->right = node;
+				node->parent = cur;
+				return res;
+			} else
+				cur = cur->right;
+		} else {
+			if (prio_tree_left_empty(cur)) {
+				INIT_PRIO_TREE_NODE(node);
+				cur->left = node;
+				node->parent = cur;
+				return res;
+			} else
+				cur = cur->left;
+		}
+
+		mask >>= 1;
+
+		if (!mask) {
+			mask = 1UL << (BITS_PER_LONG - 1);
+			size_flag = 1;
+		}
+	}
+	/* Should not reach here */
+	BUG();
+	return NULL;
+}
+
+/*
+ * Remove a prio_tree_node @node from a radix priority search tree @root. The
+ * algorithm takes O(log n) time where 'log n' is the number of bits required
+ * to represent the maximum heap_index.
+ */
+void prio_tree_remove(struct prio_tree_root *root, struct prio_tree_node *node)
+{
+	struct prio_tree_node *cur;
+	unsigned long r_index, h_index_right, h_index_left;
+
+	cur = node;
+
+	while (!prio_tree_left_empty(cur) || !prio_tree_right_empty(cur)) {
+		if (!prio_tree_left_empty(cur))
+			get_index(root, cur->left, &r_index, &h_index_left);
+		else {
+			cur = cur->right;
+			continue;
+		}
+
+		if (!prio_tree_right_empty(cur))
+			get_index(root, cur->right, &r_index, &h_index_right);
+		else {
+			cur = cur->left;
+			continue;
+		}
+
+		/* both h_index_left and h_index_right cannot be 0 */
+		if (h_index_left >= h_index_right)
+			cur = cur->left;
+		else
+			cur = cur->right;
+	}
+
+	if (prio_tree_root(cur)) {
+		BUG_ON(root->prio_tree_node != cur);
+		__INIT_PRIO_TREE_ROOT(root, root->raw);
+		return;
+	}
+
+	if (cur->parent->right == cur)
+		cur->parent->right = cur->parent;
+	else
+		cur->parent->left = cur->parent;
+
+	while (cur != node)
+		cur = prio_tree_replace(root, cur->parent, cur);
+}
+
+/*
+ * Following functions help to enumerate all prio_tree_nodes in the tree that
+ * overlap with the input interval X [radix_index, heap_index]. The enumeration
+ * takes O(log n + m) time where 'log n' is the height of the tree (which is
+ * proportional to # of bits required to represent the maximum heap_index) and
+ * 'm' is the number of prio_tree_nodes that overlap the interval X.
+ */
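+
+/*
+ * Usage sketch (illustrative): to visit every node overlapping the closed
+ * interval [start, last], fill in the iterator as below and keep calling
+ * prio_tree_next() until it returns NULL (handle_overlap is the caller's
+ * own code):
+ *
+ *	struct prio_tree_iter iter;
+ *	struct prio_tree_node *n;
+ *
+ *	iter.root = root;
+ *	iter.r_index = start;
+ *	iter.h_index = last;
+ *	iter.cur = NULL;
+ *	while ((n = prio_tree_next(&iter)) != NULL)
+ *		handle_overlap(n);
+ */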
+
+static struct prio_tree_node *prio_tree_left(struct prio_tree_iter *iter,
+		unsigned long *r_index, unsigned long *h_index)
+{
+	if (prio_tree_left_empty(iter->cur))
+		return NULL;
+
+	get_index(iter->root, iter->cur->left, r_index, h_index);
+
+	if (iter->r_index <= *h_index) {
+		iter->cur = iter->cur->left;
+		iter->mask >>= 1;
+		if (iter->mask) {
+			if (iter->size_level)
+				iter->size_level++;
+		} else {
+			if (iter->size_level) {
+				BUG_ON(!prio_tree_left_empty(iter->cur));
+				BUG_ON(!prio_tree_right_empty(iter->cur));
+				iter->size_level++;
+				iter->mask = ULONG_MAX;
+			} else {
+				iter->size_level = 1;
+				iter->mask = 1UL << (BITS_PER_LONG - 1);
+			}
+		}
+		return iter->cur;
+	}
+
+	return NULL;
+}
+
+static struct prio_tree_node *prio_tree_right(struct prio_tree_iter *iter,
+		unsigned long *r_index, unsigned long *h_index)
+{
+	unsigned long value;
+
+	if (prio_tree_right_empty(iter->cur))
+		return NULL;
+
+	if (iter->size_level)
+		value = iter->value;
+	else
+		value = iter->value | iter->mask;
+
+	if (iter->h_index < value)
+		return NULL;
+
+	get_index(iter->root, iter->cur->right, r_index, h_index);
+
+	if (iter->r_index <= *h_index) {
+		iter->cur = iter->cur->right;
+		iter->mask >>= 1;
+		iter->value = value;
+		if (iter->mask) {
+			if (iter->size_level)
+				iter->size_level++;
+		} else {
+			if (iter->size_level) {
+				BUG_ON(!prio_tree_left_empty(iter->cur));
+				BUG_ON(!prio_tree_right_empty(iter->cur));
+				iter->size_level++;
+				iter->mask = ULONG_MAX;
+			} else {
+				iter->size_level = 1;
+				iter->mask = 1UL << (BITS_PER_LONG - 1);
+			}
+		}
+		return iter->cur;
+	}
+
+	return NULL;
+}
+
+static struct prio_tree_node *prio_tree_parent(struct prio_tree_iter *iter)
+{
+	iter->cur = iter->cur->parent;
+	if (iter->mask == ULONG_MAX)
+		iter->mask = 1UL;
+	else if (iter->size_level == 1)
+		iter->mask = 1UL;
+	else
+		iter->mask <<= 1;
+	if (iter->size_level)
+		iter->size_level--;
+	if (!iter->size_level && (iter->value & iter->mask))
+		iter->value ^= iter->mask;
+	return iter->cur;
+}
+
+static inline int overlap(struct prio_tree_iter *iter,
+		unsigned long r_index, unsigned long h_index)
+{
+	return iter->h_index >= r_index && iter->r_index <= h_index;
+}
+
+/*
+ * prio_tree_first:
+ *
+ * Get the first prio_tree_node that overlaps with the interval [radix_index,
+ * heap_index]. Note that radix_index <= heap_index always holds. We do a
+ * pre-order traversal of the tree.
+ */
+static struct prio_tree_node *prio_tree_first(struct prio_tree_iter *iter)
+{
+	struct prio_tree_root *root;
+	unsigned long r_index, h_index;
+
+	INIT_PRIO_TREE_ITER(iter);
+
+	root = iter->root;
+	if (prio_tree_empty(root))
+		return NULL;
+
+	get_index(root, root->prio_tree_node, &r_index, &h_index);
+
+	if (iter->r_index > h_index)
+		return NULL;
+
+	iter->mask = 1UL << (root->index_bits - 1);
+	iter->cur = root->prio_tree_node;
+
+	while (1) {
+		if (overlap(iter, r_index, h_index))
+			return iter->cur;
+
+		if (prio_tree_left(iter, &r_index, &h_index))
+			continue;
+
+		if (prio_tree_right(iter, &r_index, &h_index))
+			continue;
+
+		break;
+	}
+	return NULL;
+}
+
+/*
+ * prio_tree_next:
+ *
+ * Get the next prio_tree_node that overlaps with the input interval in iter
+ */
+struct prio_tree_node *prio_tree_next(struct prio_tree_iter *iter)
+{
+	unsigned long r_index, h_index;
+
+	if (iter->cur == NULL)
+		return prio_tree_first(iter);
+
+repeat:
+	while (prio_tree_left(iter, &r_index, &h_index))
+		if (overlap(iter, r_index, h_index))
+			return iter->cur;
+
+	while (!prio_tree_right(iter, &r_index, &h_index)) {
+		while (!prio_tree_root(iter->cur) &&
+				iter->cur->parent->right == iter->cur)
+			prio_tree_parent(iter);
+
+		if (prio_tree_root(iter->cur))
+			return NULL;
+
+		prio_tree_parent(iter);
+	}
+
+	if (overlap(iter, r_index, h_index))
+		return iter->cur;
+
+	goto repeat;
+}
diff --git a/lib/radix-tree.c b/lib/radix-tree.c
new file mode 100644
index 0000000..04d6643
--- /dev/null
+++ b/lib/radix-tree.c
@@ -0,0 +1,807 @@
+/*
+ * Copyright (C) 2001 Momchil Velikov
+ * Portions Copyright (C) 2001 Christoph Hellwig
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2, or (at
+ * your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#include <linux/errno.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/radix-tree.h>
+#include <linux/percpu.h>
+#include <linux/slab.h>
+#include <linux/notifier.h>
+#include <linux/cpu.h>
+#include <linux/gfp.h>
+#include <linux/string.h>
+#include <linux/bitops.h>
+
+
+#ifdef __KERNEL__
+#define RADIX_TREE_MAP_SHIFT	6
+#else
+#define RADIX_TREE_MAP_SHIFT	3	/* For more stressful testing */
+#endif
+#define RADIX_TREE_TAGS		2
+
+#define RADIX_TREE_MAP_SIZE	(1UL << RADIX_TREE_MAP_SHIFT)
+#define RADIX_TREE_MAP_MASK	(RADIX_TREE_MAP_SIZE-1)
+
+#define RADIX_TREE_TAG_LONGS	\
+	((RADIX_TREE_MAP_SIZE + BITS_PER_LONG - 1) / BITS_PER_LONG)
+
+struct radix_tree_node {
+	unsigned int	count;
+	void		*slots[RADIX_TREE_MAP_SIZE];
+	unsigned long	tags[RADIX_TREE_TAGS][RADIX_TREE_TAG_LONGS];
+};
+
+struct radix_tree_path {
+	struct radix_tree_node *node, **slot;
+	int offset;
+};
+
+#define RADIX_TREE_INDEX_BITS  (8 /* CHAR_BIT */ * sizeof(unsigned long))
+#define RADIX_TREE_MAX_PATH (RADIX_TREE_INDEX_BITS/RADIX_TREE_MAP_SHIFT + 2)
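+
+/*
+ * For example, with RADIX_TREE_MAP_SHIFT == 6 each node covers 6 index bits
+ * (64 slots), so a 64-bit index needs at most 64/6 + 2 = 12 path entries and
+ * a 32-bit index at most 32/6 + 2 = 7.
+ */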
+
+static unsigned long height_to_maxindex[RADIX_TREE_MAX_PATH];
+
+/*
+ * Radix tree node cache.
+ */
+static kmem_cache_t *radix_tree_node_cachep;
+
+/*
+ * Per-cpu pool of preloaded nodes
+ */
+struct radix_tree_preload {
+	int nr;
+	struct radix_tree_node *nodes[RADIX_TREE_MAX_PATH];
+};
+DEFINE_PER_CPU(struct radix_tree_preload, radix_tree_preloads) = { 0, };
+
+/*
+ * This assumes that the caller has performed appropriate preallocation, and
+ * that the caller has pinned this thread of control to the current CPU.
+ */
+static struct radix_tree_node *
+radix_tree_node_alloc(struct radix_tree_root *root)
+{
+	struct radix_tree_node *ret;
+
+	ret = kmem_cache_alloc(radix_tree_node_cachep, root->gfp_mask);
+	if (ret == NULL && !(root->gfp_mask & __GFP_WAIT)) {
+		struct radix_tree_preload *rtp;
+
+		rtp = &__get_cpu_var(radix_tree_preloads);
+		if (rtp->nr) {
+			ret = rtp->nodes[rtp->nr - 1];
+			rtp->nodes[rtp->nr - 1] = NULL;
+			rtp->nr--;
+		}
+	}
+	return ret;
+}
+
+static inline void
+radix_tree_node_free(struct radix_tree_node *node)
+{
+	kmem_cache_free(radix_tree_node_cachep, node);
+}
+
+/*
+ * Load up this CPU's radix_tree_node buffer with sufficient objects to
+ * ensure that the addition of a single element to the tree cannot fail.  On
+ * success, return zero, with preemption disabled.  On error, return -ENOMEM
+ * with preemption left enabled.
+ */
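+/*
+ * Typical call sequence (sketch; tree, index, item and the lock are the
+ * caller's own, the gfp mask is only an example, and the final
+ * preempt_enable() pairs with the preempt_disable() taken by a successful
+ * radix_tree_preload()):
+ *
+ *	if (radix_tree_preload(GFP_KERNEL))
+ *		return -ENOMEM;
+ *	spin_lock(&lock);
+ *	error = radix_tree_insert(&tree, index, item);
+ *	spin_unlock(&lock);
+ *	preempt_enable();
+ */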
+int radix_tree_preload(int gfp_mask)
+{
+	struct radix_tree_preload *rtp;
+	struct radix_tree_node *node;
+	int ret = -ENOMEM;
+
+	preempt_disable();
+	rtp = &__get_cpu_var(radix_tree_preloads);
+	while (rtp->nr < ARRAY_SIZE(rtp->nodes)) {
+		preempt_enable();
+		node = kmem_cache_alloc(radix_tree_node_cachep, gfp_mask);
+		if (node == NULL)
+			goto out;
+		preempt_disable();
+		rtp = &__get_cpu_var(radix_tree_preloads);
+		if (rtp->nr < ARRAY_SIZE(rtp->nodes))
+			rtp->nodes[rtp->nr++] = node;
+		else
+			kmem_cache_free(radix_tree_node_cachep, node);
+	}
+	ret = 0;
+out:
+	return ret;
+}
+
+static inline void tag_set(struct radix_tree_node *node, int tag, int offset)
+{
+	if (!test_bit(offset, &node->tags[tag][0]))
+		__set_bit(offset, &node->tags[tag][0]);
+}
+
+static inline void tag_clear(struct radix_tree_node *node, int tag, int offset)
+{
+	__clear_bit(offset, &node->tags[tag][0]);
+}
+
+static inline int tag_get(struct radix_tree_node *node, int tag, int offset)
+{
+	return test_bit(offset, &node->tags[tag][0]);
+}
+
+/*
+ *	Return the maximum key which can be stored in a
+ *	radix tree with height HEIGHT.
+ */
+static inline unsigned long radix_tree_maxindex(unsigned int height)
+{
+	return height_to_maxindex[height];
+}
+
+/*
+ *	Extend a radix tree so it can store key @index.
+ */
+static int radix_tree_extend(struct radix_tree_root *root, unsigned long index)
+{
+	struct radix_tree_node *node;
+	unsigned int height;
+	char tags[RADIX_TREE_TAGS];
+	int tag;
+
+	/* Figure out what the height should be.  */
+	height = root->height + 1;
+	while (index > radix_tree_maxindex(height))
+		height++;
+
+	if (root->rnode == NULL) {
+		root->height = height;
+		goto out;
+	}
+
+	/*
+	 * Prepare the tag status of the top-level node for propagation
+	 * into the newly-pushed top-level node(s)
+	 */
+	for (tag = 0; tag < RADIX_TREE_TAGS; tag++) {
+		int idx;
+
+		tags[tag] = 0;
+		for (idx = 0; idx < RADIX_TREE_TAG_LONGS; idx++) {
+			if (root->rnode->tags[tag][idx]) {
+				tags[tag] = 1;
+				break;
+			}
+		}
+	}
+
+	do {
+		if (!(node = radix_tree_node_alloc(root)))
+			return -ENOMEM;
+
+		/* Increase the height.  */
+		node->slots[0] = root->rnode;
+
+		/* Propagate the aggregated tag info into the new root */
+		for (tag = 0; tag < RADIX_TREE_TAGS; tag++) {
+			if (tags[tag])
+				tag_set(node, tag, 0);
+		}
+
+		node->count = 1;
+		root->rnode = node;
+		root->height++;
+	} while (height > root->height);
+out:
+	return 0;
+}
+
+/**
+ *	radix_tree_insert    -    insert into a radix tree
+ *	@root:		radix tree root
+ *	@index:		index key
+ *	@item:		item to insert
+ *
+ *	Insert an item into the radix tree at position @index.
+ */
+int radix_tree_insert(struct radix_tree_root *root,
+			unsigned long index, void *item)
+{
+	struct radix_tree_node *node = NULL, *tmp, **slot;
+	unsigned int height, shift;
+	int offset;
+	int error;
+
+	/* Make sure the tree is high enough.  */
+	if ((!index && !root->rnode) ||
+			index > radix_tree_maxindex(root->height)) {
+		error = radix_tree_extend(root, index);
+		if (error)
+			return error;
+	}
+
+	slot = &root->rnode;
+	height = root->height;
+	shift = (height-1) * RADIX_TREE_MAP_SHIFT;
+
+	offset = 0;			/* uninitialised var warning */
+	while (height > 0) {
+		if (*slot == NULL) {
+			/* Have to add a child node.  */
+			if (!(tmp = radix_tree_node_alloc(root)))
+				return -ENOMEM;
+			*slot = tmp;
+			if (node)
+				node->count++;
+		}
+
+		/* Go a level down */
+		offset = (index >> shift) & RADIX_TREE_MAP_MASK;
+		node = *slot;
+		slot = (struct radix_tree_node **)(node->slots + offset);
+		shift -= RADIX_TREE_MAP_SHIFT;
+		height--;
+	}
+
+	if (*slot != NULL)
+		return -EEXIST;
+	if (node) {
+		node->count++;
+		BUG_ON(tag_get(node, 0, offset));
+		BUG_ON(tag_get(node, 1, offset));
+	}
+
+	*slot = item;
+	return 0;
+}
+EXPORT_SYMBOL(radix_tree_insert);
+
+/**
+ *	radix_tree_lookup    -    perform lookup operation on a radix tree
+ *	@root:		radix tree root
+ *	@index:		index key
+ *
+ *	Lookup the item at the position @index in the radix tree @root.
+ */
+void *radix_tree_lookup(struct radix_tree_root *root, unsigned long index)
+{
+	unsigned int height, shift;
+	struct radix_tree_node **slot;
+
+	height = root->height;
+	if (index > radix_tree_maxindex(height))
+		return NULL;
+
+	shift = (height-1) * RADIX_TREE_MAP_SHIFT;
+	slot = &root->rnode;
+
+	while (height > 0) {
+		if (*slot == NULL)
+			return NULL;
+
+		slot = (struct radix_tree_node **)
+			((*slot)->slots +
+				((index >> shift) & RADIX_TREE_MAP_MASK));
+		shift -= RADIX_TREE_MAP_SHIFT;
+		height--;
+	}
+
+	return *slot;
+}
+EXPORT_SYMBOL(radix_tree_lookup);
+
+/**
+ *	radix_tree_tag_set - set a tag on a radix tree node
+ *	@root:		radix tree root
+ *	@index:		index key
+ *	@tag: 		tag index
+ *
+ *	Set the search tag corresponding to @index in the radix tree, from
+ *	the root all the way down to the leaf node.
+ *
+ *	Returns the address of the tagged item.   Setting a tag on a not-present
+ *	item is a bug.
+ */
+void *radix_tree_tag_set(struct radix_tree_root *root,
+			unsigned long index, int tag)
+{
+	unsigned int height, shift;
+	struct radix_tree_node **slot;
+
+	height = root->height;
+	if (index > radix_tree_maxindex(height))
+		return NULL;
+
+	shift = (height - 1) * RADIX_TREE_MAP_SHIFT;
+	slot = &root->rnode;
+
+	while (height > 0) {
+		int offset;
+
+		offset = (index >> shift) & RADIX_TREE_MAP_MASK;
+		tag_set(*slot, tag, offset);
+		slot = (struct radix_tree_node **)((*slot)->slots + offset);
+		BUG_ON(*slot == NULL);
+		shift -= RADIX_TREE_MAP_SHIFT;
+		height--;
+	}
+
+	return *slot;
+}
+EXPORT_SYMBOL(radix_tree_tag_set);
+
+/**
+ *	radix_tree_tag_clear - clear a tag on a radix tree node
+ *	@root:		radix tree root
+ *	@index:		index key
+ *	@tag: 		tag index
+ *
+ *	Clear the search tag corresponding to @index in the radix tree.  If
+ *	this causes the leaf node to have no tags set then clear the tag in the
+ *	next-to-leaf node, etc.
+ *
+ *	Returns the address of the tagged item on success, else NULL.  That is,
+ *	it has the same return value and semantics as radix_tree_lookup().
+ */
+void *radix_tree_tag_clear(struct radix_tree_root *root,
+			unsigned long index, int tag)
+{
+	struct radix_tree_path path[RADIX_TREE_MAX_PATH], *pathp = path;
+	unsigned int height, shift;
+	void *ret = NULL;
+
+	height = root->height;
+	if (index > radix_tree_maxindex(height))
+		goto out;
+
+	shift = (height - 1) * RADIX_TREE_MAP_SHIFT;
+	pathp->node = NULL;
+	pathp->slot = &root->rnode;
+
+	while (height > 0) {
+		int offset;
+
+		if (*pathp->slot == NULL)
+			goto out;
+
+		offset = (index >> shift) & RADIX_TREE_MAP_MASK;
+		pathp[1].offset = offset;
+		pathp[1].node = *pathp[0].slot;
+		pathp[1].slot = (struct radix_tree_node **)
+				(pathp[1].node->slots + offset);
+		pathp++;
+		shift -= RADIX_TREE_MAP_SHIFT;
+		height--;
+	}
+
+	ret = *pathp[0].slot;
+	if (ret == NULL)
+		goto out;
+
+	do {
+		int idx;
+
+		tag_clear(pathp[0].node, tag, pathp[0].offset);
+		for (idx = 0; idx < RADIX_TREE_TAG_LONGS; idx++) {
+			if (pathp[0].node->tags[tag][idx])
+				goto out;
+		}
+		pathp--;
+	} while (pathp[0].node);
+out:
+	return ret;
+}
+EXPORT_SYMBOL(radix_tree_tag_clear);
+
+#ifndef __KERNEL__	/* Only the test harness uses this at present */
+/**
+ *	radix_tree_tag_get - get a tag on a radix tree node
+ *	@root:		radix tree root
+ *	@index:		index key
+ *	@tag: 		tag index
+ *
+ *	Return the search tag corresponding to @index in the radix tree.
+ *
+ *	Returns zero if the tag is unset, or if there is no corresponding item
+ *	in the tree.
+ */
+int radix_tree_tag_get(struct radix_tree_root *root,
+			unsigned long index, int tag)
+{
+	unsigned int height, shift;
+	struct radix_tree_node **slot;
+	int saw_unset_tag = 0;
+
+	height = root->height;
+	if (index > radix_tree_maxindex(height))
+		return 0;
+
+	shift = (height - 1) * RADIX_TREE_MAP_SHIFT;
+	slot = &root->rnode;
+
+	for ( ; ; ) {
+		int offset;
+
+		if (*slot == NULL)
+			return 0;
+
+		offset = (index >> shift) & RADIX_TREE_MAP_MASK;
+
+		/*
+		 * This is just a debug check.  Later, we can bail out as soon as
+		 * we see an unset tag.
+		 */
+		if (!tag_get(*slot, tag, offset))
+			saw_unset_tag = 1;
+		if (height == 1) {
+			int ret = tag_get(*slot, tag, offset);
+
+			BUG_ON(ret && saw_unset_tag);
+			return ret;
+		}
+		slot = (struct radix_tree_node **)((*slot)->slots + offset);
+		shift -= RADIX_TREE_MAP_SHIFT;
+		height--;
+	}
+}
+EXPORT_SYMBOL(radix_tree_tag_get);
+#endif
+
+static unsigned int
+__lookup(struct radix_tree_root *root, void **results, unsigned long index,
+	unsigned int max_items, unsigned long *next_index)
+{
+	unsigned int nr_found = 0;
+	unsigned int shift;
+	unsigned int height = root->height;
+	struct radix_tree_node *slot;
+
+	shift = (height-1) * RADIX_TREE_MAP_SHIFT;
+	slot = root->rnode;
+
+	while (height > 0) {
+		unsigned long i = (index >> shift) & RADIX_TREE_MAP_MASK;
+
+		for ( ; i < RADIX_TREE_MAP_SIZE; i++) {
+			if (slot->slots[i] != NULL)
+				break;
+			index &= ~((1UL << shift) - 1);
+			index += 1UL << shift;
+			if (index == 0)
+				goto out;	/* 32-bit wraparound */
+		}
+		if (i == RADIX_TREE_MAP_SIZE)
+			goto out;
+		height--;
+		if (height == 0) {	/* Bottom level: grab some items */
+			unsigned long j = index & RADIX_TREE_MAP_MASK;
+
+			for ( ; j < RADIX_TREE_MAP_SIZE; j++) {
+				index++;
+				if (slot->slots[j]) {
+					results[nr_found++] = slot->slots[j];
+					if (nr_found == max_items)
+						goto out;
+				}
+			}
+		}
+		shift -= RADIX_TREE_MAP_SHIFT;
+		slot = slot->slots[i];
+	}
+out:
+	*next_index = index;
+	return nr_found;
+}
+
+/**
+ *	radix_tree_gang_lookup - perform multiple lookup on a radix tree
+ *	@root:		radix tree root
+ *	@results:	where the results of the lookup are placed
+ *	@first_index:	start the lookup from this key
+ *	@max_items:	place up to this many items at *results
+ *
+ *	Performs an index-ascending scan of the tree for present items.  Places
+ *	them at *@results and returns the number of items which were placed at
+ *	*@results.
+ *
+ *	The implementation is naive.
+ */
+unsigned int
+radix_tree_gang_lookup(struct radix_tree_root *root, void **results,
+			unsigned long first_index, unsigned int max_items)
+{
+	const unsigned long max_index = radix_tree_maxindex(root->height);
+	unsigned long cur_index = first_index;
+	unsigned int ret = 0;
+
+	while (ret < max_items) {
+		unsigned int nr_found;
+		unsigned long next_index;	/* Index of next search */
+
+		if (cur_index > max_index)
+			break;
+		nr_found = __lookup(root, results + ret, cur_index,
+					max_items - ret, &next_index);
+		ret += nr_found;
+		if (next_index == 0)
+			break;
+		cur_index = next_index;
+	}
+	return ret;
+}
+EXPORT_SYMBOL(radix_tree_gang_lookup);
+
+/*
+ * FIXME: the two tag_get()s here should use find_next_bit() instead of
+ * open-coding the search.
+ */
+static unsigned int
+__lookup_tag(struct radix_tree_root *root, void **results, unsigned long index,
+	unsigned int max_items, unsigned long *next_index, int tag)
+{
+	unsigned int nr_found = 0;
+	unsigned int shift;
+	unsigned int height = root->height;
+	struct radix_tree_node *slot;
+
+	shift = (height - 1) * RADIX_TREE_MAP_SHIFT;
+	slot = root->rnode;
+
+	while (height > 0) {
+		unsigned long i = (index >> shift) & RADIX_TREE_MAP_MASK;
+
+		for ( ; i < RADIX_TREE_MAP_SIZE; i++) {
+			if (tag_get(slot, tag, i)) {
+				BUG_ON(slot->slots[i] == NULL);
+				break;
+			}
+			index &= ~((1UL << shift) - 1);
+			index += 1UL << shift;
+			if (index == 0)
+				goto out;	/* 32-bit wraparound */
+		}
+		if (i == RADIX_TREE_MAP_SIZE)
+			goto out;
+		height--;
+		if (height == 0) {	/* Bottom level: grab some items */
+			unsigned long j = index & RADIX_TREE_MAP_MASK;
+
+			for ( ; j < RADIX_TREE_MAP_SIZE; j++) {
+				index++;
+				if (tag_get(slot, tag, j)) {
+					BUG_ON(slot->slots[j] == NULL);
+					results[nr_found++] = slot->slots[j];
+					if (nr_found == max_items)
+						goto out;
+				}
+			}
+		}
+		shift -= RADIX_TREE_MAP_SHIFT;
+		slot = slot->slots[i];
+	}
+out:
+	*next_index = index;
+	return nr_found;
+}
+
+/**
+ *	radix_tree_gang_lookup_tag - perform multiple lookup on a radix tree
+ *	                             based on a tag
+ *	@root:		radix tree root
+ *	@results:	where the results of the lookup are placed
+ *	@first_index:	start the lookup from this key
+ *	@max_items:	place up to this many items at *results
+ *	@tag:		the tag index
+ *
+ *	Performs an index-ascending scan of the tree for present items which
+ *	have the tag indexed by @tag set.  Places the items at *@results and
+ *	returns the number of items which were placed at *@results.
+ */
+unsigned int
+radix_tree_gang_lookup_tag(struct radix_tree_root *root, void **results,
+		unsigned long first_index, unsigned int max_items, int tag)
+{
+	const unsigned long max_index = radix_tree_maxindex(root->height);
+	unsigned long cur_index = first_index;
+	unsigned int ret = 0;
+
+	while (ret < max_items) {
+		unsigned int nr_found;
+		unsigned long next_index;	/* Index of next search */
+
+		if (cur_index > max_index)
+			break;
+		nr_found = __lookup_tag(root, results + ret, cur_index,
+					max_items - ret, &next_index, tag);
+		ret += nr_found;
+		if (next_index == 0)
+			break;
+		cur_index = next_index;
+	}
+	return ret;
+}
+EXPORT_SYMBOL(radix_tree_gang_lookup_tag);
+
+/**
+ *	radix_tree_delete    -    delete an item from a radix tree
+ *	@root:		radix tree root
+ *	@index:		index key
+ *
+ *	Remove the item at @index from the radix tree rooted at @root.
+ *
+ *	Returns the address of the deleted item, or NULL if it was not present.
+ */
+void *radix_tree_delete(struct radix_tree_root *root, unsigned long index)
+{
+	struct radix_tree_path path[RADIX_TREE_MAX_PATH], *pathp = path;
+	struct radix_tree_path *orig_pathp;
+	unsigned int height, shift;
+	void *ret = NULL;
+	char tags[RADIX_TREE_TAGS];
+	int nr_cleared_tags;
+
+	height = root->height;
+	if (index > radix_tree_maxindex(height))
+		goto out;
+
+	shift = (height - 1) * RADIX_TREE_MAP_SHIFT;
+	pathp->node = NULL;
+	pathp->slot = &root->rnode;
+
+	while (height > 0) {
+		int offset;
+
+		if (*pathp->slot == NULL)
+			goto out;
+
+		offset = (index >> shift) & RADIX_TREE_MAP_MASK;
+		pathp[1].offset = offset;
+		pathp[1].node = *pathp[0].slot;
+		pathp[1].slot = (struct radix_tree_node **)
+				(pathp[1].node->slots + offset);
+		pathp++;
+		shift -= RADIX_TREE_MAP_SHIFT;
+		height--;
+	}
+
+	ret = *pathp[0].slot;
+	if (ret == NULL)
+		goto out;
+
+	orig_pathp = pathp;
+
+	/*
+	 * Clear all tags associated with the just-deleted item
+	 */
+	memset(tags, 0, sizeof(tags));
+	do {
+		int tag;
+
+		nr_cleared_tags = RADIX_TREE_TAGS;
+		for (tag = 0; tag < RADIX_TREE_TAGS; tag++) {
+			int idx;
+
+			if (tags[tag])
+				continue;
+
+			tag_clear(pathp[0].node, tag, pathp[0].offset);
+
+			for (idx = 0; idx < RADIX_TREE_TAG_LONGS; idx++) {
+				if (pathp[0].node->tags[tag][idx]) {
+					tags[tag] = 1;
+					nr_cleared_tags--;
+					break;
+				}
+			}
+		}
+		pathp--;
+	} while (pathp[0].node && nr_cleared_tags);
+
+	pathp = orig_pathp;
+	*pathp[0].slot = NULL;
+	while (pathp[0].node && --pathp[0].node->count == 0) {
+		pathp--;
+		BUG_ON(*pathp[0].slot == NULL);
+		*pathp[0].slot = NULL;
+		radix_tree_node_free(pathp[1].node);
+	}
+	if (root->rnode == NULL)
+		root->height = 0;
+out:
+	return ret;
+}
+EXPORT_SYMBOL(radix_tree_delete);
+
+/**
+ *	radix_tree_tagged - test whether any items in the tree are tagged
+ *	@root:		radix tree root
+ *	@tag:		tag to test
+ */
+int radix_tree_tagged(struct radix_tree_root *root, int tag)
+{
+	int idx;
+
+	if (!root->rnode)
+		return 0;
+	for (idx = 0; idx < RADIX_TREE_TAG_LONGS; idx++) {
+		if (root->rnode->tags[tag][idx])
+			return 1;
+	}
+	return 0;
+}
+EXPORT_SYMBOL(radix_tree_tagged);
+
+static void
+radix_tree_node_ctor(void *node, kmem_cache_t *cachep, unsigned long flags)
+{
+	memset(node, 0, sizeof(struct radix_tree_node));
+}
+
+static __init unsigned long __maxindex(unsigned int height)
+{
+	unsigned int tmp = height * RADIX_TREE_MAP_SHIFT;
+	unsigned long index = (~0UL >> (RADIX_TREE_INDEX_BITS - tmp - 1)) >> 1;
+
+	if (tmp >= RADIX_TREE_INDEX_BITS)
+		index = ~0UL;
+	return index;
+}
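+
+/*
+ * With the in-kernel RADIX_TREE_MAP_SHIFT of 6, __maxindex() above yields,
+ * for instance, height 1 -> 63, height 2 -> 4095 and height 3 -> 262143,
+ * saturating at ~0UL once height * RADIX_TREE_MAP_SHIFT reaches
+ * RADIX_TREE_INDEX_BITS.
+ */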
+
+static __init void radix_tree_init_maxindex(void)
+{
+	unsigned int i;
+
+	for (i = 0; i < ARRAY_SIZE(height_to_maxindex); i++)
+		height_to_maxindex[i] = __maxindex(i);
+}
+
+#ifdef CONFIG_HOTPLUG_CPU
+static int radix_tree_callback(struct notifier_block *nfb,
+			       unsigned long action,
+			       void *hcpu)
+{
+	int cpu = (long)hcpu;
+	struct radix_tree_preload *rtp;
+
+	/* Free per-cpu pool of preloaded nodes */
+	if (action == CPU_DEAD) {
+		rtp = &per_cpu(radix_tree_preloads, cpu);
+		while (rtp->nr) {
+			kmem_cache_free(radix_tree_node_cachep,
+					rtp->nodes[rtp->nr-1]);
+			rtp->nodes[rtp->nr-1] = NULL;
+			rtp->nr--;
+		}
+	}
+	return NOTIFY_OK;
+}
+#endif /* CONFIG_HOTPLUG_CPU */
+
+void __init radix_tree_init(void)
+{
+	radix_tree_node_cachep = kmem_cache_create("radix_tree_node",
+			sizeof(struct radix_tree_node), 0,
+			SLAB_PANIC, radix_tree_node_ctor, NULL);
+	radix_tree_init_maxindex();
+	hotcpu_notifier(radix_tree_callback, 0);
+}
diff --git a/lib/rbtree.c b/lib/rbtree.c
new file mode 100644
index 0000000..14b791a
--- /dev/null
+++ b/lib/rbtree.c
@@ -0,0 +1,394 @@
+/*
+  Red Black Trees
+  (C) 1999  Andrea Arcangeli <andrea@suse.de>
+  (C) 2002  David Woodhouse <dwmw2@infradead.org>
+  
+  This program is free software; you can redistribute it and/or modify
+  it under the terms of the GNU General Public License as published by
+  the Free Software Foundation; either version 2 of the License, or
+  (at your option) any later version.
+
+  This program is distributed in the hope that it will be useful,
+  but WITHOUT ANY WARRANTY; without even the implied warranty of
+  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+  GNU General Public License for more details.
+
+  You should have received a copy of the GNU General Public License
+  along with this program; if not, write to the Free Software
+  Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
+
+  linux/lib/rbtree.c
+*/
+
+#include <linux/rbtree.h>
+#include <linux/module.h>
+
+static void __rb_rotate_left(struct rb_node *node, struct rb_root *root)
+{
+	struct rb_node *right = node->rb_right;
+
+	if ((node->rb_right = right->rb_left))
+		right->rb_left->rb_parent = node;
+	right->rb_left = node;
+
+	if ((right->rb_parent = node->rb_parent))
+	{
+		if (node == node->rb_parent->rb_left)
+			node->rb_parent->rb_left = right;
+		else
+			node->rb_parent->rb_right = right;
+	}
+	else
+		root->rb_node = right;
+	node->rb_parent = right;
+}
+
+static void __rb_rotate_right(struct rb_node *node, struct rb_root *root)
+{
+	struct rb_node *left = node->rb_left;
+
+	if ((node->rb_left = left->rb_right))
+		left->rb_right->rb_parent = node;
+	left->rb_right = node;
+
+	if ((left->rb_parent = node->rb_parent))
+	{
+		if (node == node->rb_parent->rb_right)
+			node->rb_parent->rb_right = left;
+		else
+			node->rb_parent->rb_left = left;
+	}
+	else
+		root->rb_node = left;
+	node->rb_parent = left;
+}
+
+void rb_insert_color(struct rb_node *node, struct rb_root *root)
+{
+	struct rb_node *parent, *gparent;
+
+	while ((parent = node->rb_parent) && parent->rb_color == RB_RED)
+	{
+		gparent = parent->rb_parent;
+
+		if (parent == gparent->rb_left)
+		{
+			{
+				register struct rb_node *uncle = gparent->rb_right;
+				if (uncle && uncle->rb_color == RB_RED)
+				{
+					uncle->rb_color = RB_BLACK;
+					parent->rb_color = RB_BLACK;
+					gparent->rb_color = RB_RED;
+					node = gparent;
+					continue;
+				}
+			}
+
+			if (parent->rb_right == node)
+			{
+				register struct rb_node *tmp;
+				__rb_rotate_left(parent, root);
+				tmp = parent;
+				parent = node;
+				node = tmp;
+			}
+
+			parent->rb_color = RB_BLACK;
+			gparent->rb_color = RB_RED;
+			__rb_rotate_right(gparent, root);
+		} else {
+			{
+				register struct rb_node *uncle = gparent->rb_left;
+				if (uncle && uncle->rb_color == RB_RED)
+				{
+					uncle->rb_color = RB_BLACK;
+					parent->rb_color = RB_BLACK;
+					gparent->rb_color = RB_RED;
+					node = gparent;
+					continue;
+				}
+			}
+
+			if (parent->rb_left == node)
+			{
+				register struct rb_node *tmp;
+				__rb_rotate_right(parent, root);
+				tmp = parent;
+				parent = node;
+				node = tmp;
+			}
+
+			parent->rb_color = RB_BLACK;
+			gparent->rb_color = RB_RED;
+			__rb_rotate_left(gparent, root);
+		}
+	}
+
+	root->rb_node->rb_color = RB_BLACK;
+}
+EXPORT_SYMBOL(rb_insert_color);
+
+static void __rb_erase_color(struct rb_node *node, struct rb_node *parent,
+			     struct rb_root *root)
+{
+	struct rb_node *other;
+
+	while ((!node || node->rb_color == RB_BLACK) && node != root->rb_node)
+	{
+		if (parent->rb_left == node)
+		{
+			other = parent->rb_right;
+			if (other->rb_color == RB_RED)
+			{
+				other->rb_color = RB_BLACK;
+				parent->rb_color = RB_RED;
+				__rb_rotate_left(parent, root);
+				other = parent->rb_right;
+			}
+			if ((!other->rb_left ||
+			     other->rb_left->rb_color == RB_BLACK)
+			    && (!other->rb_right ||
+				other->rb_right->rb_color == RB_BLACK))
+			{
+				other->rb_color = RB_RED;
+				node = parent;
+				parent = node->rb_parent;
+			}
+			else
+			{
+				if (!other->rb_right ||
+				    other->rb_right->rb_color == RB_BLACK)
+				{
+					register struct rb_node *o_left;
+					if ((o_left = other->rb_left))
+						o_left->rb_color = RB_BLACK;
+					other->rb_color = RB_RED;
+					__rb_rotate_right(other, root);
+					other = parent->rb_right;
+				}
+				other->rb_color = parent->rb_color;
+				parent->rb_color = RB_BLACK;
+				if (other->rb_right)
+					other->rb_right->rb_color = RB_BLACK;
+				__rb_rotate_left(parent, root);
+				node = root->rb_node;
+				break;
+			}
+		}
+		else
+		{
+			other = parent->rb_left;
+			if (other->rb_color == RB_RED)
+			{
+				other->rb_color = RB_BLACK;
+				parent->rb_color = RB_RED;
+				__rb_rotate_right(parent, root);
+				other = parent->rb_left;
+			}
+			if ((!other->rb_left ||
+			     other->rb_left->rb_color == RB_BLACK)
+			    && (!other->rb_right ||
+				other->rb_right->rb_color == RB_BLACK))
+			{
+				other->rb_color = RB_RED;
+				node = parent;
+				parent = node->rb_parent;
+			}
+			else
+			{
+				if (!other->rb_left ||
+				    other->rb_left->rb_color == RB_BLACK)
+				{
+					register struct rb_node *o_right;
+					if ((o_right = other->rb_right))
+						o_right->rb_color = RB_BLACK;
+					other->rb_color = RB_RED;
+					__rb_rotate_left(other, root);
+					other = parent->rb_left;
+				}
+				other->rb_color = parent->rb_color;
+				parent->rb_color = RB_BLACK;
+				if (other->rb_left)
+					other->rb_left->rb_color = RB_BLACK;
+				__rb_rotate_right(parent, root);
+				node = root->rb_node;
+				break;
+			}
+		}
+	}
+	if (node)
+		node->rb_color = RB_BLACK;
+}
+
+void rb_erase(struct rb_node *node, struct rb_root *root)
+{
+	struct rb_node *child, *parent;
+	int color;
+
+	if (!node->rb_left)
+		child = node->rb_right;
+	else if (!node->rb_right)
+		child = node->rb_left;
+	else
+	{
+		struct rb_node *old = node, *left;
+
+		node = node->rb_right;
+		while ((left = node->rb_left) != NULL)
+			node = left;
+		child = node->rb_right;
+		parent = node->rb_parent;
+		color = node->rb_color;
+
+		if (child)
+			child->rb_parent = parent;
+		if (parent)
+		{
+			if (parent->rb_left == node)
+				parent->rb_left = child;
+			else
+				parent->rb_right = child;
+		}
+		else
+			root->rb_node = child;
+
+		if (node->rb_parent == old)
+			parent = node;
+		node->rb_parent = old->rb_parent;
+		node->rb_color = old->rb_color;
+		node->rb_right = old->rb_right;
+		node->rb_left = old->rb_left;
+
+		if (old->rb_parent)
+		{
+			if (old->rb_parent->rb_left == old)
+				old->rb_parent->rb_left = node;
+			else
+				old->rb_parent->rb_right = node;
+		} else
+			root->rb_node = node;
+
+		old->rb_left->rb_parent = node;
+		if (old->rb_right)
+			old->rb_right->rb_parent = node;
+		goto color;
+	}
+
+	parent = node->rb_parent;
+	color = node->rb_color;
+
+	if (child)
+		child->rb_parent = parent;
+	if (parent)
+	{
+		if (parent->rb_left == node)
+			parent->rb_left = child;
+		else
+			parent->rb_right = child;
+	}
+	else
+		root->rb_node = child;
+
+ color:
+	if (color == RB_BLACK)
+		__rb_erase_color(child, parent, root);
+}
+EXPORT_SYMBOL(rb_erase);
+
+/*
+ * This function returns the first node (in sort order) of the tree.
+ */
+struct rb_node *rb_first(struct rb_root *root)
+{
+	struct rb_node	*n;
+
+	n = root->rb_node;
+	if (!n)
+		return NULL;
+	while (n->rb_left)
+		n = n->rb_left;
+	return n;
+}
+EXPORT_SYMBOL(rb_first);
+
+struct rb_node *rb_last(struct rb_root *root)
+{
+	struct rb_node	*n;
+
+	n = root->rb_node;
+	if (!n)
+		return NULL;
+	while (n->rb_right)
+		n = n->rb_right;
+	return n;
+}
+EXPORT_SYMBOL(rb_last);
+
+struct rb_node *rb_next(struct rb_node *node)
+{
+	/* If we have a right-hand child, go down and then left as far
+	   as we can. */
+	if (node->rb_right) {
+		node = node->rb_right; 
+		while (node->rb_left)
+			node=node->rb_left;
+		return node;
+	}
+
+	/* No right-hand children.  Everything down and left is
+	   smaller than us, so any 'next' node must be in the general
+	   direction of our parent. Go up the tree; any time the
+	   ancestor is a right-hand child of its parent, keep going
+	   up. First time it's a left-hand child of its parent, said
+	   parent is our 'next' node. */
+	while (node->rb_parent && node == node->rb_parent->rb_right)
+		node = node->rb_parent;
+
+	return node->rb_parent;
+}
+EXPORT_SYMBOL(rb_next);
+
+struct rb_node *rb_prev(struct rb_node *node)
+{
+	/* If we have a left-hand child, go down and then right as far
+	   as we can. */
+	if (node->rb_left) {
+		node = node->rb_left; 
+		while (node->rb_right)
+			node=node->rb_right;
+		return node;
+	}
+
+	/* No left-hand children. Go up till we find an ancestor which
+	   is a right-hand child of its parent */
+	while (node->rb_parent && node == node->rb_parent->rb_left)
+		node = node->rb_parent;
+
+	return node->rb_parent;
+}
+EXPORT_SYMBOL(rb_prev);
+
+void rb_replace_node(struct rb_node *victim, struct rb_node *new,
+		     struct rb_root *root)
+{
+	struct rb_node *parent = victim->rb_parent;
+
+	/* Set the surrounding nodes to point to the replacement */
+	if (parent) {
+		if (victim == parent->rb_left)
+			parent->rb_left = new;
+		else
+			parent->rb_right = new;
+	} else {
+		root->rb_node = new;
+	}
+	if (victim->rb_left)
+		victim->rb_left->rb_parent = new;
+	if (victim->rb_right)
+		victim->rb_right->rb_parent = new;
+
+	/* Copy the pointers/colour from the victim to the replacement */
+	*new = *victim;
+}
+EXPORT_SYMBOL(rb_replace_node);
diff --git a/lib/reed_solomon/Makefile b/lib/reed_solomon/Makefile
new file mode 100644
index 0000000..747a2de
--- /dev/null
+++ b/lib/reed_solomon/Makefile
@@ -0,0 +1,6 @@
+#
+# This is a modified version of the Reed-Solomon library.
+#
+
+obj-$(CONFIG_REED_SOLOMON) += reed_solomon.o
+
diff --git a/lib/reed_solomon/decode_rs.c b/lib/reed_solomon/decode_rs.c
new file mode 100644
index 0000000..d401dec
--- /dev/null
+++ b/lib/reed_solomon/decode_rs.c
@@ -0,0 +1,272 @@
+/* 
+ * lib/reed_solomon/decode_rs.c
+ *
+ * Overview:
+ *   Generic Reed Solomon encoder / decoder library
+ *   
+ * Copyright 2002, Phil Karn, KA9Q
+ * May be used under the terms of the GNU General Public License (GPL)
+ *
+ * Adaptation to the kernel by Thomas Gleixner (tglx@linutronix.de)
+ *
+ * $Id: decode_rs.c,v 1.6 2004/10/22 15:41:47 gleixner Exp $
+ *
+ */
+
+/* Generic data width independent code which is included by the 
+ * wrappers.
+ */
+{ 
+	int deg_lambda, el, deg_omega;
+	int i, j, r, k, pad;
+	int nn = rs->nn;
+	int nroots = rs->nroots;
+	int fcr = rs->fcr;
+	int prim = rs->prim;
+	int iprim = rs->iprim;
+	uint16_t *alpha_to = rs->alpha_to;
+	uint16_t *index_of = rs->index_of;
+	uint16_t u, q, tmp, num1, num2, den, discr_r, syn_error;
+	/* Err+Eras Locator poly and syndrome poly The maximum value
+	 * of nroots is 8. So the necessary stack size will be about
+	 * 220 bytes max.
+	 */
+	uint16_t lambda[nroots + 1], syn[nroots];
+	uint16_t b[nroots + 1], t[nroots + 1], omega[nroots + 1];
+	uint16_t root[nroots], reg[nroots + 1], loc[nroots];
+	int count = 0;
+	uint16_t msk = (uint16_t) rs->nn;
+
+	/* Check length parameter for validity */
+	pad = nn - nroots - len;
+	if (pad < 0 || pad >= nn)
+		return -ERANGE;
+		
+	/* Does the caller provide the syndrome? */
+	if (s != NULL) 
+		goto decode;
+
+	/* form the syndromes; i.e., evaluate data(x) at roots of
+	 * g(x) */
+	for (i = 0; i < nroots; i++)
+		syn[i] = (((uint16_t) data[0]) ^ invmsk) & msk;
+
+	for (j = 1; j < len; j++) {
+		for (i = 0; i < nroots; i++) {
+			if (syn[i] == 0) {
+				syn[i] = (((uint16_t) data[j]) ^ 
+					  invmsk) & msk;
+			} else {
+				syn[i] = ((((uint16_t) data[j]) ^
+					   invmsk) & msk) ^ 
+					alpha_to[rs_modnn(rs, index_of[syn[i]] +
+						       (fcr + i) * prim)];
+			}
+		}
+	}
+
+	for (j = 0; j < nroots; j++) {
+		for (i = 0; i < nroots; i++) {
+			if (syn[i] == 0) {
+				syn[i] = ((uint16_t) par[j]) & msk;
+			} else {
+				syn[i] = (((uint16_t) par[j]) & msk) ^ 
+					alpha_to[rs_modnn(rs, index_of[syn[i]] +
+						       (fcr+i)*prim)];
+			}
+		}
+	}
+	s = syn;
+
+	/* Convert syndromes to index form, checking for nonzero condition */
+	syn_error = 0;
+	for (i = 0; i < nroots; i++) {
+		syn_error |= s[i];
+		s[i] = index_of[s[i]];
+	}
+
+	if (!syn_error) {
+		/* if syndrome is zero, data[] is a codeword and there are no
+		 * errors to correct. So return data[] unmodified
+		 */
+		count = 0;
+		goto finish;
+	}
+
+ decode:
+	memset(&lambda[1], 0, nroots * sizeof(lambda[0]));
+	lambda[0] = 1;
+
+	if (no_eras > 0) {
+		/* Init lambda to be the erasure locator polynomial */
+		lambda[1] = alpha_to[rs_modnn(rs, 
+					      prim * (nn - 1 - eras_pos[0]))];
+		for (i = 1; i < no_eras; i++) {
+			u = rs_modnn(rs, prim * (nn - 1 - eras_pos[i]));
+			for (j = i + 1; j > 0; j--) {
+				tmp = index_of[lambda[j - 1]];
+				if (tmp != nn) {
+					lambda[j] ^= 
+						alpha_to[rs_modnn(rs, u + tmp)];
+				}
+			}
+		}
+	}
+
+	for (i = 0; i < nroots + 1; i++)
+		b[i] = index_of[lambda[i]];
+
+	/*
+	 * Begin Berlekamp-Massey algorithm to determine error+erasure
+	 * locator polynomial
+	 */
+	r = no_eras;
+	el = no_eras;
+	while (++r <= nroots) {	/* r is the step number */
+		/* Compute discrepancy at the r-th step in poly-form */
+		discr_r = 0;
+		for (i = 0; i < r; i++) {
+			if ((lambda[i] != 0) && (s[r - i - 1] != nn)) {
+				discr_r ^= 
+					alpha_to[rs_modnn(rs, 
+							  index_of[lambda[i]] +
+							  s[r - i - 1])];
+			}
+		}
+		discr_r = index_of[discr_r];	/* Index form */
+		if (discr_r == nn) {
+			/* 2 lines below: B(x) <-- x*B(x) */
+			memmove (&b[1], b, nroots * sizeof (b[0]));
+			b[0] = nn;
+		} else {
+			/* 7 lines below: T(x) <-- lambda(x)-discr_r*x*b(x) */
+			t[0] = lambda[0];
+			for (i = 0; i < nroots; i++) {
+				if (b[i] != nn) {
+					t[i + 1] = lambda[i + 1] ^ 
+						alpha_to[rs_modnn(rs, discr_r +
+								  b[i])];
+				} else
+					t[i + 1] = lambda[i + 1];
+			}
+			if (2 * el <= r + no_eras - 1) {
+				el = r + no_eras - el;
+				/*
+				 * 2 lines below: B(x) <-- inv(discr_r) *
+				 * lambda(x)
+				 */
+				for (i = 0; i <= nroots; i++) {
+					b[i] = (lambda[i] == 0) ? nn :
+						rs_modnn(rs, index_of[lambda[i]]
+							 - discr_r + nn);
+				}
+			} else {
+				/* 2 lines below: B(x) <-- x*B(x) */
+				memmove(&b[1], b, nroots * sizeof(b[0]));
+				b[0] = nn;
+			}
+			memcpy(lambda, t, (nroots + 1) * sizeof(t[0]));
+		}
+	}
+
+	/* Convert lambda to index form and compute deg(lambda(x)) */
+	deg_lambda = 0;
+	for (i = 0; i < nroots + 1; i++) {
+		lambda[i] = index_of[lambda[i]];
+		if (lambda[i] != nn)
+			deg_lambda = i;
+	}
+	/* Find roots of error+erasure locator polynomial by Chien search */
+	memcpy(&reg[1], &lambda[1], nroots * sizeof(reg[0]));
+	count = 0;		/* Number of roots of lambda(x) */
+	for (i = 1, k = iprim - 1; i <= nn; i++, k = rs_modnn(rs, k + iprim)) {
+		q = 1;		/* lambda[0] is always 0 */
+		for (j = deg_lambda; j > 0; j--) {
+			if (reg[j] != nn) {
+				reg[j] = rs_modnn(rs, reg[j] + j);
+				q ^= alpha_to[reg[j]];
+			}
+		}
+		if (q != 0)
+			continue;	/* Not a root */
+		/* store root (index-form) and error location number */
+		root[count] = i;
+		loc[count] = k;
+		/* If we've already found max possible roots,
+		 * abort the search to save time
+		 */
+		if (++count == deg_lambda)
+			break;
+	}
+	if (deg_lambda != count) {
+		/*
+		 * deg(lambda) unequal to number of roots => uncorrectable
+		 * error detected
+		 */
+		count = -1;
+		goto finish;
+	}
+	/*
+	 * Compute err+eras evaluator poly omega(x) = s(x)*lambda(x) (modulo
+	 * x**nroots). in index form. Also find deg(omega).
+	 */
+	deg_omega = deg_lambda - 1;
+	for (i = 0; i <= deg_omega; i++) {
+		tmp = 0;
+		for (j = i; j >= 0; j--) {
+			if ((s[i - j] != nn) && (lambda[j] != nn))
+				tmp ^=
+				    alpha_to[rs_modnn(rs, s[i - j] + lambda[j])];
+		}
+		omega[i] = index_of[tmp];
+	}
+
+	/*
+	 * Compute error values in poly-form. num1 = omega(inv(X(l))), num2 =
+	 * inv(X(l))**(fcr-1) and den = lambda_pr(inv(X(l))) all in poly-form
+	 */
+	for (j = count - 1; j >= 0; j--) {
+		num1 = 0;
+		for (i = deg_omega; i >= 0; i--) {
+			if (omega[i] != nn)
+				num1 ^= alpha_to[rs_modnn(rs, omega[i] + 
+							i * root[j])];
+		}
+		num2 = alpha_to[rs_modnn(rs, root[j] * (fcr - 1) + nn)];
+		den = 0;
+
+		/* lambda[i+1] for i even is the formal derivative
+		 * lambda_pr of lambda[i] */
+		for (i = min(deg_lambda, nroots - 1) & ~1; i >= 0; i -= 2) {
+			if (lambda[i + 1] != nn) {
+				den ^= alpha_to[rs_modnn(rs, lambda[i + 1] + 
+						       i * root[j])];
+			}
+		}
+		/* Apply error to data */
+		if (num1 != 0 && loc[j] >= pad) {
+			uint16_t cor = alpha_to[rs_modnn(rs,index_of[num1] + 
+						       index_of[num2] +
+						       nn - index_of[den])];
+			/* Store the error correction pattern, if a
+			 * correction buffer is available */
+			if (corr) {
+				corr[j] = cor;
+			} else {
+				/* If a data buffer is given and the
+				 * error is inside the message,
+				 * correct it */
+				if (data && (loc[j] < (nn - nroots)))
+					data[loc[j] - pad] ^= cor;
+			}
+		}
+	}
+
+finish:
+	if (eras_pos != NULL) {
+		for (i = 0; i < count; i++)
+			eras_pos[i] = loc[i] - pad;
+	}
+	return count;
+
+}
diff --git a/lib/reed_solomon/encode_rs.c b/lib/reed_solomon/encode_rs.c
new file mode 100644
index 0000000..237bf65
--- /dev/null
+++ b/lib/reed_solomon/encode_rs.c
@@ -0,0 +1,54 @@
+/* 
+ * lib/reed_solomon/encode_rs.c
+ *
+ * Overview:
+ *   Generic Reed Solomon encoder / decoder library
+ *   
+ * Copyright 2002, Phil Karn, KA9Q
+ * May be used under the terms of the GNU General Public License (GPL)
+ *
+ * Adaptation to the kernel by Thomas Gleixner (tglx@linutronix.de)
+ *
+ * $Id: encode_rs.c,v 1.4 2004/10/22 15:41:47 gleixner Exp $
+ *
+ */
+
+/* Generic data width independent code which is included by the 
+ * wrappers.
+ * int encode_rsX (struct rs_control *rs, uintX_t *data, int len, uintY_t *par)
+ */
+{
+	int i, j, pad;
+	int nn = rs->nn;
+	int nroots = rs->nroots;
+	uint16_t *alpha_to = rs->alpha_to;
+	uint16_t *index_of = rs->index_of;
+	uint16_t *genpoly = rs->genpoly;
+	uint16_t fb;
+	uint16_t msk = (uint16_t) rs->nn;
+
+	/* Check length parameter for validity */
+	pad = nn - nroots - len;
+	if (pad < 0 || pad >= nn)
+		return -ERANGE;
+
+	for (i = 0; i < len; i++) {
+		fb = index_of[((((uint16_t) data[i])^invmsk) & msk) ^ par[0]];
+		/* feedback term is non-zero */
+		if (fb != nn) {	
+			for (j = 1; j < nroots; j++) {
+				par[j] ^= alpha_to[rs_modnn(rs, fb + 
+							 genpoly[nroots - j])];
+			}
+		}
+		/* Shift */
+		memmove(&par[0], &par[1], sizeof(uint16_t) * (nroots - 1));
+		if (fb != nn) {
+			par[nroots - 1] = alpha_to[rs_modnn(rs, 
+							    fb + genpoly[0])];
+		} else {
+			par[nroots - 1] = 0;
+		}
+	}
+	return 0;
+}
diff --git a/lib/reed_solomon/reed_solomon.c b/lib/reed_solomon/reed_solomon.c
new file mode 100644
index 0000000..6604e3b
--- /dev/null
+++ b/lib/reed_solomon/reed_solomon.c
@@ -0,0 +1,335 @@
+/* 
+ * lib/reed_solomon/rslib.c
+ *
+ * Overview:
+ *   Generic Reed Solomon encoder / decoder library
+ *   
+ * Copyright (C) 2004 Thomas Gleixner (tglx@linutronix.de)
+ *
+ * Reed Solomon code lifted from reed solomon library written by Phil Karn
+ * Copyright 2002 Phil Karn, KA9Q
+ *
+ * $Id: rslib.c,v 1.5 2004/10/22 15:41:47 gleixner Exp $
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * Description:
+ *	
+ * The generic Reed Solomon library provides runtime configurable
+ * encoding / decoding of RS codes.
+ * Each user must call init_rs to get a pointer to a rs_control
+ * structure for the given rs parameters. This structure is either
+ * generated, or an already available matching control structure is used.
+ * If a structure is generated then the polynomial arrays for
+ * fast encoding / decoding are built. This can take some time so
+ * make sure not to call this function from a time critical path.
+ * Usually a module / driver should initialize the necessary 
+ * rs_control structure on module / driver init and release it
+ * on exit.
+ * The encoding puts the calculated syndrome into a given syndrome 
+ * buffer. 
+ * The decoding is a two step process. The first step calculates
+ * the syndrome over the received (data + syndrome) and calls the
+ * second stage, which does the decoding / error correction itself.
+ * Many hw encoders provide a syndrome calculation over the received
+ * data + syndrome and can call the second stage directly.
+ *
+ */
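+
+/*
+ * Usage sketch (the parameter values below are only an example and must be
+ * chosen to match the code actually used on the medium):
+ *
+ *	struct rs_control *rs = init_rs(10, 0x409, 0, 1, 6);
+ *	if (!rs)
+ *		return -ENOMEM;
+ *	(par[] must be zeroed by the caller before encoding)
+ *	encode_rs8(rs, data, len, par, 0);
+ *	...
+ *	numerr = decode_rs8(rs, data, par, len, NULL, 0, NULL, 0, NULL);
+ *	(numerr is the number of corrected symbols, or negative if the
+ *	 codeword was uncorrectable or the length was invalid)
+ *	free_rs(rs);
+ */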
+
+#include <linux/errno.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/rslib.h>
+#include <linux/slab.h>
+#include <asm/semaphore.h>
+
+/* This list holds all currently allocated rs control structures */
+static LIST_HEAD (rslist);
+/* Protection for the list */
+static DECLARE_MUTEX(rslistlock);
+
+/** 
+ * rs_init - Initialize a Reed-Solomon codec
+ *
+ * @symsize:	symbol size, bits (1-8)
+ * @gfpoly:	Field generator polynomial coefficients
+ * @fcr:	first root of RS code generator polynomial, index form
+ * @prim:	primitive element to generate polynomial roots
+ * @nroots:	RS code generator polynomial degree (number of roots)
+ *
+ * Allocate a control structure and the polynomial arrays for faster
+ * en/decoding. Fill the arrays according to the given parameters.
+ */
+static struct rs_control *rs_init(int symsize, int gfpoly, int fcr, 
+				   int prim, int nroots)
+{
+	struct rs_control *rs;
+	int i, j, sr, root, iprim;
+
+	/* Allocate the control structure */
+	rs = kmalloc(sizeof (struct rs_control), GFP_KERNEL);
+	if (rs == NULL)
+		return NULL;
+
+	INIT_LIST_HEAD(&rs->list);
+
+	rs->mm = symsize;
+	rs->nn = (1 << symsize) - 1;
+	rs->fcr = fcr;
+	rs->prim = prim;
+	rs->nroots = nroots;
+	rs->gfpoly = gfpoly;
+
+	/* Allocate the arrays */
+	rs->alpha_to = kmalloc(sizeof(uint16_t) * (rs->nn + 1), GFP_KERNEL);
+	if (rs->alpha_to == NULL)
+		goto errrs;
+
+	rs->index_of = kmalloc(sizeof(uint16_t) * (rs->nn + 1), GFP_KERNEL);
+	if (rs->index_of == NULL)
+		goto erralp;
+
+	rs->genpoly = kmalloc(sizeof(uint16_t) * (rs->nroots + 1), GFP_KERNEL);
+	if(rs->genpoly == NULL)
+		goto erridx;
+
+	/* Generate Galois field lookup tables */
+	rs->index_of[0] = rs->nn;	/* log(zero) = -inf */
+	rs->alpha_to[rs->nn] = 0;	/* alpha**-inf = 0 */
+	sr = 1;
+	for (i = 0; i < rs->nn; i++) {
+		rs->index_of[sr] = i;
+		rs->alpha_to[i] = sr;
+		sr <<= 1;
+		if (sr & (1 << symsize))
+			sr ^= gfpoly;
+		sr &= rs->nn;
+	}
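+	/*
+	 * At this point alpha_to[] maps exponent -> field element and
+	 * index_of[] maps element -> exponent, so (for a primitive gfpoly,
+	 * which is checked below) alpha_to[index_of[x]] == x for any non-zero
+	 * x and index_of[alpha_to[i]] == i for 0 <= i < nn.
+	 */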
+	/* If it's not primitive, exit */
+	if(sr != 1)
+		goto errpol;
+
+	/* Find prim-th root of 1, used in decoding */
+	for(iprim = 1; (iprim % prim) != 0; iprim += rs->nn);
+	/* prim-th root of 1, index form */
+	rs->iprim = iprim / prim;
+
+	/* Form RS code generator polynomial from its roots */
+	rs->genpoly[0] = 1;
+	for (i = 0, root = fcr * prim; i < nroots; i++, root += prim) {
+		rs->genpoly[i + 1] = 1;
+		/* Multiply rs->genpoly[] by  @**(root + x) */
+		for (j = i; j > 0; j--) {
+			if (rs->genpoly[j] != 0) {
+				rs->genpoly[j] = rs->genpoly[j -1] ^ 
+					rs->alpha_to[rs_modnn(rs, 
+					rs->index_of[rs->genpoly[j]] + root)];
+			} else
+				rs->genpoly[j] = rs->genpoly[j - 1];
+		}
+		/* rs->genpoly[0] can never be zero */
+		rs->genpoly[0] = 
+			rs->alpha_to[rs_modnn(rs, 
+				rs->index_of[rs->genpoly[0]] + root)];
+	}
+	/* convert rs->genpoly[] to index form for quicker encoding */
+	for (i = 0; i <= nroots; i++)
+		rs->genpoly[i] = rs->index_of[rs->genpoly[i]];
+	return rs;
+
+	/* Error exit */
+errpol:
+	kfree(rs->genpoly);
+erridx:
+	kfree(rs->index_of);
+erralp:
+	kfree(rs->alpha_to);
+errrs:
+	kfree(rs);
+	return NULL;
+}
+
+
+/** 
+ *  free_rs - Free the rs control structure if it is no longer used
+ *
+ *  @rs:	the control structure which is no longer used by the
+ *		caller
+ */
+void free_rs(struct rs_control *rs)
+{
+	down(&rslistlock);
+	rs->users--;
+	if(!rs->users) {
+		list_del(&rs->list);
+		kfree(rs->alpha_to);
+		kfree(rs->index_of);
+		kfree(rs->genpoly);
+		kfree(rs);
+	}
+	up(&rslistlock);
+}
+
+/** 
+ * init_rs - Find a matching or allocate a new rs control structure
+ *
+ *  @symsize:	the symbol size (number of bits)
+ *  @gfpoly:	the extended Galois field generator polynomial coefficients,
+ *		with the 0th coefficient in the low order bit. The polynomial
+ *		must be primitive;
+ *  @fcr:  	the first consecutive root of the rs code generator polynomial 
+ *		in index form
+ *  @prim:	primitive element to generate polynomial roots
+ *  @nroots:	RS code generator polynomial degree (number of roots)
+ */
+struct rs_control *init_rs(int symsize, int gfpoly, int fcr, int prim, 
+			   int nroots)
+{
+	struct list_head	*tmp;
+	struct rs_control	*rs;
+
+	/* Sanity checks */
+	if (symsize < 1)
+		return NULL;
+	if (fcr < 0 || fcr >= (1<<symsize))
+		return NULL;
+	if (prim <= 0 || prim >= (1<<symsize))
+		return NULL;
+	if (nroots < 0 || nroots >= (1<<symsize) || nroots > 8)
+		return NULL;
+	
+	down(&rslistlock);
+
+	/* Walk through the list and look for a matching entry */
+	list_for_each(tmp, &rslist) {
+		rs = list_entry(tmp, struct rs_control, list);
+		if (symsize != rs->mm)
+			continue;
+		if (gfpoly != rs->gfpoly)
+			continue;
+		if (fcr != rs->fcr)
+			continue;	
+		if (prim != rs->prim)
+			continue;	
+		if (nroots != rs->nroots)
+			continue;
+		/* We have a matching one already */
+		rs->users++;
+		goto out;
+	}
+
+	/* Create a new one */
+	rs = rs_init(symsize, gfpoly, fcr, prim, nroots);
+	if (rs) {
+		rs->users = 1;
+		list_add(&rs->list, &rslist);
+	}
+out:	
+	up(&rslistlock);
+	return rs;
+}
+
+#ifdef CONFIG_REED_SOLOMON_ENC8
+/** 
+ *  encode_rs8 - Calculate the parity for data values (8bit data width)
+ *
+ *  @rs:	the rs control structure
+ *  @data:	data field of a given type
+ *  @len:	data length 
+ *  @par:	parity data, must be initialized by caller (usually all 0)
+ *  @invmsk:	invert data mask (will be xored on data)
+ *
+ *  The parity uses a uint16_t data type to enable
+ *  symbol size > 8. The calling code must itself take care of encoding the
+ *  syndrome result for storage.
+ */
+int encode_rs8(struct rs_control *rs, uint8_t *data, int len, uint16_t *par, 
+	       uint16_t invmsk)
+{
+#include "encode_rs.c"
+}
+EXPORT_SYMBOL_GPL(encode_rs8);
+#endif
+
+#ifdef CONFIG_REED_SOLOMON_DEC8
+/** 
+ *  decode_rs8 - Decode codeword (8bit data width)
+ *
+ *  @rs:	the rs control structure
+ *  @data:	data field of a given type
+ *  @par:	received parity data field
+ *  @len:	data length
+ *  @s:		syndrome data field (if NULL, syndrome is calculated)
+ *  @no_eras:	number of erasures
+ *  @eras_pos:	position of erasures, can be NULL
+ *  @invmsk:	invert data mask (will be xored on data, not on parity!)
+ *  @corr:	buffer to store correction bitmask on eras_pos
+ *
+ *  The syndrome and parity uses a uint16_t data type to enable
+ *  symbol size > 8. The calling code must take care of decoding the
+ *  syndrome result and the received parity before calling this code.
+ */
+int decode_rs8(struct rs_control *rs, uint8_t *data, uint16_t *par, int len,
+	       uint16_t *s, int no_eras, int *eras_pos, uint16_t invmsk, 
+	       uint16_t *corr)
+{
+#include "decode_rs.c"
+}
+EXPORT_SYMBOL_GPL(decode_rs8);
+#endif
+
+#ifdef CONFIG_REED_SOLOMON_ENC16
+/**
+ *  encode_rs16 - Calculate the parity for data values (16bit data width)
+ *
+ *  @rs:	the rs control structure
+ *  @data:	data field of a given type
+ *  @len:	data length 
+ *  @par:	parity data, must be initialized by caller (usually all 0)
+ *  @invmsk:	invert data mask (will be xored on data, not on parity!)
+ *
+ *  Each field in the data array contains up to symbol size bits of valid data.
+ */
+int encode_rs16(struct rs_control *rs, uint16_t *data, int len, uint16_t *par, 
+	uint16_t invmsk)
+{
+#include "encode_rs.c"
+}
+EXPORT_SYMBOL_GPL(encode_rs16);
+#endif
+
+#ifdef CONFIG_REED_SOLOMON_DEC16
+/** 
+ *  decode_rs16 - Decode codeword (16bit data width)
+ *
+ *  @rs:	the rs control structure
+ *  @data:	data field of a given type
+ *  @par:	received parity data field
+ *  @len:	data length
+ *  @s:		syndrome data field (if NULL, syndrome is calculated)
+ *  @no_eras:	number of erasures
+ *  @eras_pos:	position of erasures, can be NULL
+ *  @invmsk:	invert data mask (will be xored on data, not on parity!) 
+ *  @corr:	buffer to store correction bitmask on eras_pos
+ *
+ *  Each field in the data array contains up to symbol size bits of valid data.
+ */
+int decode_rs16(struct rs_control *rs, uint16_t *data, uint16_t *par, int len,
+		uint16_t *s, int no_eras, int *eras_pos, uint16_t invmsk, 
+		uint16_t *corr)
+{
+#include "decode_rs.c"
+}
+EXPORT_SYMBOL_GPL(decode_rs16);
+#endif
+
+EXPORT_SYMBOL_GPL(init_rs);
+EXPORT_SYMBOL_GPL(free_rs);
+
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("Reed Solomon encoder/decoder");
+MODULE_AUTHOR("Phil Karn, Thomas Gleixner");
+
diff --git a/lib/rwsem-spinlock.c b/lib/rwsem-spinlock.c
new file mode 100644
index 0000000..21f0db2
--- /dev/null
+++ b/lib/rwsem-spinlock.c
@@ -0,0 +1,344 @@
+/* rwsem-spinlock.c: R/W semaphores: contention handling functions for
+ * generic spinlock implementation
+ *
+ * Copyright (c) 2001   David Howells (dhowells@redhat.com).
+ * - Derived partially from idea by Andrea Arcangeli <andrea@suse.de>
+ * - Derived also from comments by Linus
+ */
+#include <linux/rwsem.h>
+#include <linux/sched.h>
+#include <linux/module.h>
+
+struct rwsem_waiter {
+	struct list_head list;
+	struct task_struct *task;
+	unsigned int flags;
+#define RWSEM_WAITING_FOR_READ	0x00000001
+#define RWSEM_WAITING_FOR_WRITE	0x00000002
+};
+
+#if RWSEM_DEBUG
+void rwsemtrace(struct rw_semaphore *sem, const char *str)
+{
+	if (sem->debug)
+		printk("[%d] %s({%d,%d})\n",
+		       current->pid, str, sem->activity,
+		       list_empty(&sem->wait_list) ? 0 : 1);
+}
+#endif
+
+/*
+ * initialise the semaphore
+ */
+void fastcall init_rwsem(struct rw_semaphore *sem)
+{
+	sem->activity = 0;
+	spin_lock_init(&sem->wait_lock);
+	INIT_LIST_HEAD(&sem->wait_list);
+#if RWSEM_DEBUG
+	sem->debug = 0;
+#endif
+}
+
+/*
+ * handle the lock release when there are processes blocked on it that can now run
+ * - if we come here, then:
+ *   - the 'active count' _reached_ zero
+ *   - the 'waiting count' is non-zero
+ * - the spinlock must be held by the caller
+ * - woken process blocks are discarded from the list after having task zeroed
+ * - writers are only woken if wakewrite is non-zero
+ */
+static inline struct rw_semaphore *
+__rwsem_do_wake(struct rw_semaphore *sem, int wakewrite)
+{
+	struct rwsem_waiter *waiter;
+	struct task_struct *tsk;
+	int woken;
+
+	rwsemtrace(sem, "Entering __rwsem_do_wake");
+
+	waiter = list_entry(sem->wait_list.next, struct rwsem_waiter, list);
+
+	if (!wakewrite) {
+		if (waiter->flags & RWSEM_WAITING_FOR_WRITE)
+			goto out;
+		goto dont_wake_writers;
+	}
+
+	/* if we are allowed to wake writers try to grant a single write lock
+	 * if there's a writer at the front of the queue
+	 * - we leave the 'waiting count' incremented to signify potential
+	 *   contention
+	 */
+	if (waiter->flags & RWSEM_WAITING_FOR_WRITE) {
+		sem->activity = -1;
+		list_del(&waiter->list);
+		tsk = waiter->task;
+		/* Don't touch waiter after ->task has been NULLed */
+		mb();
+		waiter->task = NULL;
+		wake_up_process(tsk);
+		put_task_struct(tsk);
+		goto out;
+	}
+
+	/* grant an infinite number of read locks to the front of the queue */
+ dont_wake_writers:
+	woken = 0;
+	while (waiter->flags & RWSEM_WAITING_FOR_READ) {
+		struct list_head *next = waiter->list.next;
+
+		list_del(&waiter->list);
+		tsk = waiter->task;
+		mb();
+		waiter->task = NULL;
+		wake_up_process(tsk);
+		put_task_struct(tsk);
+		woken++;
+		if (list_empty(&sem->wait_list))
+			break;
+		waiter = list_entry(next, struct rwsem_waiter, list);
+	}
+
+	sem->activity += woken;
+
+ out:
+	rwsemtrace(sem, "Leaving __rwsem_do_wake");
+	return sem;
+}
+
+/*
+ * wake a single writer
+ */
+static inline struct rw_semaphore *
+__rwsem_wake_one_writer(struct rw_semaphore *sem)
+{
+	struct rwsem_waiter *waiter;
+	struct task_struct *tsk;
+
+	sem->activity = -1;
+
+	waiter = list_entry(sem->wait_list.next, struct rwsem_waiter, list);
+	list_del(&waiter->list);
+
+	tsk = waiter->task;
+	mb();
+	waiter->task = NULL;
+	wake_up_process(tsk);
+	put_task_struct(tsk);
+	return sem;
+}
+
+/*
+ * get a read lock on the semaphore
+ */
+void fastcall __sched __down_read(struct rw_semaphore *sem)
+{
+	struct rwsem_waiter waiter;
+	struct task_struct *tsk;
+
+	rwsemtrace(sem, "Entering __down_read");
+
+	spin_lock_irq(&sem->wait_lock);
+
+	if (sem->activity >= 0 && list_empty(&sem->wait_list)) {
+		/* granted */
+		sem->activity++;
+		spin_unlock_irq(&sem->wait_lock);
+		goto out;
+	}
+
+	tsk = current;
+	set_task_state(tsk, TASK_UNINTERRUPTIBLE);
+
+	/* set up my own style of waitqueue */
+	waiter.task = tsk;
+	waiter.flags = RWSEM_WAITING_FOR_READ;
+	get_task_struct(tsk);
+
+	list_add_tail(&waiter.list, &sem->wait_list);
+
+	/* we don't need to touch the semaphore struct anymore */
+	spin_unlock_irq(&sem->wait_lock);
+
+	/* wait to be given the lock */
+	for (;;) {
+		if (!waiter.task)
+			break;
+		schedule();
+		set_task_state(tsk, TASK_UNINTERRUPTIBLE);
+	}
+
+	tsk->state = TASK_RUNNING;
+
+ out:
+	rwsemtrace(sem, "Leaving __down_read");
+}
+
+/*
+ * trylock for reading -- returns 1 if successful, 0 if contention
+ */
+int fastcall __down_read_trylock(struct rw_semaphore *sem)
+{
+	unsigned long flags;
+	int ret = 0;
+
+	rwsemtrace(sem, "Entering __down_read_trylock");
+
+	spin_lock_irqsave(&sem->wait_lock, flags);
+
+	if (sem->activity >= 0 && list_empty(&sem->wait_list)) {
+		/* granted */
+		sem->activity++;
+		ret = 1;
+	}
+
+	spin_unlock_irqrestore(&sem->wait_lock, flags);
+
+	rwsemtrace(sem, "Leaving __down_read_trylock");
+	return ret;
+}
+
+/*
+ * get a write lock on the semaphore
+ * - we increment the waiting count anyway to indicate an exclusive lock
+ */
+void fastcall __sched __down_write(struct rw_semaphore *sem)
+{
+	struct rwsem_waiter waiter;
+	struct task_struct *tsk;
+
+	rwsemtrace(sem, "Entering __down_write");
+
+	spin_lock_irq(&sem->wait_lock);
+
+	if (sem->activity == 0 && list_empty(&sem->wait_list)) {
+		/* granted */
+		sem->activity = -1;
+		spin_unlock_irq(&sem->wait_lock);
+		goto out;
+	}
+
+	tsk = current;
+	set_task_state(tsk, TASK_UNINTERRUPTIBLE);
+
+	/* set up my own style of waitqueue */
+	waiter.task = tsk;
+	waiter.flags = RWSEM_WAITING_FOR_WRITE;
+	get_task_struct(tsk);
+
+	list_add_tail(&waiter.list, &sem->wait_list);
+
+	/* we don't need to touch the semaphore struct anymore */
+	spin_unlock_irq(&sem->wait_lock);
+
+	/* wait to be given the lock */
+	for (;;) {
+		if (!waiter.task)
+			break;
+		schedule();
+		set_task_state(tsk, TASK_UNINTERRUPTIBLE);
+	}
+
+	tsk->state = TASK_RUNNING;
+
+ out:
+	rwsemtrace(sem, "Leaving __down_write");
+}
+
+/*
+ * trylock for writing -- returns 1 if successful, 0 if contention
+ */
+int fastcall __down_write_trylock(struct rw_semaphore *sem)
+{
+	unsigned long flags;
+	int ret = 0;
+
+	rwsemtrace(sem, "Entering __down_write_trylock");
+
+	spin_lock_irqsave(&sem->wait_lock, flags);
+
+	if (sem->activity == 0 && list_empty(&sem->wait_list)) {
+		/* granted */
+		sem->activity = -1;
+		ret = 1;
+	}
+
+	spin_unlock_irqrestore(&sem->wait_lock, flags);
+
+	rwsemtrace(sem, "Leaving __down_write_trylock");
+	return ret;
+}
+
+/*
+ * release a read lock on the semaphore
+ */
+void fastcall __up_read(struct rw_semaphore *sem)
+{
+	unsigned long flags;
+
+	rwsemtrace(sem, "Entering __up_read");
+
+	spin_lock_irqsave(&sem->wait_lock, flags);
+
+	if (--sem->activity == 0 && !list_empty(&sem->wait_list))
+		sem = __rwsem_wake_one_writer(sem);
+
+	spin_unlock_irqrestore(&sem->wait_lock, flags);
+
+	rwsemtrace(sem, "Leaving __up_read");
+}
+
+/*
+ * release a write lock on the semaphore
+ */
+void fastcall __up_write(struct rw_semaphore *sem)
+{
+	unsigned long flags;
+
+	rwsemtrace(sem, "Entering __up_write");
+
+	spin_lock_irqsave(&sem->wait_lock, flags);
+
+	sem->activity = 0;
+	if (!list_empty(&sem->wait_list))
+		sem = __rwsem_do_wake(sem, 1);
+
+	spin_unlock_irqrestore(&sem->wait_lock, flags);
+
+	rwsemtrace(sem, "Leaving __up_write");
+}
+
+/*
+ * downgrade a write lock into a read lock
+ * - just wake up any readers at the front of the queue
+ */
+void fastcall __downgrade_write(struct rw_semaphore *sem)
+{
+	unsigned long flags;
+
+	rwsemtrace(sem, "Entering __downgrade_write");
+
+	spin_lock_irqsave(&sem->wait_lock, flags);
+
+	sem->activity = 1;
+	if (!list_empty(&sem->wait_list))
+		sem = __rwsem_do_wake(sem, 0);
+
+	spin_unlock_irqrestore(&sem->wait_lock, flags);
+
+	rwsemtrace(sem, "Leaving __downgrade_write");
+}
+
+EXPORT_SYMBOL(init_rwsem);
+EXPORT_SYMBOL(__down_read);
+EXPORT_SYMBOL(__down_read_trylock);
+EXPORT_SYMBOL(__down_write);
+EXPORT_SYMBOL(__down_write_trylock);
+EXPORT_SYMBOL(__up_read);
+EXPORT_SYMBOL(__up_write);
+EXPORT_SYMBOL(__downgrade_write);
+#if RWSEM_DEBUG
+EXPORT_SYMBOL(rwsemtrace);
+#endif
diff --git a/lib/rwsem.c b/lib/rwsem.c
new file mode 100644
index 0000000..7644089
--- /dev/null
+++ b/lib/rwsem.c
@@ -0,0 +1,268 @@
+/* rwsem.c: R/W semaphores: contention handling functions
+ *
+ * Written by David Howells (dhowells@redhat.com).
+ * Derived from arch/i386/kernel/semaphore.c
+ */
+#include <linux/rwsem.h>
+#include <linux/sched.h>
+#include <linux/init.h>
+#include <linux/module.h>
+
+struct rwsem_waiter {
+	struct list_head list;
+	struct task_struct *task;
+	unsigned int flags;
+#define RWSEM_WAITING_FOR_READ	0x00000001
+#define RWSEM_WAITING_FOR_WRITE	0x00000002
+};
+
+#if RWSEM_DEBUG
+#undef rwsemtrace
+void rwsemtrace(struct rw_semaphore *sem, const char *str)
+{
+	printk("sem=%p\n", sem);
+	printk("(sem)=%08lx\n", sem->count);
+	if (sem->debug)
+		printk("[%d] %s({%08lx})\n", current->pid, str, sem->count);
+}
+#endif
+
+/*
+ * handle the lock release when there are processes blocked on it that can now run
+ * - if we come here from up_xxxx(), then:
+ *   - the 'active part' of count (&0x0000ffff) reached 0 (but may have changed)
+ *   - the 'waiting part' of count (&0xffff0000) is -ve (and will still be so)
+ *   - there must be someone on the queue
+ * - the spinlock must be held by the caller
+ * - woken process blocks are discarded from the list after having task zeroed
+ * - writers are only woken if downgrading is false
+ */
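+/*
+ * Worked example of the count layout above, using the typical 32bit
+ * values RWSEM_ACTIVE_BIAS = 0x00000001 and RWSEM_WAITING_BIAS =
+ * 0xffff0000 (i.e. -0x00010000): one active reader plus one queued
+ * writer gives count = 0xffff0001.  When that reader releases the lock
+ * the active part drops to 0 while the waiting part stays negative,
+ * which is exactly the state this function is entered with.
+ */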
+static inline struct rw_semaphore *
+__rwsem_do_wake(struct rw_semaphore *sem, int downgrading)
+{
+	struct rwsem_waiter *waiter;
+	struct task_struct *tsk;
+	struct list_head *next;
+	signed long oldcount, woken, loop;
+
+	rwsemtrace(sem, "Entering __rwsem_do_wake");
+
+	if (downgrading)
+		goto dont_wake_writers;
+
+	/* if we came through an up_xxxx() call, we only wake someone up
+	 * if we can transition the active part of the count from 0 -> 1
+	 */
+ try_again:
+	oldcount = rwsem_atomic_update(RWSEM_ACTIVE_BIAS, sem)
+						- RWSEM_ACTIVE_BIAS;
+	if (oldcount & RWSEM_ACTIVE_MASK)
+		goto undo;
+
+	waiter = list_entry(sem->wait_list.next, struct rwsem_waiter, list);
+
+	/* try to grant a single write lock if there's a writer at the front
+	 * of the queue - note we leave the 'active part' of the count
+	 * incremented by 1 and the waiting part incremented by 0x00010000
+	 */
+	if (!(waiter->flags & RWSEM_WAITING_FOR_WRITE))
+		goto readers_only;
+
+	/* We must be careful not to touch 'waiter' after we set ->task = NULL.
+	 * It is allocated on the waiter's stack and may become invalid at
+	 * any time after that point (due to a wakeup from another source).
+	 */
+	list_del(&waiter->list);
+	tsk = waiter->task;
+	mb();
+	waiter->task = NULL;
+	wake_up_process(tsk);
+	put_task_struct(tsk);
+	goto out;
+
+	/* don't want to wake any writers */
+ dont_wake_writers:
+	waiter = list_entry(sem->wait_list.next, struct rwsem_waiter, list);
+	if (waiter->flags & RWSEM_WAITING_FOR_WRITE)
+		goto out;
+
+	/* grant an infinite number of read locks to the readers at the front
+	 * of the queue
+	 * - note we increment the 'active part' of the count by the number of
+	 *   readers before waking any processes up
+	 */
+ readers_only:
+	woken = 0;
+	do {
+		woken++;
+
+		if (waiter->list.next == &sem->wait_list)
+			break;
+
+		waiter = list_entry(waiter->list.next,
+					struct rwsem_waiter, list);
+
+	} while (waiter->flags & RWSEM_WAITING_FOR_READ);
+
+	loop = woken;
+	woken *= RWSEM_ACTIVE_BIAS - RWSEM_WAITING_BIAS;
+	if (!downgrading)
+		/* we'd already done one increment earlier */
+		woken -= RWSEM_ACTIVE_BIAS;
+
+	rwsem_atomic_add(woken, sem);
+
+	next = sem->wait_list.next;
+	for (; loop > 0; loop--) {
+		waiter = list_entry(next, struct rwsem_waiter, list);
+		next = waiter->list.next;
+		tsk = waiter->task;
+		mb();
+		waiter->task = NULL;
+		wake_up_process(tsk);
+		put_task_struct(tsk);
+	}
+
+	sem->wait_list.next = next;
+	next->prev = &sem->wait_list;
+
+ out:
+	rwsemtrace(sem, "Leaving __rwsem_do_wake");
+	return sem;
+
+	/* undo the change to count, but check for a transition 1->0 */
+ undo:
+	if (rwsem_atomic_update(-RWSEM_ACTIVE_BIAS, sem) != 0)
+		goto out;
+	goto try_again;
+}
+
+/*
+ * wait for a lock to be granted
+ */
+static inline struct rw_semaphore *
+rwsem_down_failed_common(struct rw_semaphore *sem,
+			struct rwsem_waiter *waiter, signed long adjustment)
+{
+	struct task_struct *tsk = current;
+	signed long count;
+
+	set_task_state(tsk, TASK_UNINTERRUPTIBLE);
+
+	/* set up my own style of waitqueue */
+	spin_lock_irq(&sem->wait_lock);
+	waiter->task = tsk;
+	get_task_struct(tsk);
+
+	list_add_tail(&waiter->list, &sem->wait_list);
+
+	/* we're now waiting on the lock, but no longer actively read-locking */
+	count = rwsem_atomic_update(adjustment, sem);
+
+	/* if there are no active locks, wake the front queued process(es) up */
+	if (!(count & RWSEM_ACTIVE_MASK))
+		sem = __rwsem_do_wake(sem, 0);
+
+	spin_unlock_irq(&sem->wait_lock);
+
+	/* wait to be given the lock */
+	for (;;) {
+		if (!waiter->task)
+			break;
+		schedule();
+		set_task_state(tsk, TASK_UNINTERRUPTIBLE);
+	}
+
+	tsk->state = TASK_RUNNING;
+
+	return sem;
+}
+
+/*
+ * wait for the read lock to be granted
+ */
+struct rw_semaphore fastcall __sched *
+rwsem_down_read_failed(struct rw_semaphore *sem)
+{
+	struct rwsem_waiter waiter;
+
+	rwsemtrace(sem, "Entering rwsem_down_read_failed");
+
+	waiter.flags = RWSEM_WAITING_FOR_READ;
+	rwsem_down_failed_common(sem, &waiter,
+				RWSEM_WAITING_BIAS - RWSEM_ACTIVE_BIAS);
+
+	rwsemtrace(sem, "Leaving rwsem_down_read_failed");
+	return sem;
+}
+
+/*
+ * wait for the write lock to be granted
+ */
+struct rw_semaphore fastcall __sched *
+rwsem_down_write_failed(struct rw_semaphore *sem)
+{
+	struct rwsem_waiter waiter;
+
+	rwsemtrace(sem, "Entering rwsem_down_write_failed");
+
+	waiter.flags = RWSEM_WAITING_FOR_WRITE;
+	rwsem_down_failed_common(sem, &waiter, -RWSEM_ACTIVE_BIAS);
+
+	rwsemtrace(sem, "Leaving rwsem_down_write_failed");
+	return sem;
+}
+
+/*
+ * handle waking up a waiter on the semaphore
+ * - up_read/up_write has decremented the active part of count if we come here
+ */
+struct rw_semaphore fastcall *rwsem_wake(struct rw_semaphore *sem)
+{
+	unsigned long flags;
+
+	rwsemtrace(sem, "Entering rwsem_wake");
+
+	spin_lock_irqsave(&sem->wait_lock, flags);
+
+	/* do nothing if list empty */
+	if (!list_empty(&sem->wait_list))
+		sem = __rwsem_do_wake(sem, 0);
+
+	spin_unlock_irqrestore(&sem->wait_lock, flags);
+
+	rwsemtrace(sem, "Leaving rwsem_wake");
+
+	return sem;
+}
+
+/*
+ * downgrade a write lock into a read lock
+ * - caller incremented waiting part of count and discovered it still negative
+ * - just wake up any readers at the front of the queue
+ */
+struct rw_semaphore fastcall *rwsem_downgrade_wake(struct rw_semaphore *sem)
+{
+	unsigned long flags;
+
+	rwsemtrace(sem, "Entering rwsem_downgrade_wake");
+
+	spin_lock_irqsave(&sem->wait_lock, flags);
+
+	/* do nothing if list empty */
+	if (!list_empty(&sem->wait_list))
+		sem = __rwsem_do_wake(sem, 1);
+
+	spin_unlock_irqrestore(&sem->wait_lock, flags);
+
+	rwsemtrace(sem, "Leaving rwsem_downgrade_wake");
+	return sem;
+}
+
+EXPORT_SYMBOL(rwsem_down_read_failed);
+EXPORT_SYMBOL(rwsem_down_write_failed);
+EXPORT_SYMBOL(rwsem_wake);
+EXPORT_SYMBOL(rwsem_downgrade_wake);
+#if RWSEM_DEBUG
+EXPORT_SYMBOL(rwsemtrace);
+#endif
diff --git a/lib/sha1.c b/lib/sha1.c
new file mode 100644
index 0000000..2f7f114
--- /dev/null
+++ b/lib/sha1.c
@@ -0,0 +1,96 @@
+/*
+ * SHA transform algorithm, originally taken from code written by
+ * Peter Gutmann, and placed in the public domain.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/cryptohash.h>
+
+/* The SHA f()-functions.  */
+
+#define f1(x,y,z)   (z ^ (x & (y ^ z)))		/* x ? y : z */
+#define f2(x,y,z)   (x ^ y ^ z)			/* XOR */
+#define f3(x,y,z)   ((x & y) + (z & (x ^ y)))	/* majority */
+
+/* The SHA Mysterious Constants */
+
+#define K1  0x5A827999L			/* Rounds  0-19: sqrt(2) * 2^30 */
+#define K2  0x6ED9EBA1L			/* Rounds 20-39: sqrt(3) * 2^30 */
+#define K3  0x8F1BBCDCL			/* Rounds 40-59: sqrt(5) * 2^30 */
+#define K4  0xCA62C1D6L			/* Rounds 60-79: sqrt(10) * 2^30 */
+
+/*
+ * sha_transform: single block SHA1 transform
+ *
+ * @digest: 160 bit digest to update
+ * @data:   512 bits of data to hash
+ * @W:      80 words of workspace (see note)
+ *
+ * This function generates a SHA1 digest for a single 512-bit block.
+ * Be warned, it does not handle padding or produce the final message
+ * digest; do not confuse it with the full FIPS 180-1 digest algorithm
+ * for variable length messages.
+ *
+ * Note: If the hash is security sensitive, the caller should be sure
+ * to clear the workspace. This is left to the caller to avoid
+ * unnecessary clears between chained hashing operations.
+ */
+void sha_transform(__u32 *digest, const char *in, __u32 *W)
+{
+	__u32 a, b, c, d, e, t, i;
+
+	for (i = 0; i < 16; i++)
+		W[i] = be32_to_cpu(((const __u32 *)in)[i]);
+
+	for (i = 0; i < 64; i++)
+		W[i+16] = rol32(W[i+13] ^ W[i+8] ^ W[i+2] ^ W[i], 1);
+
+	a = digest[0];
+	b = digest[1];
+	c = digest[2];
+	d = digest[3];
+	e = digest[4];
+
+	for (i = 0; i < 20; i++) {
+		t = f1(b, c, d) + K1 + rol32(a, 5) + e + W[i];
+		e = d; d = c; c = rol32(b, 30); b = a; a = t;
+	}
+
+	for (; i < 40; i ++) {
+		t = f2(b, c, d) + K2 + rol32(a, 5) + e + W[i];
+		e = d; d = c; c = rol32(b, 30); b = a; a = t;
+	}
+
+	for (; i < 60; i ++) {
+		t = f3(b, c, d) + K3 + rol32(a, 5) + e + W[i];
+		e = d; d = c; c = rol32(b, 30); b = a; a = t;
+	}
+
+	for (; i < 80; i ++) {
+		t = f2(b, c, d) + K4 + rol32(a, 5) + e + W[i];
+		e = d; d = c; c = rol32(b, 30); b = a; a = t;
+	}
+
+	digest[0] += a;
+	digest[1] += b;
+	digest[2] += c;
+	digest[3] += d;
+	digest[4] += e;
+}
+EXPORT_SYMBOL(sha_transform);
+
+/*
+ * sha_init: initialize the vectors for a SHA1 digest
+ *
+ * @buf: vector to initialize
+ */
+void sha_init(__u32 *buf)
+{
+	buf[0] = 0x67452301;
+	buf[1] = 0xefcdab89;
+	buf[2] = 0x98badcfe;
+	buf[3] = 0x10325476;
+	buf[4] = 0xc3d2e1f0;
+}
+
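+#if 0
+/* Illustrative sketch (compiled out): hashing one pre-padded 512 bit
+ * block.  As noted above, the caller is responsible for FIPS 180-1
+ * padding; the block below is the empty message plus its padding
+ * (0x80 followed by zeros, with a zero length field).
+ */
+static void sha1_example(void)
+{
+	__u32 digest[5];
+	__u32 workspace[80];
+	char block[64];
+
+	memset(block, 0, sizeof(block));
+	block[0] = 0x80;		/* padding for the empty message */
+
+	sha_init(digest);
+	sha_transform(digest, block, workspace);
+	memset(workspace, 0, sizeof(workspace));	/* see note above */
+
+	printk("sha1: %08x%08x%08x%08x%08x\n", digest[0], digest[1],
+	       digest[2], digest[3], digest[4]);
+}
+#endif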
diff --git a/lib/sort.c b/lib/sort.c
new file mode 100644
index 0000000..ea3caed
--- /dev/null
+++ b/lib/sort.c
@@ -0,0 +1,119 @@
+/*
+ * A fast, small, non-recursive O(n log n) sort for the Linux kernel
+ *
+ * Jan 23 2005  Matt Mackall <mpm@selenic.com>
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+
+void u32_swap(void *a, void *b, int size)
+{
+	u32 t = *(u32 *)a;
+	*(u32 *)a = *(u32 *)b;
+	*(u32 *)b = t;
+}
+
+void generic_swap(void *a, void *b, int size)
+{
+	char t;
+
+	do {
+		t = *(char *)a;
+		*(char *)a++ = *(char *)b;
+		*(char *)b++ = t;
+	} while (--size > 0);
+}
+
+/*
+ * sort - sort an array of elements
+ * @base: pointer to data to sort
+ * @num: number of elements
+ * @size: size of each element
+ * @cmp: pointer to comparison function
+ * @swap: pointer to swap function or NULL
+ *
+ * This function does a heapsort on the given array. You may provide a
+ * swap function optimized to your element type.
+ *
+ * Sorting time is O(n log n) both on average and worst-case. While
+ * qsort is about 20% faster on average, it suffers from exploitable
+ * O(n*n) worst-case behavior and extra memory requirements that make
+ * it less suitable for kernel use.
+ */
+
+void sort(void *base, size_t num, size_t size,
+	  int (*cmp)(const void *, const void *),
+	  void (*swap)(void *, void *, int size))
+{
+	/* pre-scale counters for performance */
+	int i = (num/2) * size, n = num * size, c, r;
+
+	if (!swap)
+		swap = (size == 4 ? u32_swap : generic_swap);
+
+	/* heapify */
+	for ( ; i >= 0; i -= size) {
+		for (r = i; r * 2 < n; r  = c) {
+			c = r * 2;
+			if (c < n - size && cmp(base + c, base + c + size) < 0)
+				c += size;
+			if (cmp(base + r, base + c) >= 0)
+				break;
+			swap(base + r, base + c, size);
+		}
+	}
+
+	/* sort */
+	for (i = n - size; i >= 0; i -= size) {
+		swap(base, base + i, size);
+		for (r = 0; r * 2 < i; r = c) {
+			c = r * 2;
+			if (c < i - size && cmp(base + c, base + c + size) < 0)
+				c += size;
+			if (cmp(base + r, base + c) >= 0)
+				break;
+			swap(base + r, base + c, size);
+		}
+	}
+}
+
+EXPORT_SYMBOL(sort);
+
+#if 0
+/* a simple boot-time regression test */
+
+int cmpint(const void *a, const void *b)
+{
+	return *(int *)a - *(int *)b;
+}
+
+static int sort_test(void)
+{
+	int *a, i, r = 0;
+
+	a = kmalloc(1000 * sizeof(int), GFP_KERNEL);
+	BUG_ON(!a);
+
+	printk("testing sort()\n");
+
+	for (i = 0; i < 1000; i++) {
+		r = (r * 725861) % 6599;
+		a[i] = r;
+	}
+
+	sort(a, 1000, sizeof(int), cmpint, NULL);
+
+	for (i = 0; i < 999; i++)
+		if (a[i] > a[i+1]) {
+			printk("sort() failed!\n");
+			break;
+		}
+
+	kfree(a);
+
+	return 0;
+}
+
+module_init(sort_test);
+#endif
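+
+#if 0
+/* Illustrative sketch (compiled out): sorting an array of structures
+ * with a caller supplied comparison and swap function, as described in
+ * the comment above sort().  'struct pair' is a made-up example type.
+ */
+struct pair {
+	int key;
+	void *value;
+};
+
+static int pair_cmp(const void *a, const void *b)
+{
+	return ((const struct pair *)a)->key - ((const struct pair *)b)->key;
+}
+
+static void pair_swap(void *a, void *b, int size)
+{
+	struct pair t = *(struct pair *)a;
+
+	*(struct pair *)a = *(struct pair *)b;
+	*(struct pair *)b = t;
+}
+
+/* usage: sort(pairs, npairs, sizeof(struct pair), pair_cmp, pair_swap); */
+#endif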
diff --git a/lib/string.c b/lib/string.c
new file mode 100644
index 0000000..4bb93ad
--- /dev/null
+++ b/lib/string.c
@@ -0,0 +1,601 @@
+/*
+ *  linux/lib/string.c
+ *
+ *  Copyright (C) 1991, 1992  Linus Torvalds
+ */
+
+/*
+ * stupid library routines.. The optimized versions should generally be found
+ * as inline code in <asm-xx/string.h>
+ *
+ * These are buggy as well..
+ *
+ * * Fri Jun 25 1999, Ingo Oeser <ioe@informatik.tu-chemnitz.de>
+ * -  Added strsep() which will replace strtok() soon (because strsep() is
+ *    reentrant and should be faster). Use only strsep() in new code, please.
+ *
+ * * Sat Feb 09 2002, Jason Thomas <jason@topic.com.au>,
+ *                    Matthew Hawkins <matt@mh.dropbear.id.au>
+ * -  Kissed strtok() goodbye
+ */
+
+#include <linux/types.h>
+#include <linux/string.h>
+#include <linux/ctype.h>
+#include <linux/module.h>
+
+#ifndef __HAVE_ARCH_STRNICMP
+/**
+ * strnicmp - Case insensitive, length-limited string comparison
+ * @s1: One string
+ * @s2: The other string
+ * @len: the maximum number of characters to compare
+ */
+int strnicmp(const char *s1, const char *s2, size_t len)
+{
+	/* Yes, Virginia, it had better be unsigned */
+	unsigned char c1, c2;
+
+	c1 = 0;	c2 = 0;
+	if (len) {
+		do {
+			c1 = *s1; c2 = *s2;
+			s1++; s2++;
+			if (!c1)
+				break;
+			if (!c2)
+				break;
+			if (c1 == c2)
+				continue;
+			c1 = tolower(c1);
+			c2 = tolower(c2);
+			if (c1 != c2)
+				break;
+		} while (--len);
+	}
+	return (int)c1 - (int)c2;
+}
+
+EXPORT_SYMBOL(strnicmp);
+#endif
+
+#ifndef __HAVE_ARCH_STRCPY
+/**
+ * strcpy - Copy a %NUL terminated string
+ * @dest: Where to copy the string to
+ * @src: Where to copy the string from
+ */
+char * strcpy(char * dest,const char *src)
+{
+	char *tmp = dest;
+
+	while ((*dest++ = *src++) != '\0')
+		/* nothing */;
+	return tmp;
+}
+EXPORT_SYMBOL(strcpy);
+#endif
+
+#ifndef __HAVE_ARCH_STRNCPY
+/**
+ * strncpy - Copy a length-limited, %NUL-terminated string
+ * @dest: Where to copy the string to
+ * @src: Where to copy the string from
+ * @count: The maximum number of bytes to copy
+ *
+ * The result is not %NUL-terminated if the source exceeds
+ * @count bytes.
+ */
+char * strncpy(char * dest,const char *src,size_t count)
+{
+	char *tmp = dest;
+
+	while (count) {
+		if ((*tmp = *src) != 0) src++;
+		tmp++;
+		count--;
+	}
+	return dest;
+}
+EXPORT_SYMBOL(strncpy);
+#endif
+
+#ifndef __HAVE_ARCH_STRLCPY
+/**
+ * strlcpy - Copy a %NUL terminated string into a sized buffer
+ * @dest: Where to copy the string to
+ * @src: Where to copy the string from
+ * @size: size of destination buffer
+ *
+ * Compatible with *BSD: the result is always a valid
+ * NUL-terminated string that fits in the buffer (unless,
+ * of course, the buffer size is zero). It does not pad
+ * out the result like strncpy() does.
+ */
+size_t strlcpy(char *dest, const char *src, size_t size)
+{
+	size_t ret = strlen(src);
+
+	if (size) {
+		size_t len = (ret >= size) ? size-1 : ret;
+		memcpy(dest, src, len);
+		dest[len] = '\0';
+	}
+	return ret;
+}
+EXPORT_SYMBOL(strlcpy);
+#endif
+
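+/*
+ * Example (illustrative only, @src being any %NUL terminated string):
+ * unlike strncpy(), the return value of strlcpy() makes truncation
+ * easy to detect:
+ *
+ *	char name[16];
+ *
+ *	if (strlcpy(name, src, sizeof(name)) >= sizeof(name))
+ *		printk("name was truncated\n");
+ */
+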
+#ifndef __HAVE_ARCH_STRCAT
+/**
+ * strcat - Append one %NUL-terminated string to another
+ * @dest: The string to be appended to
+ * @src: The string to append to it
+ */
+char * strcat(char * dest, const char * src)
+{
+	char *tmp = dest;
+
+	while (*dest)
+		dest++;
+	while ((*dest++ = *src++) != '\0')
+		;
+
+	return tmp;
+}
+EXPORT_SYMBOL(strcat);
+#endif
+
+#ifndef __HAVE_ARCH_STRNCAT
+/**
+ * strncat - Append a length-limited, %NUL-terminated string to another
+ * @dest: The string to be appended to
+ * @src: The string to append to it
+ * @count: The maximum numbers of bytes to copy
+ *
+ * Note that in contrast to strncpy, strncat ensures the result is
+ * terminated.
+ */
+char * strncat(char *dest, const char *src, size_t count)
+{
+	char *tmp = dest;
+
+	if (count) {
+		while (*dest)
+			dest++;
+		while ((*dest++ = *src++) != 0) {
+			if (--count == 0) {
+				*dest = '\0';
+				break;
+			}
+		}
+	}
+
+	return tmp;
+}
+EXPORT_SYMBOL(strncat);
+#endif
+
+#ifndef __HAVE_ARCH_STRLCAT
+/**
+ * strlcat - Append a length-limited, %NUL-terminated string to another
+ * @dest: The string to be appended to
+ * @src: The string to append to it
+ * @count: The size of the destination buffer.
+ */
+size_t strlcat(char *dest, const char *src, size_t count)
+{
+	size_t dsize = strlen(dest);
+	size_t len = strlen(src);
+	size_t res = dsize + len;
+
+	/* This would be a bug */
+	BUG_ON(dsize >= count);
+
+	dest += dsize;
+	count -= dsize;
+	if (len >= count)
+		len = count-1;
+	memcpy(dest, src, len);
+	dest[len] = 0;
+	return res;
+}
+EXPORT_SYMBOL(strlcat);
+#endif
+
+#ifndef __HAVE_ARCH_STRCMP
+/**
+ * strcmp - Compare two strings
+ * @cs: One string
+ * @ct: Another string
+ */
+int strcmp(const char * cs,const char * ct)
+{
+	register signed char __res;
+
+	while (1) {
+		if ((__res = *cs - *ct++) != 0 || !*cs++)
+			break;
+	}
+
+	return __res;
+}
+EXPORT_SYMBOL(strcmp);
+#endif
+
+#ifndef __HAVE_ARCH_STRNCMP
+/**
+ * strncmp - Compare two length-limited strings
+ * @cs: One string
+ * @ct: Another string
+ * @count: The maximum number of bytes to compare
+ */
+int strncmp(const char * cs,const char * ct,size_t count)
+{
+	register signed char __res = 0;
+
+	while (count) {
+		if ((__res = *cs - *ct++) != 0 || !*cs++)
+			break;
+		count--;
+	}
+
+	return __res;
+}
+EXPORT_SYMBOL(strncmp);
+#endif
+
+#ifndef __HAVE_ARCH_STRCHR
+/**
+ * strchr - Find the first occurrence of a character in a string
+ * @s: The string to be searched
+ * @c: The character to search for
+ */
+char * strchr(const char * s, int c)
+{
+	for(; *s != (char) c; ++s)
+		if (*s == '\0')
+			return NULL;
+	return (char *) s;
+}
+EXPORT_SYMBOL(strchr);
+#endif
+
+#ifndef __HAVE_ARCH_STRRCHR
+/**
+ * strrchr - Find the last occurrence of a character in a string
+ * @s: The string to be searched
+ * @c: The character to search for
+ */
+char * strrchr(const char * s, int c)
+{
+	const char *p = s + strlen(s);
+	do {
+		if (*p == (char)c)
+			return (char *)p;
+	} while (--p >= s);
+	return NULL;
+}
+EXPORT_SYMBOL(strrchr);
+#endif
+
+#ifndef __HAVE_ARCH_STRNCHR
+/**
+ * strnchr - Find a character in a length limited string
+ * @s: The string to be searched
+ * @count: The number of characters to be searched
+ * @c: The character to search for
+ */
+char *strnchr(const char *s, size_t count, int c)
+{
+	for (; count-- && *s != '\0'; ++s)
+		if (*s == (char) c)
+			return (char *) s;
+	return NULL;
+}
+EXPORT_SYMBOL(strnchr);
+#endif
+
+#ifndef __HAVE_ARCH_STRLEN
+/**
+ * strlen - Find the length of a string
+ * @s: The string to be sized
+ */
+size_t strlen(const char * s)
+{
+	const char *sc;
+
+	for (sc = s; *sc != '\0'; ++sc)
+		/* nothing */;
+	return sc - s;
+}
+EXPORT_SYMBOL(strlen);
+#endif
+
+#ifndef __HAVE_ARCH_STRNLEN
+/**
+ * strnlen - Find the length of a length-limited string
+ * @s: The string to be sized
+ * @count: The maximum number of bytes to search
+ */
+size_t strnlen(const char * s, size_t count)
+{
+	const char *sc;
+
+	for (sc = s; count-- && *sc != '\0'; ++sc)
+		/* nothing */;
+	return sc - s;
+}
+EXPORT_SYMBOL(strnlen);
+#endif
+
+#ifndef __HAVE_ARCH_STRSPN
+/**
+ * strspn - Calculate the length of the initial substring of @s which only
+ * 	contains characters in @accept
+ * @s: The string to be searched
+ * @accept: The string to search for
+ */
+size_t strspn(const char *s, const char *accept)
+{
+	const char *p;
+	const char *a;
+	size_t count = 0;
+
+	for (p = s; *p != '\0'; ++p) {
+		for (a = accept; *a != '\0'; ++a) {
+			if (*p == *a)
+				break;
+		}
+		if (*a == '\0')
+			return count;
+		++count;
+	}
+
+	return count;
+}
+
+EXPORT_SYMBOL(strspn);
+#endif
+
+/**
+ * strcspn - Calculate the length of the initial substring of @s which does
+ * 	not contain characters in @reject
+ * @s: The string to be searched
+ * @reject: The string to avoid
+ */
+size_t strcspn(const char *s, const char *reject)
+{
+	const char *p;
+	const char *r;
+	size_t count = 0;
+
+	for (p = s; *p != '\0'; ++p) {
+		for (r = reject; *r != '\0'; ++r) {
+			if (*p == *r)
+				return count;
+		}
+		++count;
+	}
+
+	return count;
+}	
+EXPORT_SYMBOL(strcspn);
+
+#ifndef __HAVE_ARCH_STRPBRK
+/**
+ * strpbrk - Find the first occurrence of a set of characters
+ * @cs: The string to be searched
+ * @ct: The characters to search for
+ */
+char * strpbrk(const char * cs,const char * ct)
+{
+	const char *sc1,*sc2;
+
+	for( sc1 = cs; *sc1 != '\0'; ++sc1) {
+		for( sc2 = ct; *sc2 != '\0'; ++sc2) {
+			if (*sc1 == *sc2)
+				return (char *) sc1;
+		}
+	}
+	return NULL;
+}
+EXPORT_SYMBOL(strpbrk);
+#endif
+
+#ifndef __HAVE_ARCH_STRSEP
+/**
+ * strsep - Split a string into tokens
+ * @s: The string to be searched
+ * @ct: The characters to search for
+ *
+ * strsep() updates @s to point after the token, ready for the next call.
+ *
+ * It returns empty tokens, too, behaving exactly like the libc function
+ * of that name. In fact, it was stolen from glibc2 and de-fancy-fied.
+ * Same semantics, slimmer shape. ;)
+ */
+char * strsep(char **s, const char *ct)
+{
+	char *sbegin = *s, *end;
+
+	if (sbegin == NULL)
+		return NULL;
+
+	end = strpbrk(sbegin, ct);
+	if (end)
+		*end++ = '\0';
+	*s = end;
+
+	return sbegin;
+}
+
+EXPORT_SYMBOL(strsep);
+#endif
+
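+/*
+ * Example (illustrative only): walking comma separated tokens with
+ * strsep().  Empty fields come back as empty strings:
+ *
+ *	char *tok, *p = buf;	// buf: a writable, NUL terminated buffer
+ *
+ *	while ((tok = strsep(&p, ",")) != NULL)
+ *		printk("token '%s'\n", tok);
+ */
+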
+#ifndef __HAVE_ARCH_MEMSET
+/**
+ * memset - Fill a region of memory with the given value
+ * @s: Pointer to the start of the area.
+ * @c: The byte to fill the area with
+ * @count: The size of the area.
+ *
+ * Do not use memset() to access IO space, use memset_io() instead.
+ */
+void * memset(void * s,int c,size_t count)
+{
+	char *xs = (char *) s;
+
+	while (count--)
+		*xs++ = c;
+
+	return s;
+}
+EXPORT_SYMBOL(memset);
+#endif
+
+#ifndef __HAVE_ARCH_MEMCPY
+/**
+ * memcpy - Copy one area of memory to another
+ * @dest: Where to copy to
+ * @src: Where to copy from
+ * @count: The size of the area.
+ *
+ * You should not use this function to access IO space, use memcpy_toio()
+ * or memcpy_fromio() instead.
+ */
+void * memcpy(void * dest,const void *src,size_t count)
+{
+	char *tmp = (char *) dest, *s = (char *) src;
+
+	while (count--)
+		*tmp++ = *s++;
+
+	return dest;
+}
+EXPORT_SYMBOL(memcpy);
+#endif
+
+#ifndef __HAVE_ARCH_MEMMOVE
+/**
+ * memmove - Copy one area of memory to another
+ * @dest: Where to copy to
+ * @src: Where to copy from
+ * @count: The size of the area.
+ *
+ * Unlike memcpy(), memmove() copes with overlapping areas.
+ */
+void * memmove(void * dest,const void *src,size_t count)
+{
+	char *tmp, *s;
+
+	if (dest <= src) {
+		tmp = (char *) dest;
+		s = (char *) src;
+		while (count--)
+			*tmp++ = *s++;
+		}
+	else {
+		tmp = (char *) dest + count;
+		s = (char *) src + count;
+		while (count--)
+			*--tmp = *--s;
+		}
+
+	return dest;
+}
+EXPORT_SYMBOL(memmove);
+#endif
+
+#ifndef __HAVE_ARCH_MEMCMP
+/**
+ * memcmp - Compare two areas of memory
+ * @cs: One area of memory
+ * @ct: Another area of memory
+ * @count: The size of the area.
+ */
+int memcmp(const void * cs,const void * ct,size_t count)
+{
+	const unsigned char *su1, *su2;
+	int res = 0;
+
+	for( su1 = cs, su2 = ct; 0 < count; ++su1, ++su2, count--)
+		if ((res = *su1 - *su2) != 0)
+			break;
+	return res;
+}
+EXPORT_SYMBOL(memcmp);
+#endif
+
+#ifndef __HAVE_ARCH_MEMSCAN
+/**
+ * memscan - Find a character in an area of memory.
+ * @addr: The memory area
+ * @c: The byte to search for
+ * @size: The size of the area.
+ *
+ * returns the address of the first occurrence of @c, or 1 byte past
+ * the area if @c is not found
+ */
+void * memscan(void * addr, int c, size_t size)
+{
+	unsigned char * p = (unsigned char *) addr;
+
+	while (size) {
+		if (*p == c)
+			return (void *) p;
+		p++;
+		size--;
+	}
+  	return (void *) p;
+}
+EXPORT_SYMBOL(memscan);
+#endif
+
+#ifndef __HAVE_ARCH_STRSTR
+/**
+ * strstr - Find the first substring in a %NUL terminated string
+ * @s1: The string to be searched
+ * @s2: The string to search for
+ */
+char * strstr(const char * s1,const char * s2)
+{
+	int l1, l2;
+
+	l2 = strlen(s2);
+	if (!l2)
+		return (char *) s1;
+	l1 = strlen(s1);
+	while (l1 >= l2) {
+		l1--;
+		if (!memcmp(s1,s2,l2))
+			return (char *) s1;
+		s1++;
+	}
+	return NULL;
+}
+EXPORT_SYMBOL(strstr);
+#endif
+
+#ifndef __HAVE_ARCH_MEMCHR
+/**
+ * memchr - Find a character in an area of memory.
+ * @s: The memory area
+ * @c: The byte to search for
+ * @n: The size of the area.
+ *
+ * returns the address of the first occurrence of @c, or %NULL
+ * if @c is not found
+ */
+void *memchr(const void *s, int c, size_t n)
+{
+	const unsigned char *p = s;
+	while (n-- != 0) {
+        	if ((unsigned char)c == *p++) {
+			return (void *)(p-1);
+		}
+	}
+	return NULL;
+}
+EXPORT_SYMBOL(memchr);
+#endif
diff --git a/lib/vsprintf.c b/lib/vsprintf.c
new file mode 100644
index 0000000..a9bda0a
--- /dev/null
+++ b/lib/vsprintf.c
@@ -0,0 +1,846 @@
+/*
+ *  linux/lib/vsprintf.c
+ *
+ *  Copyright (C) 1991, 1992  Linus Torvalds
+ */
+
+/* vsprintf.c -- Lars Wirzenius & Linus Torvalds. */
+/*
+ * Wirzenius wrote this portably, Torvalds fucked it up :-)
+ */
+
+/* 
+ * Fri Jul 13 2001 Crutcher Dunnavant <crutcher+kernel@datastacks.com>
+ * - changed to provide snprintf and vsnprintf functions
+ * So Feb  1 16:51:32 CET 2004 Juergen Quade <quade@hsnr.de>
+ * - scnprintf and vscnprintf
+ */
+
+#include <stdarg.h>
+#include <linux/module.h>
+#include <linux/types.h>
+#include <linux/string.h>
+#include <linux/ctype.h>
+#include <linux/kernel.h>
+
+#include <asm/div64.h>
+
+/**
+ * simple_strtoul - convert a string to an unsigned long
+ * @cp: The start of the string
+ * @endp: A pointer to the end of the parsed string will be placed here
+ * @base: The number base to use
+ */
+unsigned long simple_strtoul(const char *cp,char **endp,unsigned int base)
+{
+	unsigned long result = 0,value;
+
+	if (!base) {
+		base = 10;
+		if (*cp == '0') {
+			base = 8;
+			cp++;
+			if ((toupper(*cp) == 'X') && isxdigit(cp[1])) {
+				cp++;
+				base = 16;
+			}
+		}
+	} else if (base == 16) {
+		if (cp[0] == '0' && toupper(cp[1]) == 'X')
+			cp += 2;
+	}
+	while (isxdigit(*cp) &&
+	       (value = isdigit(*cp) ? *cp-'0' : toupper(*cp)-'A'+10) < base) {
+		result = result*base + value;
+		cp++;
+	}
+	if (endp)
+		*endp = (char *)cp;
+	return result;
+}
+
+EXPORT_SYMBOL(simple_strtoul);
+
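+/*
+ * Example (illustrative only): with @base == 0 the radix is derived
+ * from the usual C prefixes:
+ *
+ *	simple_strtoul("0x1f", NULL, 0) == 31
+ *	simple_strtoul("017", NULL, 0)  == 15
+ *	simple_strtoul("17", NULL, 0)   == 17
+ */
+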
+/**
+ * simple_strtol - convert a string to a signed long
+ * @cp: The start of the string
+ * @endp: A pointer to the end of the parsed string will be placed here
+ * @base: The number base to use
+ */
+long simple_strtol(const char *cp,char **endp,unsigned int base)
+{
+	if(*cp=='-')
+		return -simple_strtoul(cp+1,endp,base);
+	return simple_strtoul(cp,endp,base);
+}
+
+EXPORT_SYMBOL(simple_strtol);
+
+/**
+ * simple_strtoull - convert a string to an unsigned long long
+ * @cp: The start of the string
+ * @endp: A pointer to the end of the parsed string will be placed here
+ * @base: The number base to use
+ */
+unsigned long long simple_strtoull(const char *cp,char **endp,unsigned int base)
+{
+	unsigned long long result = 0,value;
+
+	if (!base) {
+		base = 10;
+		if (*cp == '0') {
+			base = 8;
+			cp++;
+			if ((toupper(*cp) == 'X') && isxdigit(cp[1])) {
+				cp++;
+				base = 16;
+			}
+		}
+	} else if (base == 16) {
+		if (cp[0] == '0' && toupper(cp[1]) == 'X')
+			cp += 2;
+	}
+	while (isxdigit(*cp) && (value = isdigit(*cp) ? *cp-'0' : (islower(*cp)
+	    ? toupper(*cp) : *cp)-'A'+10) < base) {
+		result = result*base + value;
+		cp++;
+	}
+	if (endp)
+		*endp = (char *)cp;
+	return result;
+}
+
+EXPORT_SYMBOL(simple_strtoull);
+
+/**
+ * simple_strtoll - convert a string to a signed long long
+ * @cp: The start of the string
+ * @endp: A pointer to the end of the parsed string will be placed here
+ * @base: The number base to use
+ */
+long long simple_strtoll(const char *cp,char **endp,unsigned int base)
+{
+	if(*cp=='-')
+		return -simple_strtoull(cp+1,endp,base);
+	return simple_strtoull(cp,endp,base);
+}
+
+static int skip_atoi(const char **s)
+{
+	int i=0;
+
+	while (isdigit(**s))
+		i = i*10 + *((*s)++) - '0';
+	return i;
+}
+
+#define ZEROPAD	1		/* pad with zero */
+#define SIGN	2		/* unsigned/signed long */
+#define PLUS	4		/* show plus */
+#define SPACE	8		/* space if plus */
+#define LEFT	16		/* left justified */
+#define SPECIAL	32		/* 0x */
+#define LARGE	64		/* use 'ABCDEF' instead of 'abcdef' */
+
+static char * number(char * buf, char * end, unsigned long long num, int base, int size, int precision, int type)
+{
+	char c,sign,tmp[66];
+	const char *digits;
+	static const char small_digits[] = "0123456789abcdefghijklmnopqrstuvwxyz";
+	static const char large_digits[] = "0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ";
+	int i;
+
+	digits = (type & LARGE) ? large_digits : small_digits;
+	if (type & LEFT)
+		type &= ~ZEROPAD;
+	if (base < 2 || base > 36)
+		return NULL;
+	c = (type & ZEROPAD) ? '0' : ' ';
+	sign = 0;
+	if (type & SIGN) {
+		if ((signed long long) num < 0) {
+			sign = '-';
+			num = - (signed long long) num;
+			size--;
+		} else if (type & PLUS) {
+			sign = '+';
+			size--;
+		} else if (type & SPACE) {
+			sign = ' ';
+			size--;
+		}
+	}
+	if (type & SPECIAL) {
+		if (base == 16)
+			size -= 2;
+		else if (base == 8)
+			size--;
+	}
+	i = 0;
+	if (num == 0)
+		tmp[i++]='0';
+	else while (num != 0)
+		tmp[i++] = digits[do_div(num,base)];
+	if (i > precision)
+		precision = i;
+	size -= precision;
+	if (!(type&(ZEROPAD+LEFT))) {
+		while(size-->0) {
+			if (buf <= end)
+				*buf = ' ';
+			++buf;
+		}
+	}
+	if (sign) {
+		if (buf <= end)
+			*buf = sign;
+		++buf;
+	}
+	if (type & SPECIAL) {
+		if (base==8) {
+			if (buf <= end)
+				*buf = '0';
+			++buf;
+		} else if (base==16) {
+			if (buf <= end)
+				*buf = '0';
+			++buf;
+			if (buf <= end)
+				*buf = digits[33];
+			++buf;
+		}
+	}
+	if (!(type & LEFT)) {
+		while (size-- > 0) {
+			if (buf <= end)
+				*buf = c;
+			++buf;
+		}
+	}
+	while (i < precision--) {
+		if (buf <= end)
+			*buf = '0';
+		++buf;
+	}
+	while (i-- > 0) {
+		if (buf <= end)
+			*buf = tmp[i];
+		++buf;
+	}
+	while (size-- > 0) {
+		if (buf <= end)
+			*buf = ' ';
+		++buf;
+	}
+	return buf;
+}
+
+/**
+ * vsnprintf - Format a string and place it in a buffer
+ * @buf: The buffer to place the result into
+ * @size: The size of the buffer, including the trailing null space
+ * @fmt: The format string to use
+ * @args: Arguments for the format string
+ *
+ * The return value is the number of characters which would
+ * be generated for the given input, excluding the trailing
+ * '\0', as per ISO C99. If you want to have the exact
+ * number of characters written into @buf as return value
+ * (not including the trailing '\0'), use vscnprintf. If the
+ * return is greater than or equal to @size, the resulting
+ * string is truncated.
+ *
+ * Call this function if you are already dealing with a va_list.
+ * You probably want snprintf instead.
+ */
+int vsnprintf(char *buf, size_t size, const char *fmt, va_list args)
+{
+	int len;
+	unsigned long long num;
+	int i, base;
+	char *str, *end, c;
+	const char *s;
+
+	int flags;		/* flags to number() */
+
+	int field_width;	/* width of output field */
+	int precision;		/* min. # of digits for integers; max
+				   number of chars from string */
+	int qualifier;		/* 'h', 'l', or 'L' for integer fields */
+				/* 'z' support added 23/7/1999 S.H.    */
+				/* 'z' changed to 'Z' --davidm 1/25/99 */
+
+	/* Reject out-of-range values early */
+	if (unlikely((int) size < 0)) {
+		/* There can be only one.. */
+		static int warn = 1;
+		WARN_ON(warn);
+		warn = 0;
+		return 0;
+	}
+
+	str = buf;
+	end = buf + size - 1;
+
+	if (end < buf - 1) {
+		end = ((void *) -1);
+		size = end - buf + 1;
+	}
+
+	for (; *fmt ; ++fmt) {
+		if (*fmt != '%') {
+			if (str <= end)
+				*str = *fmt;
+			++str;
+			continue;
+		}
+
+		/* process flags */
+		flags = 0;
+		repeat:
+			++fmt;		/* this also skips first '%' */
+			switch (*fmt) {
+				case '-': flags |= LEFT; goto repeat;
+				case '+': flags |= PLUS; goto repeat;
+				case ' ': flags |= SPACE; goto repeat;
+				case '#': flags |= SPECIAL; goto repeat;
+				case '0': flags |= ZEROPAD; goto repeat;
+			}
+
+		/* get field width */
+		field_width = -1;
+		if (isdigit(*fmt))
+			field_width = skip_atoi(&fmt);
+		else if (*fmt == '*') {
+			++fmt;
+			/* it's the next argument */
+			field_width = va_arg(args, int);
+			if (field_width < 0) {
+				field_width = -field_width;
+				flags |= LEFT;
+			}
+		}
+
+		/* get the precision */
+		precision = -1;
+		if (*fmt == '.') {
+			++fmt;	
+			if (isdigit(*fmt))
+				precision = skip_atoi(&fmt);
+			else if (*fmt == '*') {
+				++fmt;
+				/* it's the next argument */
+				precision = va_arg(args, int);
+			}
+			if (precision < 0)
+				precision = 0;
+		}
+
+		/* get the conversion qualifier */
+		qualifier = -1;
+		if (*fmt == 'h' || *fmt == 'l' || *fmt == 'L' ||
+		    *fmt =='Z' || *fmt == 'z') {
+			qualifier = *fmt;
+			++fmt;
+			if (qualifier == 'l' && *fmt == 'l') {
+				qualifier = 'L';
+				++fmt;
+			}
+		}
+
+		/* default base */
+		base = 10;
+
+		switch (*fmt) {
+			case 'c':
+				if (!(flags & LEFT)) {
+					while (--field_width > 0) {
+						if (str <= end)
+							*str = ' ';
+						++str;
+					}
+				}
+				c = (unsigned char) va_arg(args, int);
+				if (str <= end)
+					*str = c;
+				++str;
+				while (--field_width > 0) {
+					if (str <= end)
+						*str = ' ';
+					++str;
+				}
+				continue;
+
+			case 's':
+				s = va_arg(args, char *);
+				if ((unsigned long)s < PAGE_SIZE)
+					s = "<NULL>";
+
+				len = strnlen(s, precision);
+
+				if (!(flags & LEFT)) {
+					while (len < field_width--) {
+						if (str <= end)
+							*str = ' ';
+						++str;
+					}
+				}
+				for (i = 0; i < len; ++i) {
+					if (str <= end)
+						*str = *s;
+					++str; ++s;
+				}
+				while (len < field_width--) {
+					if (str <= end)
+						*str = ' ';
+					++str;
+				}
+				continue;
+
+			case 'p':
+				if (field_width == -1) {
+					field_width = 2*sizeof(void *);
+					flags |= ZEROPAD;
+				}
+				str = number(str, end,
+						(unsigned long) va_arg(args, void *),
+						16, field_width, precision, flags);
+				continue;
+
+
+			case 'n':
+				/* FIXME:
+				* What does C99 say about the overflow case here? */
+				if (qualifier == 'l') {
+					long * ip = va_arg(args, long *);
+					*ip = (str - buf);
+				} else if (qualifier == 'Z' || qualifier == 'z') {
+					size_t * ip = va_arg(args, size_t *);
+					*ip = (str - buf);
+				} else {
+					int * ip = va_arg(args, int *);
+					*ip = (str - buf);
+				}
+				continue;
+
+			case '%':
+				if (str <= end)
+					*str = '%';
+				++str;
+				continue;
+
+				/* integer number formats - set up the flags and "break" */
+			case 'o':
+				base = 8;
+				break;
+
+			case 'X':
+				flags |= LARGE;
+			case 'x':
+				base = 16;
+				break;
+
+			case 'd':
+			case 'i':
+				flags |= SIGN;
+			case 'u':
+				break;
+
+			default:
+				if (str <= end)
+					*str = '%';
+				++str;
+				if (*fmt) {
+					if (str <= end)
+						*str = *fmt;
+					++str;
+				} else {
+					--fmt;
+				}
+				continue;
+		}
+		if (qualifier == 'L')
+			num = va_arg(args, long long);
+		else if (qualifier == 'l') {
+			num = va_arg(args, unsigned long);
+			if (flags & SIGN)
+				num = (signed long) num;
+		} else if (qualifier == 'Z' || qualifier == 'z') {
+			num = va_arg(args, size_t);
+		} else if (qualifier == 'h') {
+			num = (unsigned short) va_arg(args, int);
+			if (flags & SIGN)
+				num = (signed short) num;
+		} else {
+			num = va_arg(args, unsigned int);
+			if (flags & SIGN)
+				num = (signed int) num;
+		}
+		str = number(str, end, num, base,
+				field_width, precision, flags);
+	}
+	if (str <= end)
+		*str = '\0';
+	else if (size > 0)
+		/* don't write out a null byte if the buf size is zero */
+		*end = '\0';
+	/* the trailing null byte doesn't count towards the total
+	* ++str;
+	*/
+	return str-buf;
+}
+
+EXPORT_SYMBOL(vsnprintf);
+
+/**
+ * vscnprintf - Format a string and place it in a buffer
+ * @buf: The buffer to place the result into
+ * @size: The size of the buffer, including the trailing null space
+ * @fmt: The format string to use
+ * @args: Arguments for the format string
+ *
+ * The return value is the number of characters which have been written into
+ * the @buf not including the trailing '\0'. If @size is <= 0 the function
+ * returns 0.
+ *
+ * Call this function if you are already dealing with a va_list.
+ * You probably want scnprintf instead.
+ */
+int vscnprintf(char *buf, size_t size, const char *fmt, va_list args)
+{
+	int i;
+
+	i=vsnprintf(buf,size,fmt,args);
+	return (i >= size) ? (size - 1) : i;
+}
+
+EXPORT_SYMBOL(vscnprintf);
+
+/**
+ * snprintf - Format a string and place it in a buffer
+ * @buf: The buffer to place the result into
+ * @size: The size of the buffer, including the trailing null space
+ * @fmt: The format string to use
+ * @...: Arguments for the format string
+ *
+ * The return value is the number of characters which would be
+ * generated for the given input, excluding the trailing null,
+ * as per ISO C99.  If the return is greater than or equal to
+ * @size, the resulting string is truncated.
+ */
+int snprintf(char * buf, size_t size, const char *fmt, ...)
+{
+	va_list args;
+	int i;
+
+	va_start(args, fmt);
+	i=vsnprintf(buf,size,fmt,args);
+	va_end(args);
+	return i;
+}
+
+EXPORT_SYMBOL(snprintf);
+
+/**
+ * scnprintf - Format a string and place it in a buffer
+ * @buf: The buffer to place the result into
+ * @size: The size of the buffer, including the trailing null space
+ * @fmt: The format string to use
+ * @...: Arguments for the format string
+ *
+ * The return value is the number of characters written into @buf not including
+ * the trailing '\0'. If @size is <= 0 the function returns 0. If the return is
+ * greater than or equal to @size, the resulting string is truncated.
+ */
+
+int scnprintf(char * buf, size_t size, const char *fmt, ...)
+{
+	va_list args;
+	int i;
+
+	va_start(args, fmt);
+	i = vsnprintf(buf, size, fmt, args);
+	va_end(args);
+	return (i >= size) ? (size - 1) : i;
+}
+EXPORT_SYMBOL(scnprintf);
+
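+/*
+ * Example (illustrative only): with an 8 byte buffer and the 14
+ * character string "truncated text", snprintf() returns 14 (the length
+ * that would have been needed) while scnprintf() returns 7 (the number
+ * of characters actually stored, not counting the trailing '\0'):
+ *
+ *	char buf[8];
+ *
+ *	snprintf(buf, sizeof(buf), "%s", "truncated text");
+ *	scnprintf(buf, sizeof(buf), "%s", "truncated text");
+ */
+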
+/**
+ * vsprintf - Format a string and place it in a buffer
+ * @buf: The buffer to place the result into
+ * @fmt: The format string to use
+ * @args: Arguments for the format string
+ *
+ * The function returns the number of characters written
+ * into @buf. Use vsnprintf or vscnprintf in order to avoid
+ * buffer overflows.
+ *
+ * Call this function if you are already dealing with a va_list.
+ * You probably want sprintf instead.
+ */
+int vsprintf(char *buf, const char *fmt, va_list args)
+{
+	return vsnprintf(buf, INT_MAX, fmt, args);
+}
+
+EXPORT_SYMBOL(vsprintf);
+
+/**
+ * sprintf - Format a string and place it in a buffer
+ * @buf: The buffer to place the result into
+ * @fmt: The format string to use
+ * @...: Arguments for the format string
+ *
+ * The function returns the number of characters written
+ * into @buf. Use snprintf or scnprintf in order to avoid
+ * buffer overflows.
+ */
+int sprintf(char * buf, const char *fmt, ...)
+{
+	va_list args;
+	int i;
+
+	va_start(args, fmt);
+	i=vsnprintf(buf, INT_MAX, fmt, args);
+	va_end(args);
+	return i;
+}
+
+EXPORT_SYMBOL(sprintf);
+
+/**
+ * vsscanf - Unformat a buffer into a list of arguments
+ * @buf:	input buffer
+ * @fmt:	format of buffer
+ * @args:	arguments
+ */
+int vsscanf(const char * buf, const char * fmt, va_list args)
+{
+	const char *str = buf;
+	char *next;
+	char digit;
+	int num = 0;
+	int qualifier;
+	int base;
+	int field_width;
+	int is_sign = 0;
+
+	while(*fmt && *str) {
+		/* skip any white space in format */
+		/* white space in format matches any amount of
+		 * white space, including none, in the input.
+		 */
+		if (isspace(*fmt)) {
+			while (isspace(*fmt))
+				++fmt;
+			while (isspace(*str))
+				++str;
+		}
+
+		/* anything that is not a conversion must match exactly */
+		if (*fmt != '%' && *fmt) {
+			if (*fmt++ != *str++)
+				break;
+			continue;
+		}
+
+		if (!*fmt)
+			break;
+		++fmt;
+		
+		/* skip this conversion.
+		 * advance both strings to next white space
+		 */
+		if (*fmt == '*') {
+			while (!isspace(*fmt) && *fmt)
+				fmt++;
+			while (!isspace(*str) && *str)
+				str++;
+			continue;
+		}
+
+		/* get field width */
+		field_width = -1;
+		if (isdigit(*fmt))
+			field_width = skip_atoi(&fmt);
+
+		/* get conversion qualifier */
+		qualifier = -1;
+		if (*fmt == 'h' || *fmt == 'l' || *fmt == 'L' ||
+		    *fmt == 'Z' || *fmt == 'z') {
+			qualifier = *fmt++;
+			if (unlikely(qualifier == *fmt)) {
+				if (qualifier == 'h') {
+					qualifier = 'H';
+					fmt++;
+				} else if (qualifier == 'l') {
+					qualifier = 'L';
+					fmt++;
+				}
+			}
+		}
+		base = 10;
+		is_sign = 0;
+
+		if (!*fmt || !*str)
+			break;
+
+		switch(*fmt++) {
+		case 'c':
+		{
+			char *s = (char *) va_arg(args,char*);
+			if (field_width == -1)
+				field_width = 1;
+			do {
+				*s++ = *str++;
+			} while (--field_width > 0 && *str);
+			num++;
+		}
+		continue;
+		case 's':
+		{
+			char *s = (char *) va_arg(args, char *);
+			if(field_width == -1)
+				field_width = INT_MAX;
+			/* first, skip leading white space in buffer */
+			while (isspace(*str))
+				str++;
+
+			/* now copy until next white space */
+			while (*str && !isspace(*str) && field_width--) {
+				*s++ = *str++;
+			}
+			*s = '\0';
+			num++;
+		}
+		continue;
+		case 'n':
+			/* return number of characters read so far */
+		{
+			int *i = (int *)va_arg(args,int*);
+			*i = str - buf;
+		}
+		continue;
+		case 'o':
+			base = 8;
+			break;
+		case 'x':
+		case 'X':
+			base = 16;
+			break;
+		case 'i':
+                        base = 0;
+		case 'd':
+			is_sign = 1;
+		case 'u':
+			break;
+		case '%':
+			/* looking for '%' in str */
+			if (*str++ != '%') 
+				return num;
+			continue;
+		default:
+			/* invalid format; stop here */
+			return num;
+		}
+
+		/* have some sort of integer conversion.
+		 * first, skip white space in buffer.
+		 */
+		while (isspace(*str))
+			str++;
+
+		digit = *str;
+		if (is_sign && digit == '-')
+			digit = *(str + 1);
+
+		if (!digit
+                    || (base == 16 && !isxdigit(digit))
+                    || (base == 10 && !isdigit(digit))
+                    || (base == 8 && (!isdigit(digit) || digit > '7'))
+                    || (base == 0 && !isdigit(digit)))
+				break;
+
+		switch(qualifier) {
+		case 'H':	/* that's 'hh' in format */
+			if (is_sign) {
+				signed char *s = (signed char *) va_arg(args,signed char *);
+				*s = (signed char) simple_strtol(str,&next,base);
+			} else {
+				unsigned char *s = (unsigned char *) va_arg(args, unsigned char *);
+				*s = (unsigned char) simple_strtoul(str, &next, base);
+			}
+			break;
+		case 'h':
+			if (is_sign) {
+				short *s = (short *) va_arg(args,short *);
+				*s = (short) simple_strtol(str,&next,base);
+			} else {
+				unsigned short *s = (unsigned short *) va_arg(args, unsigned short *);
+				*s = (unsigned short) simple_strtoul(str, &next, base);
+			}
+			break;
+		case 'l':
+			if (is_sign) {
+				long *l = (long *) va_arg(args,long *);
+				*l = simple_strtol(str,&next,base);
+			} else {
+				unsigned long *l = (unsigned long*) va_arg(args,unsigned long*);
+				*l = simple_strtoul(str,&next,base);
+			}
+			break;
+		case 'L':
+			if (is_sign) {
+				long long *l = (long long*) va_arg(args,long long *);
+				*l = simple_strtoll(str,&next,base);
+			} else {
+				unsigned long long *l = (unsigned long long*) va_arg(args,unsigned long long*);
+				*l = simple_strtoull(str,&next,base);
+			}
+			break;
+		case 'Z':
+		case 'z':
+		{
+			size_t *s = (size_t*) va_arg(args,size_t*);
+			*s = (size_t) simple_strtoul(str,&next,base);
+		}
+		break;
+		default:
+			if (is_sign) {
+				int *i = (int *) va_arg(args, int*);
+				*i = (int) simple_strtol(str,&next,base);
+			} else {
+				unsigned int *i = (unsigned int*) va_arg(args, unsigned int*);
+				*i = (unsigned int) simple_strtoul(str,&next,base);
+			}
+			break;
+		}
+		num++;
+
+		if (!next)
+			break;
+		str = next;
+	}
+	return num;
+}
+
+EXPORT_SYMBOL(vsscanf);
+
+/**
+ * sscanf - Unformat a buffer into a list of arguments
+ * @buf:	input buffer
+ * @fmt:	formatting of buffer
+ * @...:	resulting arguments
+ */
+int sscanf(const char * buf, const char * fmt, ...)
+{
+	va_list args;
+	int i;
+
+	va_start(args,fmt);
+	i = vsscanf(buf,fmt,args);
+	va_end(args);
+	return i;
+}
+
+EXPORT_SYMBOL(sscanf);
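+
+/*
+ * Example (illustrative only): sscanf() returns the number of
+ * conversions that matched:
+ *
+ *	int major, minor;
+ *
+ *	if (sscanf("2.6.12", "%d.%d", &major, &minor) == 2)
+ *		printk("%d.%d\n", major, minor);
+ */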
diff --git a/lib/zlib_deflate/Makefile b/lib/zlib_deflate/Makefile
new file mode 100644
index 0000000..86275e3
--- /dev/null
+++ b/lib/zlib_deflate/Makefile
@@ -0,0 +1,11 @@
+#
+# This is a modified version of zlib, which does all memory
+# allocation ahead of time.
+#
+# This is the compression code, see zlib_inflate for the
+# decompression code.
+#
+
+obj-$(CONFIG_ZLIB_DEFLATE) += zlib_deflate.o
+
+zlib_deflate-objs := deflate.o deftree.o deflate_syms.o
diff --git a/lib/zlib_deflate/deflate.c b/lib/zlib_deflate/deflate.c
new file mode 100644
index 0000000..ad9a1bf
--- /dev/null
+++ b/lib/zlib_deflate/deflate.c
@@ -0,0 +1,1268 @@
+/* +++ deflate.c */
+/* deflate.c -- compress data using the deflation algorithm
+ * Copyright (C) 1995-1996 Jean-loup Gailly.
+ * For conditions of distribution and use, see copyright notice in zlib.h 
+ */
+
+/*
+ *  ALGORITHM
+ *
+ *      The "deflation" process depends on being able to identify portions
+ *      of the input text which are identical to earlier input (within a
+ *      sliding window trailing behind the input currently being processed).
+ *
+ *      The most straightforward technique turns out to be the fastest for
+ *      most input files: try all possible matches and select the longest.
+ *      The key feature of this algorithm is that insertions into the string
+ *      dictionary are very simple and thus fast, and deletions are avoided
+ *      completely. Insertions are performed at each input character, whereas
+ *      string matches are performed only when the previous match ends. So it
+ *      is preferable to spend more time in matches to allow very fast string
+ *      insertions and avoid deletions. The matching algorithm for small
+ *      strings is inspired from that of Rabin & Karp. A brute force approach
+ *      is used to find longer strings when a small match has been found.
+ *      A similar algorithm is used in comic (by Jan-Mark Wams) and freeze
+ *      (by Leonid Broukhis).
+ *         A previous version of this file used a more sophisticated algorithm
+ *      (by Fiala and Greene) which is guaranteed to run in linear amortized
+ *      time, but has a larger average cost, uses more memory and is patented.
+ *      However the F&G algorithm may be faster for some highly redundant
+ *      files if the parameter max_chain_length (described below) is too large.
+ *
+ *  ACKNOWLEDGEMENTS
+ *
+ *      The idea of lazy evaluation of matches is due to Jan-Mark Wams, and
+ *      I found it in 'freeze' written by Leonid Broukhis.
+ *      Thanks to many people for bug reports and testing.
+ *
+ *  REFERENCES
+ *
+ *      Deutsch, L.P.,"DEFLATE Compressed Data Format Specification".
+ *      Available in ftp://ds.internic.net/rfc/rfc1951.txt
+ *
+ *      A description of the Rabin and Karp algorithm is given in the book
+ *         "Algorithms" by R. Sedgewick, Addison-Wesley, p252.
+ *
+ *      Fiala,E.R., and Greene,D.H.
+ *         Data Compression with Finite Windows, Comm.ACM, 32,4 (1989) 490-505
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/zutil.h>
+#include "defutil.h"
+
+
+/* ===========================================================================
+ *  Function prototypes.
+ */
+typedef enum {
+    need_more,      /* block not completed, need more input or more output */
+    block_done,     /* block flush performed */
+    finish_started, /* finish started, need only more output at next deflate */
+    finish_done     /* finish done, accept no more input or output */
+} block_state;
+
+typedef block_state (*compress_func) (deflate_state *s, int flush);
+/* Compression function. Returns the block state after the call. */
+
+static void fill_window    (deflate_state *s);
+static block_state deflate_stored (deflate_state *s, int flush);
+static block_state deflate_fast   (deflate_state *s, int flush);
+static block_state deflate_slow   (deflate_state *s, int flush);
+static void lm_init        (deflate_state *s);
+static void putShortMSB    (deflate_state *s, uInt b);
+static void flush_pending  (z_streamp strm);
+static int read_buf        (z_streamp strm, Byte *buf, unsigned size);
+static uInt longest_match  (deflate_state *s, IPos cur_match);
+
+#ifdef DEBUG_ZLIB
+static  void check_match (deflate_state *s, IPos start, IPos match,
+                         int length);
+#endif
+
+/* ===========================================================================
+ * Local data
+ */
+
+#define NIL 0
+/* Tail of hash chains */
+
+#ifndef TOO_FAR
+#  define TOO_FAR 4096
+#endif
+/* Matches of length 3 are discarded if their distance exceeds TOO_FAR */
+
+#define MIN_LOOKAHEAD (MAX_MATCH+MIN_MATCH+1)
+/* Minimum amount of lookahead, except at the end of the input file.
+ * See deflate.c for comments about the MIN_MATCH+1.
+ */
+
+/* Values for max_lazy_match, good_match and max_chain_length, depending on
+ * the desired pack level (0..9). The values given below have been tuned to
+ * exclude worst case performance for pathological files. Better values may be
+ * found for specific files.
+ */
+typedef struct config_s {
+   ush good_length; /* reduce lazy search above this match length */
+   ush max_lazy;    /* do not perform lazy search above this match length */
+   ush nice_length; /* quit search above this match length */
+   ush max_chain;
+   compress_func func;
+} config;
+
+static const config configuration_table[10] = {
+/*      good lazy nice chain */
+/* 0 */ {0,    0,  0,    0, deflate_stored},  /* store only */
+/* 1 */ {4,    4,  8,    4, deflate_fast}, /* maximum speed, no lazy matches */
+/* 2 */ {4,    5, 16,    8, deflate_fast},
+/* 3 */ {4,    6, 32,   32, deflate_fast},
+
+/* 4 */ {4,    4, 16,   16, deflate_slow},  /* lazy matches */
+/* 5 */ {8,   16, 32,   32, deflate_slow},
+/* 6 */ {8,   16, 128, 128, deflate_slow},
+/* 7 */ {8,   32, 128, 256, deflate_slow},
+/* 8 */ {32, 128, 258, 1024, deflate_slow},
+/* 9 */ {32, 258, 258, 4096, deflate_slow}}; /* maximum compression */
+
+/* Note: the deflate() code requires max_lazy >= MIN_MATCH and max_chain >= 4
+ * For deflate_fast() (levels <= 3) good is ignored and lazy has a different
+ * meaning.
+ */
+
+#define EQUAL 0
+/* result of memcmp for equal strings */
+
+/* ===========================================================================
+ * Update a hash value with the given input byte
+ * IN  assertion: all calls to UPDATE_HASH are made with consecutive
+ *    input characters, so that a running hash key can be computed from the
+ *    previous key instead of complete recalculation each time.
+ */
+#define UPDATE_HASH(s,h,c) (h = (((h)<<s->hash_shift) ^ (c)) & s->hash_mask)
+
+
+/* ===========================================================================
+ * Insert string str in the dictionary and set match_head to the previous head
+ * of the hash chain (the most recent string with same hash key). Return
+ * the previous length of the hash chain.
+ * IN  assertion: all calls to INSERT_STRING are made with consecutive
+ *    input characters and the first MIN_MATCH bytes of str are valid
+ *    (except for the last MIN_MATCH-1 bytes of the input file).
+ */
+#define INSERT_STRING(s, str, match_head) \
+   (UPDATE_HASH(s, s->ins_h, s->window[(str) + (MIN_MATCH-1)]), \
+    s->prev[(str) & s->w_mask] = match_head = s->head[s->ins_h], \
+    s->head[s->ins_h] = (Pos)(str))
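
[Editor's note: a standalone sketch of what these two macros do, plain C with hypothetical toy_* names and sizes; a 3-byte rolling hash feeding head[]/prev_[] chains.]

#include <stdio.h>
#include <string.h>

#define TOY_HASH_BITS  5
#define TOY_HASH_SIZE  (1 << TOY_HASH_BITS)
#define TOY_HASH_MASK  (TOY_HASH_SIZE - 1)
#define TOY_HASH_SHIFT ((TOY_HASH_BITS + 3 - 1) / 3)   /* MIN_MATCH == 3 */

static unsigned head[TOY_HASH_SIZE];   /* newest position for each hash  */
static unsigned prev_[64];             /* older position with same hash  */

/* Hash in the byte at pos+2 and link position pos into its chain, returning
 * the previous head -- the same steps UPDATE_HASH/INSERT_STRING perform. */
static unsigned toy_insert(const unsigned char *win, unsigned pos,
			   unsigned *ins_h)
{
	unsigned match_head;

	*ins_h = ((*ins_h << TOY_HASH_SHIFT) ^ win[pos + 2]) & TOY_HASH_MASK;
	match_head = head[*ins_h];
	prev_[pos] = match_head;
	head[*ins_h] = pos;
	return match_head;
}

int main(void)
{
	const unsigned char s[] = "abcabcabc";
	unsigned ins_h, pos, hit;

	/* Prime the hash with the first two bytes, as lm_init/fill_window do. */
	ins_h = s[0];
	ins_h = ((ins_h << TOY_HASH_SHIFT) ^ s[1]) & TOY_HASH_MASK;

	for (pos = 0; pos + 2 < sizeof(s) - 1; pos++) {
		hit = toy_insert(s, pos, &ins_h);
		printf("pos %u: previous chain head %u\n", pos, hit);
	}
	/* Positions 4, 5, 6 report hits 1, 2, 3 (the same trigram 3 bytes
	 * back); position 3's hit is 0, indistinguishable from the NIL
	 * sentinel -- the window-index-0 caveat the real code notes. */
	return 0;
}
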
+
+/* ===========================================================================
+ * Initialize the hash table (avoiding 64K overflow for 16 bit systems).
+ * prev[] will be initialized on the fly.
+ */
+#define CLEAR_HASH(s) \
+    s->head[s->hash_size-1] = NIL; \
+    memset((char *)s->head, 0, (unsigned)(s->hash_size-1)*sizeof(*s->head));
+
+/* ========================================================================= */
+int zlib_deflateInit_(
+	z_streamp strm,
+	int level,
+	const char *version,
+	int stream_size
+)
+{
+    return zlib_deflateInit2_(strm, level, Z_DEFLATED, MAX_WBITS,
+			      DEF_MEM_LEVEL,
+			      Z_DEFAULT_STRATEGY, version, stream_size);
+    /* To do: ignore strm->next_in if we use it as window */
+}
+
+/* ========================================================================= */
+int zlib_deflateInit2_(
+	z_streamp strm,
+	int  level,
+	int  method,
+	int  windowBits,
+	int  memLevel,
+	int  strategy,
+	const char *version,
+	int stream_size
+)
+{
+    deflate_state *s;
+    int noheader = 0;
+    static char* my_version = ZLIB_VERSION;
+    deflate_workspace *mem;
+
+    ush *overlay;
+    /* We overlay pending_buf and d_buf+l_buf. This works since the average
+     * output size for (length,distance) codes is <= 24 bits.
+     */
+
+    if (version == NULL || version[0] != my_version[0] ||
+        stream_size != sizeof(z_stream)) {
+	return Z_VERSION_ERROR;
+    }
+    if (strm == NULL) return Z_STREAM_ERROR;
+
+    strm->msg = NULL;
+
+    if (level == Z_DEFAULT_COMPRESSION) level = 6;
+
+    mem = (deflate_workspace *) strm->workspace;
+
+    if (windowBits < 0) { /* undocumented feature: suppress zlib header */
+        noheader = 1;
+        windowBits = -windowBits;
+    }
+    if (memLevel < 1 || memLevel > MAX_MEM_LEVEL || method != Z_DEFLATED ||
+        windowBits < 9 || windowBits > 15 || level < 0 || level > 9 ||
+	strategy < 0 || strategy > Z_HUFFMAN_ONLY) {
+        return Z_STREAM_ERROR;
+    }
+    s = (deflate_state *) &(mem->deflate_memory);
+    strm->state = (struct internal_state *)s;
+    s->strm = strm;
+
+    s->noheader = noheader;
+    s->w_bits = windowBits;
+    s->w_size = 1 << s->w_bits;
+    s->w_mask = s->w_size - 1;
+
+    s->hash_bits = memLevel + 7;
+    s->hash_size = 1 << s->hash_bits;
+    s->hash_mask = s->hash_size - 1;
+    s->hash_shift =  ((s->hash_bits+MIN_MATCH-1)/MIN_MATCH);
+
+    s->window = (Byte *) mem->window_memory;
+    s->prev   = (Pos *)  mem->prev_memory;
+    s->head   = (Pos *)  mem->head_memory;
+
+    s->lit_bufsize = 1 << (memLevel + 6); /* 16K elements by default */
+
+    overlay = (ush *) mem->overlay_memory;
+    s->pending_buf = (uch *) overlay;
+    s->pending_buf_size = (ulg)s->lit_bufsize * (sizeof(ush)+2L);
+
+    s->d_buf = overlay + s->lit_bufsize/sizeof(ush);
+    s->l_buf = s->pending_buf + (1+sizeof(ush))*s->lit_bufsize;
+
+    s->level = level;
+    s->strategy = strategy;
+    s->method = (Byte)method;
+
+    return zlib_deflateReset(strm);
+}
+
+/* ========================================================================= */
+int zlib_deflateSetDictionary(
+	z_streamp strm,
+	const Byte *dictionary,
+	uInt  dictLength
+)
+{
+    deflate_state *s;
+    uInt length = dictLength;
+    uInt n;
+    IPos hash_head = 0;
+
+    if (strm == NULL || strm->state == NULL || dictionary == NULL)
+	return Z_STREAM_ERROR;
+
+    s = (deflate_state *) strm->state;
+    if (s->status != INIT_STATE) return Z_STREAM_ERROR;
+
+    strm->adler = zlib_adler32(strm->adler, dictionary, dictLength);
+
+    if (length < MIN_MATCH) return Z_OK;
+    if (length > MAX_DIST(s)) {
+	length = MAX_DIST(s);
+#ifndef USE_DICT_HEAD
+	dictionary += dictLength - length; /* use the tail of the dictionary */
+#endif
+    }
+    memcpy((char *)s->window, dictionary, length);
+    s->strstart = length;
+    s->block_start = (long)length;
+
+    /* Insert all strings in the hash table (except for the last two bytes).
+     * s->lookahead stays null, so s->ins_h will be recomputed at the next
+     * call of fill_window.
+     */
+    s->ins_h = s->window[0];
+    UPDATE_HASH(s, s->ins_h, s->window[1]);
+    for (n = 0; n <= length - MIN_MATCH; n++) {
+	INSERT_STRING(s, n, hash_head);
+    }
+    if (hash_head) hash_head = 0;  /* to make compiler happy */
+    return Z_OK;
+}
+
+/* ========================================================================= */
+int zlib_deflateReset(
+	z_streamp strm
+)
+{
+    deflate_state *s;
+    
+    if (strm == NULL || strm->state == NULL)
+        return Z_STREAM_ERROR;
+
+    strm->total_in = strm->total_out = 0;
+    strm->msg = NULL;
+    strm->data_type = Z_UNKNOWN;
+
+    s = (deflate_state *)strm->state;
+    s->pending = 0;
+    s->pending_out = s->pending_buf;
+
+    if (s->noheader < 0) {
+        s->noheader = 0; /* was set to -1 by deflate(..., Z_FINISH); */
+    }
+    s->status = s->noheader ? BUSY_STATE : INIT_STATE;
+    strm->adler = 1;
+    s->last_flush = Z_NO_FLUSH;
+
+    zlib_tr_init(s);
+    lm_init(s);
+
+    return Z_OK;
+}
+
+/* ========================================================================= */
+int zlib_deflateParams(
+	z_streamp strm,
+	int level,
+	int strategy
+)
+{
+    deflate_state *s;
+    compress_func func;
+    int err = Z_OK;
+
+    if (strm == NULL || strm->state == NULL) return Z_STREAM_ERROR;
+    s = (deflate_state *) strm->state;
+
+    if (level == Z_DEFAULT_COMPRESSION) {
+	level = 6;
+    }
+    if (level < 0 || level > 9 || strategy < 0 || strategy > Z_HUFFMAN_ONLY) {
+	return Z_STREAM_ERROR;
+    }
+    func = configuration_table[s->level].func;
+
+    if (func != configuration_table[level].func && strm->total_in != 0) {
+	/* Flush the last buffer: */
+	err = zlib_deflate(strm, Z_PARTIAL_FLUSH);
+    }
+    if (s->level != level) {
+	s->level = level;
+	s->max_lazy_match   = configuration_table[level].max_lazy;
+	s->good_match       = configuration_table[level].good_length;
+	s->nice_match       = configuration_table[level].nice_length;
+	s->max_chain_length = configuration_table[level].max_chain;
+    }
+    s->strategy = strategy;
+    return err;
+}
+
+/* =========================================================================
+ * Put a short in the pending buffer. The 16-bit value is put in MSB order.
+ * IN assertion: the stream state is correct and there is enough room in
+ * pending_buf.
+ */
+static void putShortMSB(
+	deflate_state *s,
+	uInt b
+)
+{
+    put_byte(s, (Byte)(b >> 8));
+    put_byte(s, (Byte)(b & 0xff));
+}   
+
+/* =========================================================================
+ * Flush as much pending output as possible. All deflate() output goes
+ * through this function so some applications may wish to modify it
+ * to avoid allocating a large strm->next_out buffer and copying into it.
+ * (See also read_buf()).
+ */
+static void flush_pending(
+	z_streamp strm
+)
+{
+    deflate_state *s = (deflate_state *) strm->state;
+    unsigned len = s->pending;
+
+    if (len > strm->avail_out) len = strm->avail_out;
+    if (len == 0) return;
+
+    if (strm->next_out != NULL) {
+	memcpy(strm->next_out, s->pending_out, len);
+	strm->next_out += len;
+    }
+    s->pending_out += len;
+    strm->total_out += len;
+    strm->avail_out  -= len;
+    s->pending -= len;
+    if (s->pending == 0) {
+        s->pending_out = s->pending_buf;
+    }
+}
+
+/* ========================================================================= */
+int zlib_deflate(
+	z_streamp strm,
+	int flush
+)
+{
+    int old_flush; /* value of flush param for previous deflate call */
+    deflate_state *s;
+
+    if (strm == NULL || strm->state == NULL ||
+	flush > Z_FINISH || flush < 0) {
+        return Z_STREAM_ERROR;
+    }
+    s = (deflate_state *) strm->state;
+
+    if ((strm->next_in == NULL && strm->avail_in != 0) ||
+	(s->status == FINISH_STATE && flush != Z_FINISH)) {
+        return Z_STREAM_ERROR;
+    }
+    if (strm->avail_out == 0) return Z_BUF_ERROR;
+
+    s->strm = strm; /* just in case */
+    old_flush = s->last_flush;
+    s->last_flush = flush;
+
+    /* Write the zlib header */
+    if (s->status == INIT_STATE) {
+
+        uInt header = (Z_DEFLATED + ((s->w_bits-8)<<4)) << 8;
+        uInt level_flags = (s->level-1) >> 1;
+
+        if (level_flags > 3) level_flags = 3;
+        header |= (level_flags << 6);
+	if (s->strstart != 0) header |= PRESET_DICT;
+        header += 31 - (header % 31);
+
+        s->status = BUSY_STATE;
+        putShortMSB(s, header);
+
+	/* Save the adler32 of the preset dictionary: */
+	if (s->strstart != 0) {
+	    putShortMSB(s, (uInt)(strm->adler >> 16));
+	    putShortMSB(s, (uInt)(strm->adler & 0xffff));
+	}
+	strm->adler = 1L;
+    }
+
+    /* Flush as much pending output as possible */
+    if (s->pending != 0) {
+        flush_pending(strm);
+        if (strm->avail_out == 0) {
+	    /* Since avail_out is 0, deflate will be called again with
+	     * more output space, but possibly with both pending and
+	     * avail_in equal to zero. There won't be anything to do,
+	     * but this is not an error situation so make sure we
+	     * return OK instead of BUF_ERROR at next call of deflate:
+             */
+	    s->last_flush = -1;
+	    return Z_OK;
+	}
+
+    /* Make sure there is something to do and avoid duplicate consecutive
+     * flushes. For repeated and useless calls with Z_FINISH, we keep
+     * returning Z_STREAM_END instead of Z_BUF_ERROR.
+     */
+    } else if (strm->avail_in == 0 && flush <= old_flush &&
+	       flush != Z_FINISH) {
+        return Z_BUF_ERROR;
+    }
+
+    /* User must not provide more input after the first FINISH: */
+    if (s->status == FINISH_STATE && strm->avail_in != 0) {
+        return Z_BUF_ERROR;
+    }
+
+    /* Start a new block or continue the current one.
+     */
+    if (strm->avail_in != 0 || s->lookahead != 0 ||
+        (flush != Z_NO_FLUSH && s->status != FINISH_STATE)) {
+        block_state bstate;
+
+	bstate = (*(configuration_table[s->level].func))(s, flush);
+
+        if (bstate == finish_started || bstate == finish_done) {
+            s->status = FINISH_STATE;
+        }
+        if (bstate == need_more || bstate == finish_started) {
+	    if (strm->avail_out == 0) {
+	        s->last_flush = -1; /* avoid BUF_ERROR next call, see above */
+	    }
+	    return Z_OK;
+	    /* If flush != Z_NO_FLUSH && avail_out == 0, the next call
+	     * of deflate should use the same flush parameter to make sure
+	     * that the flush is complete. So we don't have to output an
+	     * empty block here, this will be done at next call. This also
+	     * ensures that for a very small output buffer, we emit at most
+	     * one empty block.
+	     */
+	}
+        if (bstate == block_done) {
+            if (flush == Z_PARTIAL_FLUSH) {
+                zlib_tr_align(s);
+	    } else if (flush == Z_PACKET_FLUSH) {
+		/* Output just the 3-bit `stored' block type value,
+		   but not a zero length. */
+		zlib_tr_stored_type_only(s);
+            } else { /* FULL_FLUSH or SYNC_FLUSH */
+                zlib_tr_stored_block(s, (char*)0, 0L, 0);
+                /* For a full flush, this empty block will be recognized
+                 * as a special marker by inflate_sync().
+                 */
+                if (flush == Z_FULL_FLUSH) {
+                    CLEAR_HASH(s);             /* forget history */
+                }
+            }
+            flush_pending(strm);
+	    if (strm->avail_out == 0) {
+	      s->last_flush = -1; /* avoid BUF_ERROR at next call, see above */
+	      return Z_OK;
+	    }
+        }
+    }
+    Assert(strm->avail_out > 0, "bug2");
+
+    if (flush != Z_FINISH) return Z_OK;
+    if (s->noheader) return Z_STREAM_END;
+
+    /* Write the zlib trailer (adler32) */
+    putShortMSB(s, (uInt)(strm->adler >> 16));
+    putShortMSB(s, (uInt)(strm->adler & 0xffff));
+    flush_pending(strm);
+    /* If avail_out is zero, the application will call deflate again
+     * to flush the rest.
+     */
+    s->noheader = -1; /* write the trailer only once! */
+    return s->pending != 0 ? Z_OK : Z_STREAM_END;
+}
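
[Editor's note: as a quick sanity check of the header computation in zlib_deflate() above, this standalone snippet (not kernel code) reproduces the familiar 0x78 0x9c bytes for the default level 6 with a 32K window.]

#include <stdio.h>

int main(void)
{
	unsigned w_bits = 15, level = 6;
	unsigned header = (8 + ((w_bits - 8) << 4)) << 8;  /* Z_DEFLATED == 8 */
	unsigned level_flags = (level - 1) >> 1;

	if (level_flags > 3)
		level_flags = 3;
	header |= level_flags << 6;
	/* No preset dictionary, so PRESET_DICT is not set. */
	header += 31 - (header % 31);   /* make the 16-bit value divisible by 31 */

	printf("%02x %02x\n", header >> 8, header & 0xff);  /* prints: 78 9c */
	return 0;
}
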
+
+/* ========================================================================= */
+int zlib_deflateEnd(
+	z_streamp strm
+)
+{
+    int status;
+    deflate_state *s;
+
+    if (strm == NULL || strm->state == NULL) return Z_STREAM_ERROR;
+    s = (deflate_state *) strm->state;
+
+    status = s->status;
+    if (status != INIT_STATE && status != BUSY_STATE &&
+	status != FINISH_STATE) {
+      return Z_STREAM_ERROR;
+    }
+
+    strm->state = NULL;
+
+    return status == BUSY_STATE ? Z_DATA_ERROR : Z_OK;
+}
+
+/* =========================================================================
+ * Copy the source state to the destination state.
+ */
+int zlib_deflateCopy (
+	z_streamp dest,
+	z_streamp source
+)
+{
+#ifdef MAXSEG_64K
+    return Z_STREAM_ERROR;
+#else
+    deflate_state *ds;
+    deflate_state *ss;
+    ush *overlay;
+    deflate_workspace *mem;
+
+
+    if (source == NULL || dest == NULL || source->state == NULL) {
+        return Z_STREAM_ERROR;
+    }
+
+    ss = (deflate_state *) source->state;
+
+    *dest = *source;
+
+    mem = (deflate_workspace *) dest->workspace;
+
+    ds = &(mem->deflate_memory);
+
+    dest->state = (struct internal_state *) ds;
+    *ds = *ss;
+    ds->strm = dest;
+
+    ds->window = (Byte *) mem->window_memory;
+    ds->prev   = (Pos *)  mem->prev_memory;
+    ds->head   = (Pos *)  mem->head_memory;
+    overlay = (ush *) mem->overlay_memory;
+    ds->pending_buf = (uch *) overlay;
+
+    memcpy(ds->window, ss->window, ds->w_size * 2 * sizeof(Byte));
+    memcpy(ds->prev, ss->prev, ds->w_size * sizeof(Pos));
+    memcpy(ds->head, ss->head, ds->hash_size * sizeof(Pos));
+    memcpy(ds->pending_buf, ss->pending_buf, (uInt)ds->pending_buf_size);
+
+    ds->pending_out = ds->pending_buf + (ss->pending_out - ss->pending_buf);
+    ds->d_buf = overlay + ds->lit_bufsize/sizeof(ush);
+    ds->l_buf = ds->pending_buf + (1+sizeof(ush))*ds->lit_bufsize;
+
+    ds->l_desc.dyn_tree = ds->dyn_ltree;
+    ds->d_desc.dyn_tree = ds->dyn_dtree;
+    ds->bl_desc.dyn_tree = ds->bl_tree;
+
+    return Z_OK;
+#endif
+}
+
+/* ===========================================================================
+ * Read a new buffer from the current input stream, update the adler32
+ * and total number of bytes read.  All deflate() input goes through
+ * this function so some applications may wish to modify it to avoid
+ * allocating a large strm->next_in buffer and copying from it.
+ * (See also flush_pending()).
+ */
+static int read_buf(
+	z_streamp strm,
+	Byte *buf,
+	unsigned size
+)
+{
+    unsigned len = strm->avail_in;
+
+    if (len > size) len = size;
+    if (len == 0) return 0;
+
+    strm->avail_in  -= len;
+
+    if (!((deflate_state *)(strm->state))->noheader) {
+        strm->adler = zlib_adler32(strm->adler, strm->next_in, len);
+    }
+    memcpy(buf, strm->next_in, len);
+    strm->next_in  += len;
+    strm->total_in += len;
+
+    return (int)len;
+}
+
+/* ===========================================================================
+ * Initialize the "longest match" routines for a new zlib stream
+ */
+static void lm_init(
+	deflate_state *s
+)
+{
+    s->window_size = (ulg)2L*s->w_size;
+
+    CLEAR_HASH(s);
+
+    /* Set the default configuration parameters:
+     */
+    s->max_lazy_match   = configuration_table[s->level].max_lazy;
+    s->good_match       = configuration_table[s->level].good_length;
+    s->nice_match       = configuration_table[s->level].nice_length;
+    s->max_chain_length = configuration_table[s->level].max_chain;
+
+    s->strstart = 0;
+    s->block_start = 0L;
+    s->lookahead = 0;
+    s->match_length = s->prev_length = MIN_MATCH-1;
+    s->match_available = 0;
+    s->ins_h = 0;
+}
+
+/* ===========================================================================
+ * Set match_start to the longest match starting at the given string and
+ * return its length. Matches shorter or equal to prev_length are discarded,
+ * in which case the result is equal to prev_length and match_start is
+ * garbage.
+ * IN assertions: cur_match is the head of the hash chain for the current
+ *   string (strstart) and its distance is <= MAX_DIST, and prev_length >= 1
+ * OUT assertion: the match length is not greater than s->lookahead.
+ */
+/* For 80x86 and 680x0, an optimized version will be provided in match.asm or
+ * match.S. The code will be functionally equivalent.
+ */
+static uInt longest_match(
+	deflate_state *s,
+	IPos cur_match			/* current match */
+)
+{
+    unsigned chain_length = s->max_chain_length;/* max hash chain length */
+    register Byte *scan = s->window + s->strstart; /* current string */
+    register Byte *match;                       /* matched string */
+    register int len;                           /* length of current match */
+    int best_len = s->prev_length;              /* best match length so far */
+    int nice_match = s->nice_match;             /* stop if match long enough */
+    IPos limit = s->strstart > (IPos)MAX_DIST(s) ?
+        s->strstart - (IPos)MAX_DIST(s) : NIL;
+    /* Stop when cur_match becomes <= limit. To simplify the code,
+     * we prevent matches with the string of window index 0.
+     */
+    Pos *prev = s->prev;
+    uInt wmask = s->w_mask;
+
+#ifdef UNALIGNED_OK
+    /* Compare two bytes at a time. Note: this is not always beneficial.
+     * Try with and without -DUNALIGNED_OK to check.
+     */
+    register Byte *strend = s->window + s->strstart + MAX_MATCH - 1;
+    register ush scan_start = *(ush*)scan;
+    register ush scan_end   = *(ush*)(scan+best_len-1);
+#else
+    register Byte *strend = s->window + s->strstart + MAX_MATCH;
+    register Byte scan_end1  = scan[best_len-1];
+    register Byte scan_end   = scan[best_len];
+#endif
+
+    /* The code is optimized for HASH_BITS >= 8 and MAX_MATCH-2 multiple of 16.
+     * It is easy to get rid of this optimization if necessary.
+     */
+    Assert(s->hash_bits >= 8 && MAX_MATCH == 258, "Code too clever");
+
+    /* Do not waste too much time if we already have a good match: */
+    if (s->prev_length >= s->good_match) {
+        chain_length >>= 2;
+    }
+    /* Do not look for matches beyond the end of the input. This is necessary
+     * to make deflate deterministic.
+     */
+    if ((uInt)nice_match > s->lookahead) nice_match = s->lookahead;
+
+    Assert((ulg)s->strstart <= s->window_size-MIN_LOOKAHEAD, "need lookahead");
+
+    do {
+        Assert(cur_match < s->strstart, "no future");
+        match = s->window + cur_match;
+
+        /* Skip to next match if the match length cannot increase
+         * or if the match length is less than 2:
+         */
+#if (defined(UNALIGNED_OK) && MAX_MATCH == 258)
+        /* This code assumes sizeof(unsigned short) == 2. Do not use
+         * UNALIGNED_OK if your compiler uses a different size.
+         */
+        if (*(ush*)(match+best_len-1) != scan_end ||
+            *(ush*)match != scan_start) continue;
+
+        /* It is not necessary to compare scan[2] and match[2] since they are
+         * always equal when the other bytes match, given that the hash keys
+         * are equal and that HASH_BITS >= 8. Compare 2 bytes at a time at
+         * strstart+3, +5, ... up to strstart+257. We check for insufficient
+         * lookahead only every 4th comparison; the 128th check will be made
+         * at strstart+257. If MAX_MATCH-2 is not a multiple of 8, it is
+         * necessary to put more guard bytes at the end of the window, or
+         * to check more often for insufficient lookahead.
+         */
+        Assert(scan[2] == match[2], "scan[2]?");
+        scan++, match++;
+        do {
+        } while (*(ush*)(scan+=2) == *(ush*)(match+=2) &&
+                 *(ush*)(scan+=2) == *(ush*)(match+=2) &&
+                 *(ush*)(scan+=2) == *(ush*)(match+=2) &&
+                 *(ush*)(scan+=2) == *(ush*)(match+=2) &&
+                 scan < strend);
+        /* The funny "do {}" generates better code on most compilers */
+
+        /* Here, scan <= window+strstart+257 */
+        Assert(scan <= s->window+(unsigned)(s->window_size-1), "wild scan");
+        if (*scan == *match) scan++;
+
+        len = (MAX_MATCH - 1) - (int)(strend-scan);
+        scan = strend - (MAX_MATCH-1);
+
+#else /* UNALIGNED_OK */
+
+        if (match[best_len]   != scan_end  ||
+            match[best_len-1] != scan_end1 ||
+            *match            != *scan     ||
+            *++match          != scan[1])      continue;
+
+        /* The check at best_len-1 can be removed because it will be made
+         * again later. (This heuristic is not always a win.)
+         * It is not necessary to compare scan[2] and match[2] since they
+         * are always equal when the other bytes match, given that
+         * the hash keys are equal and that HASH_BITS >= 8.
+         */
+        scan += 2, match++;
+        Assert(*scan == *match, "match[2]?");
+
+        /* We check for insufficient lookahead only every 8th comparison;
+         * the 256th check will be made at strstart+258.
+         */
+        do {
+        } while (*++scan == *++match && *++scan == *++match &&
+                 *++scan == *++match && *++scan == *++match &&
+                 *++scan == *++match && *++scan == *++match &&
+                 *++scan == *++match && *++scan == *++match &&
+                 scan < strend);
+
+        Assert(scan <= s->window+(unsigned)(s->window_size-1), "wild scan");
+
+        len = MAX_MATCH - (int)(strend - scan);
+        scan = strend - MAX_MATCH;
+
+#endif /* UNALIGNED_OK */
+
+        if (len > best_len) {
+            s->match_start = cur_match;
+            best_len = len;
+            if (len >= nice_match) break;
+#ifdef UNALIGNED_OK
+            scan_end = *(ush*)(scan+best_len-1);
+#else
+            scan_end1  = scan[best_len-1];
+            scan_end   = scan[best_len];
+#endif
+        }
+    } while ((cur_match = prev[cur_match & wmask]) > limit
+             && --chain_length != 0);
+
+    if ((uInt)best_len <= s->lookahead) return best_len;
+    return s->lookahead;
+}
+
+#ifdef DEBUG_ZLIB
+/* ===========================================================================
+ * Check that the match at match_start is indeed a match.
+ */
+static void check_match(
+	deflate_state *s,
+	IPos start,
+	IPos match,
+	int length
+)
+{
+    /* check that the match is indeed a match */
+    if (memcmp((char *)s->window + match,
+                (char *)s->window + start, length) != EQUAL) {
+        fprintf(stderr, " start %u, match %u, length %d\n",
+		start, match, length);
+        do {
+	    fprintf(stderr, "%c%c", s->window[match++], s->window[start++]);
+	} while (--length != 0);
+        z_error("invalid match");
+    }
+    if (z_verbose > 1) {
+        fprintf(stderr,"\\[%d,%d]", start-match, length);
+        do { putc(s->window[start++], stderr); } while (--length != 0);
+    }
+}
+#else
+#  define check_match(s, start, match, length)
+#endif
+
+/* ===========================================================================
+ * Fill the window when the lookahead becomes insufficient.
+ * Updates strstart and lookahead.
+ *
+ * IN assertion: lookahead < MIN_LOOKAHEAD
+ * OUT assertions: strstart <= window_size-MIN_LOOKAHEAD
+ *    At least one byte has been read, or avail_in == 0; reads are
+ *    performed for at least two bytes (required for the zip translate_eol
+ *    option -- not supported here).
+ */
+static void fill_window(
+	deflate_state *s
+)
+{
+    register unsigned n, m;
+    register Pos *p;
+    unsigned more;    /* Amount of free space at the end of the window. */
+    uInt wsize = s->w_size;
+
+    do {
+        more = (unsigned)(s->window_size -(ulg)s->lookahead -(ulg)s->strstart);
+
+        /* Deal with !@#$% 64K limit: */
+        if (more == 0 && s->strstart == 0 && s->lookahead == 0) {
+            more = wsize;
+
+        } else if (more == (unsigned)(-1)) {
+            /* Very unlikely, but possible on 16 bit machine if strstart == 0
+             * and lookahead == 1 (input done one byte at a time)
+             */
+            more--;
+
+        /* If the window is almost full and there is insufficient lookahead,
+         * move the upper half to the lower one to make room in the upper half.
+         */
+        } else if (s->strstart >= wsize+MAX_DIST(s)) {
+
+            memcpy((char *)s->window, (char *)s->window+wsize,
+                   (unsigned)wsize);
+            s->match_start -= wsize;
+            s->strstart    -= wsize; /* we now have strstart >= MAX_DIST */
+            s->block_start -= (long) wsize;
+
+            /* Slide the hash table (could be avoided with 32 bit values
+               at the expense of memory usage). We slide even when level == 0
+               to keep the hash table consistent if we switch back to level > 0
+               later. (Using level 0 permanently is not an optimal usage of
+               zlib, so we don't care about this pathological case.)
+             */
+            n = s->hash_size;
+            p = &s->head[n];
+            do {
+                m = *--p;
+                *p = (Pos)(m >= wsize ? m-wsize : NIL);
+            } while (--n);
+
+            n = wsize;
+            p = &s->prev[n];
+            do {
+                m = *--p;
+                *p = (Pos)(m >= wsize ? m-wsize : NIL);
+                /* If n is not on any hash chain, prev[n] is garbage but
+                 * its value will never be used.
+                 */
+            } while (--n);
+            more += wsize;
+        }
+        if (s->strm->avail_in == 0) return;
+
+        /* If there was no sliding:
+         *    strstart <= WSIZE+MAX_DIST-1 && lookahead <= MIN_LOOKAHEAD - 1 &&
+         *    more == window_size - lookahead - strstart
+         * => more >= window_size - (MIN_LOOKAHEAD-1 + WSIZE + MAX_DIST-1)
+         * => more >= window_size - 2*WSIZE + 2
+         * In the BIG_MEM or MMAP case (not yet supported),
+         *   window_size == input_size + MIN_LOOKAHEAD  &&
+         *   strstart + s->lookahead <= input_size => more >= MIN_LOOKAHEAD.
+         * Otherwise, window_size == 2*WSIZE so more >= 2.
+         * If there was sliding, more >= WSIZE. So in all cases, more >= 2.
+         */
+        Assert(more >= 2, "more < 2");
+
+        n = read_buf(s->strm, s->window + s->strstart + s->lookahead, more);
+        s->lookahead += n;
+
+        /* Initialize the hash value now that we have some input: */
+        if (s->lookahead >= MIN_MATCH) {
+            s->ins_h = s->window[s->strstart];
+            UPDATE_HASH(s, s->ins_h, s->window[s->strstart+1]);
+#if MIN_MATCH != 3
+            Call UPDATE_HASH() MIN_MATCH-3 more times
+#endif
+        }
+        /* If the whole input has less than MIN_MATCH bytes, ins_h is garbage,
+         * but this is not important since only literal bytes will be emitted.
+         */
+
+    } while (s->lookahead < MIN_LOOKAHEAD && s->strm->avail_in != 0);
+}
+
+/* ===========================================================================
+ * Flush the current block, with given end-of-file flag.
+ * IN assertion: strstart is set to the end of the current match.
+ */
+#define FLUSH_BLOCK_ONLY(s, eof) { \
+   zlib_tr_flush_block(s, (s->block_start >= 0L ? \
+                   (char *)&s->window[(unsigned)s->block_start] : \
+                   NULL), \
+		(ulg)((long)s->strstart - s->block_start), \
+		(eof)); \
+   s->block_start = s->strstart; \
+   flush_pending(s->strm); \
+   Tracev((stderr,"[FLUSH]")); \
+}
+
+/* Same but force premature exit if necessary. */
+#define FLUSH_BLOCK(s, eof) { \
+   FLUSH_BLOCK_ONLY(s, eof); \
+   if (s->strm->avail_out == 0) return (eof) ? finish_started : need_more; \
+}
+
+/* ===========================================================================
+ * Copy without compression as much as possible from the input stream, return
+ * the current block state.
+ * This function does not insert new strings in the dictionary since
+ * uncompressible data is probably not useful. This function is used
+ * only for the level=0 compression option.
+ * NOTE: this function should be optimized to avoid extra copying from
+ * window to pending_buf.
+ */
+static block_state deflate_stored(
+	deflate_state *s,
+	int flush
+)
+{
+    /* Stored blocks are limited to 0xffff bytes, pending_buf is limited
+     * to pending_buf_size, and each stored block has a 5 byte header:
+     */
+    ulg max_block_size = 0xffff;
+    ulg max_start;
+
+    if (max_block_size > s->pending_buf_size - 5) {
+        max_block_size = s->pending_buf_size - 5;
+    }
+
+    /* Copy as much as possible from input to output: */
+    for (;;) {
+        /* Fill the window as much as possible: */
+        if (s->lookahead <= 1) {
+
+            Assert(s->strstart < s->w_size+MAX_DIST(s) ||
+		   s->block_start >= (long)s->w_size, "slide too late");
+
+            fill_window(s);
+            if (s->lookahead == 0 && flush == Z_NO_FLUSH) return need_more;
+
+            if (s->lookahead == 0) break; /* flush the current block */
+        }
+	Assert(s->block_start >= 0L, "block gone");
+
+	s->strstart += s->lookahead;
+	s->lookahead = 0;
+
+	/* Emit a stored block if pending_buf will be full: */
+ 	max_start = s->block_start + max_block_size;
+        if (s->strstart == 0 || (ulg)s->strstart >= max_start) {
+	    /* strstart == 0 is possible when wraparound on 16-bit machine */
+	    s->lookahead = (uInt)(s->strstart - max_start);
+	    s->strstart = (uInt)max_start;
+            FLUSH_BLOCK(s, 0);
+	}
+	/* Flush if we may have to slide, otherwise block_start may become
+         * negative and the data will be gone:
+         */
+        if (s->strstart - (uInt)s->block_start >= MAX_DIST(s)) {
+            FLUSH_BLOCK(s, 0);
+	}
+    }
+    FLUSH_BLOCK(s, flush == Z_FINISH);
+    return flush == Z_FINISH ? finish_done : block_done;
+}
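
[Editor's note: the 5-byte header mentioned above is the RFC 1951 stored-block prefix: the block-type bits padded to a byte boundary, then LEN and its one's complement NLEN, least-significant byte first. A standalone sketch follows; toy_stored_block() is hypothetical, whereas the kernel code routes this framing through zlib_tr_stored_block().]

#include <stdio.h>
#include <string.h>

/* Emit one stored block: the 3 block-type bits padded to a byte boundary,
 * then LEN and NLEN (its one's complement) least-significant byte first,
 * then the raw bytes.  Returns the number of output bytes. */
static size_t toy_stored_block(unsigned char *out, const unsigned char *in,
			       unsigned len, int last)
{
	size_t n = 0;

	out[n++] = last ? 1 : 0;               /* BFINAL bit, BTYPE == 00 */
	out[n++] = len & 0xff;
	out[n++] = (len >> 8) & 0xff;
	out[n++] = (len ^ 0xffff) & 0xff;      /* NLEN */
	out[n++] = ((len ^ 0xffff) >> 8) & 0xff;
	memcpy(out + n, in, len);
	return n + len;
}

int main(void)
{
	unsigned char buf[64];
	size_t n = toy_stored_block(buf, (const unsigned char *)"hello", 5, 1);

	printf("%zu bytes, header %02x %02x %02x %02x %02x\n", n,
	       buf[0], buf[1], buf[2], buf[3], buf[4]);
	/* 10 bytes, header 01 05 00 fa ff */
	return 0;
}
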
+
+/* ===========================================================================
+ * Compress as much as possible from the input stream, return the current
+ * block state.
+ * This function does not perform lazy evaluation of matches and inserts
+ * new strings in the dictionary only for unmatched strings or for short
+ * matches. It is used only for the fast compression options.
+ */
+static block_state deflate_fast(
+	deflate_state *s,
+	int flush
+)
+{
+    IPos hash_head = NIL; /* head of the hash chain */
+    int bflush;           /* set if current block must be flushed */
+
+    for (;;) {
+        /* Make sure that we always have enough lookahead, except
+         * at the end of the input file. We need MAX_MATCH bytes
+         * for the next match, plus MIN_MATCH bytes to insert the
+         * string following the next match.
+         */
+        if (s->lookahead < MIN_LOOKAHEAD) {
+            fill_window(s);
+            if (s->lookahead < MIN_LOOKAHEAD && flush == Z_NO_FLUSH) {
+	        return need_more;
+	    }
+            if (s->lookahead == 0) break; /* flush the current block */
+        }
+
+        /* Insert the string window[strstart .. strstart+2] in the
+         * dictionary, and set hash_head to the head of the hash chain:
+         */
+        if (s->lookahead >= MIN_MATCH) {
+            INSERT_STRING(s, s->strstart, hash_head);
+        }
+
+        /* Find the longest match, discarding those <= prev_length.
+         * At this point we have always match_length < MIN_MATCH
+         */
+        if (hash_head != NIL && s->strstart - hash_head <= MAX_DIST(s)) {
+            /* To simplify the code, we prevent matches with the string
+             * of window index 0 (in particular we have to avoid a match
+             * of the string with itself at the start of the input file).
+             */
+            if (s->strategy != Z_HUFFMAN_ONLY) {
+                s->match_length = longest_match (s, hash_head);
+            }
+            /* longest_match() sets match_start */
+        }
+        if (s->match_length >= MIN_MATCH) {
+            check_match(s, s->strstart, s->match_start, s->match_length);
+
+            bflush = zlib_tr_tally(s, s->strstart - s->match_start,
+                               s->match_length - MIN_MATCH);
+
+            s->lookahead -= s->match_length;
+
+            /* Insert new strings in the hash table only if the match length
+             * is not too large. This saves time but degrades compression.
+             */
+            if (s->match_length <= s->max_insert_length &&
+                s->lookahead >= MIN_MATCH) {
+                s->match_length--; /* string at strstart already in hash table */
+                do {
+                    s->strstart++;
+                    INSERT_STRING(s, s->strstart, hash_head);
+                    /* strstart never exceeds WSIZE-MAX_MATCH, so there are
+                     * always MIN_MATCH bytes ahead.
+                     */
+                } while (--s->match_length != 0);
+                s->strstart++; 
+            } else {
+                s->strstart += s->match_length;
+                s->match_length = 0;
+                s->ins_h = s->window[s->strstart];
+                UPDATE_HASH(s, s->ins_h, s->window[s->strstart+1]);
+#if MIN_MATCH != 3
+                Call UPDATE_HASH() MIN_MATCH-3 more times
+#endif
+                /* If lookahead < MIN_MATCH, ins_h is garbage, but it does not
+                 * matter since it will be recomputed at next deflate call.
+                 */
+            }
+        } else {
+            /* No match, output a literal byte */
+            Tracevv((stderr,"%c", s->window[s->strstart]));
+            bflush = zlib_tr_tally (s, 0, s->window[s->strstart]);
+            s->lookahead--;
+            s->strstart++; 
+        }
+        if (bflush) FLUSH_BLOCK(s, 0);
+    }
+    FLUSH_BLOCK(s, flush == Z_FINISH);
+    return flush == Z_FINISH ? finish_done : block_done;
+}
+
+/* ===========================================================================
+ * Same as above, but achieves better compression. We use a lazy
+ * evaluation for matches: a match is finally adopted only if there is
+ * no better match at the next window position.
+ */
+static block_state deflate_slow(
+	deflate_state *s,
+	int flush
+)
+{
+    IPos hash_head = NIL;    /* head of hash chain */
+    int bflush;              /* set if current block must be flushed */
+
+    /* Process the input block. */
+    for (;;) {
+        /* Make sure that we always have enough lookahead, except
+         * at the end of the input file. We need MAX_MATCH bytes
+         * for the next match, plus MIN_MATCH bytes to insert the
+         * string following the next match.
+         */
+        if (s->lookahead < MIN_LOOKAHEAD) {
+            fill_window(s);
+            if (s->lookahead < MIN_LOOKAHEAD && flush == Z_NO_FLUSH) {
+	        return need_more;
+	    }
+            if (s->lookahead == 0) break; /* flush the current block */
+        }
+
+        /* Insert the string window[strstart .. strstart+2] in the
+         * dictionary, and set hash_head to the head of the hash chain:
+         */
+        if (s->lookahead >= MIN_MATCH) {
+            INSERT_STRING(s, s->strstart, hash_head);
+        }
+
+        /* Find the longest match, discarding those <= prev_length.
+         */
+        s->prev_length = s->match_length, s->prev_match = s->match_start;
+        s->match_length = MIN_MATCH-1;
+
+        if (hash_head != NIL && s->prev_length < s->max_lazy_match &&
+            s->strstart - hash_head <= MAX_DIST(s)) {
+            /* To simplify the code, we prevent matches with the string
+             * of window index 0 (in particular we have to avoid a match
+             * of the string with itself at the start of the input file).
+             */
+            if (s->strategy != Z_HUFFMAN_ONLY) {
+                s->match_length = longest_match (s, hash_head);
+            }
+            /* longest_match() sets match_start */
+
+            if (s->match_length <= 5 && (s->strategy == Z_FILTERED ||
+                 (s->match_length == MIN_MATCH &&
+                  s->strstart - s->match_start > TOO_FAR))) {
+
+                /* If prev_match is also MIN_MATCH, match_start is garbage
+                 * but we will ignore the current match anyway.
+                 */
+                s->match_length = MIN_MATCH-1;
+            }
+        }
+        /* If there was a match at the previous step and the current
+         * match is not better, output the previous match:
+         */
+        if (s->prev_length >= MIN_MATCH && s->match_length <= s->prev_length) {
+            uInt max_insert = s->strstart + s->lookahead - MIN_MATCH;
+            /* Do not insert strings in hash table beyond this. */
+
+            check_match(s, s->strstart-1, s->prev_match, s->prev_length);
+
+            bflush = zlib_tr_tally(s, s->strstart -1 - s->prev_match,
+				   s->prev_length - MIN_MATCH);
+
+            /* Insert in hash table all strings up to the end of the match.
+             * strstart-1 and strstart are already inserted. If there is not
+             * enough lookahead, the last two strings are not inserted in
+             * the hash table.
+             */
+            s->lookahead -= s->prev_length-1;
+            s->prev_length -= 2;
+            do {
+                if (++s->strstart <= max_insert) {
+                    INSERT_STRING(s, s->strstart, hash_head);
+                }
+            } while (--s->prev_length != 0);
+            s->match_available = 0;
+            s->match_length = MIN_MATCH-1;
+            s->strstart++;
+
+            if (bflush) FLUSH_BLOCK(s, 0);
+
+        } else if (s->match_available) {
+            /* If there was no match at the previous position, output a
+             * single literal. If there was a match but the current match
+             * is longer, truncate the previous match to a single literal.
+             */
+            Tracevv((stderr,"%c", s->window[s->strstart-1]));
+            if (zlib_tr_tally (s, 0, s->window[s->strstart-1])) {
+                FLUSH_BLOCK_ONLY(s, 0);
+            }
+            s->strstart++;
+            s->lookahead--;
+            if (s->strm->avail_out == 0) return need_more;
+        } else {
+            /* There is no previous match to compare with, wait for
+             * the next step to decide.
+             */
+            s->match_available = 1;
+            s->strstart++;
+            s->lookahead--;
+        }
+    }
+    Assert (flush != Z_NO_FLUSH, "no flush?");
+    if (s->match_available) {
+        Tracevv((stderr,"%c", s->window[s->strstart-1]));
+        zlib_tr_tally (s, 0, s->window[s->strstart-1]);
+        s->match_available = 0;
+    }
+    FLUSH_BLOCK(s, flush == Z_FINISH);
+    return flush == Z_FINISH ? finish_done : block_done;
+}
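
[Editor's note: the lazy-evaluation rule above is easier to see on a toy input. The standalone sketch below (brute-force matching, hypothetical names) compares the match at one position with the match one byte later, which is the comparison deflate_slow() makes between prev_length and match_length.]

#include <stdio.h>
#include <string.h>

/* Brute-force longest match for t[pos..] against all earlier positions. */
static size_t toy_match(const char *t, size_t len, size_t pos, size_t *at)
{
	size_t best = 0, i, n;

	for (i = 0; i < pos; i++) {
		for (n = 0; pos + n < len && t[i + n] == t[pos + n]; n++)
			;
		if (n > best) {
			best = n;
			*at = i;
		}
	}
	return best;
}

int main(void)
{
	/* At pos 9 the best match is "abc" (length 3, against pos 0); one
	 * byte later, at pos 10, "bcde" (length 4, against pos 4) is
	 * available, so a lazy matcher emits the literal 'a' and takes the
	 * longer match instead of committing to the 3-byte one. */
	const char *t = "abcxbcdeyabcde";
	size_t at_cur = 0, at_next = 0;
	size_t cur  = toy_match(t, strlen(t), 9,  &at_cur);
	size_t next = toy_match(t, strlen(t), 10, &at_next);

	printf("len at 9: %zu, len at 10: %zu -> %s\n", cur, next,
	       next > cur ? "defer (lazy)" : "emit the first match");
	return 0;
}
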
+
+int zlib_deflate_workspacesize(void)
+{
+    return sizeof(deflate_workspace);
+}
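
[Editor's note: for context, a minimal sketch of how an in-kernel caller might drive these entry points, assuming the zlib_deflateInit() wrapper from <linux/zlib.h> and vmalloc() for the workspace; compress_buf() is a hypothetical helper with error handling trimmed, not code from this patch.]

#include <linux/zlib.h>
#include <linux/vmalloc.h>

/* Hypothetical one-shot helper: compress slen bytes from src into dst.
 * Returns the compressed length, or -1 on any failure. */
static int compress_buf(void *dst, unsigned int dlen,
			const void *src, unsigned int slen)
{
	z_stream strm;
	int ret = -1;

	strm.workspace = vmalloc(zlib_deflate_workspacesize());
	if (!strm.workspace)
		return -1;

	if (zlib_deflateInit(&strm, Z_DEFAULT_COMPRESSION) != Z_OK)
		goto out;

	strm.next_in   = (Byte *)src;
	strm.avail_in  = slen;
	strm.next_out  = dst;
	strm.avail_out = dlen;

	/* One Z_FINISH call suffices when the output buffer is big enough. */
	if (zlib_deflate(&strm, Z_FINISH) == Z_STREAM_END)
		ret = strm.total_out;
	zlib_deflateEnd(&strm);
out:
	vfree(strm.workspace);
	return ret;
}

Callers that cannot size the output buffer up front instead loop on zlib_deflate(), refilling next_out/avail_out whenever avail_out reaches zero.
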
diff --git a/lib/zlib_deflate/deflate_syms.c b/lib/zlib_deflate/deflate_syms.c
new file mode 100644
index 0000000..5985b28
--- /dev/null
+++ b/lib/zlib_deflate/deflate_syms.c
@@ -0,0 +1,21 @@
+/*
+ * linux/lib/zlib_deflate/deflate_syms.c
+ *
+ * Exported symbols for the deflate functionality.
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/init.h>
+
+#include <linux/zlib.h>
+
+EXPORT_SYMBOL(zlib_deflate_workspacesize);
+EXPORT_SYMBOL(zlib_deflate);
+EXPORT_SYMBOL(zlib_deflateInit_);
+EXPORT_SYMBOL(zlib_deflateInit2_);
+EXPORT_SYMBOL(zlib_deflateEnd);
+EXPORT_SYMBOL(zlib_deflateReset);
+EXPORT_SYMBOL(zlib_deflateCopy);
+EXPORT_SYMBOL(zlib_deflateParams);
+MODULE_LICENSE("GPL");
diff --git a/lib/zlib_deflate/deftree.c b/lib/zlib_deflate/deftree.c
new file mode 100644
index 0000000..ddf3482
--- /dev/null
+++ b/lib/zlib_deflate/deftree.c
@@ -0,0 +1,1113 @@
+/* +++ trees.c */
+/* trees.c -- output deflated data using Huffman coding
+ * Copyright (C) 1995-1996 Jean-loup Gailly
+ * For conditions of distribution and use, see copyright notice in zlib.h 
+ */
+
+/*
+ *  ALGORITHM
+ *
+ *      The "deflation" process uses several Huffman trees. The more
+ *      common source values are represented by shorter bit sequences.
+ *
+ *      Each code tree is stored in a compressed form which is itself
+ * a Huffman encoding of the lengths of all the code strings (in
+ * ascending order by source values).  The actual code strings are
+ * reconstructed from the lengths in the inflate process, as described
+ * in the deflate specification.
+ *
+ *  REFERENCES
+ *
+ *      Deutsch, L.P.,"'Deflate' Compressed Data Format Specification".
+ *      Available in ftp.uu.net:/pub/archiving/zip/doc/deflate-1.1.doc
+ *
+ *      Storer, James A.
+ *          Data Compression:  Methods and Theory, pp. 49-50.
+ *          Computer Science Press, 1988.  ISBN 0-7167-8156-5.
+ *
+ *      Sedgewick, R.
+ *          Algorithms, p290.
+ *          Addison-Wesley, 1983. ISBN 0-201-06672-6.
+ */
+
+/* From: trees.c,v 1.11 1996/07/24 13:41:06 me Exp $ */
+
+/* #include "deflate.h" */
+
+#include <linux/zutil.h>
+#include "defutil.h"
+
+#ifdef DEBUG_ZLIB
+#  include <ctype.h>
+#endif
+
+/* ===========================================================================
+ * Constants
+ */
+
+#define MAX_BL_BITS 7
+/* Bit length codes must not exceed MAX_BL_BITS bits */
+
+#define END_BLOCK 256
+/* end of block literal code */
+
+#define REP_3_6      16
+/* repeat previous bit length 3-6 times (2 bits of repeat count) */
+
+#define REPZ_3_10    17
+/* repeat a zero length 3-10 times  (3 bits of repeat count) */
+
+#define REPZ_11_138  18
+/* repeat a zero length 11-138 times  (7 bits of repeat count) */
+
+static const int extra_lbits[LENGTH_CODES] /* extra bits for each length code */
+   = {0,0,0,0,0,0,0,0,1,1,1,1,2,2,2,2,3,3,3,3,4,4,4,4,5,5,5,5,0};
+
+static const int extra_dbits[D_CODES] /* extra bits for each distance code */
+   = {0,0,0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7,8,8,9,9,10,10,11,11,12,12,13,13};
+
+static const int extra_blbits[BL_CODES]/* extra bits for each bit length code */
+   = {0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,2,3,7};
+
+static const uch bl_order[BL_CODES]
+   = {16,17,18,0,8,7,9,6,10,5,11,4,12,3,13,2,14,1,15};
+/* The lengths of the bit length codes are sent in order of decreasing
+ * probability, to avoid transmitting the lengths for unused bit length codes.
+ */
+
+#define Buf_size (8 * 2*sizeof(char))
+/* Number of bits used within bi_buf. (bi_buf might be implemented on
+ * more than 16 bits on some systems.)
+ */
+
+/* ===========================================================================
+ * Local data. These are initialized only once.
+ */
+
+static ct_data static_ltree[L_CODES+2];
+/* The static literal tree. Since the bit lengths are imposed, there is no
+ * need for the L_CODES extra codes used during heap construction. However
+ * The codes 286 and 287 are needed to build a canonical tree (see zlib_tr_init
+ * below).
+ */
+
+static ct_data static_dtree[D_CODES];
+/* The static distance tree. (Actually a trivial tree since all codes use
+ * 5 bits.)
+ */
+
+static uch dist_code[512];
+/* distance codes. The first 256 values correspond to the distances
+ * 3 .. 258, the last 256 values correspond to the top 8 bits of
+ * the 15 bit distances.
+ */
+
+static uch length_code[MAX_MATCH-MIN_MATCH+1];
+/* length code for each normalized match length (0 == MIN_MATCH) */
+
+static int base_length[LENGTH_CODES];
+/* First normalized length for each code (0 = MIN_MATCH) */
+
+static int base_dist[D_CODES];
+/* First normalized distance for each code (0 = distance of 1) */
+
+struct static_tree_desc_s {
+    const ct_data *static_tree;  /* static tree or NULL */
+    const int *extra_bits;       /* extra bits for each code or NULL */
+    int     extra_base;          /* base index for extra_bits */
+    int     elems;               /* max number of elements in the tree */
+    int     max_length;          /* max bit length for the codes */
+};
+
+static static_tree_desc  static_l_desc =
+{static_ltree, extra_lbits, LITERALS+1, L_CODES, MAX_BITS};
+
+static static_tree_desc  static_d_desc =
+{static_dtree, extra_dbits, 0,          D_CODES, MAX_BITS};
+
+static static_tree_desc  static_bl_desc =
+{(const ct_data *)0, extra_blbits, 0,   BL_CODES, MAX_BL_BITS};
+
+/* ===========================================================================
+ * Local (static) routines in this file.
+ */
+
+static void tr_static_init (void);
+static void init_block     (deflate_state *s);
+static void pqdownheap     (deflate_state *s, ct_data *tree, int k);
+static void gen_bitlen     (deflate_state *s, tree_desc *desc);
+static void gen_codes      (ct_data *tree, int max_code, ush *bl_count);
+static void build_tree     (deflate_state *s, tree_desc *desc);
+static void scan_tree      (deflate_state *s, ct_data *tree, int max_code);
+static void send_tree      (deflate_state *s, ct_data *tree, int max_code);
+static int  build_bl_tree  (deflate_state *s);
+static void send_all_trees (deflate_state *s, int lcodes, int dcodes,
+                           int blcodes);
+static void compress_block (deflate_state *s, ct_data *ltree,
+                           ct_data *dtree);
+static void set_data_type  (deflate_state *s);
+static unsigned bi_reverse (unsigned value, int length);
+static void bi_windup      (deflate_state *s);
+static void bi_flush       (deflate_state *s);
+static void copy_block     (deflate_state *s, char *buf, unsigned len,
+                           int header);
+
+#ifndef DEBUG_ZLIB
+#  define send_code(s, c, tree) send_bits(s, tree[c].Code, tree[c].Len)
+   /* Send a code of the given tree. c and tree must not have side effects */
+
+#else /* DEBUG_ZLIB */
+#  define send_code(s, c, tree) \
+     { if (z_verbose>2) fprintf(stderr,"\ncd %3d ",(c)); \
+       send_bits(s, tree[c].Code, tree[c].Len); }
+#endif
+
+#define d_code(dist) \
+   ((dist) < 256 ? dist_code[dist] : dist_code[256+((dist)>>7)])
+/* Mapping from a distance to a distance code. dist is the distance - 1 and
+ * must not have side effects. dist_code[256] and dist_code[257] are never
+ * used.
+ */
+
+/* ===========================================================================
+ * Send a value on a given number of bits.
+ * IN assertion: length <= 16 and value fits in length bits.
+ */
+#ifdef DEBUG_ZLIB
+static void send_bits      (deflate_state *s, int value, int length);
+
+static void send_bits(
+	deflate_state *s,
+	int value,  /* value to send */
+	int length  /* number of bits */
+)
+{
+    Tracevv((stderr," l %2d v %4x ", length, value));
+    Assert(length > 0 && length <= 15, "invalid length");
+    s->bits_sent += (ulg)length;
+
+    /* If not enough room in bi_buf, use (valid) bits from bi_buf and
+     * (16 - bi_valid) bits from value, leaving (width - (16-bi_valid))
+     * unused bits in value.
+     */
+    if (s->bi_valid > (int)Buf_size - length) {
+        s->bi_buf |= (value << s->bi_valid);
+        put_short(s, s->bi_buf);
+        s->bi_buf = (ush)value >> (Buf_size - s->bi_valid);
+        s->bi_valid += length - Buf_size;
+    } else {
+        s->bi_buf |= value << s->bi_valid;
+        s->bi_valid += length;
+    }
+}
+#else /* !DEBUG_ZLIB */
+
+#define send_bits(s, value, length) \
+{ int len = length;\
+  if (s->bi_valid > (int)Buf_size - len) {\
+    int val = value;\
+    s->bi_buf |= (val << s->bi_valid);\
+    put_short(s, s->bi_buf);\
+    s->bi_buf = (ush)val >> (Buf_size - s->bi_valid);\
+    s->bi_valid += len - Buf_size;\
+  } else {\
+    s->bi_buf |= (value) << s->bi_valid;\
+    s->bi_valid += len;\
+  }\
+}
+#endif /* DEBUG_ZLIB */
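
[Editor's note: a standalone model of the bit buffer this macro maintains (plain C, hypothetical names): values are packed least-significant bit first into a 16-bit accumulator and flushed low byte first, matching put_short(). Huffman codes are bit-reversed elsewhere before being handed to send_bits, so this sketch only shows the packing.]

#include <stdio.h>

static unsigned short bi_buf;   /* bit accumulator, filled from bit 0 up */
static int bi_valid;            /* number of valid bits in bi_buf        */
static unsigned char out[32];
static int out_len;

/* Append "length" bits of "value", least-significant bit first, flushing
 * full 16-bit chunks low byte first -- the same order the macro uses. */
static void toy_send_bits(int value, int length)
{
	if (bi_valid > 16 - length) {
		bi_buf |= (unsigned short)(value << bi_valid);
		out[out_len++] = bi_buf & 0xff;
		out[out_len++] = bi_buf >> 8;
		bi_buf = (unsigned short)((unsigned)value >> (16 - bi_valid));
		bi_valid += length - 16;
	} else {
		bi_buf |= (unsigned short)(value << bi_valid);
		bi_valid += length;
	}
}

int main(void)
{
	int i;

	toy_send_bits(1, 1);      /* e.g. a BFINAL bit        */
	toy_send_bits(1, 2);      /* a 2-bit BTYPE field      */
	toy_send_bits(0x0c, 8);   /* an arbitrary 8-bit value */
	toy_send_bits(0x155, 9);  /* and a 9-bit one          */

	while (bi_valid > 0) {    /* drain what is left in the accumulator */
		out[out_len++] = bi_buf & 0xff;
		bi_buf >>= 8;
		bi_valid -= 8;
	}
	for (i = 0; i < out_len; i++)
		printf("%02x ", out[i]);
	printf("\n");             /* prints: 63 a8 0a */
	return 0;
}
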
+
+/* ===========================================================================
+ * Initialize the various 'constant' tables. In a multi-threaded environment,
+ * this function may be called by two threads concurrently, but this is
+ * harmless since both invocations do exactly the same thing.
+ */
+static void tr_static_init(void)
+{
+    static int static_init_done;
+    int n;        /* iterates over tree elements */
+    int bits;     /* bit counter */
+    int length;   /* length value */
+    int code;     /* code value */
+    int dist;     /* distance index */
+    ush bl_count[MAX_BITS+1];
+    /* number of codes at each bit length for an optimal tree */
+
+    if (static_init_done) return;
+
+    /* Initialize the mapping length (0..255) -> length code (0..28) */
+    length = 0;
+    for (code = 0; code < LENGTH_CODES-1; code++) {
+        base_length[code] = length;
+        for (n = 0; n < (1<<extra_lbits[code]); n++) {
+            length_code[length++] = (uch)code;
+        }
+    }
+    Assert (length == 256, "tr_static_init: length != 256");
+    /* Note that the length 255 (match length 258) can be represented
+     * in two different ways: code 284 + 5 bits or code 285, so we
+     * overwrite length_code[255] to use the best encoding:
+     */
+    length_code[length-1] = (uch)code;
+
+    /* Initialize the mapping dist (0..32K) -> dist code (0..29) */
+    dist = 0;
+    for (code = 0 ; code < 16; code++) {
+        base_dist[code] = dist;
+        for (n = 0; n < (1<<extra_dbits[code]); n++) {
+            dist_code[dist++] = (uch)code;
+        }
+    }
+    Assert (dist == 256, "tr_static_init: dist != 256");
+    dist >>= 7; /* from now on, all distances are divided by 128 */
+    for ( ; code < D_CODES; code++) {
+        base_dist[code] = dist << 7;
+        for (n = 0; n < (1<<(extra_dbits[code]-7)); n++) {
+            dist_code[256 + dist++] = (uch)code;
+        }
+    }
+    Assert (dist == 256, "tr_static_init: 256+dist != 512");
+
+    /* Construct the codes of the static literal tree */
+    for (bits = 0; bits <= MAX_BITS; bits++) bl_count[bits] = 0;
+    n = 0;
+    while (n <= 143) static_ltree[n++].Len = 8, bl_count[8]++;
+    while (n <= 255) static_ltree[n++].Len = 9, bl_count[9]++;
+    while (n <= 279) static_ltree[n++].Len = 7, bl_count[7]++;
+    while (n <= 287) static_ltree[n++].Len = 8, bl_count[8]++;
+    /* Codes 286 and 287 do not exist, but we must include them in the
+     * tree construction to get a canonical Huffman tree (longest code
+     * all ones)
+     */
+    gen_codes((ct_data *)static_ltree, L_CODES+1, bl_count);
+
+    /* The static distance tree is trivial: */
+    for (n = 0; n < D_CODES; n++) {
+        static_dtree[n].Len = 5;
+        static_dtree[n].Code = bi_reverse((unsigned)n, 5);
+    }
+    static_init_done = 1;
+}
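
[Editor's note: gen_codes(), defined further down in this file, assigns canonical codes from bit lengths such as those set up above. Below is a standalone sketch of the same next_code[] construction, using the toy alphabet from RFC 1951 section 3.2.2 rather than data from this patch.]

#include <stdio.h>

#define TOY_MAX_BITS 4

int main(void)
{
	/* Code lengths for a toy alphabet A..H, the example from RFC 1951. */
	static const int len[8] = { 3, 3, 3, 3, 3, 2, 4, 4 };
	int bl_count[TOY_MAX_BITS + 1] = { 0 };
	unsigned next_code[TOY_MAX_BITS + 1] = { 0 };
	unsigned code = 0;
	int n, bits;

	for (n = 0; n < 8; n++)
		bl_count[len[n]]++;

	/* Smallest code for each length: shorter codes are numerically
	 * smaller, and codes of equal length are consecutive -- the
	 * "canonical" property that lets a decoder rebuild the tree from
	 * the lengths alone. */
	for (bits = 1; bits <= TOY_MAX_BITS; bits++) {
		code = (code + bl_count[bits - 1]) << 1;
		next_code[bits] = code;
	}

	/* Prints A..E -> 010..110, F -> 00, G -> 1110, H -> 1111 */
	for (n = 0; n < 8; n++) {
		unsigned c = next_code[len[n]]++;

		printf("%c (%d bits): ", 'A' + n, len[n]);
		for (bits = len[n] - 1; bits >= 0; bits--)
			putchar('0' + ((c >> bits) & 1));
		putchar('\n');
	}
	return 0;
}

The codes are then emitted bit-reversed, since the deflate bit stream is packed least-significant bit first (see bi_reverse() above).
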
+
+/* ===========================================================================
+ * Initialize the tree data structures for a new zlib stream.
+ */
+void zlib_tr_init(
+	deflate_state *s
+)
+{
+    tr_static_init();
+
+    s->compressed_len = 0L;
+
+    s->l_desc.dyn_tree = s->dyn_ltree;
+    s->l_desc.stat_desc = &static_l_desc;
+
+    s->d_desc.dyn_tree = s->dyn_dtree;
+    s->d_desc.stat_desc = &static_d_desc;
+
+    s->bl_desc.dyn_tree = s->bl_tree;
+    s->bl_desc.stat_desc = &static_bl_desc;
+
+    s->bi_buf = 0;
+    s->bi_valid = 0;
+    s->last_eob_len = 8; /* enough lookahead for inflate */
+#ifdef DEBUG_ZLIB
+    s->bits_sent = 0L;
+#endif
+
+    /* Initialize the first block of the first file: */
+    init_block(s);
+}
+
+/* ===========================================================================
+ * Initialize a new block.
+ */
+static void init_block(
+	deflate_state *s
+)
+{
+    int n; /* iterates over tree elements */
+
+    /* Initialize the trees. */
+    for (n = 0; n < L_CODES;  n++) s->dyn_ltree[n].Freq = 0;
+    for (n = 0; n < D_CODES;  n++) s->dyn_dtree[n].Freq = 0;
+    for (n = 0; n < BL_CODES; n++) s->bl_tree[n].Freq = 0;
+
+    s->dyn_ltree[END_BLOCK].Freq = 1;
+    s->opt_len = s->static_len = 0L;
+    s->last_lit = s->matches = 0;
+}
+
+#define SMALLEST 1
+/* Index within the heap array of least frequent node in the Huffman tree */
+
+
+/* ===========================================================================
+ * Remove the smallest element from the heap and recreate the heap with
+ * one less element. Updates heap and heap_len.
+ */
+#define pqremove(s, tree, top) \
+{\
+    top = s->heap[SMALLEST]; \
+    s->heap[SMALLEST] = s->heap[s->heap_len--]; \
+    pqdownheap(s, tree, SMALLEST); \
+}
+
+/* ===========================================================================
+ * Compares two subtrees, using the tree depth as tie breaker when
+ * the subtrees have equal frequency. This minimizes the worst case length.
+ */
+#define smaller(tree, n, m, depth) \
+   (tree[n].Freq < tree[m].Freq || \
+   (tree[n].Freq == tree[m].Freq && depth[n] <= depth[m]))
+
+/* ===========================================================================
+ * Restore the heap property by moving down the tree starting at node k,
+ * exchanging a node with the smallest of its two sons if necessary, stopping
+ * when the heap property is re-established (each father smaller than its
+ * two sons).
+ */
+static void pqdownheap(
+	deflate_state *s,
+	ct_data *tree,  /* the tree to restore */
+	int k		/* node to move down */
+)
+{
+    int v = s->heap[k];
+    int j = k << 1;  /* left son of k */
+    while (j <= s->heap_len) {
+        /* Set j to the smallest of the two sons: */
+        if (j < s->heap_len &&
+            smaller(tree, s->heap[j+1], s->heap[j], s->depth)) {
+            j++;
+        }
+        /* Exit if v is smaller than both sons */
+        if (smaller(tree, v, s->heap[j], s->depth)) break;
+
+        /* Exchange v with the smallest son */
+        s->heap[k] = s->heap[j];  k = j;
+
+        /* And continue down the tree, setting j to the left son of k */
+        j <<= 1;
+    }
+    s->heap[k] = v;
+}
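+
+/* Worked example (editorial sketch, not part of the original sources):
+ * suppose the heap holds three tree nodes whose frequencies are 9, 4 and 5,
+ * with the 9 sitting at heap[SMALLEST] after a pqremove().  pqdownheap()
+ * compares the two sons with smaller() (frequency, depth as tie breaker),
+ * picks the node of frequency 4, swaps it up and stops, so the heap order
+ * by frequency goes from [9, 4, 5] to [4, 9, 5].
+ */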
+
+/* ===========================================================================
+ * Compute the optimal bit lengths for a tree and update the total bit length
+ * for the current block.
+ * IN assertion: the fields freq and dad are set, heap[heap_max] and
+ *    above are the tree nodes sorted by increasing frequency.
+ * OUT assertions: the field len is set to the optimal bit length, the
+ *     array bl_count contains the frequencies for each bit length.
+ *     The length opt_len is updated; static_len is also updated if stree is
+ *     not null.
+ */
+static void gen_bitlen(
+	deflate_state *s,
+	tree_desc *desc    /* the tree descriptor */
+)
+{
+    ct_data *tree        = desc->dyn_tree;
+    int max_code         = desc->max_code;
+    const ct_data *stree = desc->stat_desc->static_tree;
+    const int *extra     = desc->stat_desc->extra_bits;
+    int base             = desc->stat_desc->extra_base;
+    int max_length       = desc->stat_desc->max_length;
+    int h;              /* heap index */
+    int n, m;           /* iterate over the tree elements */
+    int bits;           /* bit length */
+    int xbits;          /* extra bits */
+    ush f;              /* frequency */
+    int overflow = 0;   /* number of elements with bit length too large */
+
+    for (bits = 0; bits <= MAX_BITS; bits++) s->bl_count[bits] = 0;
+
+    /* In a first pass, compute the optimal bit lengths (which may
+     * overflow in the case of the bit length tree).
+     */
+    tree[s->heap[s->heap_max]].Len = 0; /* root of the heap */
+
+    for (h = s->heap_max+1; h < HEAP_SIZE; h++) {
+        n = s->heap[h];
+        bits = tree[tree[n].Dad].Len + 1;
+        if (bits > max_length) bits = max_length, overflow++;
+        tree[n].Len = (ush)bits;
+        /* We overwrite tree[n].Dad which is no longer needed */
+
+        if (n > max_code) continue; /* not a leaf node */
+
+        s->bl_count[bits]++;
+        xbits = 0;
+        if (n >= base) xbits = extra[n-base];
+        f = tree[n].Freq;
+        s->opt_len += (ulg)f * (bits + xbits);
+        if (stree) s->static_len += (ulg)f * (stree[n].Len + xbits);
+    }
+    if (overflow == 0) return;
+
+    Trace((stderr,"\nbit length overflow\n"));
+    /* This happens for example on obj2 and pic of the Calgary corpus */
+
+    /* Find the first bit length which could increase: */
+    do {
+        bits = max_length-1;
+        while (s->bl_count[bits] == 0) bits--;
+        s->bl_count[bits]--;      /* move one leaf down the tree */
+        s->bl_count[bits+1] += 2; /* move one overflow item as its brother */
+        s->bl_count[max_length]--;
+        /* The brother of the overflow item also moves one step up,
+         * but this does not affect bl_count[max_length]
+         */
+        overflow -= 2;
+    } while (overflow > 0);
+
+    /* Now recompute all bit lengths, scanning in increasing frequency.
+     * h is still equal to HEAP_SIZE. (It is simpler to reconstruct all
+     * lengths instead of fixing only the wrong ones. This idea is taken
+     * from 'ar' written by Haruhiko Okumura.)
+     */
+    for (bits = max_length; bits != 0; bits--) {
+        n = s->bl_count[bits];
+        while (n != 0) {
+            m = s->heap[--h];
+            if (m > max_code) continue;
+            if (tree[m].Len != (unsigned) bits) {
+                Trace((stderr,"code %d bits %d->%d\n", m, tree[m].Len, bits));
+                s->opt_len += ((long)bits - (long)tree[m].Len)
+                              *(long)tree[m].Freq;
+                tree[m].Len = (ush)bits;
+            }
+            n--;
+        }
+    }
+}
+
+/* ===========================================================================
+ * Generate the codes for a given tree and bit counts (which need not be
+ * optimal).
+ * IN assertion: the array bl_count contains the bit length statistics for
+ * the given tree and the field len is set for all tree elements.
+ * OUT assertion: the field code is set for all tree elements of non
+ *     zero code length.
+ */
+static void gen_codes(
+	ct_data *tree,             /* the tree to decorate */
+	int max_code,              /* largest code with non zero frequency */
+	ush *bl_count             /* number of codes at each bit length */
+)
+{
+    ush next_code[MAX_BITS+1]; /* next code value for each bit length */
+    ush code = 0;              /* running code value */
+    int bits;                  /* bit index */
+    int n;                     /* code index */
+
+    /* The distribution counts are first used to generate the code values
+     * without bit reversal.
+     */
+    for (bits = 1; bits <= MAX_BITS; bits++) {
+        next_code[bits] = code = (code + bl_count[bits-1]) << 1;
+    }
+    /* Check that the bit counts in bl_count are consistent. The last code
+     * must be all ones.
+     */
+    Assert (code + bl_count[MAX_BITS]-1 == (1<<MAX_BITS)-1,
+            "inconsistent bit counts");
+    Tracev((stderr,"\ngen_codes: max_code %d ", max_code));
+
+    for (n = 0;  n <= max_code; n++) {
+        int len = tree[n].Len;
+        if (len == 0) continue;
+        /* Now reverse the bits */
+        tree[n].Code = bi_reverse(next_code[len]++, len);
+
+        Tracecv(tree != static_ltree, (stderr,"\nn %3d %c l %2d c %4x (%x) ",
+             n, (isgraph(n) ? n : ' '), len, tree[n].Code, next_code[len]-1));
+    }
+}
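+
+/* Worked example (editorial sketch, not part of the original sources):
+ * with bl_count[] = {0, 1, 1, 2} (one 1-bit code, one 2-bit code, two
+ * 3-bit codes) the first loop produces next_code[1] = 0, next_code[2] = 2
+ * (binary 10) and next_code[3] = 6 (binary 110).  The codes handed out are
+ * therefore 0, 10, 110 and 111 -- a canonical Huffman code whose longest
+ * code is all ones, the property the Assert above verifies.  Each code is
+ * then bit-reversed before being stored, because the output bit stream is
+ * emitted least significant bit first.
+ */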
+
+/* ===========================================================================
+ * Construct one Huffman tree and assign the code bit strings and lengths.
+ * Update the total bit length for the current block.
+ * IN assertion: the field freq is set for all tree elements.
+ * OUT assertions: the fields len and code are set to the optimal bit length
+ *     and corresponding code. The length opt_len is updated; static_len is
+ *     also updated if stree is not null. The field max_code is set.
+ */
+static void build_tree(
+	deflate_state *s,
+	tree_desc *desc	 /* the tree descriptor */
+)
+{
+    ct_data *tree         = desc->dyn_tree;
+    const ct_data *stree  = desc->stat_desc->static_tree;
+    int elems             = desc->stat_desc->elems;
+    int n, m;          /* iterate over heap elements */
+    int max_code = -1; /* largest code with non zero frequency */
+    int node;          /* new node being created */
+
+    /* Construct the initial heap, with least frequent element in
+     * heap[SMALLEST]. The sons of heap[n] are heap[2*n] and heap[2*n+1].
+     * heap[0] is not used.
+     */
+    s->heap_len = 0, s->heap_max = HEAP_SIZE;
+
+    for (n = 0; n < elems; n++) {
+        if (tree[n].Freq != 0) {
+            s->heap[++(s->heap_len)] = max_code = n;
+            s->depth[n] = 0;
+        } else {
+            tree[n].Len = 0;
+        }
+    }
+
+    /* The pkzip format requires that at least one distance code exists,
+     * and that at least one bit should be sent even if there is only one
+     * possible code. So to avoid special checks later on we force at least
+     * two codes of non zero frequency.
+     */
+    while (s->heap_len < 2) {
+        node = s->heap[++(s->heap_len)] = (max_code < 2 ? ++max_code : 0);
+        tree[node].Freq = 1;
+        s->depth[node] = 0;
+        s->opt_len--; if (stree) s->static_len -= stree[node].Len;
+        /* node is 0 or 1 so it does not have extra bits */
+    }
+    desc->max_code = max_code;
+
+    /* The elements heap[heap_len/2+1 .. heap_len] are leaves of the tree,
+     * establish sub-heaps of increasing lengths:
+     */
+    for (n = s->heap_len/2; n >= 1; n--) pqdownheap(s, tree, n);
+
+    /* Construct the Huffman tree by repeatedly combining the least two
+     * frequent nodes.
+     */
+    node = elems;              /* next internal node of the tree */
+    do {
+        pqremove(s, tree, n);  /* n = node of least frequency */
+        m = s->heap[SMALLEST]; /* m = node of next least frequency */
+
+        s->heap[--(s->heap_max)] = n; /* keep the nodes sorted by frequency */
+        s->heap[--(s->heap_max)] = m;
+
+        /* Create a new node father of n and m */
+        tree[node].Freq = tree[n].Freq + tree[m].Freq;
+        s->depth[node] = (uch) (max(s->depth[n], s->depth[m]) + 1);
+        tree[n].Dad = tree[m].Dad = (ush)node;
+#ifdef DUMP_BL_TREE
+        if (tree == s->bl_tree) {
+            fprintf(stderr,"\nnode %d(%d), sons %d(%d) %d(%d)",
+                    node, tree[node].Freq, n, tree[n].Freq, m, tree[m].Freq);
+        }
+#endif
+        /* and insert the new node in the heap */
+        s->heap[SMALLEST] = node++;
+        pqdownheap(s, tree, SMALLEST);
+
+    } while (s->heap_len >= 2);
+
+    s->heap[--(s->heap_max)] = s->heap[SMALLEST];
+
+    /* At this point, the fields freq and dad are set. We can now
+     * generate the bit lengths.
+     */
+    gen_bitlen(s, (tree_desc *)desc);
+
+    /* The field len is now set, we can generate the bit codes */
+    gen_codes ((ct_data *)tree, max_code, s->bl_count);
+}
+
+/* ===========================================================================
+ * Scan a literal or distance tree to determine the frequencies of the codes
+ * in the bit length tree.
+ */
+static void scan_tree(
+	deflate_state *s,
+	ct_data *tree,   /* the tree to be scanned */
+	int max_code     /* and its largest code of non zero frequency */
+)
+{
+    int n;                     /* iterates over all tree elements */
+    int prevlen = -1;          /* last emitted length */
+    int curlen;                /* length of current code */
+    int nextlen = tree[0].Len; /* length of next code */
+    int count = 0;             /* repeat count of the current code */
+    int max_count = 7;         /* max repeat count */
+    int min_count = 4;         /* min repeat count */
+
+    if (nextlen == 0) max_count = 138, min_count = 3;
+    tree[max_code+1].Len = (ush)0xffff; /* guard */
+
+    for (n = 0; n <= max_code; n++) {
+        curlen = nextlen; nextlen = tree[n+1].Len;
+        if (++count < max_count && curlen == nextlen) {
+            continue;
+        } else if (count < min_count) {
+            s->bl_tree[curlen].Freq += count;
+        } else if (curlen != 0) {
+            if (curlen != prevlen) s->bl_tree[curlen].Freq++;
+            s->bl_tree[REP_3_6].Freq++;
+        } else if (count <= 10) {
+            s->bl_tree[REPZ_3_10].Freq++;
+        } else {
+            s->bl_tree[REPZ_11_138].Freq++;
+        }
+        count = 0; prevlen = curlen;
+        if (nextlen == 0) {
+            max_count = 138, min_count = 3;
+        } else if (curlen == nextlen) {
+            max_count = 6, min_count = 3;
+        } else {
+            max_count = 7, min_count = 4;
+        }
+    }
+}
+
+/* ===========================================================================
+ * Send a literal or distance tree in compressed form, using the codes in
+ * bl_tree.
+ */
+static void send_tree(
+	deflate_state *s,
+	ct_data *tree, /* the tree to be scanned */
+	int max_code   /* and its largest code of non zero frequency */
+)
+{
+    int n;                     /* iterates over all tree elements */
+    int prevlen = -1;          /* last emitted length */
+    int curlen;                /* length of current code */
+    int nextlen = tree[0].Len; /* length of next code */
+    int count = 0;             /* repeat count of the current code */
+    int max_count = 7;         /* max repeat count */
+    int min_count = 4;         /* min repeat count */
+
+    /* tree[max_code+1].Len = -1; */  /* guard already set */
+    if (nextlen == 0) max_count = 138, min_count = 3;
+
+    for (n = 0; n <= max_code; n++) {
+        curlen = nextlen; nextlen = tree[n+1].Len;
+        if (++count < max_count && curlen == nextlen) {
+            continue;
+        } else if (count < min_count) {
+            do { send_code(s, curlen, s->bl_tree); } while (--count != 0);
+
+        } else if (curlen != 0) {
+            if (curlen != prevlen) {
+                send_code(s, curlen, s->bl_tree); count--;
+            }
+            Assert(count >= 3 && count <= 6, " 3_6?");
+            send_code(s, REP_3_6, s->bl_tree); send_bits(s, count-3, 2);
+
+        } else if (count <= 10) {
+            send_code(s, REPZ_3_10, s->bl_tree); send_bits(s, count-3, 3);
+
+        } else {
+            send_code(s, REPZ_11_138, s->bl_tree); send_bits(s, count-11, 7);
+        }
+        count = 0; prevlen = curlen;
+        if (nextlen == 0) {
+            max_count = 138, min_count = 3;
+        } else if (curlen == nextlen) {
+            max_count = 6, min_count = 3;
+        } else {
+            max_count = 7, min_count = 4;
+        }
+    }
+}
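+
+/* Worked example (editorial sketch, not part of the original sources):
+ * a run of six codes that all have length 5, preceded by a different
+ * length, is emitted as the length code 5 once, followed by REP_3_6
+ * carrying a 2-bit count of 2, i.e. "repeat the previous length 3+2 = 5
+ * more times".  A run of twelve zero lengths followed by a non-zero one
+ * is emitted as a single REPZ_11_138 code with a 7-bit count of 12-11 = 1.
+ */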
+
+/* ===========================================================================
+ * Construct the Huffman tree for the bit lengths and return the index in
+ * bl_order of the last bit length code to send.
+ */
+static int build_bl_tree(
+	deflate_state *s
+)
+{
+    int max_blindex;  /* index of last bit length code of non zero freq */
+
+    /* Determine the bit length frequencies for literal and distance trees */
+    scan_tree(s, (ct_data *)s->dyn_ltree, s->l_desc.max_code);
+    scan_tree(s, (ct_data *)s->dyn_dtree, s->d_desc.max_code);
+
+    /* Build the bit length tree: */
+    build_tree(s, (tree_desc *)(&(s->bl_desc)));
+    /* opt_len now includes the length of the tree representations, except
+     * the lengths of the bit lengths codes and the 5+5+4 bits for the counts.
+     */
+
+    /* Determine the number of bit length codes to send. The pkzip format
+     * requires that at least 4 bit length codes be sent. (appnote.txt says
+     * 3 but the actual value used is 4.)
+     */
+    for (max_blindex = BL_CODES-1; max_blindex >= 3; max_blindex--) {
+        if (s->bl_tree[bl_order[max_blindex]].Len != 0) break;
+    }
+    /* Update opt_len to include the bit length tree and counts */
+    s->opt_len += 3*(max_blindex+1) + 5+5+4;
+    Tracev((stderr, "\ndyn trees: dyn %ld, stat %ld",
+            s->opt_len, s->static_len));
+
+    return max_blindex;
+}
+
+/* ===========================================================================
+ * Send the header for a block using dynamic Huffman trees: the counts, the
+ * lengths of the bit length codes, the literal tree and the distance tree.
+ * IN assertion: lcodes >= 257, dcodes >= 1, blcodes >= 4.
+ */
+static void send_all_trees(
+	deflate_state *s,
+	int lcodes,  /* number of codes for each tree */
+	int lcodes,  /* number of literal/length codes */
+	int dcodes,  /* number of distance codes */
+	int blcodes  /* number of bit length codes */
+{
+    int rank;                    /* index in bl_order */
+
+    Assert (lcodes >= 257 && dcodes >= 1 && blcodes >= 4, "not enough codes");
+    Assert (lcodes <= L_CODES && dcodes <= D_CODES && blcodes <= BL_CODES,
+            "too many codes");
+    Tracev((stderr, "\nbl counts: "));
+    send_bits(s, lcodes-257, 5); /* not +255 as stated in appnote.txt */
+    send_bits(s, dcodes-1,   5);
+    send_bits(s, blcodes-4,  4); /* not -3 as stated in appnote.txt */
+    for (rank = 0; rank < blcodes; rank++) {
+        Tracev((stderr, "\nbl code %2d ", bl_order[rank]));
+        send_bits(s, s->bl_tree[bl_order[rank]].Len, 3);
+    }
+    Tracev((stderr, "\nbl tree: sent %ld", s->bits_sent));
+
+    send_tree(s, (ct_data *)s->dyn_ltree, lcodes-1); /* literal tree */
+    Tracev((stderr, "\nlit tree: sent %ld", s->bits_sent));
+
+    send_tree(s, (ct_data *)s->dyn_dtree, dcodes-1); /* distance tree */
+    Tracev((stderr, "\ndist tree: sent %ld", s->bits_sent));
+}
+
+/* ===========================================================================
+ * Send a stored block
+ */
+void zlib_tr_stored_block(
+	deflate_state *s,
+	char *buf,        /* input block */
+	ulg stored_len,   /* length of input block */
+	int eof           /* true if this is the last block for a file */
+)
+{
+    send_bits(s, (STORED_BLOCK<<1)+eof, 3);  /* send block type */
+    s->compressed_len = (s->compressed_len + 3 + 7) & (ulg)~7L;
+    s->compressed_len += (stored_len + 4) << 3;
+
+    copy_block(s, buf, (unsigned)stored_len, 1); /* with header */
+}
+
+/* Send just the `stored block' type code without any length bytes or data.
+ */
+void zlib_tr_stored_type_only(
+	deflate_state *s
+)
+{
+    send_bits(s, (STORED_BLOCK << 1), 3);
+    bi_windup(s);
+    s->compressed_len = (s->compressed_len + 3) & ~7L;
+}
+
+
+/* ===========================================================================
+ * Send one empty static block to give enough lookahead for inflate.
+ * This takes 10 bits, of which 7 may remain in the bit buffer.
+ * The current inflate code requires 9 bits of lookahead. If the
+ * last two codes for the previous block (real code plus EOB) were coded
+ * on 5 bits or less, inflate may have only 5+3 bits of lookahead to decode
+ * the last real code. In this case we send two empty static blocks instead
+ * of one. (There are no problems if the previous block is stored or fixed.)
+ * To simplify the code, we assume the worst case of last real code encoded
+ * on one bit only.
+ */
+void zlib_tr_align(
+	deflate_state *s
+)
+{
+    send_bits(s, STATIC_TREES<<1, 3);
+    send_code(s, END_BLOCK, static_ltree);
+    s->compressed_len += 10L; /* 3 for block type, 7 for EOB */
+    bi_flush(s);
+    /* Of the 10 bits for the empty block, we have already sent
+     * (10 - bi_valid) bits. The lookahead for the last real code (before
+     * the EOB of the previous block) was thus at least one plus the length
+     * of the EOB plus what we have just sent of the empty static block.
+     */
+    if (1 + s->last_eob_len + 10 - s->bi_valid < 9) {
+        send_bits(s, STATIC_TREES<<1, 3);
+        send_code(s, END_BLOCK, static_ltree);
+        s->compressed_len += 10L;
+        bi_flush(s);
+    }
+    s->last_eob_len = 7;
+}
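+
+/* Numeric example (editorial sketch, not part of the original sources):
+ * if the EOB of the previous block was 5 bits long and, after the flush
+ * above, bi_valid is 8, then 1 + 5 + 10 - 8 = 8 < 9, so a second empty
+ * static block is emitted to guarantee inflate its 9 bits of lookahead.
+ */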
+
+/* ===========================================================================
+ * Determine the best encoding for the current block: dynamic trees, static
+ * trees or store, and output the encoded block to the zip file. This function
+ * returns the total compressed length for the file so far.
+ */
+ulg zlib_tr_flush_block(
+	deflate_state *s,
+	char *buf,        /* input block, or NULL if too old */
+	ulg stored_len,   /* length of input block */
+	int eof           /* true if this is the last block for a file */
+)
+{
+    ulg opt_lenb, static_lenb; /* opt_len and static_len in bytes */
+    int max_blindex = 0;  /* index of last bit length code of non zero freq */
+
+    /* Build the Huffman trees unless a stored block is forced */
+    if (s->level > 0) {
+
+	 /* Check if the file is ascii or binary */
+	if (s->data_type == Z_UNKNOWN) set_data_type(s);
+
+	/* Construct the literal and distance trees */
+	build_tree(s, (tree_desc *)(&(s->l_desc)));
+	Tracev((stderr, "\nlit data: dyn %ld, stat %ld", s->opt_len,
+		s->static_len));
+
+	build_tree(s, (tree_desc *)(&(s->d_desc)));
+	Tracev((stderr, "\ndist data: dyn %ld, stat %ld", s->opt_len,
+		s->static_len));
+	/* At this point, opt_len and static_len are the total bit lengths of
+	 * the compressed block data, excluding the tree representations.
+	 */
+
+	/* Build the bit length tree for the above two trees, and get the index
+	 * in bl_order of the last bit length code to send.
+	 */
+	max_blindex = build_bl_tree(s);
+
+	/* Determine the best encoding. Compute first the block length in bytes*/
+	opt_lenb = (s->opt_len+3+7)>>3;
+	static_lenb = (s->static_len+3+7)>>3;
+
+	Tracev((stderr, "\nopt %lu(%lu) stat %lu(%lu) stored %lu lit %u ",
+		opt_lenb, s->opt_len, static_lenb, s->static_len, stored_len,
+		s->last_lit));
+
+	if (static_lenb <= opt_lenb) opt_lenb = static_lenb;
+
+    } else {
+        Assert(buf != (char*)0, "lost buf");
+	opt_lenb = static_lenb = stored_len + 5; /* force a stored block */
+    }
+
+    /* If compression failed and this is the first and last block,
+     * and if the .zip file is seekable (so the local header can be rewritten),
+     * the whole file is transformed into a stored file:
+     */
+#ifdef STORED_FILE_OK
+#  ifdef FORCE_STORED_FILE
+    if (eof && s->compressed_len == 0L) { /* force stored file */
+#  else
+    if (stored_len <= opt_lenb && eof && s->compressed_len==0L && seekable()) {
+#  endif
+        /* Since LIT_BUFSIZE <= 2*WSIZE, the input data must be there: */
+        if (buf == (char*)0) error ("block vanished");
+
+        copy_block(s, buf, (unsigned)stored_len, 0); /* without header */
+        s->compressed_len = stored_len << 3;
+        s->method = STORED;
+    } else
+#endif /* STORED_FILE_OK */
+
+#ifdef FORCE_STORED
+    if (buf != (char*)0) { /* force stored block */
+#else
+    if (stored_len+4 <= opt_lenb && buf != (char*)0) {
+                       /* 4: two words for the lengths */
+#endif
+        /* The test buf != NULL is only necessary if LIT_BUFSIZE > WSIZE.
+         * Otherwise we can't have processed more than WSIZE input bytes since
+         * the last block flush, because compression would have been
+         * successful. If LIT_BUFSIZE <= WSIZE, it is never too late to
+         * transform a block into a stored block.
+         */
+        zlib_tr_stored_block(s, buf, stored_len, eof);
+
+#ifdef FORCE_STATIC
+    } else if (static_lenb >= 0) { /* force static trees */
+#else
+    } else if (static_lenb == opt_lenb) {
+#endif
+        send_bits(s, (STATIC_TREES<<1)+eof, 3);
+        compress_block(s, (ct_data *)static_ltree, (ct_data *)static_dtree);
+        s->compressed_len += 3 + s->static_len;
+    } else {
+        send_bits(s, (DYN_TREES<<1)+eof, 3);
+        send_all_trees(s, s->l_desc.max_code+1, s->d_desc.max_code+1,
+                       max_blindex+1);
+        compress_block(s, (ct_data *)s->dyn_ltree, (ct_data *)s->dyn_dtree);
+        s->compressed_len += 3 + s->opt_len;
+    }
+    Assert (s->compressed_len == s->bits_sent, "bad compressed size");
+    init_block(s);
+
+    if (eof) {
+        bi_windup(s);
+        s->compressed_len += 7;  /* align on byte boundary */
+    }
+    Tracev((stderr,"\ncomprlen %lu(%lu) ", s->compressed_len>>3,
+           s->compressed_len-7*eof));
+
+    return s->compressed_len >> 3;
+}
+
+/* ===========================================================================
+ * Save the match info and tally the frequency counts. Return true if
+ * the current block must be flushed.
+ */
+int zlib_tr_tally(
+	deflate_state *s,
+	unsigned dist,  /* distance of matched string */
+	unsigned lc     /* match length-MIN_MATCH or unmatched char (if dist==0) */
+)
+{
+    s->d_buf[s->last_lit] = (ush)dist;
+    s->l_buf[s->last_lit++] = (uch)lc;
+    if (dist == 0) {
+        /* lc is the unmatched char */
+        s->dyn_ltree[lc].Freq++;
+    } else {
+        s->matches++;
+        /* Here, lc is the match length - MIN_MATCH */
+        dist--;             /* dist = match distance - 1 */
+        Assert((ush)dist < (ush)MAX_DIST(s) &&
+               (ush)lc <= (ush)(MAX_MATCH-MIN_MATCH) &&
+               (ush)d_code(dist) < (ush)D_CODES,  "zlib_tr_tally: bad match");
+
+        s->dyn_ltree[length_code[lc]+LITERALS+1].Freq++;
+        s->dyn_dtree[d_code(dist)].Freq++;
+    }
+
+    /* Try to guess if it is profitable to stop the current block here */
+    if ((s->last_lit & 0xfff) == 0 && s->level > 2) {
+        /* Compute an upper bound for the compressed length */
+        ulg out_length = (ulg)s->last_lit*8L;
+        ulg in_length = (ulg)((long)s->strstart - s->block_start);
+        int dcode;
+        for (dcode = 0; dcode < D_CODES; dcode++) {
+            out_length += (ulg)s->dyn_dtree[dcode].Freq *
+                (5L+extra_dbits[dcode]);
+        }
+        out_length >>= 3;
+        Tracev((stderr,"\nlast_lit %u, in %ld, out ~%ld(%ld%%) ",
+               s->last_lit, in_length, out_length,
+               100L - out_length*100L/in_length));
+        if (s->matches < s->last_lit/2 && out_length < in_length/2) return 1;
+    }
+    return (s->last_lit == s->lit_bufsize-1);
+    /* We avoid equality with lit_bufsize because of wraparound at 64K
+     * on 16 bit machines and because stored blocks are restricted to
+     * 64K-1 bytes.
+     */
+}
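+
+/* Usage sketch (editorial note, not part of the original sources):
+ *
+ *    zlib_tr_tally(s, 0, 'A');               for a literal byte 'A'
+ *    zlib_tr_tally(s, 300, 10 - MIN_MATCH);  for a length-10 match at distance 300
+ *
+ * The first call bumps dyn_ltree['A'].Freq; the second bumps the length
+ * code for length 10 and the distance code for distance 300, and records
+ * both values in l_buf/d_buf so compress_block() can replay them later.
+ * Either call returns non-zero once the literal buffer fills up, signalling
+ * that the current block should be flushed.
+ */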
+
+/* ===========================================================================
+ * Send the block data compressed using the given Huffman trees
+ */
+static void compress_block(
+	deflate_state *s,
+	ct_data *ltree, /* literal tree */
+	ct_data *dtree  /* distance tree */
+)
+{
+    unsigned dist;      /* distance of matched string */
+    int lc;             /* match length or unmatched char (if dist == 0) */
+    unsigned lx = 0;    /* running index in l_buf */
+    unsigned code;      /* the code to send */
+    int extra;          /* number of extra bits to send */
+
+    if (s->last_lit != 0) do {
+        dist = s->d_buf[lx];
+        lc = s->l_buf[lx++];
+        if (dist == 0) {
+            send_code(s, lc, ltree); /* send a literal byte */
+            Tracecv(isgraph(lc), (stderr," '%c' ", lc));
+        } else {
+            /* Here, lc is the match length - MIN_MATCH */
+            code = length_code[lc];
+            send_code(s, code+LITERALS+1, ltree); /* send the length code */
+            extra = extra_lbits[code];
+            if (extra != 0) {
+                lc -= base_length[code];
+                send_bits(s, lc, extra);       /* send the extra length bits */
+            }
+            dist--; /* dist is now the match distance - 1 */
+            code = d_code(dist);
+            Assert (code < D_CODES, "bad d_code");
+
+            send_code(s, code, dtree);       /* send the distance code */
+            extra = extra_dbits[code];
+            if (extra != 0) {
+                dist -= base_dist[code];
+                send_bits(s, dist, extra);   /* send the extra distance bits */
+            }
+        } /* literal or match pair ? */
+
+        /* Check that the overlay between pending_buf and d_buf+l_buf is ok: */
+        Assert(s->pending < s->lit_bufsize + 2*lx, "pendingBuf overflow");
+
+    } while (lx < s->last_lit);
+
+    send_code(s, END_BLOCK, ltree);
+    s->last_eob_len = ltree[END_BLOCK].Len;
+}
+
+/* ===========================================================================
+ * Set the data type to ASCII or BINARY, using a crude approximation:
+ * binary if more than 20% of the bytes are <= 6 or >= 128, ascii otherwise.
+ * IN assertion: the fields freq of dyn_ltree are set and the total of all
+ * frequencies does not exceed 64K (to fit in an int on 16 bit machines).
+ */
+static void set_data_type(
+	deflate_state *s
+)
+{
+    int n = 0;
+    unsigned ascii_freq = 0;
+    unsigned bin_freq = 0;
+    while (n < 7)        bin_freq += s->dyn_ltree[n++].Freq;
+    while (n < 128)    ascii_freq += s->dyn_ltree[n++].Freq;
+    while (n < LITERALS) bin_freq += s->dyn_ltree[n++].Freq;
+    s->data_type = (Byte)(bin_freq > (ascii_freq >> 2) ? Z_BINARY : Z_ASCII);
+}
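+
+/* Numeric example (editorial sketch, not part of the original sources):
+ * for 1000 tallied literal bytes of which 150 fall in the "binary" ranges
+ * (<= 6 or >= 128), bin_freq = 150 and ascii_freq = 850; since
+ * 150 <= (850 >> 2) = 212 the data is classified as Z_ASCII.  With 250
+ * such bytes (25%), 250 > (750 >> 2) = 187 and the data becomes Z_BINARY.
+ */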
+
+/* ===========================================================================
+ * Copy a stored block, storing first the length and its
+ * one's complement if requested.
+ */
+static void copy_block(
+	deflate_state *s,
+	char    *buf,     /* the input data */
+	unsigned len,     /* its length */
+	int      header   /* true if block header must be written */
+)
+{
+    bi_windup(s);        /* align on byte boundary */
+    s->last_eob_len = 8; /* enough lookahead for inflate */
+
+    if (header) {
+        put_short(s, (ush)len);
+        put_short(s, (ush)~len);
+#ifdef DEBUG_ZLIB
+        s->bits_sent += 2*16;
+#endif
+    }
+#ifdef DEBUG_ZLIB
+    s->bits_sent += (ulg)len<<3;
+#endif
+    /* bundle up the put_byte(s, *buf++) calls */
+    memcpy(&s->pending_buf[s->pending], buf, len);
+    s->pending += len;
+}
+
diff --git a/lib/zlib_deflate/defutil.h b/lib/zlib_deflate/defutil.h
new file mode 100644
index 0000000..d9feaf6
--- /dev/null
+++ b/lib/zlib_deflate/defutil.h
@@ -0,0 +1,334 @@
+
+
+
+#define Assert(err, str) 
+#define Trace(dummy) 
+#define Tracev(dummy) 
+#define Tracecv(err, dummy) 
+#define Tracevv(dummy) 
+
+
+
+#define LENGTH_CODES 29
+/* number of length codes, not counting the special END_BLOCK code */
+
+#define LITERALS  256
+/* number of literal bytes 0..255 */
+
+#define L_CODES (LITERALS+1+LENGTH_CODES)
+/* number of Literal or Length codes, including the END_BLOCK code */
+
+#define D_CODES   30
+/* number of distance codes */
+
+#define BL_CODES  19
+/* number of codes used to transfer the bit lengths */
+
+#define HEAP_SIZE (2*L_CODES+1)
+/* maximum heap size */
+
+#define MAX_BITS 15
+/* All codes must not exceed MAX_BITS bits */
+
+#define INIT_STATE    42
+#define BUSY_STATE   113
+#define FINISH_STATE 666
+/* Stream status */
+
+
+/* Data structure describing a single value and its code string. */
+typedef struct ct_data_s {
+    union {
+        ush  freq;       /* frequency count */
+        ush  code;       /* bit string */
+    } fc;
+    union {
+        ush  dad;        /* father node in Huffman tree */
+        ush  len;        /* length of bit string */
+    } dl;
+} ct_data;
+
+#define Freq fc.freq
+#define Code fc.code
+#define Dad  dl.dad
+#define Len  dl.len
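+
+/* Editorial note (not part of the original sources): the two unions let
+ * each half of the entry be reused as compression proceeds.  fc holds the
+ * frequency count (Freq) while the tree is being built and is later
+ * overwritten with the bit string (Code) by gen_codes(); dl holds the
+ * father index (Dad) during tree construction and is overwritten with the
+ * code length (Len) by gen_bitlen().
+ */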
+
+typedef struct static_tree_desc_s  static_tree_desc;
+
+typedef struct tree_desc_s {
+    ct_data *dyn_tree;           /* the dynamic tree */
+    int     max_code;            /* largest code with non zero frequency */
+    static_tree_desc *stat_desc; /* the corresponding static tree */
+} tree_desc;
+
+typedef ush Pos;
+typedef unsigned IPos;
+
+/* A Pos is an index in the character window. We use short instead of int to
+ * save space in the various tables. IPos is used only for parameter passing.
+ */
+
+typedef struct deflate_state {
+    z_streamp strm;      /* pointer back to this zlib stream */
+    int   status;        /* as the name implies */
+    Byte *pending_buf;   /* output still pending */
+    ulg   pending_buf_size; /* size of pending_buf */
+    Byte *pending_out;   /* next pending byte to output to the stream */
+    int   pending;       /* nb of bytes in the pending buffer */
+    int   noheader;      /* suppress zlib header and adler32 */
+    Byte  data_type;     /* UNKNOWN, BINARY or ASCII */
+    Byte  method;        /* STORED (for zip only) or DEFLATED */
+    int   last_flush;    /* value of flush param for previous deflate call */
+
+                /* used by deflate.c: */
+
+    uInt  w_size;        /* LZ77 window size (32K by default) */
+    uInt  w_bits;        /* log2(w_size)  (8..16) */
+    uInt  w_mask;        /* w_size - 1 */
+
+    Byte *window;
+    /* Sliding window. Input bytes are read into the second half of the window,
+     * and move to the first half later to keep a dictionary of at least wSize
+     * bytes. With this organization, matches are limited to a distance of
+     * wSize-MAX_MATCH bytes, but this ensures that IO is always
+     * performed with a length multiple of the block size. Also, it limits
+     * the window size to 64K, which is quite useful on MSDOS.
+     * To do: use the user input buffer as sliding window.
+     */
+
+    ulg window_size;
+    /* Actual size of window: 2*wSize, except when the user input buffer
+     * is directly used as sliding window.
+     */
+
+    Pos *prev;
+    /* Link to older string with same hash index. To limit the size of this
+     * array to 64K, this link is maintained only for the last 32K strings.
+     * An index in this array is thus a window index modulo 32K.
+     */
+
+    Pos *head; /* Heads of the hash chains or NIL. */
+
+    uInt  ins_h;          /* hash index of string to be inserted */
+    uInt  hash_size;      /* number of elements in hash table */
+    uInt  hash_bits;      /* log2(hash_size) */
+    uInt  hash_mask;      /* hash_size-1 */
+
+    uInt  hash_shift;
+    /* Number of bits by which ins_h must be shifted at each input
+     * step. It must be such that after MIN_MATCH steps, the oldest
+     * byte no longer takes part in the hash key, that is:
+     *   hash_shift * MIN_MATCH >= hash_bits
+     */
+
+    long block_start;
+    /* Window position at the beginning of the current output block. Gets
+     * negative when the window is moved backwards.
+     */
+
+    uInt match_length;           /* length of best match */
+    IPos prev_match;             /* previous match */
+    int match_available;         /* set if previous match exists */
+    uInt strstart;               /* start of string to insert */
+    uInt match_start;            /* start of matching string */
+    uInt lookahead;              /* number of valid bytes ahead in window */
+
+    uInt prev_length;
+    /* Length of the best match at previous step. Matches not greater than this
+     * are discarded. This is used in the lazy match evaluation.
+     */
+
+    uInt max_chain_length;
+    /* To speed up deflation, hash chains are never searched beyond this
+     * length.  A higher limit improves compression ratio but degrades the
+     * speed.
+     */
+
+    uInt max_lazy_match;
+    /* Attempt to find a better match only when the current match is strictly
+     * smaller than this value. This mechanism is used only for compression
+     * levels >= 4.
+     */
+#   define max_insert_length  max_lazy_match
+    /* Insert new strings in the hash table only if the match length is not
+     * greater than this length. This saves time but degrades compression.
+     * max_insert_length is used only for compression levels <= 3.
+     */
+
+    int level;    /* compression level (1..9) */
+    int strategy; /* favor or force Huffman coding*/
+
+    uInt good_match;
+    /* Use a faster search when the previous match is longer than this */
+
+    int nice_match; /* Stop searching when current match exceeds this */
+
+                /* used by trees.c: */
+    /* Didn't use ct_data typedef below to suppress compiler warning */
+    struct ct_data_s dyn_ltree[HEAP_SIZE];   /* literal and length tree */
+    struct ct_data_s dyn_dtree[2*D_CODES+1]; /* distance tree */
+    struct ct_data_s bl_tree[2*BL_CODES+1];  /* Huffman tree for bit lengths */
+
+    struct tree_desc_s l_desc;               /* desc. for literal tree */
+    struct tree_desc_s d_desc;               /* desc. for distance tree */
+    struct tree_desc_s bl_desc;              /* desc. for bit length tree */
+
+    ush bl_count[MAX_BITS+1];
+    /* number of codes at each bit length for an optimal tree */
+
+    int heap[2*L_CODES+1];      /* heap used to build the Huffman trees */
+    int heap_len;               /* number of elements in the heap */
+    int heap_max;               /* element of largest frequency */
+    /* The sons of heap[n] are heap[2*n] and heap[2*n+1]. heap[0] is not used.
+     * The same heap array is used to build all trees.
+     */
+
+    uch depth[2*L_CODES+1];
+    /* Depth of each subtree used as tie breaker for trees of equal frequency
+     */
+
+    uch *l_buf;          /* buffer for literals or lengths */
+
+    uInt  lit_bufsize;
+    /* Size of match buffer for literals/lengths.  There are 4 reasons for
+     * limiting lit_bufsize to 64K:
+     *   - frequencies can be kept in 16 bit counters
+     *   - if compression is not successful for the first block, all input
+     *     data is still in the window so we can still emit a stored block even
+     *     when input comes from standard input.  (This can also be done for
+     *     all blocks if lit_bufsize is not greater than 32K.)
+     *   - if compression is not successful for a file smaller than 64K, we can
+     *     even emit a stored file instead of a stored block (saving 5 bytes).
+     *     This is applicable only for zip (not gzip or zlib).
+     *   - creating new Huffman trees less frequently may not provide fast
+     *     adaptation to changes in the input data statistics. (Take for
+     *     example a binary file with poorly compressible code followed by
+     *     a highly compressible string table.) Smaller buffer sizes give
+     *     fast adaptation but have of course the overhead of transmitting
+     *     trees more frequently.
+     *   - I can't count above 4
+     */
+
+    uInt last_lit;      /* running index in l_buf */
+
+    ush *d_buf;
+    /* Buffer for distances. To simplify the code, d_buf and l_buf have
+     * the same number of elements. To use different lengths, an extra flag
+     * array would be necessary.
+     */
+
+    ulg opt_len;        /* bit length of current block with optimal trees */
+    ulg static_len;     /* bit length of current block with static trees */
+    ulg compressed_len; /* total bit length of compressed file */
+    uInt matches;       /* number of string matches in current block */
+    int last_eob_len;   /* bit length of EOB code for last block */
+
+#ifdef DEBUG_ZLIB
+    ulg bits_sent;      /* bit length of the compressed data */
+#endif
+
+    ush bi_buf;
+    /* Output buffer. bits are inserted starting at the bottom (least
+     * significant bits).
+     */
+    int bi_valid;
+    /* Number of valid bits in bi_buf.  All bits above the last valid bit
+     * are always zero.
+     */
+
+} deflate_state;
+
+typedef struct deflate_workspace {
+    /* State memory for the deflator */
+    deflate_state deflate_memory;
+    Byte window_memory[2 * (1 << MAX_WBITS)];
+    Pos prev_memory[1 << MAX_WBITS];
+    Pos head_memory[1 << (MAX_MEM_LEVEL + 7)];
+    char overlay_memory[(1 << (MAX_MEM_LEVEL + 6)) * (sizeof(ush)+2)];
+} deflate_workspace;
+
+/* Output a byte on the stream.
+ * IN assertion: there is enough room in pending_buf.
+ */
+#define put_byte(s, c) {s->pending_buf[s->pending++] = (c);}
+
+
+#define MIN_LOOKAHEAD (MAX_MATCH+MIN_MATCH+1)
+/* Minimum amount of lookahead, except at the end of the input file.
+ * See deflate.c for comments about the MIN_MATCH+1.
+ */
+
+#define MAX_DIST(s)  ((s)->w_size-MIN_LOOKAHEAD)
+/* In order to simplify the code, particularly on 16 bit machines, match
+ * distances are limited to MAX_DIST instead of WSIZE.
+ */
+
+        /* in trees.c */
+void zlib_tr_init         (deflate_state *s);
+int  zlib_tr_tally        (deflate_state *s, unsigned dist, unsigned lc);
+ulg  zlib_tr_flush_block  (deflate_state *s, char *buf, ulg stored_len,
+			   int eof);
+void zlib_tr_align        (deflate_state *s);
+void zlib_tr_stored_block (deflate_state *s, char *buf, ulg stored_len,
+			   int eof);
+void zlib_tr_stored_type_only (deflate_state *);
+
+
+/* ===========================================================================
+ * Output a short LSB first on the stream.
+ * IN assertion: there is enough room in pendingBuf.
+ */
+#define put_short(s, w) { \
+    put_byte(s, (uch)((w) & 0xff)); \
+    put_byte(s, (uch)((ush)(w) >> 8)); \
+}
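+
+/* Example (editorial sketch, not part of the original sources):
+ * put_short(s, 0x1234) appends the bytes 0x34 then 0x12 to pending_buf,
+ * i.e. the value is stored least significant byte first, matching the
+ * LSB-first bit order used by the bit buffer routines below.
+ */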
+
+/* ===========================================================================
+ * Reverse the first len bits of a code, using straightforward code (a faster
+ * method would use a table)
+ * IN assertion: 1 <= len <= 15
+ */
+static inline unsigned bi_reverse(unsigned code, /* the value to invert */
+				  int len)       /* its bit length */
+{
+    register unsigned res = 0;
+    do {
+        res |= code & 1;
+        code >>= 1, res <<= 1;
+    } while (--len > 0);
+    return res >> 1;
+}
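+
+/* Worked example (editorial sketch, not part of the original sources):
+ * bi_reverse(0x6, 3) walks the three low bits of binary 110 and returns
+ * binary 011, i.e. 3.  deflate stores Huffman codes pre-reversed this way
+ * because the output bit buffer emits the least significant bit first,
+ * while Huffman codes are defined most significant bit first.
+ */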
+
+/* ===========================================================================
+ * Flush the bit buffer, keeping at most 7 bits in it.
+ */
+static inline void bi_flush(deflate_state *s)
+{
+    if (s->bi_valid == 16) {
+        put_short(s, s->bi_buf);
+        s->bi_buf = 0;
+        s->bi_valid = 0;
+    } else if (s->bi_valid >= 8) {
+        put_byte(s, (Byte)s->bi_buf);
+        s->bi_buf >>= 8;
+        s->bi_valid -= 8;
+    }
+}
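+
+/* Example (editorial sketch, not part of the original sources): with
+ * bi_valid == 13 the low byte of bi_buf is written out and 5 bits remain
+ * buffered; with bi_valid == 16 both bytes are written and the buffer is
+ * emptied.  At most 7 bits are ever left behind, as the comment above says.
+ */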
+
+/* ===========================================================================
+ * Flush the bit buffer and align the output on a byte boundary
+ */
+static inline void bi_windup(deflate_state *s)
+{
+    if (s->bi_valid > 8) {
+        put_short(s, s->bi_buf);
+    } else if (s->bi_valid > 0) {
+        put_byte(s, (Byte)s->bi_buf);
+    }
+    s->bi_buf = 0;
+    s->bi_valid = 0;
+#ifdef DEBUG_ZLIB
+    s->bits_sent = (s->bits_sent+7) & ~7;
+#endif
+}
+
diff --git a/lib/zlib_inflate/Makefile b/lib/zlib_inflate/Makefile
new file mode 100644
index 0000000..221c139
--- /dev/null
+++ b/lib/zlib_inflate/Makefile
@@ -0,0 +1,19 @@
+#
+# This is a modified version of zlib, which does all memory
+# allocation ahead of time.
+#
+# This is only the decompression, see zlib_deflate for
+# the compression
+#
+# Decompression needs to be serialized for each memory
+# allocation.
+#
+# (The upside of the simplification is that you can't get into
+# any nasty situations wrt memory management, and that the
+# decompression can be done without blocking on allocation).
+#
+
+obj-$(CONFIG_ZLIB_INFLATE) += zlib_inflate.o
+
+zlib_inflate-objs := infblock.o infcodes.o inffast.o inflate.o \
+		     inflate_sync.o inftrees.o infutil.o inflate_syms.o
diff --git a/lib/zlib_inflate/infblock.c b/lib/zlib_inflate/infblock.c
new file mode 100644
index 0000000..50f21ca4
--- /dev/null
+++ b/lib/zlib_inflate/infblock.c
@@ -0,0 +1,361 @@
+/* infblock.c -- interpret and process block types to last block
+ * Copyright (C) 1995-1998 Mark Adler
+ * For conditions of distribution and use, see copyright notice in zlib.h 
+ */
+
+#include <linux/zutil.h>
+#include "infblock.h"
+#include "inftrees.h"
+#include "infcodes.h"
+#include "infutil.h"
+
+struct inflate_codes_state;
+
+/* simplify the use of the inflate_huft type with some defines */
+#define exop word.what.Exop
+#define bits word.what.Bits
+
+/* Table for deflate from PKZIP's appnote.txt. */
+static const uInt border[] = { /* Order of the bit length code lengths */
+        16, 17, 18, 0, 8, 7, 9, 6, 10, 5, 11, 4, 12, 3, 13, 2, 14, 1, 15};
+
+/*
+   Notes beyond the 1.93a appnote.txt:
+
+   1. Distance pointers never point before the beginning of the output
+      stream.
+   2. Distance pointers can point back across blocks, up to 32k away.
+   3. There is an implied maximum of 7 bits for the bit length table and
+      15 bits for the actual data.
+   4. If only one code exists, then it is encoded using one bit.  (Zero
+      would be more efficient, but perhaps a little confusing.)  If two
+      codes exist, they are coded using one bit each (0 and 1).
+   5. There is no way of sending zero distance codes--a dummy must be
+      sent if there are none.  (History: a pre 2.0 version of PKZIP would
+      store blocks with no distance codes, but this was discovered to be
+      too harsh a criterion.)  Valid only for 1.93a.  2.04c does allow
+      zero distance codes, which is sent as one code of zero bits in
+      length.
+   6. There are up to 286 literal/length codes.  Code 256 represents the
+      end-of-block.  Note however that the static length tree defines
+      288 codes just to fill out the Huffman codes.  Codes 286 and 287
+      cannot be used though, since there is no length base or extra bits
+      defined for them.  Similarly, there are up to 30 distance codes.
+      However, static trees define 32 codes (all 5 bits) to fill out the
+      Huffman codes, but the last two had better not show up in the data.
+   7. Unzip can check dynamic Huffman blocks for complete code sets.
+      The exception is that a single code would not be complete (see #4).
+   8. The five bits following the block type is really the number of
+      literal codes sent minus 257.
+   9. Length codes 8,16,16 are interpreted as 13 length codes of 8 bits
+      (1+6+6).  Therefore, to output three times the length, you output
+      three codes (1+1+1), whereas to output four times the same length,
+      you only need two codes (1+3).  Hmm.
+  10. In the tree reconstruction algorithm, Code = Code + Increment
+      only if BitLength(i) is not zero.  (Pretty obvious.)
+  11. Correction: 4 Bits: # of Bit Length codes - 4     (4 - 19)
+  12. Note: length code 284 can represent 227-258, but length code 285
+      really is 258.  The last length deserves its own, short code
+      since it gets used a lot in very redundant files.  The length
+      258 is special since 258 - 3 (the min match length) is 255.
+  13. The literal/length and distance code bit lengths are read as a
+      single stream of lengths.  It is possible (and advantageous) for
+      a repeat code (16, 17, or 18) to go across the boundary between
+      the two sets of lengths.
+ */
+
+
+void zlib_inflate_blocks_reset(
+	inflate_blocks_statef *s,
+	z_streamp z,
+	uLong *c
+)
+{
+  if (c != NULL)
+    *c = s->check;
+  if (s->mode == CODES)
+    zlib_inflate_codes_free(s->sub.decode.codes, z);
+  s->mode = TYPE;
+  s->bitk = 0;
+  s->bitb = 0;
+  s->read = s->write = s->window;
+  if (s->checkfn != NULL)
+    z->adler = s->check = (*s->checkfn)(0L, NULL, 0);
+}
+
+inflate_blocks_statef *zlib_inflate_blocks_new(
+	z_streamp z,
+	check_func c,
+	uInt w
+)
+{
+  inflate_blocks_statef *s;
+
+  s = &WS(z)->working_blocks_state;
+  s->hufts = WS(z)->working_hufts;
+  s->window = WS(z)->working_window;
+  s->end = s->window + w;
+  s->checkfn = c;
+  s->mode = TYPE;
+  zlib_inflate_blocks_reset(s, z, NULL);
+  return s;
+}
+
+
+int zlib_inflate_blocks(
+	inflate_blocks_statef *s,
+	z_streamp z,
+	int r
+)
+{
+  uInt t;               /* temporary storage */
+  uLong b;              /* bit buffer */
+  uInt k;               /* bits in bit buffer */
+  Byte *p;              /* input data pointer */
+  uInt n;               /* bytes available there */
+  Byte *q;              /* output window write pointer */
+  uInt m;               /* bytes to end of window or read pointer */
+
+  /* copy input/output information to locals (UPDATE macro restores) */
+  LOAD
+
+  /* process input based on current state */
+  while (1) switch (s->mode)
+  {
+    case TYPE:
+      NEEDBITS(3)
+      t = (uInt)b & 7;
+      s->last = t & 1;
+      switch (t >> 1)
+      {
+        case 0:                         /* stored */
+          DUMPBITS(3)
+          t = k & 7;                    /* go to byte boundary */
+          DUMPBITS(t)
+          s->mode = LENS;               /* get length of stored block */
+          break;
+        case 1:                         /* fixed */
+          {
+            uInt bl, bd;
+            inflate_huft *tl, *td;
+
+            zlib_inflate_trees_fixed(&bl, &bd, &tl, &td, s->hufts, z);
+            s->sub.decode.codes = zlib_inflate_codes_new(bl, bd, tl, td, z);
+            if (s->sub.decode.codes == NULL)
+            {
+              r = Z_MEM_ERROR;
+              LEAVE
+            }
+          }
+          DUMPBITS(3)
+          s->mode = CODES;
+          break;
+        case 2:                         /* dynamic */
+          DUMPBITS(3)
+          s->mode = TABLE;
+          break;
+        case 3:                         /* illegal */
+          DUMPBITS(3)
+          s->mode = B_BAD;
+          z->msg = (char*)"invalid block type";
+          r = Z_DATA_ERROR;
+          LEAVE
+      }
+      break;
+    case LENS:
+      NEEDBITS(32)
+      if ((((~b) >> 16) & 0xffff) != (b & 0xffff))
+      {
+        s->mode = B_BAD;
+        z->msg = (char*)"invalid stored block lengths";
+        r = Z_DATA_ERROR;
+        LEAVE
+      }
+      s->sub.left = (uInt)b & 0xffff;
+      b = k = 0;                      /* dump bits */
+      s->mode = s->sub.left ? STORED : (s->last ? DRY : TYPE);
+      break;
+    case STORED:
+      if (n == 0)
+        LEAVE
+      NEEDOUT
+      t = s->sub.left;
+      if (t > n) t = n;
+      if (t > m) t = m;
+      memcpy(q, p, t);
+      p += t;  n -= t;
+      q += t;  m -= t;
+      if ((s->sub.left -= t) != 0)
+        break;
+      s->mode = s->last ? DRY : TYPE;
+      break;
+    case TABLE:
+      NEEDBITS(14)
+      s->sub.trees.table = t = (uInt)b & 0x3fff;
+#ifndef PKZIP_BUG_WORKAROUND
+      if ((t & 0x1f) > 29 || ((t >> 5) & 0x1f) > 29)
+      {
+        s->mode = B_BAD;
+        z->msg = (char*)"too many length or distance symbols";
+        r = Z_DATA_ERROR;
+        LEAVE
+      }
+#endif
+      {
+      	s->sub.trees.blens = WS(z)->working_blens;
+      }
+      DUMPBITS(14)
+      s->sub.trees.index = 0;
+      s->mode = BTREE;
+    case BTREE:
+      while (s->sub.trees.index < 4 + (s->sub.trees.table >> 10))
+      {
+        NEEDBITS(3)
+        s->sub.trees.blens[border[s->sub.trees.index++]] = (uInt)b & 7;
+        DUMPBITS(3)
+      }
+      while (s->sub.trees.index < 19)
+        s->sub.trees.blens[border[s->sub.trees.index++]] = 0;
+      s->sub.trees.bb = 7;
+      t = zlib_inflate_trees_bits(s->sub.trees.blens, &s->sub.trees.bb,
+				  &s->sub.trees.tb, s->hufts, z);
+      if (t != Z_OK)
+      {
+        r = t;
+        if (r == Z_DATA_ERROR)
+          s->mode = B_BAD;
+        LEAVE
+      }
+      s->sub.trees.index = 0;
+      s->mode = DTREE;
+    case DTREE:
+      while (t = s->sub.trees.table,
+             s->sub.trees.index < 258 + (t & 0x1f) + ((t >> 5) & 0x1f))
+      {
+        inflate_huft *h;
+        uInt i, j, c;
+
+        t = s->sub.trees.bb;
+        NEEDBITS(t)
+        h = s->sub.trees.tb + ((uInt)b & zlib_inflate_mask[t]);
+        t = h->bits;
+        c = h->base;
+        if (c < 16)
+        {
+          DUMPBITS(t)
+          s->sub.trees.blens[s->sub.trees.index++] = c;
+        }
+        else /* c == 16..18 */
+        {
+          i = c == 18 ? 7 : c - 14;
+          j = c == 18 ? 11 : 3;
+          NEEDBITS(t + i)
+          DUMPBITS(t)
+          j += (uInt)b & zlib_inflate_mask[i];
+          DUMPBITS(i)
+          i = s->sub.trees.index;
+          t = s->sub.trees.table;
+          if (i + j > 258 + (t & 0x1f) + ((t >> 5) & 0x1f) ||
+              (c == 16 && i < 1))
+          {
+            s->mode = B_BAD;
+            z->msg = (char*)"invalid bit length repeat";
+            r = Z_DATA_ERROR;
+            LEAVE
+          }
+          c = c == 16 ? s->sub.trees.blens[i - 1] : 0;
+          do {
+            s->sub.trees.blens[i++] = c;
+          } while (--j);
+          s->sub.trees.index = i;
+        }
+      }
+      s->sub.trees.tb = NULL;
+      {
+        uInt bl, bd;
+        inflate_huft *tl, *td;
+        inflate_codes_statef *c;
+
+        bl = 9;         /* must be <= 9 for lookahead assumptions */
+        bd = 6;         /* must be <= 9 for lookahead assumptions */
+        t = s->sub.trees.table;
+        t = zlib_inflate_trees_dynamic(257 + (t & 0x1f), 1 + ((t >> 5) & 0x1f),
+				       s->sub.trees.blens, &bl, &bd, &tl, &td,
+				       s->hufts, z);
+        if (t != Z_OK)
+        {
+          if (t == (uInt)Z_DATA_ERROR)
+            s->mode = B_BAD;
+          r = t;
+          LEAVE
+        }
+        if ((c = zlib_inflate_codes_new(bl, bd, tl, td, z)) == NULL)
+        {
+          r = Z_MEM_ERROR;
+          LEAVE
+        }
+        s->sub.decode.codes = c;
+      }
+      s->mode = CODES;
+    case CODES:
+      UPDATE
+      if ((r = zlib_inflate_codes(s, z, r)) != Z_STREAM_END)
+        return zlib_inflate_flush(s, z, r);
+      r = Z_OK;
+      zlib_inflate_codes_free(s->sub.decode.codes, z);
+      LOAD
+      if (!s->last)
+      {
+        s->mode = TYPE;
+        break;
+      }
+      s->mode = DRY;
+    case DRY:
+      FLUSH
+      if (s->read != s->write)
+        LEAVE
+      s->mode = B_DONE;
+    case B_DONE:
+      r = Z_STREAM_END;
+      LEAVE
+    case B_BAD:
+      r = Z_DATA_ERROR;
+      LEAVE
+    default:
+      r = Z_STREAM_ERROR;
+      LEAVE
+  }
+}
+
+
+int zlib_inflate_blocks_free(
+	inflate_blocks_statef *s,
+	z_streamp z
+)
+{
+  zlib_inflate_blocks_reset(s, z, NULL);
+  return Z_OK;
+}
+
+
+void zlib_inflate_set_dictionary(
+	inflate_blocks_statef *s,
+	const Byte *d,
+	uInt  n
+)
+{
+  memcpy(s->window, d, n);
+  s->read = s->write = s->window + n;
+}
+
+
+/* Returns true if inflate is currently at the end of a block generated
+ * by Z_SYNC_FLUSH or Z_FULL_FLUSH. 
+ * IN assertion: s != NULL
+ */
+int zlib_inflate_blocks_sync_point(
+	inflate_blocks_statef *s
+)
+{
+  return s->mode == LENS;
+}
diff --git a/lib/zlib_inflate/infblock.h b/lib/zlib_inflate/infblock.h
new file mode 100644
index 0000000..f5221dd
--- /dev/null
+++ b/lib/zlib_inflate/infblock.h
@@ -0,0 +1,44 @@
+/* infblock.h -- header to use infblock.c
+ * Copyright (C) 1995-1998 Mark Adler
+ * For conditions of distribution and use, see copyright notice in zlib.h 
+ */
+
+/* WARNING: this file should *not* be used by applications. It is
+   part of the implementation of the compression library and is
+   subject to change. Applications should only use zlib.h.
+ */
+
+#ifndef _INFBLOCK_H
+#define _INFBLOCK_H
+
+struct inflate_blocks_state;
+typedef struct inflate_blocks_state inflate_blocks_statef;
+
+extern inflate_blocks_statef * zlib_inflate_blocks_new (
+    z_streamp z,
+    check_func c,              /* check function */
+    uInt w);                   /* window size */
+
+extern int zlib_inflate_blocks (
+    inflate_blocks_statef *,
+    z_streamp ,
+    int);                      /* initial return code */
+
+extern void zlib_inflate_blocks_reset (
+    inflate_blocks_statef *,
+    z_streamp ,
+    uLong *);                  /* check value on output */
+
+extern int zlib_inflate_blocks_free (
+    inflate_blocks_statef *,
+    z_streamp);
+
+extern void zlib_inflate_set_dictionary (
+    inflate_blocks_statef *s,
+    const Byte *d,  /* dictionary */
+    uInt  n);       /* dictionary length */
+
+extern int zlib_inflate_blocks_sync_point (
+    inflate_blocks_statef *s);
+
+#endif /* _INFBLOCK_H */
diff --git a/lib/zlib_inflate/infcodes.c b/lib/zlib_inflate/infcodes.c
new file mode 100644
index 0000000..07cd759
--- /dev/null
+++ b/lib/zlib_inflate/infcodes.c
@@ -0,0 +1,202 @@
+/* infcodes.c -- process literals and length/distance pairs
+ * Copyright (C) 1995-1998 Mark Adler
+ * For conditions of distribution and use, see copyright notice in zlib.h 
+ */
+
+#include <linux/zutil.h>
+#include "inftrees.h"
+#include "infblock.h"
+#include "infcodes.h"
+#include "infutil.h"
+#include "inffast.h"
+
+/* simplify the use of the inflate_huft type with some defines */
+#define exop word.what.Exop
+#define bits word.what.Bits
+
+inflate_codes_statef *zlib_inflate_codes_new(
+	uInt bl,
+	uInt bd,
+	inflate_huft *tl,
+	inflate_huft *td, /* need separate declaration for Borland C++ */
+	z_streamp z
+)
+{
+  inflate_codes_statef *c;
+
+  c = &WS(z)->working_state;
+  {
+    c->mode = START;
+    c->lbits = (Byte)bl;
+    c->dbits = (Byte)bd;
+    c->ltree = tl;
+    c->dtree = td;
+  }
+  return c;
+}
+
+
+int zlib_inflate_codes(
+	inflate_blocks_statef *s,
+	z_streamp z,
+	int r
+)
+{
+  uInt j;               /* temporary storage */
+  inflate_huft *t;      /* temporary pointer */
+  uInt e;               /* extra bits or operation */
+  uLong b;              /* bit buffer */
+  uInt k;               /* bits in bit buffer */
+  Byte *p;              /* input data pointer */
+  uInt n;               /* bytes available there */
+  Byte *q;              /* output window write pointer */
+  uInt m;               /* bytes to end of window or read pointer */
+  Byte *f;              /* pointer to copy strings from */
+  inflate_codes_statef *c = s->sub.decode.codes;  /* codes state */
+
+  /* copy input/output information to locals (UPDATE macro restores) */
+  LOAD
+
+  /* process input and output based on current state */
+  while (1) switch (c->mode)
+  {             /* waiting for "i:"=input, "o:"=output, "x:"=nothing */
+    case START:         /* x: set up for LEN */
+#ifndef SLOW
+      if (m >= 258 && n >= 10)
+      {
+        UPDATE
+        r = zlib_inflate_fast(c->lbits, c->dbits, c->ltree, c->dtree, s, z);
+        LOAD
+        if (r != Z_OK)
+        {
+          c->mode = r == Z_STREAM_END ? WASH : BADCODE;
+          break;
+        }
+      }
+#endif /* !SLOW */
+      c->sub.code.need = c->lbits;
+      c->sub.code.tree = c->ltree;
+      c->mode = LEN;
+    case LEN:           /* i: get length/literal/eob next */
+      j = c->sub.code.need;
+      NEEDBITS(j)
+      t = c->sub.code.tree + ((uInt)b & zlib_inflate_mask[j]);
+      DUMPBITS(t->bits)
+      e = (uInt)(t->exop);
+      if (e == 0)               /* literal */
+      {
+        c->sub.lit = t->base;
+        c->mode = LIT;
+        break;
+      }
+      if (e & 16)               /* length */
+      {
+        c->sub.copy.get = e & 15;
+        c->len = t->base;
+        c->mode = LENEXT;
+        break;
+      }
+      if ((e & 64) == 0)        /* next table */
+      {
+        c->sub.code.need = e;
+        c->sub.code.tree = t + t->base;
+        break;
+      }
+      if (e & 32)               /* end of block */
+      {
+        c->mode = WASH;
+        break;
+      }
+      c->mode = BADCODE;        /* invalid code */
+      z->msg = (char*)"invalid literal/length code";
+      r = Z_DATA_ERROR;
+      LEAVE
+    case LENEXT:        /* i: getting length extra (have base) */
+      j = c->sub.copy.get;
+      NEEDBITS(j)
+      c->len += (uInt)b & zlib_inflate_mask[j];
+      DUMPBITS(j)
+      c->sub.code.need = c->dbits;
+      c->sub.code.tree = c->dtree;
+      c->mode = DIST;
+    case DIST:          /* i: get distance next */
+      j = c->sub.code.need;
+      NEEDBITS(j)
+      t = c->sub.code.tree + ((uInt)b & zlib_inflate_mask[j]);
+      DUMPBITS(t->bits)
+      e = (uInt)(t->exop);
+      if (e & 16)               /* distance */
+      {
+        c->sub.copy.get = e & 15;
+        c->sub.copy.dist = t->base;
+        c->mode = DISTEXT;
+        break;
+      }
+      if ((e & 64) == 0)        /* next table */
+      {
+        c->sub.code.need = e;
+        c->sub.code.tree = t + t->base;
+        break;
+      }
+      c->mode = BADCODE;        /* invalid code */
+      z->msg = (char*)"invalid distance code";
+      r = Z_DATA_ERROR;
+      LEAVE
+    case DISTEXT:       /* i: getting distance extra */
+      j = c->sub.copy.get;
+      NEEDBITS(j)
+      c->sub.copy.dist += (uInt)b & zlib_inflate_mask[j];
+      DUMPBITS(j)
+      c->mode = COPY;
+    case COPY:          /* o: copying bytes in window, waiting for space */
+      f = q - c->sub.copy.dist;
+      while (f < s->window)             /* modulo window size-"while" instead */
+        f += s->end - s->window;        /* of "if" handles invalid distances */
+      while (c->len)
+      {
+        NEEDOUT
+        OUTBYTE(*f++)
+        if (f == s->end)
+          f = s->window;
+        c->len--;
+      }
+      c->mode = START;
+      break;
+    case LIT:           /* o: got literal, waiting for output space */
+      NEEDOUT
+      OUTBYTE(c->sub.lit)
+      c->mode = START;
+      break;
+    case WASH:          /* o: got eob, possibly more output */
+      if (k > 7)        /* return unused byte, if any */
+      {
+        k -= 8;
+        n++;
+        p--;            /* can always return one */
+      }
+      FLUSH
+      if (s->read != s->write)
+        LEAVE
+      c->mode = END;
+    case END:
+      r = Z_STREAM_END;
+      LEAVE
+    case BADCODE:       /* x: got error */
+      r = Z_DATA_ERROR;
+      LEAVE
+    default:
+      r = Z_STREAM_ERROR;
+      LEAVE
+  }
+#ifdef NEED_DUMMY_RETURN
+  return Z_STREAM_ERROR;  /* Some dumb compilers complain without this */
+#endif
+}
+
+
+void zlib_inflate_codes_free(
+	inflate_codes_statef *c,
+	z_streamp z
+)
+{
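+  /* nothing to do: the codes state lives in the caller-supplied workspace
+     (see zlib_inflate_codes_new() above), so there is nothing to free */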
+}
diff --git a/lib/zlib_inflate/infcodes.h b/lib/zlib_inflate/infcodes.h
new file mode 100644
index 0000000..5cff417
--- /dev/null
+++ b/lib/zlib_inflate/infcodes.h
@@ -0,0 +1,33 @@
+/* infcodes.h -- header to use infcodes.c
+ * Copyright (C) 1995-1998 Mark Adler
+ * For conditions of distribution and use, see copyright notice in zlib.h 
+ */
+
+/* WARNING: this file should *not* be used by applications. It is
+   part of the implementation of the compression library and is
+   subject to change. Applications should only use zlib.h.
+ */
+
+#ifndef _INFCODES_H
+#define _INFCODES_H
+
+#include "infblock.h"
+
+struct inflate_codes_state;
+typedef struct inflate_codes_state inflate_codes_statef;
+
+extern inflate_codes_statef *zlib_inflate_codes_new (
+    uInt, uInt,
+    inflate_huft *, inflate_huft *,
+    z_streamp );
+
+extern int zlib_inflate_codes (
+    inflate_blocks_statef *,
+    z_streamp ,
+    int);
+
+extern void zlib_inflate_codes_free (
+    inflate_codes_statef *,
+    z_streamp );
+
+#endif /* _INFCODES_H */
diff --git a/lib/zlib_inflate/inffast.c b/lib/zlib_inflate/inffast.c
new file mode 100644
index 0000000..0bd7623
--- /dev/null
+++ b/lib/zlib_inflate/inffast.c
@@ -0,0 +1,176 @@
+/* inffast.c -- process literals and length/distance pairs fast
+ * Copyright (C) 1995-1998 Mark Adler
+ * For conditions of distribution and use, see copyright notice in zlib.h 
+ */
+
+#include <linux/zutil.h>
+#include "inftrees.h"
+#include "infblock.h"
+#include "infcodes.h"
+#include "infutil.h"
+#include "inffast.h"
+
+struct inflate_codes_state;
+
+/* simplify the use of the inflate_huft type with some defines */
+#define exop word.what.Exop
+#define bits word.what.Bits
+
+/* macros for bit input with no checking and for returning unused bytes */
+#define GRABBITS(j) {while(k<(j)){b|=((uLong)NEXTBYTE)<<k;k+=8;}}
+#define UNGRAB {c=z->avail_in-n;c=(k>>3)<c?k>>3:c;n+=c;p-=c;k-=c<<3;}
+
+/* Called with number of bytes left to write in window at least 258
+   (the maximum string length) and number of input bytes available
+   at least ten.  The ten bytes are six bytes for the longest length/
+   distance pair plus four bytes for overloading the bit buffer. */
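+/* Rough arithmetic behind those constants, as suggested by the GRABBITS
+   calls below: a literal/length code plus its extra bits needs at most
+   20 bits, a distance code at most 15 bits, and its extra bits at most
+   13 bits, i.e. 48 bits = six bytes in all; GRABBITS always pulls in
+   whole bytes, so up to four further bytes may sit unused in the bit
+   buffer and are given back by UNGRAB on exit. */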
+
+int zlib_inflate_fast(
+	uInt bl,
+	uInt bd,
+	inflate_huft *tl,
+	inflate_huft *td, /* need separate declaration for Borland C++ */
+	inflate_blocks_statef *s,
+	z_streamp z
+)
+{
+  inflate_huft *t;      /* temporary pointer */
+  uInt e;               /* extra bits or operation */
+  uLong b;              /* bit buffer */
+  uInt k;               /* bits in bit buffer */
+  Byte *p;              /* input data pointer */
+  uInt n;               /* bytes available there */
+  Byte *q;              /* output window write pointer */
+  uInt m;               /* bytes to end of window or read pointer */
+  uInt ml;              /* mask for literal/length tree */
+  uInt md;              /* mask for distance tree */
+  uInt c;               /* bytes to copy */
+  uInt d;               /* distance back to copy from */
+  Byte *r;              /* copy source pointer */
+
+  /* load input, output, bit values */
+  LOAD
+
+  /* initialize masks */
+  ml = zlib_inflate_mask[bl];
+  md = zlib_inflate_mask[bd];
+
+  /* do until not enough input or output space for fast loop */
+  do {                          /* assume called with m >= 258 && n >= 10 */
+    /* get literal/length code */
+    GRABBITS(20)                /* max bits for literal/length code */
+    if ((e = (t = tl + ((uInt)b & ml))->exop) == 0)
+    {
+      DUMPBITS(t->bits)
+      *q++ = (Byte)t->base;
+      m--;
+      continue;
+    }
+    do {
+      DUMPBITS(t->bits)
+      if (e & 16)
+      {
+        /* get extra bits for length */
+        e &= 15;
+        c = t->base + ((uInt)b & zlib_inflate_mask[e]);
+        DUMPBITS(e)
+
+        /* decode distance base of block to copy */
+        GRABBITS(15);           /* max bits for distance code */
+        e = (t = td + ((uInt)b & md))->exop;
+        do {
+          DUMPBITS(t->bits)
+          if (e & 16)
+          {
+            /* get extra bits to add to distance base */
+            e &= 15;
+            GRABBITS(e)         /* get extra bits (up to 13) */
+            d = t->base + ((uInt)b & zlib_inflate_mask[e]);
+            DUMPBITS(e)
+
+            /* do the copy */
+            m -= c;
+            r = q - d;
+            if (r < s->window)                  /* wrap if needed */
+            {
+              do {
+                r += s->end - s->window;        /* force pointer in window */
+              } while (r < s->window);          /* covers invalid distances */
+              e = s->end - r;
+              if (c > e)
+              {
+                c -= e;                         /* wrapped copy */
+                do {
+                    *q++ = *r++;
+                } while (--e);
+                r = s->window;
+                do {
+                    *q++ = *r++;
+                } while (--c);
+              }
+              else                              /* normal copy */
+              {
+                *q++ = *r++;  c--;
+                *q++ = *r++;  c--;
+                do {
+                    *q++ = *r++;
+                } while (--c);
+              }
+            }
+            else                                /* normal copy */
+            {
+              *q++ = *r++;  c--;
+              *q++ = *r++;  c--;
+              do {
+                *q++ = *r++;
+              } while (--c);
+            }
+            break;
+          }
+          else if ((e & 64) == 0)
+          {
+            t += t->base;
+            e = (t += ((uInt)b & zlib_inflate_mask[e]))->exop;
+          }
+          else
+          {
+            z->msg = (char*)"invalid distance code";
+            UNGRAB
+            UPDATE
+            return Z_DATA_ERROR;
+          }
+        } while (1);
+        break;
+      }
+      if ((e & 64) == 0)
+      {
+        t += t->base;
+        if ((e = (t += ((uInt)b & zlib_inflate_mask[e]))->exop) == 0)
+        {
+          DUMPBITS(t->bits)
+          *q++ = (Byte)t->base;
+          m--;
+          break;
+        }
+      }
+      else if (e & 32)
+      {
+        UNGRAB
+        UPDATE
+        return Z_STREAM_END;
+      }
+      else
+      {
+        z->msg = (char*)"invalid literal/length code";
+        UNGRAB
+        UPDATE
+        return Z_DATA_ERROR;
+      }
+    } while (1);
+  } while (m >= 258 && n >= 10);
+
+  /* not enough input or output--restore pointers and return */
+  UNGRAB
+  UPDATE
+  return Z_OK;
+}
diff --git a/lib/zlib_inflate/inffast.h b/lib/zlib_inflate/inffast.h
new file mode 100644
index 0000000..fc720f0
--- /dev/null
+++ b/lib/zlib_inflate/inffast.h
@@ -0,0 +1,17 @@
+/* inffast.h -- header to use inffast.c
+ * Copyright (C) 1995-1998 Mark Adler
+ * For conditions of distribution and use, see copyright notice in zlib.h 
+ */
+
+/* WARNING: this file should *not* be used by applications. It is
+   part of the implementation of the compression library and is
+   subject to change. Applications should only use zlib.h.
+ */
+
+extern int zlib_inflate_fast (
+    uInt,
+    uInt,
+    inflate_huft *,
+    inflate_huft *,
+    inflate_blocks_statef *,
+    z_streamp );
diff --git a/lib/zlib_inflate/inflate.c b/lib/zlib_inflate/inflate.c
new file mode 100644
index 0000000..3d94cb9
--- /dev/null
+++ b/lib/zlib_inflate/inflate.c
@@ -0,0 +1,248 @@
+/* inflate.c -- zlib interface to inflate modules
+ * Copyright (C) 1995-1998 Mark Adler
+ * For conditions of distribution and use, see copyright notice in zlib.h 
+ */
+
+#include <linux/module.h>
+#include <linux/zutil.h>
+#include "infblock.h"
+#include "infutil.h"
+
+int zlib_inflate_workspacesize(void)
+{
+  return sizeof(struct inflate_workspace);
+}
+
+
+int zlib_inflateReset(
+	z_streamp z
+)
+{
+  if (z == NULL || z->state == NULL || z->workspace == NULL)
+    return Z_STREAM_ERROR;
+  z->total_in = z->total_out = 0;
+  z->msg = NULL;
+  z->state->mode = z->state->nowrap ? BLOCKS : METHOD;
+  zlib_inflate_blocks_reset(z->state->blocks, z, NULL);
+  return Z_OK;
+}
+
+
+int zlib_inflateEnd(
+	z_streamp z
+)
+{
+  if (z == NULL || z->state == NULL || z->workspace == NULL)
+    return Z_STREAM_ERROR;
+  if (z->state->blocks != NULL)
+    zlib_inflate_blocks_free(z->state->blocks, z);
+  z->state = NULL;
+  return Z_OK;
+}
+
+
+int zlib_inflateInit2_(
+	z_streamp z,
+	int w,
+	const char *version,
+	int stream_size
+)
+{
+  if (version == NULL || version[0] != ZLIB_VERSION[0] ||
+      stream_size != sizeof(z_stream) || z->workspace == NULL)
+      return Z_VERSION_ERROR;
+
+  /* initialize state */
+  z->msg = NULL;
+  z->state = &WS(z)->internal_state;
+  z->state->blocks = NULL;
+
+  /* handle undocumented nowrap option (no zlib header or check) */
+  z->state->nowrap = 0;
+  if (w < 0)
+  {
+    w = - w;
+    z->state->nowrap = 1;
+  }
+
+  /* set window size */
+  if (w < 8 || w > 15)
+  {
+    zlib_inflateEnd(z);
+    return Z_STREAM_ERROR;
+  }
+  z->state->wbits = (uInt)w;
+
+  /* create inflate_blocks state */
+  if ((z->state->blocks =
+      zlib_inflate_blocks_new(z, z->state->nowrap ? NULL : zlib_adler32, (uInt)1 << w))
+      == NULL)
+  {
+    zlib_inflateEnd(z);
+    return Z_MEM_ERROR;
+  }
+
+  /* reset state */
+  zlib_inflateReset(z);
+  return Z_OK;
+}
+
+
+/*
+ * At the end of a Deflate-compressed PPP packet, we expect to have seen
+ * a `stored' block type value but not the (zero) length bytes.
+ */
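+/* In terms of the block decoder: the empty stored block emitted by the
+   flush has had its length bytes stripped by PPP, so the decoder is left
+   waiting in LENS ("get lengths for stored", see infutil.h); forcing it
+   back to TYPE lets the next packet start with a fresh block header. */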
+static int zlib_inflate_packet_flush(inflate_blocks_statef *s)
+{
+    if (s->mode != LENS)
+	return Z_DATA_ERROR;
+    s->mode = TYPE;
+    return Z_OK;
+}
+
+
+int zlib_inflateInit_(
+	z_streamp z,
+	const char *version,
+	int stream_size
+)
+{
+  return zlib_inflateInit2_(z, DEF_WBITS, version, stream_size);
+}
+
+#undef NEEDBYTE
+#undef NEXTBYTE
+#define NEEDBYTE {if(z->avail_in==0)goto empty;r=trv;}
+#define NEXTBYTE (z->avail_in--,z->total_in++,*z->next_in++)
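+/* These versions replace the block-level macros from infutil.h for the
+   zlib header/trailer bytes handled by zlib_inflate() below: bytes are
+   consumed directly from the stream (updating avail_in/total_in), and
+   running out of input jumps to the "empty" label at the end of
+   zlib_inflate(). */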
+
+int zlib_inflate(
+	z_streamp z,
+	int f
+)
+{
+  int r, trv;
+  uInt b;
+
+  if (z == NULL || z->state == NULL || z->next_in == NULL)
+    return Z_STREAM_ERROR;
+  trv = f == Z_FINISH ? Z_BUF_ERROR : Z_OK;
+  r = Z_BUF_ERROR;
+  while (1) switch (z->state->mode)
+  {
+    case METHOD:
+      NEEDBYTE
+      if (((z->state->sub.method = NEXTBYTE) & 0xf) != Z_DEFLATED)
+      {
+        z->state->mode = I_BAD;
+        z->msg = (char*)"unknown compression method";
+        z->state->sub.marker = 5;       /* can't try inflateSync */
+        break;
+      }
+      if ((z->state->sub.method >> 4) + 8 > z->state->wbits)
+      {
+        z->state->mode = I_BAD;
+        z->msg = (char*)"invalid window size";
+        z->state->sub.marker = 5;       /* can't try inflateSync */
+        break;
+      }
+      z->state->mode = FLAG;
+    case FLAG:
+      NEEDBYTE
+      b = NEXTBYTE;
+      if (((z->state->sub.method << 8) + b) % 31)
+      {
+        z->state->mode = I_BAD;
+        z->msg = (char*)"incorrect header check";
+        z->state->sub.marker = 5;       /* can't try inflateSync */
+        break;
+      }
+      if (!(b & PRESET_DICT))
+      {
+        z->state->mode = BLOCKS;
+        break;
+      }
+      z->state->mode = DICT4;
+    case DICT4:
+      NEEDBYTE
+      z->state->sub.check.need = (uLong)NEXTBYTE << 24;
+      z->state->mode = DICT3;
+    case DICT3:
+      NEEDBYTE
+      z->state->sub.check.need += (uLong)NEXTBYTE << 16;
+      z->state->mode = DICT2;
+    case DICT2:
+      NEEDBYTE
+      z->state->sub.check.need += (uLong)NEXTBYTE << 8;
+      z->state->mode = DICT1;
+    case DICT1:
+      NEEDBYTE
+      z->state->sub.check.need += (uLong)NEXTBYTE;
+      z->adler = z->state->sub.check.need;
+      z->state->mode = DICT0;
+      return Z_NEED_DICT;
+    case DICT0:
+      z->state->mode = I_BAD;
+      z->msg = (char*)"need dictionary";
+      z->state->sub.marker = 0;       /* can try inflateSync */
+      return Z_STREAM_ERROR;
+    case BLOCKS:
+      r = zlib_inflate_blocks(z->state->blocks, z, r);
+      if (f == Z_PACKET_FLUSH && z->avail_in == 0 && z->avail_out != 0)
+	  r = zlib_inflate_packet_flush(z->state->blocks);
+      if (r == Z_DATA_ERROR)
+      {
+        z->state->mode = I_BAD;
+        z->state->sub.marker = 0;       /* can try inflateSync */
+        break;
+      }
+      if (r == Z_OK)
+        r = trv;
+      if (r != Z_STREAM_END)
+        return r;
+      r = trv;
+      zlib_inflate_blocks_reset(z->state->blocks, z, &z->state->sub.check.was);
+      if (z->state->nowrap)
+      {
+        z->state->mode = I_DONE;
+        break;
+      }
+      z->state->mode = CHECK4;
+    case CHECK4:
+      NEEDBYTE
+      z->state->sub.check.need = (uLong)NEXTBYTE << 24;
+      z->state->mode = CHECK3;
+    case CHECK3:
+      NEEDBYTE
+      z->state->sub.check.need += (uLong)NEXTBYTE << 16;
+      z->state->mode = CHECK2;
+    case CHECK2:
+      NEEDBYTE
+      z->state->sub.check.need += (uLong)NEXTBYTE << 8;
+      z->state->mode = CHECK1;
+    case CHECK1:
+      NEEDBYTE
+      z->state->sub.check.need += (uLong)NEXTBYTE;
+
+      if (z->state->sub.check.was != z->state->sub.check.need)
+      {
+        z->state->mode = I_BAD;
+        z->msg = (char*)"incorrect data check";
+        z->state->sub.marker = 5;       /* can't try inflateSync */
+        break;
+      }
+      z->state->mode = I_DONE;
+    case I_DONE:
+      return Z_STREAM_END;
+    case I_BAD:
+      return Z_DATA_ERROR;
+    default:
+      return Z_STREAM_ERROR;
+  }
+ empty:
+  if (f != Z_PACKET_FLUSH)
+    return r;
+  z->state->mode = I_BAD;
+  z->msg = (char *)"need more for packet flush";
+  z->state->sub.marker = 0;       /* can try inflateSync */
+  return Z_DATA_ERROR;
+}
diff --git a/lib/zlib_inflate/inflate_syms.c b/lib/zlib_inflate/inflate_syms.c
new file mode 100644
index 0000000..aa1b081
--- /dev/null
+++ b/lib/zlib_inflate/inflate_syms.c
@@ -0,0 +1,22 @@
+/*
+ * linux/lib/zlib_inflate/inflate_syms.c
+ *
+ * Exported symbols for the inflate functionality.
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/init.h>
+
+#include <linux/zlib.h>
+
+EXPORT_SYMBOL(zlib_inflate_workspacesize);
+EXPORT_SYMBOL(zlib_inflate);
+EXPORT_SYMBOL(zlib_inflateInit_);
+EXPORT_SYMBOL(zlib_inflateInit2_);
+EXPORT_SYMBOL(zlib_inflateEnd);
+EXPORT_SYMBOL(zlib_inflateSync);
+EXPORT_SYMBOL(zlib_inflateReset);
+EXPORT_SYMBOL(zlib_inflateSyncPoint);
+EXPORT_SYMBOL(zlib_inflateIncomp); 
+MODULE_LICENSE("GPL");
diff --git a/lib/zlib_inflate/inflate_sync.c b/lib/zlib_inflate/inflate_sync.c
new file mode 100644
index 0000000..e07bdb2
--- /dev/null
+++ b/lib/zlib_inflate/inflate_sync.c
@@ -0,0 +1,148 @@
+/* inflate_sync.c -- zlib inflateSync and incompressible-data support
+ * Copyright (C) 1995-1998 Mark Adler
+ * For conditions of distribution and use, see copyright notice in zlib.h
+ */
+
+#include <linux/zutil.h>
+#include "infblock.h"
+#include "infutil.h"
+
+int zlib_inflateSync(
+	z_streamp z
+)
+{
+  uInt n;       /* number of bytes to look at */
+  Byte *p;      /* pointer to bytes */
+  uInt m;       /* number of marker bytes found in a row */
+  uLong r, w;   /* temporaries to save total_in and total_out */
+
+  /* set up */
+  if (z == NULL || z->state == NULL)
+    return Z_STREAM_ERROR;
+  if (z->state->mode != I_BAD)
+  {
+    z->state->mode = I_BAD;
+    z->state->sub.marker = 0;
+  }
+  if ((n = z->avail_in) == 0)
+    return Z_BUF_ERROR;
+  p = z->next_in;
+  m = z->state->sub.marker;
+
+  /* search */
+  while (n && m < 4)
+  {
+    static const Byte mark[4] = {0, 0, 0xff, 0xff};
+    if (*p == mark[m])
+      m++;
+    else if (*p)
+      m = 0;
+    else
+      m = 4 - m;
+    p++, n--;
+  }
+
+  /* restore */
+  z->total_in += p - z->next_in;
+  z->next_in = p;
+  z->avail_in = n;
+  z->state->sub.marker = m;
+
+  /* return no joy or set up to restart on a new block */
+  if (m != 4)
+    return Z_DATA_ERROR;
+  r = z->total_in;  w = z->total_out;
+  zlib_inflateReset(z);
+  z->total_in = r;  z->total_out = w;
+  z->state->mode = BLOCKS;
+  return Z_OK;
+}
+
+
+/* Returns true if inflate is currently at the end of a block generated
+ * by Z_SYNC_FLUSH or Z_FULL_FLUSH. This function is used by one PPP
+ * implementation to provide an additional safety check. PPP uses Z_SYNC_FLUSH
+ * but removes the length bytes of the resulting empty stored block. When
+ * decompressing, PPP checks that at the end of input packet, inflate is
+ * waiting for these length bytes.
+ */
+int zlib_inflateSyncPoint(
+	z_streamp z
+)
+{
+  if (z == NULL || z->state == NULL || z->state->blocks == NULL)
+    return Z_STREAM_ERROR;
+  return zlib_inflate_blocks_sync_point(z->state->blocks);
+}
+
+/*
+ * This subroutine adds the data at next_in/avail_in to the output history
+ * without performing any output.  The output buffer must be "caught up";
+ * i.e. no pending output (hence s->read equals s->write), and the state must
+ * be BLOCKS (i.e. we should be willing to see the start of a series of
+ * BLOCKS).  On exit, the output will also be caught up, and the checksum
+ * will have been updated if need be.
+ */
+static int zlib_inflate_addhistory(inflate_blocks_statef *s,
+				      z_stream              *z)
+{
+    uLong b;              /* bit buffer */  /* NOT USED HERE */
+    uInt k;               /* bits in bit buffer */ /* NOT USED HERE */
+    uInt t;               /* temporary storage */
+    Byte *p;              /* input data pointer */
+    uInt n;               /* bytes available there */
+    Byte *q;              /* output window write pointer */
+    uInt m;               /* bytes to end of window or read pointer */
+
+    if (s->read != s->write)
+	return Z_STREAM_ERROR;
+    if (s->mode != TYPE)
+	return Z_DATA_ERROR;
+
+    /* we're ready to rock */
+    LOAD
+    /* while there is input ready, copy to output buffer, moving
+     * pointers as needed.
+     */
+    while (n) {
+	t = n;  /* how many to do */
+	/* is there room until end of buffer? */
+	if (t > m) t = m;
+	/* update check information */
+	if (s->checkfn != NULL)
+	    s->check = (*s->checkfn)(s->check, q, t);
+	memcpy(q, p, t);
+	q += t;
+	p += t;
+	n -= t;
+	z->total_out += t;
+	s->read = q;    /* drag read pointer forward */
+/*      WWRAP  */ 	/* expand WWRAP macro by hand to handle s->read */
+	if (q == s->end) {
+	    s->read = q = s->window;
+	    m = WAVAIL;
+	}
+    }
+    UPDATE
+    return Z_OK;
+}
+
+
+/*
+ * Exported wrapper around zlib_inflate_addhistory() above: add the
+ * incompressible data at next_in/avail_in to the output history without
+ * producing any output (see that function's comment for the preconditions).
+ */
+
+int zlib_inflateIncomp(
+	z_stream *z
+
+)
+{
+    if (z->state->mode != BLOCKS)
+	return Z_DATA_ERROR;
+    return zlib_inflate_addhistory(z->state->blocks, z);
+}
diff --git a/lib/zlib_inflate/inftrees.c b/lib/zlib_inflate/inftrees.c
new file mode 100644
index 0000000..874950e
--- /dev/null
+++ b/lib/zlib_inflate/inftrees.c
@@ -0,0 +1,412 @@
+/* inftrees.c -- generate Huffman trees for efficient decoding
+ * Copyright (C) 1995-1998 Mark Adler
+ * For conditions of distribution and use, see copyright notice in zlib.h 
+ */
+
+#include <linux/zutil.h>
+#include "inftrees.h"
+#include "infutil.h"
+
+static const char inflate_copyright[] __attribute_used__ =
+   " inflate 1.1.3 Copyright 1995-1998 Mark Adler ";
+/*
+  If you use the zlib library in a product, an acknowledgment is welcome
+  in the documentation of your product. If for some reason you cannot
+  include such an acknowledgment, I would appreciate that you keep this
+  copyright string in the executable of your product.
+ */
+struct internal_state;
+
+/* simplify the use of the inflate_huft type with some defines */
+#define exop word.what.Exop
+#define bits word.what.Bits
+
+
+static int huft_build (
+    uInt *,             /* code lengths in bits */
+    uInt,               /* number of codes */
+    uInt,               /* number of "simple" codes */
+    const uInt *,       /* list of base values for non-simple codes */
+    const uInt *,       /* list of extra bits for non-simple codes */
+    inflate_huft **,    /* result: starting table */
+    uInt *,             /* maximum lookup bits (returns actual) */
+    inflate_huft *,     /* space for trees */
+    uInt *,             /* hufts used in space */
+    uInt * );           /* space for values */
+
+/* Tables for deflate from PKZIP's appnote.txt. */
+static const uInt cplens[31] = { /* Copy lengths for literal codes 257..285 */
+        3, 4, 5, 6, 7, 8, 9, 10, 11, 13, 15, 17, 19, 23, 27, 31,
+        35, 43, 51, 59, 67, 83, 99, 115, 131, 163, 195, 227, 258, 0, 0};
+        /* see note #13 in PKZIP's appnote.txt about the maximum length 258 */
+static const uInt cplext[31] = { /* Extra bits for literal codes 257..285 */
+        0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 2, 2, 2, 2,
+        3, 3, 3, 3, 4, 4, 4, 4, 5, 5, 5, 5, 0, 112, 112}; /* 112==invalid */
+static const uInt cpdist[30] = { /* Copy offsets for distance codes 0..29 */
+        1, 2, 3, 4, 5, 7, 9, 13, 17, 25, 33, 49, 65, 97, 129, 193,
+        257, 385, 513, 769, 1025, 1537, 2049, 3073, 4097, 6145,
+        8193, 12289, 16385, 24577};
+static const uInt cpdext[30] = { /* Extra bits for distance codes */
+        0, 0, 0, 0, 1, 1, 2, 2, 3, 3, 4, 4, 5, 5, 6, 6,
+        7, 7, 8, 8, 9, 9, 10, 10, 11, 11,
+        12, 12, 13, 13};
+
+/*
+   Huffman code decoding is performed using a multi-level table lookup.
+   The fastest way to decode is to simply build a lookup table whose
+   size is determined by the longest code.  However, the time it takes
+   to build this table can also be a factor if the data being decoded
+   is not very long.  The most common codes are necessarily the
+   shortest codes, so those codes dominate the decoding time, and hence
+   the speed.  The idea is you can have a shorter table that decodes the
+   shorter, more probable codes, and then point to subsidiary tables for
+   the longer codes.  The time it costs to decode the longer codes is
+   then traded against the time it takes to make longer tables.
+
+   The results of this trade are in the variables lbits and dbits
+   below.  lbits is the number of bits the first level table for literal/
+   length codes can decode in one step, and dbits is the same thing for
+   the distance codes.  Subsequent tables are also less than or equal to
+   those sizes.  These values may be adjusted either when all of the
+   codes are shorter than that, in which case the longest code length in
+   bits is used, or when the shortest code is *longer* than the requested
+   table size, in which case the length of the shortest code in bits is
+   used.
+
+   There are two different values for the two tables, since they code a
+   different number of possibilities each.  The literal/length table
+   codes 286 possible values, or in a flat code, a little over eight
+   bits.  The distance table codes 30 possible values, or a little less
+   than five bits, flat.  The optimum values for speed end up being
+   about one bit more than those, so lbits is 8+1 and dbits is 5+1.
+   The optimum values may differ though from machine to machine, and
+   possibly even between compilers.  Your mileage may vary.
+ */
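+/* Illustration, using the lbits value of 9 mentioned above: the root
+   literal/length table has 1 << 9 = 512 entries, so any code of nine or
+   fewer bits decodes with a single masked lookup.  For a longer code the
+   entry instead holds, in exop, the number of following bits to use and,
+   in base, the offset (relative to that entry) of the sub-table those
+   bits index; this is how huft_build() below chains its tables. */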
+
+
+/* If BMAX needs to be larger than 16, then h and x[] should be uLong. */
+#define BMAX 15         /* maximum bit length of any code */
+
+static int huft_build(
+	uInt *b,               /* code lengths in bits (all assumed <= BMAX) */
+	uInt n,                /* number of codes (assumed <= 288) */
+	uInt s,                /* number of simple-valued codes (0..s-1) */
+	const uInt *d,         /* list of base values for non-simple codes */
+	const uInt *e,         /* list of extra bits for non-simple codes */
+	inflate_huft **t,      /* result: starting table */
+	uInt *m,               /* maximum lookup bits, returns actual */
+	inflate_huft *hp,      /* space for trees */
+	uInt *hn,              /* hufts used in space */
+	uInt *v                /* working area: values in order of bit length */
+)
+/* Given a list of code lengths and a maximum table size, make a set of
+   tables to decode that set of codes.  Return Z_OK on success, Z_BUF_ERROR
+   if the given code set is incomplete (the tables are still built in this
+   case), Z_DATA_ERROR if the input is invalid (an over-subscribed set of
+   lengths), or Z_MEM_ERROR if not enough memory. */
+{
+
+  uInt a;                       /* counter for codes of length k */
+  uInt c[BMAX+1];               /* bit length count table */
+  uInt f;                       /* i repeats in table every f entries */
+  int g;                        /* maximum code length */
+  int h;                        /* table level */
+  register uInt i;              /* counter, current code */
+  register uInt j;              /* counter */
+  register int k;               /* number of bits in current code */
+  int l;                        /* bits per table (returned in m) */
+  uInt mask;                    /* (1 << w) - 1, to avoid cc -O bug on HP */
+  register uInt *p;             /* pointer into c[], b[], or v[] */
+  inflate_huft *q;              /* points to current table */
+  struct inflate_huft_s r;      /* table entry for structure assignment */
+  inflate_huft *u[BMAX];        /* table stack */
+  register int w;               /* bits before this table == (l * h) */
+  uInt x[BMAX+1];               /* bit offsets, then code stack */
+  uInt *xp;                     /* pointer into x */
+  int y;                        /* number of dummy codes added */
+  uInt z;                       /* number of entries in current table */
+
+
+  /* Generate counts for each bit length */
+  p = c;
+#define C0 *p++ = 0;
+#define C2 C0 C0 C0 C0
+#define C4 C2 C2 C2 C2
+  C4                            /* clear c[]--assume BMAX+1 is 16 */
+  p = b;  i = n;
+  do {
+    c[*p++]++;                  /* assume all entries <= BMAX */
+  } while (--i);
+  if (c[0] == n)                /* null input--all zero length codes */
+  {
+    *t = NULL;
+    *m = 0;
+    return Z_OK;
+  }
+
+
+  /* Find minimum and maximum length, bound *m by those */
+  l = *m;
+  for (j = 1; j <= BMAX; j++)
+    if (c[j])
+      break;
+  k = j;                        /* minimum code length */
+  if ((uInt)l < j)
+    l = j;
+  for (i = BMAX; i; i--)
+    if (c[i])
+      break;
+  g = i;                        /* maximum code length */
+  if ((uInt)l > i)
+    l = i;
+  *m = l;
+
+
+  /* Adjust last length count to fill out codes, if needed */
+  for (y = 1 << j; j < i; j++, y <<= 1)
+    if ((y -= c[j]) < 0)
+      return Z_DATA_ERROR;
+  if ((y -= c[i]) < 0)
+    return Z_DATA_ERROR;
+  c[i] += y;
+
+
+  /* Generate starting offsets into the value table for each length */
+  x[1] = j = 0;
+  p = c + 1;  xp = x + 2;
+  while (--i) {                 /* note that i == g from above */
+    *xp++ = (j += *p++);
+  }
+
+
+  /* Make a table of values in order of bit lengths */
+  p = b;  i = 0;
+  do {
+    if ((j = *p++) != 0)
+      v[x[j]++] = i;
+  } while (++i < n);
+  n = x[g];                     /* set n to length of v */
+
+
+  /* Generate the Huffman codes and for each, make the table entries */
+  x[0] = i = 0;                 /* first Huffman code is zero */
+  p = v;                        /* grab values in bit order */
+  h = -1;                       /* no tables yet--level -1 */
+  w = -l;                       /* bits decoded == (l * h) */
+  u[0] = NULL;                  /* just to keep compilers happy */
+  q = NULL;                     /* ditto */
+  z = 0;                        /* ditto */
+
+  /* go through the bit lengths (k already is bits in shortest code) */
+  for (; k <= g; k++)
+  {
+    a = c[k];
+    while (a--)
+    {
+      /* here i is the Huffman code of length k bits for value *p */
+      /* make tables up to required level */
+      while (k > w + l)
+      {
+        h++;
+        w += l;                 /* previous table always l bits */
+
+        /* compute minimum size table less than or equal to l bits */
+        z = g - w;
+        z = z > (uInt)l ? l : z;        /* table size upper limit */
+        if ((f = 1 << (j = k - w)) > a + 1)     /* try a k-w bit table */
+        {                       /* too few codes for k-w bit table */
+          f -= a + 1;           /* deduct codes from patterns left */
+          xp = c + k;
+          if (j < z)
+            while (++j < z)     /* try smaller tables up to z bits */
+            {
+              if ((f <<= 1) <= *++xp)
+                break;          /* enough codes to use up j bits */
+              f -= *xp;         /* else deduct codes from patterns */
+            }
+        }
+        z = 1 << j;             /* table entries for j-bit table */
+
+        /* allocate new table */
+        if (*hn + z > MANY)     /* (note: doesn't matter for fixed) */
+          return Z_DATA_ERROR;  /* overflow of MANY */
+        u[h] = q = hp + *hn;
+        *hn += z;
+
+        /* connect to last table, if there is one */
+        if (h)
+        {
+          x[h] = i;             /* save pattern for backing up */
+          r.bits = (Byte)l;     /* bits to dump before this table */
+          r.exop = (Byte)j;     /* bits in this table */
+          j = i >> (w - l);
+          r.base = (uInt)(q - u[h-1] - j);   /* offset to this table */
+          u[h-1][j] = r;        /* connect to last table */
+        }
+        else
+          *t = q;               /* first table is returned result */
+      }
+
+      /* set up table entry in r */
+      r.bits = (Byte)(k - w);
+      if (p >= v + n)
+        r.exop = 128 + 64;      /* out of values--invalid code */
+      else if (*p < s)
+      {
+        r.exop = (Byte)(*p < 256 ? 0 : 32 + 64);     /* 256 is end-of-block */
+        r.base = *p++;          /* simple code is just the value */
+      }
+      else
+      {
+        r.exop = (Byte)(e[*p - s] + 16 + 64);/* non-simple--look up in lists */
+        r.base = d[*p++ - s];
+      }
+
+      /* fill code-like entries with r */
+      f = 1 << (k - w);
+      for (j = i >> w; j < z; j += f)
+        q[j] = r;
+
+      /* backwards increment the k-bit code i */
+      for (j = 1 << (k - 1); i & j; j >>= 1)
+        i ^= j;
+      i ^= j;
+
+      /* backup over finished tables */
+      mask = (1 << w) - 1;      /* needed on HP, cc -O bug */
+      while ((i & mask) != x[h])
+      {
+        h--;                    /* don't need to update q */
+        w -= l;
+        mask = (1 << w) - 1;
+      }
+    }
+  }
+
+
+  /* Return Z_BUF_ERROR if we were given an incomplete table */
+  return y != 0 && g != 1 ? Z_BUF_ERROR : Z_OK;
+}
+
+
+int zlib_inflate_trees_bits(
+	uInt *c,                /* 19 code lengths */
+	uInt *bb,               /* bits tree desired/actual depth */
+	inflate_huft **tb,      /* bits tree result */
+	inflate_huft *hp,       /* space for trees */
+	z_streamp z             /* for messages */
+)
+{
+  int r;
+  uInt hn = 0;          /* hufts used in space */
+  uInt *v;              /* work area for huft_build */
+  
+  v = WS(z)->tree_work_area_1;
+  r = huft_build(c, 19, 19, NULL, NULL, tb, bb, hp, &hn, v);
+  if (r == Z_DATA_ERROR)
+    z->msg = (char*)"oversubscribed dynamic bit lengths tree";
+  else if (r == Z_BUF_ERROR || *bb == 0)
+  {
+    z->msg = (char*)"incomplete dynamic bit lengths tree";
+    r = Z_DATA_ERROR;
+  }
+  return r;
+}
+
+int zlib_inflate_trees_dynamic(
+	uInt nl,                /* number of literal/length codes */
+	uInt nd,                /* number of distance codes */
+	uInt *c,                /* that many (total) code lengths */
+	uInt *bl,               /* literal desired/actual bit depth */
+	uInt *bd,               /* distance desired/actual bit depth */
+	inflate_huft **tl,      /* literal/length tree result */
+	inflate_huft **td,      /* distance tree result */
+	inflate_huft *hp,       /* space for trees */
+	z_streamp z             /* for messages */
+)
+{
+  int r;
+  uInt hn = 0;          /* hufts used in space */
+  uInt *v;              /* work area for huft_build */
+
+  /* allocate work area */
+  v = WS(z)->tree_work_area_2;
+
+  /* build literal/length tree */
+  r = huft_build(c, nl, 257, cplens, cplext, tl, bl, hp, &hn, v);
+  if (r != Z_OK || *bl == 0)
+  {
+    if (r == Z_DATA_ERROR)
+      z->msg = (char*)"oversubscribed literal/length tree";
+    else if (r != Z_MEM_ERROR)
+    {
+      z->msg = (char*)"incomplete literal/length tree";
+      r = Z_DATA_ERROR;
+    }
+    return r;
+  }
+
+  /* build distance tree */
+  r = huft_build(c + nl, nd, 0, cpdist, cpdext, td, bd, hp, &hn, v);
+  if (r != Z_OK || (*bd == 0 && nl > 257))
+  {
+    if (r == Z_DATA_ERROR)
+      z->msg = (char*)"oversubscribed distance tree";
+    else if (r == Z_BUF_ERROR) {
+#ifdef PKZIP_BUG_WORKAROUND
+      r = Z_OK;
+    }
+#else
+      z->msg = (char*)"incomplete distance tree";
+      r = Z_DATA_ERROR;
+    }
+    else if (r != Z_MEM_ERROR)
+    {
+      z->msg = (char*)"empty distance tree with lengths";
+      r = Z_DATA_ERROR;
+    }
+    return r;
+#endif
+  }
+
+  /* done */
+  return Z_OK;
+}
+
+
+int zlib_inflate_trees_fixed(
+	uInt *bl,                /* literal desired/actual bit depth */
+	uInt *bd,                /* distance desired/actual bit depth */
+	inflate_huft **tl,       /* literal/length tree result */
+	inflate_huft **td,       /* distance tree result */
+	inflate_huft *hp,       /* space for trees */
+	z_streamp z              /* for memory allocation */
+)
+{
+  int i;                /* temporary variable */
+  unsigned l[288];      /* length list for huft_build */
+  uInt *v;              /* work area for huft_build */
+
+  /* set up literal table */
+  for (i = 0; i < 144; i++)
+    l[i] = 8;
+  for (; i < 256; i++)
+    l[i] = 9;
+  for (; i < 280; i++)
+    l[i] = 7;
+  for (; i < 288; i++)          /* make a complete, but wrong code set */
+    l[i] = 8;
+  *bl = 9;
+  v = WS(z)->tree_work_area_1;
+  if ((i = huft_build(l, 288, 257, cplens, cplext, tl, bl, hp,  &i, v)) != 0)
+    return i;
+
+  /* set up distance table */
+  for (i = 0; i < 30; i++)      /* make an incomplete code set */
+    l[i] = 5;
+  *bd = 5;
+  if ((i = huft_build(l, 30, 0, cpdist, cpdext, td, bd, hp, &i, v)) > 1)
+    return i;
+
+  return Z_OK;
+}
diff --git a/lib/zlib_inflate/inftrees.h b/lib/zlib_inflate/inftrees.h
new file mode 100644
index 0000000..e37705a
--- /dev/null
+++ b/lib/zlib_inflate/inftrees.h
@@ -0,0 +1,64 @@
+/* inftrees.h -- header to use inftrees.c
+ * Copyright (C) 1995-1998 Mark Adler
+ * For conditions of distribution and use, see copyright notice in zlib.h 
+ */
+
+/* WARNING: this file should *not* be used by applications. It is
+   part of the implementation of the compression library and is
+   subject to change. Applications should only use zlib.h.
+ */
+
+/* Huffman code lookup table entry--this entry is four bytes for machines
+   that have 16-bit pointers (e.g. PC's in the small or medium model). */
+
+#ifndef _INFTREES_H
+#define _INFTREES_H
+
+typedef struct inflate_huft_s inflate_huft;
+
+struct inflate_huft_s {
+  union {
+    struct {
+      Byte Exop;        /* number of extra bits or operation */
+      Byte Bits;        /* number of bits in this code or subcode */
+    } what;
+    uInt pad;           /* pad structure to a power of 2 (4 bytes for */
+  } word;               /*  16-bit, 8 bytes for 32-bit int's) */
+  uInt base;            /* literal, length base, distance base,
+                           or table offset */
+};
+
+/* Maximum size of dynamic tree.  The maximum found in a long but non-
+   exhaustive search was 1004 huft structures (850 for length/literals
+   and 154 for distances, the latter actually the result of an
+   exhaustive search).  The actual maximum is not known, but the
+   value below is more than safe. */
+#define MANY 1440
+
+extern int zlib_inflate_trees_bits (
+    uInt *,                     /* 19 code lengths */
+    uInt *,                     /* bits tree desired/actual depth */
+    inflate_huft **,            /* bits tree result */
+    inflate_huft *,             /* space for trees */
+    z_streamp);                 /* for messages */
+
+extern int zlib_inflate_trees_dynamic (
+    uInt,                       /* number of literal/length codes */
+    uInt,                       /* number of distance codes */
+    uInt *,                     /* that many (total) code lengths */
+    uInt *,                     /* literal desired/actual bit depth */
+    uInt *,                     /* distance desired/actual bit depth */
+    inflate_huft **,            /* literal/length tree result */
+    inflate_huft **,            /* distance tree result */
+    inflate_huft *,             /* space for trees */
+    z_streamp);                 /* for messages */
+
+extern int zlib_inflate_trees_fixed (
+    uInt *,                     /* literal desired/actual bit depth */
+    uInt *,                     /* distance desired/actual bit depth */
+    inflate_huft **,            /* literal/length tree result */
+    inflate_huft **,            /* distance tree result */
+    inflate_huft *,             /* space for trees */
+    z_streamp);                 /* for memory allocation */
+
+#endif /* _INFTREES_H */
diff --git a/lib/zlib_inflate/infutil.c b/lib/zlib_inflate/infutil.c
new file mode 100644
index 0000000..00202b3
--- /dev/null
+++ b/lib/zlib_inflate/infutil.c
@@ -0,0 +1,88 @@
+/* infutil.c -- data and routines common to blocks and codes
+ * Copyright (C) 1995-1998 Mark Adler
+ * For conditions of distribution and use, see copyright notice in zlib.h 
+ */
+
+#include <linux/zutil.h>
+#include "infblock.h"
+#include "inftrees.h"
+#include "infcodes.h"
+#include "infutil.h"
+
+struct inflate_codes_state;
+
+/* And'ing with mask[n] masks the lower n bits */
+uInt zlib_inflate_mask[17] = {
+    0x0000,
+    0x0001, 0x0003, 0x0007, 0x000f, 0x001f, 0x003f, 0x007f, 0x00ff,
+    0x01ff, 0x03ff, 0x07ff, 0x0fff, 0x1fff, 0x3fff, 0x7fff, 0xffff
+};
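+/* e.g. "(uInt)b & zlib_inflate_mask[j]" in infcodes.c and inffast.c picks
+   out the low j bits of the bit buffer before DUMPBITS(j) discards them */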
+
+
+/* copy as much as possible from the sliding window to the output area */
+int zlib_inflate_flush(
+	inflate_blocks_statef *s,
+	z_streamp z,
+	int r
+)
+{
+  uInt n;
+  Byte *p;
+  Byte *q;
+
+  /* local copies of source and destination pointers */
+  p = z->next_out;
+  q = s->read;
+
+  /* compute number of bytes to copy as far as end of window */
+  n = (uInt)((q <= s->write ? s->write : s->end) - q);
+  if (n > z->avail_out) n = z->avail_out;
+  if (n && r == Z_BUF_ERROR) r = Z_OK;
+
+  /* update counters */
+  z->avail_out -= n;
+  z->total_out += n;
+
+  /* update check information */
+  if (s->checkfn != NULL)
+    z->adler = s->check = (*s->checkfn)(s->check, q, n);
+
+  /* copy as far as end of window */
+  memcpy(p, q, n);
+  p += n;
+  q += n;
+
+  /* see if more to copy at beginning of window */
+  if (q == s->end)
+  {
+    /* wrap pointers */
+    q = s->window;
+    if (s->write == s->end)
+      s->write = s->window;
+
+    /* compute bytes to copy */
+    n = (uInt)(s->write - q);
+    if (n > z->avail_out) n = z->avail_out;
+    if (n && r == Z_BUF_ERROR) r = Z_OK;
+
+    /* update counters */
+    z->avail_out -= n;
+    z->total_out += n;
+
+    /* update check information */
+    if (s->checkfn != NULL)
+      z->adler = s->check = (*s->checkfn)(s->check, q, n);
+
+    /* copy */
+    memcpy(p, q, n);
+    p += n;
+    q += n;
+  }
+
+  /* update pointers */
+  z->next_out = p;
+  s->read = q;
+
+  /* done */
+  return r;
+}
diff --git a/lib/zlib_inflate/infutil.h b/lib/zlib_inflate/infutil.h
new file mode 100644
index 0000000..a15875f
--- /dev/null
+++ b/lib/zlib_inflate/infutil.h
@@ -0,0 +1,197 @@
+/* infutil.h -- types and macros common to blocks and codes
+ * Copyright (C) 1995-1998 Mark Adler
+ * For conditions of distribution and use, see copyright notice in zlib.h 
+ */
+
+/* WARNING: this file should *not* be used by applications. It is
+   part of the implementation of the compression library and is
+   subject to change. Applications should only use zlib.h.
+ */
+
+#ifndef _INFUTIL_H
+#define _INFUTIL_H
+
+#include <linux/zconf.h>
+#include "inftrees.h"
+#include "infcodes.h"
+
+typedef enum {
+      TYPE,     /* get type bits (3, including end bit) */
+      LENS,     /* get lengths for stored */
+      STORED,   /* processing stored block */
+      TABLE,    /* get table lengths */
+      BTREE,    /* get bit lengths tree for a dynamic block */
+      DTREE,    /* get length, distance trees for a dynamic block */
+      CODES,    /* processing fixed or dynamic block */
+      DRY,      /* output remaining window bytes */
+      B_DONE,   /* finished last block, done */
+      B_BAD}    /* got a data error--stuck here */
+inflate_block_mode;
+
+/* inflate blocks semi-private state */
+struct inflate_blocks_state {
+
+  /* mode */
+  inflate_block_mode  mode;     /* current inflate_block mode */
+
+  /* mode dependent information */
+  union {
+    uInt left;          /* if STORED, bytes left to copy */
+    struct {
+      uInt table;               /* table lengths (14 bits) */
+      uInt index;               /* index into blens (or border) */
+      uInt *blens;              /* bit lengths of codes */
+      uInt bb;                  /* bit length tree depth */
+      inflate_huft *tb;         /* bit length decoding tree */
+    } trees;            /* if DTREE, decoding info for trees */
+    struct {
+      inflate_codes_statef 
+         *codes;
+    } decode;           /* if CODES, current state */
+  } sub;                /* submode */
+  uInt last;            /* true if this block is the last block */
+
+  /* mode independent information */
+  uInt bitk;            /* bits in bit buffer */
+  uLong bitb;           /* bit buffer */
+  inflate_huft *hufts;  /* single malloc for tree space */
+  Byte *window;         /* sliding window */
+  Byte *end;            /* one byte after sliding window */
+  Byte *read;           /* window read pointer */
+  Byte *write;          /* window write pointer */
+  check_func checkfn;   /* check function */
+  uLong check;          /* check on output */
+
+};
+
+
+/* defines for inflate input/output */
+/*   update pointers and return */
+#define UPDBITS {s->bitb=b;s->bitk=k;}
+#define UPDIN {z->avail_in=n;z->total_in+=p-z->next_in;z->next_in=p;}
+#define UPDOUT {s->write=q;}
+#define UPDATE {UPDBITS UPDIN UPDOUT}
+#define LEAVE {UPDATE return zlib_inflate_flush(s,z,r);}
+/*   get bytes and bits */
+#define LOADIN {p=z->next_in;n=z->avail_in;b=s->bitb;k=s->bitk;}
+#define NEEDBYTE {if(n)r=Z_OK;else LEAVE}
+#define NEXTBYTE (n--,*p++)
+#define NEEDBITS(j) {while(k<(j)){NEEDBYTE;b|=((uLong)NEXTBYTE)<<k;k+=8;}}
+#define DUMPBITS(j) {b>>=(j);k-=(j);}
+/*   output bytes */
+#define WAVAIL (uInt)(q<s->read?s->read-q-1:s->end-q)
+#define LOADOUT {q=s->write;m=(uInt)WAVAIL;}
+#define WRAP {if(q==s->end&&s->read!=s->window){q=s->window;m=(uInt)WAVAIL;}}
+#define FLUSH {UPDOUT r=zlib_inflate_flush(s,z,r); LOADOUT}
+#define NEEDOUT {if(m==0){WRAP if(m==0){FLUSH WRAP if(m==0) LEAVE}}r=Z_OK;}
+#define OUTBYTE(a) {*q++=(Byte)(a);m--;}
+/*   load local pointers */
+#define LOAD {LOADIN LOADOUT}
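+/* The bit buffer is filled least-significant-bit first: NEEDBITS(j) keeps
+   pulling input bytes in above the k bits already held until at least j
+   bits are available, the caller masks off the low bits it wants, and
+   DUMPBITS(j) then discards them.  A sketch of reading a 3-bit field:
+
+       NEEDBITS(3)
+       t = (uInt)b & 7;
+       DUMPBITS(3)
+*/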
+
+/* masks for lower bits (size given to avoid silly warnings with Visual C++) */
+extern uInt zlib_inflate_mask[17];
+
+/* copy as much as possible from the sliding window to the output area */
+extern int zlib_inflate_flush (
+    inflate_blocks_statef *,
+    z_streamp ,
+    int);
+
+/* inflate private state */
+typedef enum {
+      METHOD,   /* waiting for method byte */
+      FLAG,     /* waiting for flag byte */
+      DICT4,    /* four dictionary check bytes to go */
+      DICT3,    /* three dictionary check bytes to go */
+      DICT2,    /* two dictionary check bytes to go */
+      DICT1,    /* one dictionary check byte to go */
+      DICT0,    /* waiting for inflateSetDictionary */
+      BLOCKS,   /* decompressing blocks */
+      CHECK4,   /* four check bytes to go */
+      CHECK3,   /* three check bytes to go */
+      CHECK2,   /* two check bytes to go */
+      CHECK1,   /* one check byte to go */
+      I_DONE,   /* finished check, done */
+      I_BAD}    /* got an error--stay here */
+inflate_mode;
+
+struct internal_state {
+
+  /* mode */
+  inflate_mode  mode;   /* current inflate mode */
+
+  /* mode dependent information */
+  union {
+    uInt method;        /* if FLAGS, method byte */
+    struct {
+      uLong was;                /* computed check value */
+      uLong need;               /* stream check value */
+    } check;            /* if CHECK, check values to compare */
+    uInt marker;        /* if BAD, inflateSync's marker bytes count */
+  } sub;        /* submode */
+
+  /* mode independent information */
+  int  nowrap;          /* flag for no wrapper */
+  uInt wbits;           /* log2(window size)  (8..15, defaults to 15) */
+  inflate_blocks_statef 
+    *blocks;            /* current inflate_blocks state */
+
+};
+
+/* inflate codes private state */
+typedef enum {        /* waiting for "i:"=input, "o:"=output, "x:"=nothing */
+      START,    /* x: set up for LEN */
+      LEN,      /* i: get length/literal/eob next */
+      LENEXT,   /* i: getting length extra (have base) */
+      DIST,     /* i: get distance next */
+      DISTEXT,  /* i: getting distance extra */
+      COPY,     /* o: copying bytes in window, waiting for space */
+      LIT,      /* o: got literal, waiting for output space */
+      WASH,     /* o: got eob, possibly still output waiting */
+      END,      /* x: got eob and all data flushed */
+      BADCODE}  /* x: got error */
+inflate_codes_mode;
+
+struct inflate_codes_state {
+
+  /* mode */
+  inflate_codes_mode mode;      /* current inflate_codes mode */
+
+  /* mode dependent information */
+  uInt len;
+  union {
+    struct {
+      inflate_huft *tree;       /* pointer into tree */
+      uInt need;                /* bits needed */
+    } code;             /* if LEN or DIST, where in tree */
+    uInt lit;           /* if LIT, literal */
+    struct {
+      uInt get;                 /* bits to get for extra */
+      uInt dist;                /* distance back to copy from */
+    } copy;             /* if EXT or COPY, where and how much */
+  } sub;                /* submode */
+
+  /* mode independent information */
+  Byte lbits;           /* ltree bits decoded per branch */
+  Byte dbits;           /* dtree bits decoded per branch */
+  inflate_huft *ltree;          /* literal/length/eob tree */
+  inflate_huft *dtree;          /* distance tree */
+
+};
+
+/* memory allocation for inflation */
+
+struct inflate_workspace {
+	inflate_codes_statef working_state;
+	struct inflate_blocks_state working_blocks_state;
+	struct internal_state internal_state;
+	unsigned int tree_work_area_1[19];
+	unsigned int tree_work_area_2[288];
+	unsigned working_blens[258 + 0x1f + 0x1f];
+	inflate_huft working_hufts[MANY];
+	unsigned char working_window[1 << MAX_WBITS];
+};
+
+#define WS(z) ((struct inflate_workspace *)(z->workspace))
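+/* All of the inflate-side state, including the sliding window
+   (1 << MAX_WBITS bytes, normally 32K) and the Huffman table space, lives
+   in this one structure.  The caller allocates it up front, sized via
+   zlib_inflate_workspacesize(), and passes it in through z->workspace;
+   the inflate code never allocates memory itself, and WS(z) simply
+   recovers the workspace from the stream. */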
+
+#endif