Merge branch 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/geert/linux-m68k

* 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/geert/linux-m68k: (24 commits)
  m68k: amiga - RTC platform device conversion
  m68k: amiga - Parallel port platform device conversion
  m68k: amiga - Serial port platform device conversion
  m68k: amiga - Mouse platform device conversion
  m68k: amiga - Keyboard platform device conversion
  m68k: amiga - Amiga Gayle IDE platform device conversion
  m68k: amiga - A4000T SCSI platform device conversion
  m68k/scsi: a3000 - Do not use legacy Scsi_Host.base
  m68k: amiga - A3000 SCSI platform device conversion
  m68k/scsi: gvp11 - Do not use legacy Scsi_Host.base
  m68k: amiga - GVP Series II SCSI zorro_driver conversion
  m68k/scsi: a2091 - Do not use legacy Scsi_Host.base
  m68k: amiga - A2091/A590 SCSI zorro_driver conversion
  m68k/scsi: mvme147 - Kill obsolete HOSTS_C logic
  m68k/scsi: a3000 - Kill a3000_scsiregs typedef
  m68k/scsi: gvp11 - Kill gvp11_scsiregs typedef
  m68k/scsi: a2091 - Kill a2091_scsiregs typedef
  m68k/scsi: gvp11 - Extract check_wd33c93()
  m68k/scsi: a3000 - Kill static global a3000_host
  m68k/scsi: mvme147 - Kill static global mvme147_host
  ...
diff --git a/Documentation/DMA-API-HOWTO.txt b/Documentation/DMA-API-HOWTO.txt
index 2e435ad..98ce517 100644
--- a/Documentation/DMA-API-HOWTO.txt
+++ b/Documentation/DMA-API-HOWTO.txt
@@ -639,6 +639,36 @@
 they are entirely deprecated.  Some ports already do not provide these
 as it is impossible to correctly support them.
 
+			Handling Errors
+
+DMA address space is limited on some architectures and an allocation
+failure can be determined by:
+
+- checking if dma_alloc_coherent returns NULL or dma_map_sg returns 0
+
+- checking the returned dma_addr_t of dma_map_single and dma_map_page
+  by using dma_mapping_error():
+
+	dma_addr_t dma_handle;
+
+	dma_handle = dma_map_single(dev, addr, size, direction);
+	if (dma_mapping_error(dev, dma_handle)) {
+		/*
+		 * reduce current DMA mapping usage,
+		 * delay and try again later or
+		 * reset driver.
+		 */
+	}
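+
+For the first check, a minimal sketch of the dma_alloc_coherent() case
+(dev and size are placeholders, as in the example above):
+
+	void *cpu_addr;
+	dma_addr_t dma_handle;
+
+	cpu_addr = dma_alloc_coherent(dev, size, &dma_handle, GFP_KERNEL);
+	if (!cpu_addr) {
+		/*
+		 * allocation failed: fall back to a smaller allocation
+		 * or fail the device setup.
+		 */
+	}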
+
+Networking drivers must call dev_kfree_skb to free the socket buffer
+and return NETDEV_TX_OK if the DMA mapping fails on the transmit hook
+(ndo_start_xmit). This means that the socket buffer is just dropped in
+the failure case.
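+
+As a minimal sketch (the foo_ names are hypothetical; only the DMA and
+netdev calls are kernel API):
+
+	static netdev_tx_t foo_start_xmit(struct sk_buff *skb,
+					  struct net_device *dev)
+	{
+		dma_addr_t dma_handle;
+
+		dma_handle = dma_map_single(&dev->dev, skb->data,
+					    skb->len, DMA_TO_DEVICE);
+		if (dma_mapping_error(&dev->dev, dma_handle)) {
+			dev_kfree_skb(skb);	/* just drop the packet */
+			return NETDEV_TX_OK;
+		}
+		/* ... hand the mapped buffer to the hardware ... */
+		return NETDEV_TX_OK;
+	}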
+
+SCSI drivers must return SCSI_MLQUEUE_HOST_BUSY if the DMA mapping
+fails in the queuecommand hook. This means that the SCSI subsystem
+passes the command to the driver again later.
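+
+A minimal sketch for the SCSI case (foo_queuecommand is hypothetical):
+
+	static int foo_queuecommand(struct scsi_cmnd *cmd,
+				    void (*done)(struct scsi_cmnd *))
+	{
+		if (scsi_dma_map(cmd) < 0)
+			return SCSI_MLQUEUE_HOST_BUSY;	/* retried later */
+
+		/* ... set up and issue the command, complete via done() ... */
+		return 0;
+	}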
+
 		Optimizing Unmap State Space Consumption
 
 On many platforms, dma_unmap_{single,page}() is simply a nop.
@@ -703,42 +733,25 @@
 
 1) Struct scatterlist requirements.
 
-   Struct scatterlist must contain, at a minimum, the following
-   members:
+   Don't invent an architecture-specific struct scatterlist; just use
+   <asm-generic/scatterlist.h>. You need to enable
+   CONFIG_NEED_SG_DMA_LENGTH if the architecture supports IOMMUs
+   (including software IOMMU).
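+
+   For example, an architecture enables it from its Kconfig, as the
+   alpha and ia64 hunks later in this patch do:
+
+	config NEED_SG_DMA_LENGTH
+		def_bool y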
 
-	struct page *page;
-	unsigned int offset;
-	unsigned int length;
+2) ARCH_KMALLOC_MINALIGN
 
-   The base address is specified by a "page+offset" pair.
+   Architectures must ensure that a kmalloc'ed buffer is
+   DMA-safe. Drivers and subsystems depend on it. If an architecture
+   isn't fully DMA-coherent (i.e. hardware doesn't ensure that data in
+   the CPU cache is identical to data in main memory),
+   ARCH_KMALLOC_MINALIGN must be set so that the memory allocator
+   makes sure that a kmalloc'ed buffer doesn't share a cache line with
+   others. See arch/arm/include/asm/cache.h as an example.
 
-   Previous versions of struct scatterlist contained a "void *address"
-   field that was sometimes used instead of page+offset.  As of Linux
-   2.5., page+offset is always used, and the "address" field has been
-   deleted.
-
-2) More to come...
-
-			Handling Errors
-
-DMA address space is limited on some architectures and an allocation
-failure can be determined by:
-
-- checking if dma_alloc_coherent returns NULL or dma_map_sg returns 0
-
-- checking the returned dma_addr_t of dma_map_single and dma_map_page
-  by using dma_mapping_error():
-
-	dma_addr_t dma_handle;
-
-	dma_handle = dma_map_single(dev, addr, size, direction);
-	if (dma_mapping_error(dev, dma_handle)) {
-		/*
-		 * reduce current DMA mapping usage,
-		 * delay and try again later or
-		 * reset driver.
-		 */
-	}
+   Note that ARCH_KMALLOC_MINALIGN is about DMA memory alignment
+   constraints. You don't need to worry about the architecture data
+   alignment constraints (e.g. the alignment constraints of 64-bit
+   objects).
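+
+   As a minimal sketch of such a definition, modeled on the ARM header
+   mentioned above (the shift value is CPU-specific and assumed here):
+
+	#define L1_CACHE_SHIFT		5
+	#define L1_CACHE_BYTES		(1 << L1_CACHE_SHIFT)
+
+	/*
+	 * kmalloc'ed memory may be used for DMA, so a buffer must not
+	 * share a cache line with unrelated data on a non-coherent CPU.
+	 */
+	#define ARCH_KMALLOC_MINALIGN	L1_CACHE_BYTES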
 
 			   Closing
 
diff --git a/Documentation/SubmittingDrivers b/Documentation/SubmittingDrivers
index 99e72a8..4947fd8 100644
--- a/Documentation/SubmittingDrivers
+++ b/Documentation/SubmittingDrivers
@@ -130,6 +130,8 @@
 	ftp.??.kernel.org:/pub/linux/kernel/...
 	?? == your country code, such as "us", "uk", "fr", etc.
 
+	http://git.kernel.org/?p=linux/kernel/git/torvalds/linux-2.6.git
+
 Linux kernel mailing list:
 	linux-kernel@vger.kernel.org
 	[mail majordomo@vger.kernel.org to subscribe]
@@ -160,3 +162,6 @@
 
 Kernel Janitor:
 	http://janitor.kernelnewbies.org/
+
+GIT, Fast Version Control System:
+	http://git-scm.com/
diff --git a/Documentation/cgroups/cgroups.txt b/Documentation/cgroups/cgroups.txt
index 57444c2..b34823f 100644
--- a/Documentation/cgroups/cgroups.txt
+++ b/Documentation/cgroups/cgroups.txt
@@ -339,7 +339,7 @@
 The "xxx" is not interpreted by the cgroup code, but will appear in
 /proc/mounts so may be any useful identifying string that you like.
 
-To mount a cgroup hierarchy with just the cpuset and numtasks
+To mount a cgroup hierarchy with just the cpuset and memory
 subsystems, type:
 # mount -t cgroup -o cpuset,memory hier1 /dev/cgroup
 
diff --git a/Documentation/cgroups/memory.txt b/Documentation/cgroups/memory.txt
index 6cab1f2..7781857 100644
--- a/Documentation/cgroups/memory.txt
+++ b/Documentation/cgroups/memory.txt
@@ -1,18 +1,15 @@
 Memory Resource Controller
 
 NOTE: The Memory Resource Controller has generically been referred
-to as the memory controller in this document. Do not confuse memory controller
-used here with the memory controller that is used in hardware.
+      to as the memory controller in this document. Do not confuse the memory
+      controller used here with the memory controller that is used in hardware.
 
-Salient features
-
-a. Enable control of Anonymous, Page Cache (mapped and unmapped) and
-   Swap Cache memory pages.
-b. The infrastructure allows easy addition of other types of memory to control
-c. Provides *zero overhead* for non memory controller users
-d. Provides a double LRU: global memory pressure causes reclaim from the
-   global LRU; a cgroup on hitting a limit, reclaims from the per
-   cgroup LRU
+(For editors)
+In this document:
+      When we mention a cgroup (cgroupfs's directory) with the memory
+      controller, we call it a "memory cgroup". In git logs and source code,
+      patch titles and function names tend to use "memcg"; in this document,
+      we avoid that abbreviation.
 
 Benefits and Purpose of the memory controller
 
@@ -33,6 +30,45 @@
 e. There are several other use cases, find one or use the controller just
    for fun (to learn and hack on the VM subsystem).
 
+Current Status: linux-2.6.34-mmotm (development version of April, 2010)
+
+Features:
+ - accounting of anonymous pages, file caches and swap caches usage, and
+   limiting them.
+ - private LRU and reclaim routine. (the system's global LRU and the private
+   LRU work independently of each other)
+ - optionally, memory+swap usage can be accounted and limited.
+ - hierarchical accounting
+ - soft limits
+ - moving (recharging) of charges when a task migrates is selectable.
+ - usage threshold notifier
+ - oom-killer disable knob and oom-notifier
+ - the root cgroup has no limit controls.
+
+ Kernel memory and hugepages are not under control yet. We just manage
+ pages on the LRU. To add more controls, we have to take care of performance.
+
+Brief summary of control files.
+
+ tasks				 # attach a task(thread) and show list of threads
+ cgroup.procs			 # show list of processes
+ cgroup.event_control		 # an interface for event_fd()
+ memory.usage_in_bytes		 # show current memory(RSS+Cache) usage.
+ memory.memsw.usage_in_bytes	 # show current memory+Swap usage
+ memory.limit_in_bytes		 # set/show limit of memory usage
+ memory.memsw.limit_in_bytes	 # set/show limit of memory+Swap usage
+ memory.failcnt			 # show the number of times memory usage hit limits
+ memory.memsw.failcnt		 # show the number of times memory+Swap hit limits
+ memory.max_usage_in_bytes	 # show max memory usage recorded
+ memory.memsw.max_usage_in_bytes # show max memory+Swap usage recorded
+ memory.soft_limit_in_bytes	 # set/show soft limit of memory usage
+ memory.stat			 # show various statistics
+ memory.use_hierarchy		 # set/show hierarchical account enabled
+ memory.force_empty		 # trigger forced move charge to parent
+ memory.swappiness		 # set/show swappiness parameter of vmscan
+				 (See sysctl's vm.swappiness)
+ memory.move_charge_at_immigrate # set/show controls of moving charges
+ memory.oom_control		 # set/show oom controls.
+
 1. History
 
 The memory controller has a long history. A request for comments for the memory
@@ -106,14 +142,14 @@
 is over its limit. If it is then reclaim is invoked on the cgroup.
 More details can be found in the reclaim section of this document.
 If everything goes well, a page meta-data-structure called page_cgroup is
-allocated and associated with the page.  This routine also adds the page to
-the per cgroup LRU.
+updated. page_cgroup has its own LRU per cgroup.
+(*) The page_cgroup structure is allocated at boot/memory-hotplug time.
 
 2.2.1 Accounting details
 
 All mapped anon pages (RSS) and cache pages (Page Cache) are accounted.
-(some pages which never be reclaimable and will not be on global LRU
- are not accounted. we just accounts pages under usual vm management.)
+Some pages which are never reclaimable and will not be on the global LRU
+are not accounted. We just account pages under usual VM management.
 
 RSS pages are accounted at page_fault unless they've already been accounted
 for earlier. A file page will be accounted for as Page Cache when it's
@@ -121,12 +157,19 @@
 processes, duplicate accounting is carefully avoided.
 
 A RSS page is unaccounted when it's fully unmapped. A PageCache page is
-unaccounted when it's removed from radix-tree.
+unaccounted when it's removed from the radix-tree. Even if RSS pages are fully
+unmapped (by kswapd), they may exist as SwapCache in the system until they
+are really freed. Such SwapCaches are also accounted.
+A swapped-in page is not accounted until it's mapped.
+
+Note: The kernel does swapin-readahead and reads multiple swap pages at once.
+This means swapped-in pages may contain pages for tasks other than the task
+causing the page fault. So, we avoid accounting at swap-in I/O.
 
 At page migration, accounting information is kept.
 
-Note: we just account pages-on-lru because our purpose is to control amount
-of used pages. not-on-lru pages are tend to be out-of-control from vm view.
+Note: we just account pages-on-LRU because our purpose is to control the
+amount of used pages; not-on-LRU pages tend to be out of control from the
+VM's point of view.
 
 2.3 Shared Page Accounting
 
@@ -143,6 +186,7 @@
 
 
 2.4 Swap Extension (CONFIG_CGROUP_MEM_RES_CTLR_SWAP)
+
 Swap Extension allows you to record charge for swap. A swapped-in page is
 charged back to original page allocator if possible.
 
@@ -150,13 +194,20 @@
  - memory.memsw.usage_in_bytes.
  - memory.memsw.limit_in_bytes.
 
-usage of mem+swap is limited by memsw.limit_in_bytes.
+memsw means memory+swap. Usage of memory+swap is limited by
+memsw.limit_in_bytes.
 
-* why 'mem+swap' rather than swap.
+Example: Assume a system with 4G of swap. A task which allocates 6G of memory
+(by mistake) under a 2G memory limit will use up all of the swap.
+In this case, setting memsw.limit_in_bytes=3G will prevent this bad use of
+swap. By using the memsw limit, you can avoid a system OOM which can be
+caused by swap shortage.
+
+* why 'memory+swap' rather than swap.
 The global LRU(kswapd) can swap out arbitrary pages. Swap-out means
 to move account from memory to swap...there is no change in usage of
-mem+swap. In other words, when we want to limit the usage of swap without
-affecting global LRU, mem+swap limit is better than just limiting swap from
+memory+swap. In other words, when we want to limit the usage of swap without
+affecting global LRU, memory+swap limit is better than just limiting swap from
 OS point of view.
 
 * What happens when a cgroup hits memory.memsw.limit_in_bytes
@@ -168,12 +219,12 @@
 
 2.5 Reclaim
 
-Each cgroup maintains a per cgroup LRU that consists of an active
-and inactive list. When a cgroup goes over its limit, we first try
+Each cgroup maintains a per-cgroup LRU which has the same structure as
+the global VM's. When a cgroup goes over its limit, we first try
 to reclaim memory from the cgroup so as to make space for the new
 pages that the cgroup has touched. If the reclaim is unsuccessful,
 an OOM routine is invoked to select and kill the bulkiest task in the
-cgroup.
+cgroup. (See 10. OOM Control below.)
 
 The reclaim algorithm has not been modified for cgroups, except that
 pages that are selected for reclaiming come from the per cgroup LRU
@@ -184,13 +235,22 @@
 
 Note2: When panic_on_oom is set to "2", the whole system will panic.
 
-2. Locking
+When the OOM event notifier is registered, an event will be delivered.
+(See the oom_control section.)
 
-The memory controller uses the following hierarchy
+2.6 Locking
 
-1. zone->lru_lock is used for selecting pages to be isolated
-2. mem->per_zone->lru_lock protects the per cgroup LRU (per zone)
-3. lock_page_cgroup() is used to protect page->page_cgroup
+   lock_page_cgroup()/unlock_page_cgroup() should not be called under
+   mapping->tree_lock.
+
+   The other lock order is as follows:
+
+   PG_locked.
+     mm->page_table_lock
+       zone->lru_lock
+         lock_page_cgroup.
+
+   In many cases, just lock_page_cgroup() is called.
+
+   The per-zone-per-cgroup LRU (the cgroup's private LRU) is guarded only
+   by zone->lru_lock; it has no lock of its own.
 
 3. User Interface
 
@@ -199,6 +259,7 @@
 a. Enable CONFIG_CGROUPS
 b. Enable CONFIG_RESOURCE_COUNTERS
 c. Enable CONFIG_CGROUP_MEM_RES_CTLR
+d. Enable CONFIG_CGROUP_MEM_RES_CTLR_SWAP (to use swap extension)
 
 1. Prepare the cgroups
 # mkdir -p /cgroups
@@ -206,31 +267,28 @@
 
 2. Make the new group and move bash into it
 # mkdir /cgroups/0
-# echo $$ >  /cgroups/0/tasks
+# echo $$ > /cgroups/0/tasks
 
-Since now we're in the 0 cgroup,
-We can alter the memory limit:
+Now that we're in the 0 cgroup, we can alter the memory limit:
 # echo 4M > /cgroups/0/memory.limit_in_bytes
 
 NOTE: We can use a suffix (k, K, m, M, g or G) to indicate values in kilo,
-mega or gigabytes.
+mega or gigabytes. (Here, Kilo, Mega, Giga are Kibibytes, Mebibytes, Gibibytes.)
+
+NOTE: We can write "-1" to reset the *.limit_in_bytes (unlimited).
 NOTE: We cannot set limits on the root cgroup any more.
 
 # cat /cgroups/0/memory.limit_in_bytes
 4194304
 
-NOTE: The interface has now changed to display the usage in bytes
-instead of pages
-
 We can check the usage:
 # cat /cgroups/0/memory.usage_in_bytes
 1216512
 
 A successful write to this file does not guarantee a successful set of
-this limit to the value written into the file.  This can be due to a
+this limit to the value written into the file. This can be due to a
 number of factors, such as rounding up to page boundaries or the total
-availability of memory on the system.  The user is required to re-read
+availability of memory on the system. The user is required to re-read
 this file after a write to guarantee the value committed by the kernel.
 
 # echo 1 > memory.limit_in_bytes
@@ -245,15 +303,23 @@
 
 4. Testing
 
-Balbir posted lmbench, AIM9, LTP and vmmstress results [10] and [11].
-Apart from that v6 has been tested with several applications and regular
-daily use. The controller has also been tested on the PPC64, x86_64 and
-UML platforms.
+For testing features and implementation, see memcg_test.txt.
+
+Performance testing is also important. To see the pure memory controller
+overhead, testing on tmpfs will give you good numbers for the small
+overheads. Example: do a kernel make on tmpfs.
+
+Page-fault scalability is also important. When measuring parallel
+page fault tests, a multi-process test may be better than a multi-thread
+test because the latter has noise from shared objects/status.
+
+But the above two test extreme situations.
+Running your usual tests under the memory controller is always helpful.
 
 4.1 Troubleshooting
 
 Sometimes a user might find that the application under a cgroup is
-terminated. There are several causes for this:
+terminated by OOM killer. There are several causes for this:
 
 1. The cgroup limit is too low (just too low to do anything useful)
 2. The user is using anonymous memory and swap is turned off or too low
@@ -261,6 +327,9 @@
 A sync followed by echo 1 > /proc/sys/vm/drop_caches will help get rid of
 some of the pages cached in the cgroup (page cache pages).
 
+To see what is going on, disabling the OOM killer (see 10. OOM Control
+below) and observing what happens will be helpful.
+
 4.2 Task migration
 
 When a task migrates from one cgroup to another, its charge is not
@@ -268,16 +337,19 @@
 remain charged to it, the charge is dropped when the page is freed or
 reclaimed.
 
-Note: You can move charges of a task along with task migration. See 8.
+You can move charges of a task along with task migration.
+See 8. "Move charges at task migration"
 
 4.3 Removing a cgroup
 
 A cgroup can be removed by rmdir, but as discussed in sections 4.1 and 4.2, a
 cgroup might have some charge associated with it, even though all
-tasks have migrated away from it.
-Such charges are freed(at default) or moved to its parent. When moved,
-both of RSS and CACHES are moved to parent.
-If both of them are busy, rmdir() returns -EBUSY. See 5.1 Also.
+tasks have migrated away from it. (This is because we charge against pages,
+not against tasks.)
+
+Such charges are freed or moved to their parent. When moved, both RSS
+and CACHES are moved to the parent.
+rmdir() may return -EBUSY if freeing/moving fails. See 5.1 also.
 
 Charges recorded in swap information are not updated at removal of a cgroup.
 Recorded information is discarded and a cgroup which uses swap (swapcache)
@@ -293,10 +365,10 @@
 
   # echo 0 > memory.force_empty
 
-  Almost all pages tracked by this memcg will be unmapped and freed. Some of
-  pages cannot be freed because it's locked or in-use. Such pages are moved
-  to parent and this cgroup will be empty. But this may return -EBUSY in
-  some too busy case.
+  Almost all pages tracked by this memory cgroup will be unmapped and freed.
+  Some pages cannot be freed because they are locked or in use. Such pages
+  are moved to the parent and this cgroup will become empty. This may return
+  -EBUSY if the VM is too busy to free/move all pages immediately.
 
   A typical use case of this interface is calling it before rmdir().
   Because rmdir() moves all pages to parent, some out-of-use page caches can be
@@ -306,19 +378,41 @@
 
 memory.stat file includes following statistics
 
+# per-memory cgroup local status
 cache		- # of bytes of page cache memory.
 rss		- # of bytes of anonymous and swap cache memory.
+mapped_file	- # of bytes of mapped file (includes tmpfs/shmem)
 pgpgin		- # of pages paged in (equivalent to # of charging events).
 pgpgout		- # of pages paged out (equivalent to # of uncharging events).
-active_anon	- # of bytes of anonymous and  swap cache memory on active
-		  lru list.
+swap		- # of bytes of swap usage
 inactive_anon	- # of bytes of anonymous memory and swap cache memory on
-		  inactive lru list.
-active_file	- # of bytes of file-backed memory on active lru list.
-inactive_file	- # of bytes of file-backed memory on inactive lru list.
+		inactive LRU list.
+active_anon	- # of bytes of anonymous and swap cache memory on active
+		LRU list.
+inactive_file	- # of bytes of file-backed memory on inactive LRU list.
+active_file	- # of bytes of file-backed memory on active LRU list.
 unevictable	- # of bytes of memory that cannot be reclaimed (mlocked etc).
 
-The following additional stats are dependent on CONFIG_DEBUG_VM.
+# status considering hierarchy (see memory.use_hierarchy settings)
+
+hierarchical_memory_limit - # of bytes of memory limit with regard to the
+			hierarchy under which the memory cgroup resides
+hierarchical_memsw_limit - # of bytes of memory+swap limit with regard to the
+			hierarchy under which the memory cgroup resides
+
+total_cache		- sum of all children's "cache"
+total_rss		- sum of all children's "rss"
+total_mapped_file	- sum of all children's "mapped_file"
+total_pgpgin		- sum of all children's "pgpgin"
+total_pgpgout		- sum of all children's "pgpgout"
+total_swap		- sum of all children's "swap"
+total_inactive_anon	- sum of all children's "inactive_anon"
+total_active_anon	- sum of all children's "active_anon"
+total_inactive_file	- sum of all children's "inactive_file"
+total_active_file	- sum of all children's "active_file"
+total_unevictable	- sum of all children's "unevictable"
+
+# The following additional stats are dependent on CONFIG_DEBUG_VM.
 
 inactive_ratio		- VM internal parameter. (see mm/page_alloc.c)
 recent_rotated_anon	- VM internal parameter. (see mm/vmscan.c)
@@ -327,24 +421,37 @@
 recent_scanned_file	- VM internal parameter. (see mm/vmscan.c)
 
 Memo:
-	recent_rotated means recent frequency of lru rotation.
-	recent_scanned means recent # of scans to lru.
+	recent_rotated means recent frequency of LRU rotation.
+	recent_scanned means recent # of scans to LRU.
 	These are shown for better debugging; please see the code for meanings.
 
 Note:
 	Only anonymous and swap cache memory is listed as part of 'rss' stat.
 	This should not be confused with the true 'resident set size' or the
-	amount of physical memory used by the cgroup. Per-cgroup rss
-	accounting is not done yet.
+	amount of physical memory used by the cgroup.
+	'rss + file_mapped' will give you the resident set size of the cgroup.
+	(Note: file and shmem may be shared among other cgroups. In that case,
+	 file_mapped is accounted only when the memory cgroup is the owner of
+	 the page cache.)
 
 5.3 swappiness
-  Similar to /proc/sys/vm/swappiness, but affecting a hierarchy of groups only.
 
-  Following cgroups' swappiness can't be changed.
-  - root cgroup (uses /proc/sys/vm/swappiness).
-  - a cgroup which uses hierarchy and it has child cgroup.
-  - a cgroup which uses hierarchy and not the root of hierarchy.
+Similar to /proc/sys/vm/swappiness, but affecting a hierarchy of groups only.
 
+The following cgroups' swappiness can't be changed:
+- the root cgroup (it uses /proc/sys/vm/swappiness).
+- a cgroup which uses the hierarchy and has other cgroup(s) below it.
+- a cgroup which uses the hierarchy and is not the root of the hierarchy.
+
+5.4 failcnt
+
+A memory cgroup provides memory.failcnt and memory.memsw.failcnt files.
+This failcnt (== failure count) shows the number of times that a usage counter
+hit its limit. When a memory cgroup hits a limit, failcnt increases and
+memory under it will be reclaimed.
+
+You can reset failcnt by writing 0 to the failcnt file.
+# echo 0 > .../memory.failcnt
 
 6. Hierarchy support
 
@@ -363,13 +470,13 @@
 
 In the diagram above, with hierarchical accounting enabled, all memory
 usage of e, is accounted to its ancestors up until the root (i.e, c and root),
-that has memory.use_hierarchy enabled.  If one of the ancestors goes over its
+that has memory.use_hierarchy enabled. If one of the ancestors goes over its
 limit, the reclaim algorithm reclaims from the tasks in the ancestor and the
 children of the ancestor.
 
 6.1 Enabling hierarchical accounting and reclaim
 
-The memory controller by default disables the hierarchy feature. Support
+A memory cgroup by default disables the hierarchy feature. Support
 can be enabled by writing 1 to memory.use_hierarchy file of the root cgroup
 
 # echo 1 > memory.use_hierarchy
@@ -379,10 +486,10 @@
 # echo 0 > memory.use_hierarchy
 
 NOTE1: Enabling/disabling will fail if the cgroup already has other
-cgroups created below it.
+       cgroups created below it.
 
 NOTE2: When panic_on_oom is set to "2", the whole system will panic in
-case of an oom event in any cgroup.
+       case of an OOM event in any cgroup.
 
 7. Soft limits
 
@@ -392,7 +499,7 @@
 a. There is no memory contention
 b. They do not exceed their hard limit
 
-When the system detects memory contention or low memory control groups
+When the system detects memory contention or low memory, control groups
 are pushed back to their soft limits. If the soft limit of each control
 group is very high, they are pushed back as much as possible to make
 sure that one control group does not starve the others of memory.
@@ -406,7 +513,7 @@
 7.1 Interface
 
 Soft limits can be setup by using the following commands (in this example we
-assume a soft limit of 256 megabytes)
+assume a soft limit of 256 MiB)
 
 # echo 256M > memory.soft_limit_in_bytes
 
@@ -442,7 +549,7 @@
 Note: If we cannot find enough space for the task in the destination cgroup, we
       try to make space by reclaiming memory. Task migration may fail if we
       cannot make enough space.
-Note: It can take several seconds if you move charges in giga bytes order.
+Note: It can take several seconds if you move a large amount of charges.
 
 And if you want disable it again:
 
@@ -451,21 +558,27 @@
 8.2 Type of charges which can be move
 
 Each bits of move_charge_at_immigrate has its own meaning about what type of
-charges should be moved.
+charges should be moved. But in any case, it must be noted that an account of
+a page or a swap can be moved only when it is charged to the task's current
+(old) memory cgroup.
 
   bit | what type of charges would be moved ?
  -----+------------------------------------------------------------------------
    0  | A charge of an anonymous page(or swap of it) used by the target task.
       | Those pages and swaps must be used only by the target task. You must
       | enable Swap Extension(see 2.4) to enable move of swap charges.
-
-Note: Those pages and swaps must be charged to the old cgroup.
-Note: More type of pages(e.g. file cache, shmem,) will be supported by other
-      bits in future.
+ -----+------------------------------------------------------------------------
+   1  | A charge of file pages (normal file, tmpfs file (e.g. ipc shared
+      | memory) and swaps of tmpfs files) mmapped by the target task. Unlike
+      | the case of anonymous pages, file pages (and swaps) in the range
+      | mmapped by the task will be moved even if the task hasn't done a page
+      | fault, i.e. they might not be the task's "RSS", but another task's
+      | "RSS" that maps the same file. And the mapcount of the page is
+      | ignored (the page can be moved even if page_mapcount(page) > 1). You
+      | must enable Swap Extension (see 2.4) to enable moving of swap charges.
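+
+For example, to move both anonymous page charges (bit 0) and file page
+charges (bit 1), write the bitmask 3:
+
+# echo 3 > memory.move_charge_at_immigrate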
 
 8.3 TODO
 
-- Add support for other types of pages(e.g. file cache, shmem, etc.).
 - Implement madvise(2) to let users decide the vma to be moved or not to be
   moved.
 - All of moving charge operations are done under cgroup_mutex. It's not good
@@ -473,22 +586,61 @@
 
 9. Memory thresholds
 
-Memory controler implements memory thresholds using cgroups notification
+The memory cgroup implements memory thresholds using the cgroups notification
 API (see cgroups.txt). It allows you to register multiple memory and memsw
 thresholds and get notifications when they are crossed.
 
 To register a threshold, an application needs to:
- - create an eventfd using eventfd(2);
- - open memory.usage_in_bytes or memory.memsw.usage_in_bytes;
- - write string like "<event_fd> <memory.usage_in_bytes> <threshold>" to
-   cgroup.event_control.
+- create an eventfd using eventfd(2);
+- open memory.usage_in_bytes or memory.memsw.usage_in_bytes;
+- write a string like "<event_fd> <fd of memory.usage_in_bytes> <threshold>"
+  to cgroup.event_control.
 
 The application will be notified through eventfd when memory usage crosses
 a threshold in any direction.
 
 It's applicable to both root and non-root cgroups.
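+
+A minimal userspace sketch of this registration (the cgroup path follows
+the examples in section 3; error handling omitted):
+
+	#include <stdio.h>
+	#include <string.h>
+	#include <stdint.h>
+	#include <unistd.h>
+	#include <fcntl.h>
+	#include <sys/eventfd.h>
+
+	int main(void)
+	{
+		char buf[64];
+		uint64_t cnt;
+		int efd = eventfd(0, 0);
+		int ufd = open("/cgroups/0/memory.usage_in_bytes", O_RDONLY);
+		int cfd = open("/cgroups/0/cgroup.event_control", O_WRONLY);
+
+		/* "<event_fd> <fd of memory.usage_in_bytes> <threshold>" */
+		snprintf(buf, sizeof(buf), "%d %d %d", efd, ufd, 4194304);
+		write(cfd, buf, strlen(buf));
+
+		read(efd, &cnt, sizeof(cnt));	/* blocks until crossed */
+		printf("memory usage crossed the threshold\n");
+		return 0;
+	}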
 
-10. TODO
+10. OOM Control
+
+The memory.oom_control file is for OOM notification and other controls.
+
+The memory cgroup implements an OOM notifier using the cgroup notification
+API (see cgroups.txt). It allows you to register multiple OOM notification
+deliveries and get notifications when an OOM happens.
+
+To register a notifier, an application needs to:
+ - create an eventfd using eventfd(2)
+ - open the memory.oom_control file
+ - write a string like "<event_fd> <fd of memory.oom_control>" to
+   cgroup.event_control
+
+The application will be notified through the eventfd when an OOM happens.
+OOM notification doesn't work for the root cgroup.
+
+You can disable the OOM-killer by writing "1" to the memory.oom_control file:
+
+	# echo 1 > memory.oom_control
+
+This operation is only allowed to the top cgroup of a sub-hierarchy.
+If the OOM-killer is disabled, tasks under the cgroup will hang/sleep
+in the memory cgroup's OOM-waitqueue when they request accountable memory.
+
+To let them run again, you have to relax the memory cgroup's OOM status by
+	* enlarging the limit or reducing usage.
+To reduce usage,
+	* kill some tasks.
+	* move some tasks to another group with account migration.
+	* remove some files (on tmpfs?)
+
+Then, the stopped tasks will work again.
+
+Reading the file shows the current status of OOM:
+	oom_kill_disable 0 or 1 (if 1, the oom-killer is disabled)
+	under_oom	 0 or 1 (if 1, the memory cgroup is under OOM, and
+				 tasks may be stopped.)
+
+11. TODO
 
 1. Add support for accounting huge pages (as a separate controller)
 2. Make per-cgroup scanner reclaim not-shared pages first
diff --git a/Documentation/filesystems/Locking b/Documentation/filesystems/Locking
index af16080..61c98f0 100644
--- a/Documentation/filesystems/Locking
+++ b/Documentation/filesystems/Locking
@@ -429,8 +429,9 @@
 implementations.  If your fs is not using generic_file_llseek, you
 need to acquire and release the appropriate locks in your ->llseek().
 For many filesystems, it is probably safe to acquire the inode
-mutex.  Note some filesystems (i.e. remote ones) provide no
-protection for i_size so you will need to use the BKL.
+mutex or just to use i_size_read() instead.
+Note: this does not protect file->f_pos against concurrent modification
+since this is something userspace has to take care of.
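+
+A minimal sketch of the i_size_read() variant (foo_llseek is hypothetical;
+SEEK_SET needs no adjustment and falls through):
+
+	static loff_t foo_llseek(struct file *file, loff_t offset, int origin)
+	{
+		struct inode *inode = file->f_mapping->host;
+
+		switch (origin) {
+		case SEEK_END:
+			offset += i_size_read(inode);	/* no i_mutex needed */
+			break;
+		case SEEK_CUR:
+			offset += file->f_pos;
+			break;
+		}
+		if (offset < 0)
+			return -EINVAL;
+		file->f_pos = offset;	/* not protected, see note above */
+		return offset;
+	}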
 
 Note: ext2_release() was *the* source of contention on fs-intensive
 loads and dropping BKL on ->release() helps to get rid of that (we still
diff --git a/Documentation/vm/numa b/Documentation/vm/numa
index e93ad94..a200a38 100644
--- a/Documentation/vm/numa
+++ b/Documentation/vm/numa
@@ -1,41 +1,149 @@
 Started Nov 1999 by Kanoj Sarcar <kanoj@sgi.com>
 
-The intent of this file is to have an uptodate, running commentary 
-from different people about NUMA specific code in the Linux vm.
+What is NUMA?
 
-What is NUMA? It is an architecture where the memory access times
-for different regions of memory from a given processor varies
-according to the "distance" of the memory region from the processor.
-Each region of memory to which access times are the same from any 
-cpu, is called a node. On such architectures, it is beneficial if
-the kernel tries to minimize inter node communications. Schemes
-for this range from kernel text and read-only data replication
-across nodes, and trying to house all the data structures that
-key components of the kernel need on memory on that node.
+This question can be answered from a couple of perspectives:  the
+hardware view and the Linux software view.
 
-Currently, all the numa support is to provide efficient handling
-of widely discontiguous physical memory, so architectures which 
-are not NUMA but can have huge holes in the physical address space
-can use the same code. All this code is bracketed by CONFIG_DISCONTIGMEM.
+From the hardware perspective, a NUMA system is a computer platform that
+comprises multiple components or assemblies each of which may contain 0
+or more CPUs, local memory, and/or IO buses.  For brevity and to
+disambiguate the hardware view of these physical components/assemblies
+from the software abstraction thereof, we'll call the components/assemblies
+'cells' in this document.
 
-The initial port includes NUMAizing the bootmem allocator code by
-encapsulating all the pieces of information into a bootmem_data_t
-structure. Node specific calls have been added to the allocator. 
-In theory, any platform which uses the bootmem allocator should 
-be able to put the bootmem and mem_map data structures anywhere
-it deems best.
+Each of the 'cells' may be viewed as an SMP [symmetric multi-processor] subset
+of the system--although some components necessary for a stand-alone SMP system
+may not be populated on any given cell.   The cells of the NUMA system are
+connected together with some sort of system interconnect--e.g., a crossbar or
+point-to-point link are common types of NUMA system interconnects.  Both of
+these types of interconnects can be aggregated to create NUMA platforms with
+cells at multiple distances from other cells.
 
-Each node's page allocation data structures have also been encapsulated
-into a pg_data_t. The bootmem_data_t is just one part of this. To 
-make the code look uniform between NUMA and regular UMA platforms, 
-UMA platforms have a statically allocated pg_data_t too (contig_page_data).
-For the sake of uniformity, the function num_online_nodes() is also defined
-for all platforms. As we run benchmarks, we might decide to NUMAize 
-more variables like low_on_memory, nr_free_pages etc into the pg_data_t.
+For Linux, the NUMA platforms of interest are primarily what is known as Cache
+Coherent NUMA or ccNUMA systems.   With ccNUMA systems, all memory is visible
+to and accessible from any CPU attached to any cell and cache coherency
+is handled in hardware by the processor caches and/or the system interconnect.
 
-The NUMA aware page allocation code currently tries to allocate pages 
-from different nodes in a round robin manner.  This will be changed to 
-do concentratic circle search, starting from current node, once the 
-NUMA port achieves more maturity. The call alloc_pages_node has been 
-added, so that drivers can make the call and not worry about whether 
-it is running on a NUMA or UMA platform.
+Memory access time and effective memory bandwidth varies depending on how far
+away the cell containing the CPU or IO bus making the memory access is from the
+cell containing the target memory.  For example, access to memory by CPUs
+attached to the same cell will experience faster access times and higher
+bandwidths than accesses to memory on other, remote cells.  NUMA platforms
+can have cells at multiple remote distances from any given cell.
+
+Platform vendors don't build NUMA systems just to make software developers'
+lives interesting.  Rather, this architecture is a means to provide scalable
+memory bandwidth.  However, to achieve scalable memory bandwidth, system and
+application software must arrange for a large majority of the memory references
+[cache misses] to be to "local" memory--memory on the same cell, if any--or
+to the closest cell with memory.
+
+This leads to the Linux software view of a NUMA system:
+
+Linux divides the system's hardware resources into multiple software
+abstractions called "nodes".  Linux maps the nodes onto the physical cells
+of the hardware platform, abstracting away some of the details for some
+architectures.  As with physical cells, software nodes may contain 0 or more
+CPUs, memory and/or IO buses.  And, again, memory accesses to memory on
+"closer" nodes--nodes that map to closer cells--will generally experience
+faster access times and higher effective bandwidth than accesses to more
+remote cells.
+
+For some architectures, such as x86, Linux will "hide" any node representing a
+physical cell that has no memory attached, and reassign any CPUs attached to
+that cell to a node representing a cell that does have memory.  Thus, on
+these architectures, one cannot assume that all CPUs that Linux associates with
+a given node will see the same local memory access times and bandwidth.
+
+In addition, for some architectures, again x86 is an example, Linux supports
+the emulation of additional nodes.  For NUMA emulation, Linux will carve up
+the existing nodes--or the system memory for non-NUMA platforms--into multiple
+nodes.  Each emulated node will manage a fraction of the underlying cells'
+physical memory.  NUMA emulation is useful for testing NUMA kernel and
+application features on non-NUMA platforms, and as a sort of memory resource
+management mechanism when used together with cpusets.
+[see Documentation/cgroups/cpusets.txt]
+
+For each node with memory, Linux constructs an independent memory management
+subsystem, complete with its own free page lists, in-use page lists, usage
+statistics and locks to mediate access.  In addition, Linux constructs for
+each memory zone [one or more of DMA, DMA32, NORMAL, HIGH_MEMORY, MOVABLE],
+an ordered "zonelist".  A zonelist specifies the zones/nodes to visit when a
+selected zone/node cannot satisfy the allocation request.  This situation,
+when a zone has no available memory to satisfy a request, is called
+"overflow" or "fallback".
+
+Because some nodes contain multiple zones containing different types of
+memory, Linux must decide whether to order the zonelists such that allocations
+fall back to the same zone type on a different node, or to a different zone
+type on the same node.  This is an important consideration because some zones,
+such as DMA or DMA32, represent relatively scarce resources.  Linux chooses
+a default zonelist order based on the sizes of the various zone types relative
+to the total memory of the node and the total memory of the system.  The
+default zonelist order may be overridden using the numa_zonelist_order kernel
+boot parameter or sysctl.  [see Documentation/kernel-parameters.txt and
+Documentation/sysctl/vm.txt]
+
+By default, Linux will attempt to satisfy memory allocation requests from the
+node to which the CPU that executes the request is assigned.  Specifically,
+Linux will attempt to allocate from the first node in the appropriate zonelist
+for the node where the request originates.  This is called "local allocation."
+If the "local" node cannot satisfy the request, the kernel will examine other
+nodes' zones in the selected zonelist looking for the first zone in the list
+that can satisfy the request.
+
+Local allocation will tend to keep subsequent access to the allocated memory
+"local" to the underlying physical resources and off the system interconnect--
+as long as the task on whose behalf the kernel allocated some memory does not
+later migrate away from that memory.  The Linux scheduler is aware of the
+NUMA topology of the platform--embodied in the "scheduling domains" data
+structures [see Documentation/scheduler/sched-domains.txt]--and the scheduler
+attempts to minimize task migration to distant scheduling domains.  However,
+the scheduler does not take a task's NUMA footprint into account directly.
+Thus, under sufficient imbalance, tasks can migrate between nodes, remote
+from their initial node and kernel data structures.
+
+System administrators and application designers can restrict a task's migration
+to improve NUMA locality using various CPU affinity command line interfaces,
+such as taskset(1) and numactl(1), and program interfaces such as
+sched_setaffinity(2).  Further, one can modify the kernel's default local
+allocation behavior using Linux NUMA memory policy.
+[see Documentation/vm/numa_memory_policy.txt]
+
+System administrators can restrict the CPUs and nodes' memories that a non-
+privileged user can specify in the scheduling or NUMA commands and functions
+using control groups and CPUsets.  [see Documentation/cgroups/cpusets.txt]
+
+On architectures that do not hide memoryless nodes, Linux will include only
+zones [nodes] with memory in the zonelists.  This means that for a memoryless
+node the "local memory node"--the node of the first zone in CPU's node's
+zonelist--will not be the node itself.  Rather, it will be the node that the
+kernel selected as the nearest node with memory when it built the zonelists.
+So, default, local allocations will succeed with the kernel supplying the
+closest available memory.  This is a consequence of the same mechanism that
+allows such allocations to fallback to other nearby nodes when a node that
+does contain memory overflows.
+
+Some kernel allocations do not want or cannot tolerate this allocation fallback
+behavior.  Rather they want to be sure they get memory from the specified node
+or get notified that the node has no free memory.  This is usually the case when
+a subsystem allocates per CPU memory resources, for example.
+
+A typical model for making such an allocation is to obtain the node id of the
+node to which the "current CPU" is attached using one of the kernel's
+numa_node_id() or cpu_to_node() functions and then request memory from only
+the node id returned.  When such an allocation fails, the requesting subsystem
+may revert to its own fallback path.  The slab kernel memory allocator is an
+example of this.  Or, the subsystem may choose to disable or not to enable
+itself on allocation failure.  The kernel profiling subsystem is an example of
+this.
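+
+A minimal sketch of this model (the fallback policy is the subsystem's
+own choice):
+
+	int nid = numa_node_id();	/* node of the current CPU */
+	struct page *page = alloc_pages_node(nid, GFP_KERNEL, 0);
+
+	if (!page)
+		return -ENOMEM;		/* or take a private fallback path */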
+
+If the architecture supports--does not hide--memoryless nodes, then CPUs
+attached to memoryless nodes would always incur the fallback path overhead
+or some subsystems would fail to initialize if they attempted to allocate
+memory exclusively from a node without memory.  To support such
+architectures transparently, kernel subsystems can use the numa_mem_id()
+or cpu_to_mem() function to locate the "local memory node" for the calling or
+specified CPU.  Again, this is the same node from which default, local page
+allocations will be attempted.
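+
+A minimal sketch using the helper (the same fragment as above, but
+tolerant of memoryless nodes):
+
+	/*
+	 * nearest node with memory; equals numa_node_id() when the
+	 * local node itself has memory
+	 */
+	struct page *page = alloc_pages_node(numa_mem_id(), GFP_KERNEL, 0);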
diff --git a/MAINTAINERS b/MAINTAINERS
index 18355cc..8b7eba2 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -5004,6 +5004,12 @@
 S:	Maintained
 F:	drivers/mmc/host/sdhci-s3c.c
 
+SECURE DIGITAL HOST CONTROLLER INTERFACE (SDHCI) ST SPEAR DRIVER
+M:	Viresh Kumar <viresh.kumar@st.com>
+L:	linux-mmc@vger.kernel.org
+S:	Maintained
+F:	drivers/mmc/host/sdhci-spear.c
+
 SECURITY SUBSYSTEM
 M:	James Morris <jmorris@namei.org>
 L:	linux-security-module@vger.kernel.org (suggested Cc:)
diff --git a/arch/alpha/Kconfig b/arch/alpha/Kconfig
index 24efdfe..3e2e540 100644
--- a/arch/alpha/Kconfig
+++ b/arch/alpha/Kconfig
@@ -61,6 +61,9 @@
 config NEED_DMA_MAP_STATE
        def_bool y
 
+config NEED_SG_DMA_LENGTH
+	def_bool y
+
 config GENERIC_ISA_DMA
 	bool
 	default y
diff --git a/arch/alpha/include/asm/scatterlist.h b/arch/alpha/include/asm/scatterlist.h
index 440747c..5728c52 100644
--- a/arch/alpha/include/asm/scatterlist.h
+++ b/arch/alpha/include/asm/scatterlist.h
@@ -1,24 +1,7 @@
 #ifndef _ALPHA_SCATTERLIST_H
 #define _ALPHA_SCATTERLIST_H
 
-#include <asm/page.h>
-#include <asm/types.h>
-  
-struct scatterlist {
-#ifdef CONFIG_DEBUG_SG
-	unsigned long sg_magic;
-#endif
-	unsigned long page_link;
-	unsigned int offset;
-
-	unsigned int length;
-
-	dma_addr_t dma_address;
-	__u32 dma_length;
-};
-
-#define sg_dma_address(sg)	((sg)->dma_address)
-#define sg_dma_len(sg)		((sg)->dma_length)
+#include <asm-generic/scatterlist.h>
 
 #define ISA_DMA_THRESHOLD (~0UL)
 
diff --git a/arch/arm/include/asm/scatterlist.h b/arch/arm/include/asm/scatterlist.h
index bcda59f..2f87870 100644
--- a/arch/arm/include/asm/scatterlist.h
+++ b/arch/arm/include/asm/scatterlist.h
@@ -3,9 +3,6 @@
 
 #include <asm/memory.h>
 #include <asm/types.h>
-
 #include <asm-generic/scatterlist.h>
 
-#undef ARCH_HAS_SG_CHAIN
-
 #endif /* _ASMARM_SCATTERLIST_H */
diff --git a/arch/arm/mach-davinci/include/mach/mmc.h b/arch/arm/mach-davinci/include/mach/mmc.h
index 5a85e24..d4f1e96 100644
--- a/arch/arm/mach-davinci/include/mach/mmc.h
+++ b/arch/arm/mach-davinci/include/mach/mmc.h
@@ -22,6 +22,9 @@
 
 	/* Version of the MMC/SD controller */
 	u8	version;
+
+	/* Number of sg segments */
+	u8	nr_sg;
 };
 void davinci_setup_mmc(int module, struct davinci_mmc_config *config);
 
diff --git a/arch/arm/mach-omap2/board-3430sdp.c b/arch/arm/mach-omap2/board-3430sdp.c
index e7d629b..f474a80 100644
--- a/arch/arm/mach-omap2/board-3430sdp.c
+++ b/arch/arm/mach-omap2/board-3430sdp.c
@@ -137,9 +137,7 @@
 	}
 
 	gpio_direction_input(ts_gpio);
-
-	omap_set_gpio_debounce(ts_gpio, 1);
-	omap_set_gpio_debounce_time(ts_gpio, 0xa);
+	gpio_set_debounce(ts_gpio, 310);
 }
 
 static int ads7846_get_pendown_state(void)
diff --git a/arch/arm/mach-omap2/board-ldp.c b/arch/arm/mach-omap2/board-ldp.c
index 5fcb52e..fefd7e6 100644
--- a/arch/arm/mach-omap2/board-ldp.c
+++ b/arch/arm/mach-omap2/board-ldp.c
@@ -209,8 +209,7 @@
 	}
 
 	gpio_direction_input(ts_gpio);
-	omap_set_gpio_debounce(ts_gpio, 1);
-	omap_set_gpio_debounce_time(ts_gpio, 0xa);
+	gpio_set_debounce(ts_gpio, 310);
 }
 
 static int ads7846_get_pendown_state(void)
diff --git a/arch/arm/mach-omap2/board-omap3evm.c b/arch/arm/mach-omap2/board-omap3evm.c
index 81bba19..b952610 100644
--- a/arch/arm/mach-omap2/board-omap3evm.c
+++ b/arch/arm/mach-omap2/board-omap3evm.c
@@ -579,9 +579,7 @@
 		printk(KERN_ERR "can't get ads7846 pen down GPIO\n");
 
 	gpio_direction_input(OMAP3_EVM_TS_GPIO);
-
-	omap_set_gpio_debounce(OMAP3_EVM_TS_GPIO, 1);
-	omap_set_gpio_debounce_time(OMAP3_EVM_TS_GPIO, 0xa);
+	gpio_set_debounce(OMAP3_EVM_TS_GPIO, 310);
 }
 
 static int ads7846_get_pendown_state(void)
diff --git a/arch/arm/mach-omap2/board-omap3pandora.c b/arch/arm/mach-omap2/board-omap3pandora.c
index 395d049..db06dc9 100644
--- a/arch/arm/mach-omap2/board-omap3pandora.c
+++ b/arch/arm/mach-omap2/board-omap3pandora.c
@@ -130,8 +130,8 @@
 static void __init pandora_keys_gpio_init(void)
 {
 	/* set debounce time for GPIO banks 4 and 6 */
-	omap_set_gpio_debounce_time(32 * 3, GPIO_DEBOUNCE_TIME);
-	omap_set_gpio_debounce_time(32 * 5, GPIO_DEBOUNCE_TIME);
+	gpio_set_debounce(32 * 3, GPIO_DEBOUNCE_TIME);
+	gpio_set_debounce(32 * 5, GPIO_DEBOUNCE_TIME);
 }
 
 static int board_keymap[] = {
diff --git a/arch/arm/mach-omap2/board-omap3touchbook.c b/arch/arm/mach-omap2/board-omap3touchbook.c
index 2504d41..2f5f823 100644
--- a/arch/arm/mach-omap2/board-omap3touchbook.c
+++ b/arch/arm/mach-omap2/board-omap3touchbook.c
@@ -328,8 +328,7 @@
 	}
 
 	gpio_direction_input(OMAP3_TS_GPIO);
-	omap_set_gpio_debounce(OMAP3_TS_GPIO, 1);
-	omap_set_gpio_debounce_time(OMAP3_TS_GPIO, 0xa);
+	gpio_set_debounce(OMAP3_TS_GPIO, 310);
 }
 
 static struct ads7846_platform_data ads7846_config = {
diff --git a/arch/arm/plat-omap/gpio.c b/arch/arm/plat-omap/gpio.c
index dc2ac42..393e921 100644
--- a/arch/arm/plat-omap/gpio.c
+++ b/arch/arm/plat-omap/gpio.c
@@ -624,79 +624,58 @@
 	__raw_writel(l, base + reg); \
 } while(0)
 
-void omap_set_gpio_debounce(int gpio, int enable)
+/**
+ * _set_gpio_debounce - low level gpio debounce time
+ * @bank: the gpio bank we're acting upon
+ * @gpio: the gpio number on this @bank
+ * @debounce: debounce time to use
+ *
+ * OMAP's debounce time is in 31us steps so we need
+ * to convert and round up to the closest unit.
+ */
+static void _set_gpio_debounce(struct gpio_bank *bank, unsigned gpio,
+		unsigned debounce)
 {
-	struct gpio_bank *bank;
-	void __iomem *reg;
-	unsigned long flags;
-	u32 val, l = 1 << get_gpio_index(gpio);
+	void __iomem		*reg = bank->base;
+	u32			val;
+	u32			l;
 
-	if (cpu_class_is_omap1())
-		return;
-
-	bank = get_gpio_bank(gpio);
-	reg = bank->base;
-
-	if (cpu_is_omap44xx())
-		reg += OMAP4_GPIO_DEBOUNCENABLE;
+	if (debounce < 32)
+		debounce = 0x01;
+	else if (debounce > 7936)
+		debounce = 0xff;
 	else
-		reg += OMAP24XX_GPIO_DEBOUNCE_EN;
+		debounce = (debounce / 0x1f) - 1;
 
-	if (!(bank->mod_usage & l)) {
-		printk(KERN_ERR "GPIO %d not requested\n", gpio);
-		return;
-	}
-
-	spin_lock_irqsave(&bank->lock, flags);
-	val = __raw_readl(reg);
-
-	if (enable && !(val & l))
-		val |= l;
-	else if (!enable && (val & l))
-		val &= ~l;
-	else
-		goto done;
-
-	if (cpu_is_omap34xx() || cpu_is_omap44xx()) {
-		bank->dbck_enable_mask = val;
-		if (enable)
-			clk_enable(bank->dbck);
-		else
-			clk_disable(bank->dbck);
-	}
-
-	__raw_writel(val, reg);
-done:
-	spin_unlock_irqrestore(&bank->lock, flags);
-}
-EXPORT_SYMBOL(omap_set_gpio_debounce);
-
-void omap_set_gpio_debounce_time(int gpio, int enc_time)
-{
-	struct gpio_bank *bank;
-	void __iomem *reg;
-
-	if (cpu_class_is_omap1())
-		return;
-
-	bank = get_gpio_bank(gpio);
-	reg = bank->base;
-
-	if (!bank->mod_usage) {
-		printk(KERN_ERR "GPIO not requested\n");
-		return;
-	}
-
-	enc_time &= 0xff;
+	l = 1 << get_gpio_index(gpio);
 
 	if (cpu_is_omap44xx())
 		reg += OMAP4_GPIO_DEBOUNCINGTIME;
 	else
 		reg += OMAP24XX_GPIO_DEBOUNCE_VAL;
 
-	__raw_writel(enc_time, reg);
+	__raw_writel(debounce, reg);
+
+	reg = bank->base;
+	if (cpu_is_omap44xx())
+		reg += OMAP4_GPIO_DEBOUNCENABLE;
+	else
+		reg += OMAP24XX_GPIO_DEBOUNCE_EN;
+
+	val = __raw_readl(reg);
+
+	if (debounce) {
+		val |= l;
+		if (cpu_is_omap34xx() || cpu_is_omap44xx())
+			clk_enable(bank->dbck);
+	} else {
+		val &= ~l;
+		if (cpu_is_omap34xx() || cpu_is_omap44xx())
+			clk_disable(bank->dbck);
+	}
+
+	__raw_writel(val, reg);
 }
-EXPORT_SYMBOL(omap_set_gpio_debounce_time);
 
 #ifdef CONFIG_ARCH_OMAP2PLUS
 static inline void set_24xx_gpio_triggering(struct gpio_bank *bank, int gpio,
@@ -1656,6 +1635,20 @@
 	return 0;
 }
 
+static int gpio_debounce(struct gpio_chip *chip, unsigned offset,
+		unsigned debounce)
+{
+	struct gpio_bank *bank;
+	unsigned long flags;
+
+	bank = container_of(chip, struct gpio_bank, chip);
+	spin_lock_irqsave(&bank->lock, flags);
+	_set_gpio_debounce(bank, offset, debounce);
+	spin_unlock_irqrestore(&bank->lock, flags);
+
+	return 0;
+}
+
 static void gpio_set(struct gpio_chip *chip, unsigned offset, int value)
 {
 	struct gpio_bank *bank;
@@ -1909,6 +1902,7 @@
 		bank->chip.direction_input = gpio_input;
 		bank->chip.get = gpio_get;
 		bank->chip.direction_output = gpio_output;
+		bank->chip.set_debounce = gpio_debounce;
 		bank->chip.set = gpio_set;
 		bank->chip.to_irq = gpio_2irq;
 		if (bank_is_mpuio(bank)) {
diff --git a/arch/avr32/include/asm/scatterlist.h b/arch/avr32/include/asm/scatterlist.h
index 377320e..06394e5 100644
--- a/arch/avr32/include/asm/scatterlist.h
+++ b/arch/avr32/include/asm/scatterlist.h
@@ -1,25 +1,7 @@
 #ifndef __ASM_AVR32_SCATTERLIST_H
 #define __ASM_AVR32_SCATTERLIST_H
 
-#include <asm/types.h>
-
-struct scatterlist {
-#ifdef CONFIG_DEBUG_SG
-   unsigned long	sg_magic;
-#endif
-    unsigned long	page_link;
-    unsigned int	offset;
-    dma_addr_t		dma_address;
-    unsigned int	length;
-};
-
-/* These macros should be used after a pci_map_sg call has been done
- * to get bus addresses of each of the SG entries and their lengths.
- * You should only work with the number of sg entries pci_map_sg
- * returns.
- */
-#define sg_dma_address(sg)	((sg)->dma_address)
-#define sg_dma_len(sg)		((sg)->length)
+#include <asm-generic/scatterlist.h>
 
 #define ISA_DMA_THRESHOLD (0xffffffff)
 
diff --git a/arch/blackfin/include/asm/scatterlist.h b/arch/blackfin/include/asm/scatterlist.h
index 04f4487..64d41d3 100644
--- a/arch/blackfin/include/asm/scatterlist.h
+++ b/arch/blackfin/include/asm/scatterlist.h
@@ -1,27 +1,7 @@
 #ifndef _BLACKFIN_SCATTERLIST_H
 #define _BLACKFIN_SCATTERLIST_H
 
-#include <linux/mm.h>
-
-struct scatterlist {
-#ifdef CONFIG_DEBUG_SG
-	unsigned long sg_magic;
-#endif
-	unsigned long page_link;
-	unsigned int offset;
-	dma_addr_t dma_address;
-	unsigned int length;
-};
-
-/*
- * These macros should be used after a pci_map_sg call has been done
- * to get bus addresses of each of the SG entries and their lengths.
- * You should only work with the number of sg entries pci_map_sg
- * returns, or alternatively stop on the first sg_dma_len(sg) which
- * is 0.
- */
-#define sg_dma_address(sg)      ((sg)->dma_address)
-#define sg_dma_len(sg)          ((sg)->length)
+#include <asm-generic/scatterlist.h>
 
 #define ISA_DMA_THRESHOLD	(0xffffffff)
 
diff --git a/arch/blackfin/kernel/ptrace.c b/arch/blackfin/kernel/ptrace.c
index 43eb969..6ec7768 100644
--- a/arch/blackfin/kernel/ptrace.c
+++ b/arch/blackfin/kernel/ptrace.c
@@ -292,28 +292,6 @@
 			break;
 		}
 
-#ifdef CONFIG_BINFMT_ELF_FDPIC
-	case PTRACE_GETFDPIC: {
-		unsigned long tmp = 0;
-
-		switch (addr) {
-		case_PTRACE_GETFDPIC_EXEC:
-		case PTRACE_GETFDPIC_EXEC:
-			tmp = child->mm->context.exec_fdpic_loadmap;
-			break;
-		case_PTRACE_GETFDPIC_INTERP:
-		case PTRACE_GETFDPIC_INTERP:
-			tmp = child->mm->context.interp_fdpic_loadmap;
-			break;
-		default:
-			break;
-		}
-
-		ret = put_user(tmp, datap);
-		break;
-	}
-#endif
-
 		/* when I and D space are separate, this will have to be fixed. */
 	case PTRACE_POKEDATA:
 		pr_debug("ptrace: PTRACE_PEEKDATA\n");
@@ -357,8 +335,14 @@
 	case PTRACE_PEEKUSR:
 		switch (addr) {
 #ifdef CONFIG_BINFMT_ELF_FDPIC	/* backwards compat */
-		case PT_FDPIC_EXEC:   goto case_PTRACE_GETFDPIC_EXEC;
-		case PT_FDPIC_INTERP: goto case_PTRACE_GETFDPIC_INTERP;
+		case PT_FDPIC_EXEC:
+			request = PTRACE_GETFDPIC;
+			addr = PTRACE_GETFDPIC_EXEC;
+			goto case_default;
+		case PT_FDPIC_INTERP:
+			request = PTRACE_GETFDPIC;
+			addr = PTRACE_GETFDPIC_INTERP;
+			goto case_default;
 #endif
 		default:
 			ret = get_reg(child, addr, datap);
@@ -385,6 +369,7 @@
 					     0, sizeof(struct pt_regs),
 					     (const void __user *)data);
 
+	case_default:
 	default:
 		ret = ptrace_request(child, request, addr, data);
 		break;
diff --git a/arch/cris/include/asm/scatterlist.h b/arch/cris/include/asm/scatterlist.h
index faff53a..249a784 100644
--- a/arch/cris/include/asm/scatterlist.h
+++ b/arch/cris/include/asm/scatterlist.h
@@ -1,22 +1,7 @@
 #ifndef __ASM_CRIS_SCATTERLIST_H
 #define __ASM_CRIS_SCATTERLIST_H
 
-struct scatterlist {
-#ifdef CONFIG_DEBUG_SG
-	unsigned long sg_magic;
-#endif
-	char *  address;    /* Location data is to be transferred to */
-	unsigned int length;
-
-	/* The following is i386 highmem junk - not used by us */
-	unsigned long page_link;
-	unsigned int offset;/* for highmem, page offset */
-
-};
-
-#define sg_dma_address(sg)	((sg)->address)
-#define sg_dma_len(sg)		((sg)->length)
-/* i386 junk */
+#include <asm-generic/scatterlist.h>
 
 #define ISA_DMA_THRESHOLD (0x1fffffff)
 
diff --git a/arch/frv/include/asm/scatterlist.h b/arch/frv/include/asm/scatterlist.h
index 4bca8a2..1614bfd 100644
--- a/arch/frv/include/asm/scatterlist.h
+++ b/arch/frv/include/asm/scatterlist.h
@@ -1,45 +1,7 @@
 #ifndef _ASM_SCATTERLIST_H
 #define _ASM_SCATTERLIST_H
 
-#include <asm/types.h>
-
-/*
- * Drivers must set either ->address or (preferred) page and ->offset
- * to indicate where data must be transferred to/from.
- *
- * Using page is recommended since it handles highmem data as well as
- * low mem. ->address is restricted to data which has a virtual mapping, and
- * it will go away in the future. Updating to page can be automated very
- * easily -- something like
- *
- * sg->address = some_ptr;
- *
- * can be rewritten as
- *
- * sg_set_buf(sg, some_ptr, length);
- *
- * and that's it. There's no excuse for not highmem enabling YOUR driver. /jens
- */
-struct scatterlist {
-#ifdef CONFIG_DEBUG_SG
-	unsigned long	sg_magic;
-#endif
-	unsigned long	page_link;
-	unsigned int	offset;		/* for highmem, page offset */
-
-	dma_addr_t	dma_address;
-	unsigned int	length;
-};
-
-/*
- * These macros should be used after a pci_map_sg call has been done
- * to get bus addresses of each of the SG entries and their lengths.
- * You should only work with the number of sg entries pci_map_sg
- * returns, or alternatively stop on the first sg_dma_len(sg) which
- * is 0.
- */
-#define sg_dma_address(sg)	((sg)->dma_address)
-#define sg_dma_len(sg)		((sg)->length)
+#include <asm-generic/scatterlist.h>
 
 #define ISA_DMA_THRESHOLD (0xffffffffUL)
 
diff --git a/arch/frv/kernel/ptrace.c b/arch/frv/kernel/ptrace.c
index 60eeed3..fac0289 100644
--- a/arch/frv/kernel/ptrace.c
+++ b/arch/frv/kernel/ptrace.c
@@ -344,26 +344,6 @@
 					     0, sizeof(child->thread.user->f),
 					     (const void __user *)data);
 
-	case PTRACE_GETFDPIC:
-		tmp = 0;
-		switch (addr) {
-		case PTRACE_GETFDPIC_EXEC:
-			tmp = child->mm->context.exec_fdpic_loadmap;
-			break;
-		case PTRACE_GETFDPIC_INTERP:
-			tmp = child->mm->context.interp_fdpic_loadmap;
-			break;
-		default:
-			break;
-		}
-
-		ret = 0;
-		if (put_user(tmp, (unsigned long *) data)) {
-			ret = -EFAULT;
-			break;
-		}
-		break;
-
 	default:
 		ret = ptrace_request(child, request, addr, data);
 		break;
diff --git a/arch/frv/kernel/sysctl.c b/arch/frv/kernel/sysctl.c
index 71abd15..6c155d6 100644
--- a/arch/frv/kernel/sysctl.c
+++ b/arch/frv/kernel/sysctl.c
@@ -46,8 +46,9 @@
 /*
  * handle requests to dynamically switch the write caching mode delivered by /proc
  */
-static int procctl_frv_cachemode(ctl_table *table, int write, struct file *filp,
-				 void __user *buffer, size_t *lenp, loff_t *ppos)
+static int procctl_frv_cachemode(ctl_table *table, int write,
+				 void __user *buffer, size_t *lenp,
+				 loff_t *ppos)
 {
 	unsigned long hsr0;
 	char buff[8];
@@ -84,7 +85,7 @@
 	}
 
 	/* read the state */
-	if (filp->f_pos > 0) {
+	if (*ppos > 0) {
 		*lenp = 0;
 		return 0;
 	}
@@ -110,7 +111,7 @@
 		return -EFAULT;
 
 	*lenp = len;
-	filp->f_pos = len;
+	*ppos = len;
 	return 0;
 
 } /* end procctl_frv_cachemode() */
@@ -120,8 +121,9 @@
  * permit the mm_struct the nominated process is using have its MMU context ID pinned
  */
 #ifdef CONFIG_MMU
-static int procctl_frv_pin_cxnr(ctl_table *table, int write, struct file *filp,
-				void __user *buffer, size_t *lenp, loff_t *ppos)
+static int procctl_frv_pin_cxnr(ctl_table *table, int write,
+				void __user *buffer, size_t *lenp,
+				loff_t *ppos)
 {
 	pid_t pid;
 	char buff[16], *p;
@@ -150,7 +152,7 @@
 	}
 
 	/* read the currently pinned CXN */
-	if (filp->f_pos > 0) {
+	if (*ppos > 0) {
 		*lenp = 0;
 		return 0;
 	}
@@ -163,7 +165,7 @@
 		return -EFAULT;
 
 	*lenp = len;
-	filp->f_pos = len;
+	*ppos = len;
 	return 0;
 
 } /* end procctl_frv_pin_cxnr() */
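
The two frv handlers above show the new proc_handler convention: the struct
file argument is gone and position tracking uses the explicit *ppos. A
minimal sketch of the read side of that pattern (hypothetical handler name
and value, not part of this patch):

	static int example_proc_handler(ctl_table *table, int write,
					void __user *buffer, size_t *lenp,
					loff_t *ppos)
	{
		char buf[16];
		size_t len;

		if (write)
			return -EPERM;

		/* one-shot read: report EOF once the value has been read */
		if (*ppos > 0) {
			*lenp = 0;
			return 0;
		}

		len = snprintf(buf, sizeof(buf), "%d\n", 42);
		if (len > *lenp)
			len = *lenp;
		if (len && copy_to_user(buffer, buf, len))
			return -EFAULT;

		*lenp = len;
		*ppos = len;
		return 0;
	}
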
diff --git a/arch/h8300/include/asm/scatterlist.h b/arch/h8300/include/asm/scatterlist.h
index d3ecdd8..de08a4a 100644
--- a/arch/h8300/include/asm/scatterlist.h
+++ b/arch/h8300/include/asm/scatterlist.h
@@ -1,17 +1,7 @@
 #ifndef _H8300_SCATTERLIST_H
 #define _H8300_SCATTERLIST_H
 
-#include <asm/types.h>
-
-struct scatterlist {
-#ifdef CONFIG_DEBUG_SG
-	unsigned long	sg_magic;
-#endif
-	unsigned long	page_link;
-	unsigned int	offset;
-	dma_addr_t	dma_address;
-	unsigned int	length;
-};
+#include <asm-generic/scatterlist.h>
 
 #define ISA_DMA_THRESHOLD	(0xffffffff)
 
diff --git a/arch/ia64/Kconfig b/arch/ia64/Kconfig
index 9676100..9561082 100644
--- a/arch/ia64/Kconfig
+++ b/arch/ia64/Kconfig
@@ -56,6 +56,9 @@
 config NEED_DMA_MAP_STATE
 	def_bool y
 
+config NEED_SG_DMA_LENGTH
+	def_bool y
+
 config SWIOTLB
        bool
 
@@ -495,6 +498,14 @@
 	def_bool y
 	depends on NUMA
 
+config USE_PERCPU_NUMA_NODE_ID
+	def_bool y
+	depends on NUMA
+
+config HAVE_MEMORYLESS_NODES
+	def_bool y
+	depends on NUMA
+
 config ARCH_PROC_KCORE_TEXT
 	def_bool y
 	depends on PROC_KCORE
diff --git a/arch/ia64/include/asm/scatterlist.h b/arch/ia64/include/asm/scatterlist.h
index d8e9896..f299a4f 100644
--- a/arch/ia64/include/asm/scatterlist.h
+++ b/arch/ia64/include/asm/scatterlist.h
@@ -1,6 +1,7 @@
 #ifndef _ASM_IA64_SCATTERLIST_H
 #define _ASM_IA64_SCATTERLIST_H
 
+#include <asm-generic/scatterlist.h>
 /*
  * It used to be that ISA_DMA_THRESHOLD had something to do with the
  * DMA-limits of ISA-devices.  Nowadays, its only remaining use (apart
@@ -10,7 +11,6 @@
  * that's 4GB - 1.
  */
 #define ISA_DMA_THRESHOLD	0xffffffff
-
-#include <asm-generic/scatterlist.h>
+#define ARCH_HAS_SG_CHAIN
 
 #endif /* _ASM_IA64_SCATTERLIST_H */
diff --git a/arch/ia64/include/asm/topology.h b/arch/ia64/include/asm/topology.h
index d323071..09f6467 100644
--- a/arch/ia64/include/asm/topology.h
+++ b/arch/ia64/include/asm/topology.h
@@ -26,11 +26,6 @@
 #define RECLAIM_DISTANCE 15
 
 /*
- * Returns the number of the node containing CPU 'cpu'
- */
-#define cpu_to_node(cpu) (int)(cpu_to_node_map[cpu])
-
-/*
  * Returns a bitmask of CPUs on Node 'node'.
  */
 #define cpumask_of_node(node) ((node) == -1 ?				\
diff --git a/arch/ia64/kernel/pci-swiotlb.c b/arch/ia64/kernel/pci-swiotlb.c
index 3095654..d9485d9 100644
--- a/arch/ia64/kernel/pci-swiotlb.c
+++ b/arch/ia64/kernel/pci-swiotlb.c
@@ -31,8 +31,6 @@
 	.unmap_sg = swiotlb_unmap_sg_attrs,
 	.sync_single_for_cpu = swiotlb_sync_single_for_cpu,
 	.sync_single_for_device = swiotlb_sync_single_for_device,
-	.sync_single_range_for_cpu = swiotlb_sync_single_range_for_cpu,
-	.sync_single_range_for_device = swiotlb_sync_single_range_for_device,
 	.sync_sg_for_cpu = swiotlb_sync_sg_for_cpu,
 	.sync_sg_for_device = swiotlb_sync_sg_for_device,
 	.dma_supported = swiotlb_dma_supported,
diff --git a/arch/ia64/kernel/ptrace.c b/arch/ia64/kernel/ptrace.c
index 0dec7f7..7c7909f 100644
--- a/arch/ia64/kernel/ptrace.c
+++ b/arch/ia64/kernel/ptrace.c
@@ -638,7 +638,7 @@
 	 */
 
 	read_lock(&tasklist_lock);
-	if (child->signal) {
+	if (child->sighand) {
 		spin_lock_irq(&child->sighand->siglock);
 		if (child->state == TASK_STOPPED &&
 		    !test_and_set_tsk_thread_flag(child, TIF_RESTORE_RSE)) {
@@ -662,7 +662,7 @@
 	 * job control stop, so that SIGCONT can be used to wake it up.
 	 */
 	read_lock(&tasklist_lock);
-	if (child->signal) {
+	if (child->sighand) {
 		spin_lock_irq(&child->sighand->siglock);
 		if (child->state == TASK_TRACED &&
 		    (child->signal->flags & SIGNAL_STOP_STOPPED)) {
diff --git a/arch/ia64/kernel/smpboot.c b/arch/ia64/kernel/smpboot.c
index e5230b2..518e876 100644
--- a/arch/ia64/kernel/smpboot.c
+++ b/arch/ia64/kernel/smpboot.c
@@ -390,6 +390,12 @@
 
 	fix_b0_for_bsp();
 
+	/*
+	 * numa_node_id() works after this.
+	 */
+	set_numa_node(cpu_to_node_map[cpuid]);
+	set_numa_mem(local_memory_node(cpu_to_node_map[cpuid]));
+
 	ipi_call_lock_irq();
 	spin_lock(&vector_lock);
 	/* Setup the per cpu irq handling data structures */
@@ -632,6 +638,7 @@
 {
 	cpu_set(smp_processor_id(), cpu_online_map);
 	cpu_set(smp_processor_id(), cpu_callin_map);
+	set_numa_node(cpu_to_node_map[smp_processor_id()]);
 	per_cpu(cpu_state, smp_processor_id()) = CPU_ONLINE;
 	paravirt_post_smp_prepare_boot_cpu();
 }
diff --git a/arch/m32r/include/asm/scatterlist.h b/arch/m32r/include/asm/scatterlist.h
index 1ed372c..aeeddd8 100644
--- a/arch/m32r/include/asm/scatterlist.h
+++ b/arch/m32r/include/asm/scatterlist.h
@@ -1,20 +1,7 @@
 #ifndef _ASM_M32R_SCATTERLIST_H
 #define _ASM_M32R_SCATTERLIST_H
 
-#include <asm/types.h>
-
-struct scatterlist {
-#ifdef CONFIG_DEBUG_SG
-    unsigned long sg_magic;
-#endif
-    char *  address;    /* Location data is to be transferred to, NULL for
-                         * highmem page */
-    unsigned long page_link;
-    unsigned int offset;/* for highmem, page offset */
-
-    dma_addr_t dma_address;
-    unsigned int length;
-};
+#include <asm-generic/scatterlist.h>
 
 #define ISA_DMA_THRESHOLD (0x1fffffff)
 
diff --git a/arch/m68k/include/asm/scatterlist.h b/arch/m68k/include/asm/scatterlist.h
index e27ad90..175da06 100644
--- a/arch/m68k/include/asm/scatterlist.h
+++ b/arch/m68k/include/asm/scatterlist.h
@@ -1,23 +1,9 @@
 #ifndef _M68K_SCATTERLIST_H
 #define _M68K_SCATTERLIST_H
 
-#include <linux/types.h>
-
-struct scatterlist {
-#ifdef CONFIG_DEBUG_SG
-	unsigned long sg_magic;
-#endif
-	unsigned long page_link;
-	unsigned int offset;
-	unsigned int length;
-
-	dma_addr_t dma_address;	/* A place to hang host-specific addresses at. */
-};
+#include <asm-generic/scatterlist.h>
 
 /* This is bogus and should go away. */
 #define ISA_DMA_THRESHOLD (0x00ffffff)
 
-#define sg_dma_address(sg)	((sg)->dma_address)
-#define sg_dma_len(sg)		((sg)->length)
-
 #endif /* !(_M68K_SCATTERLIST_H) */
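
For reference, the <asm-generic/scatterlist.h> these per-arch headers now
pull in looks roughly like this as of this series (a sketch; the
CONFIG_NEED_SG_DMA_LENGTH conditional is why several Kconfig hunks below add
that symbol for IOMMU-capable architectures, where the mapped DMA length can
differ from the CPU-visible length):

	struct scatterlist {
	#ifdef CONFIG_DEBUG_SG
		unsigned long	sg_magic;
	#endif
		unsigned long	page_link;
		unsigned int	offset;
		unsigned int	length;
		dma_addr_t	dma_address;
	#ifdef CONFIG_NEED_SG_DMA_LENGTH
		unsigned int	dma_length;
	#endif
	};

	#define sg_dma_address(sg)	((sg)->dma_address)

	#ifdef CONFIG_NEED_SG_DMA_LENGTH
	#define sg_dma_len(sg)		((sg)->dma_length)
	#else
	#define sg_dma_len(sg)		((sg)->length)
	#endif
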
diff --git a/arch/microblaze/include/asm/scatterlist.h b/arch/microblaze/include/asm/scatterlist.h
index 35d786f..dc4a890 100644
--- a/arch/microblaze/include/asm/scatterlist.h
+++ b/arch/microblaze/include/asm/scatterlist.h
@@ -1 +1,3 @@
 #include <asm-generic/scatterlist.h>
+
+#define ISA_DMA_THRESHOLD	(~0UL)
diff --git a/arch/mips/include/asm/scatterlist.h b/arch/mips/include/asm/scatterlist.h
index 83d69fe..9af65e7 100644
--- a/arch/mips/include/asm/scatterlist.h
+++ b/arch/mips/include/asm/scatterlist.h
@@ -1,27 +1,7 @@
 #ifndef __ASM_SCATTERLIST_H
 #define __ASM_SCATTERLIST_H
 
-#include <asm/types.h>
-
-struct scatterlist {
-#ifdef CONFIG_DEBUG_SG
-	unsigned long	sg_magic;
-#endif
-	unsigned long	page_link;
-	unsigned int	offset;
-	dma_addr_t	dma_address;
-	unsigned int	length;
-};
-
-/*
- * These macros should be used after a pci_map_sg call has been done
- * to get bus addresses of each of the SG entries and their lengths.
- * You should only work with the number of sg entries pci_map_sg
- * returns, or alternatively stop on the first sg_dma_len(sg) which
- * is 0.
- */
-#define sg_dma_address(sg)	((sg)->dma_address)
-#define sg_dma_len(sg)		((sg)->length)
+#include <asm-generic/scatterlist.h>
 
 #define ISA_DMA_THRESHOLD (0x00ffffffUL)
 
diff --git a/arch/mn10300/include/asm/scatterlist.h b/arch/mn10300/include/asm/scatterlist.h
index 6753590..7bd00b9 100644
--- a/arch/mn10300/include/asm/scatterlist.h
+++ b/arch/mn10300/include/asm/scatterlist.h
@@ -11,45 +11,8 @@
 #ifndef _ASM_SCATTERLIST_H
 #define _ASM_SCATTERLIST_H
 
-#include <asm/types.h>
-
-/*
- * Drivers must set either ->address or (preferred) page and ->offset
- * to indicate where data must be transferred to/from.
- *
- * Using page is recommended since it handles highmem data as well as
- * low mem. ->address is restricted to data which has a virtual mapping, and
- * it will go away in the future. Updating to page can be automated very
- * easily -- something like
- *
- * sg->address = some_ptr;
- *
- * can be rewritten as
- *
- * sg_set_page(virt_to_page(some_ptr));
- * sg->offset = (unsigned long) some_ptr & ~PAGE_MASK;
- *
- * and that's it. There's no excuse for not highmem enabling YOUR driver. /jens
- */
-struct scatterlist {
-#ifdef CONFIG_DEBUG_SG
-	unsigned long	sg_magic;
-#endif
-	unsigned long	page_link;
-	unsigned int	offset;		/* for highmem, page offset */
-	dma_addr_t	dma_address;
-	unsigned int	length;
-};
+#include <asm-generic/scatterlist.h>
 
 #define ISA_DMA_THRESHOLD (0x00ffffff)
 
-/*
- * These macros should be used after a pci_map_sg call has been done
- * to get bus addresses of each of the SG entries and their lengths.
- * You should only work with the number of sg entries pci_map_sg
- * returns.
- */
-#define sg_dma_address(sg)	((sg)->dma_address)
-#define sg_dma_len(sg)		((sg)->length)
-
 #endif /* _ASM_SCATTERLIST_H */
diff --git a/arch/parisc/Kconfig b/arch/parisc/Kconfig
index 9c4da3d..05a366a 100644
--- a/arch/parisc/Kconfig
+++ b/arch/parisc/Kconfig
@@ -98,6 +98,9 @@
 config NEED_DMA_MAP_STATE
 	def_bool y
 
+config NEED_SG_DMA_LENGTH
+	def_bool y
+
 config ISA_DMA_API
 	bool
 
diff --git a/arch/parisc/include/asm/scatterlist.h b/arch/parisc/include/asm/scatterlist.h
index 62269b3..2c3b79b 100644
--- a/arch/parisc/include/asm/scatterlist.h
+++ b/arch/parisc/include/asm/scatterlist.h
@@ -3,25 +3,9 @@
 
 #include <asm/page.h>
 #include <asm/types.h>
-
-struct scatterlist {
-#ifdef CONFIG_DEBUG_SG
-	unsigned long sg_magic;
-#endif
-	unsigned long page_link;
-	unsigned int offset;
-
-	unsigned int length;
-
-	/* an IOVA can be 64-bits on some PA-Risc platforms. */
-	dma_addr_t iova;	/* I/O Virtual Address */
-	__u32      iova_length; /* bytes mapped */
-};
-
-#define sg_virt_addr(sg) ((unsigned long)sg_virt(sg))
-#define sg_dma_address(sg) ((sg)->iova)
-#define sg_dma_len(sg)     ((sg)->iova_length)
+#include <asm-generic/scatterlist.h>
 
 #define ISA_DMA_THRESHOLD (~0UL)
+#define sg_virt_addr(sg) ((unsigned long)sg_virt(sg))
 
 #endif /* _ASM_PARISC_SCATTERLIST_H */
diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig
index c4c4549..66a315e 100644
--- a/arch/powerpc/Kconfig
+++ b/arch/powerpc/Kconfig
@@ -663,6 +663,9 @@
 config NEED_DMA_MAP_STATE
 	def_bool (PPC64 || NOT_COHERENT_CACHE)
 
+config NEED_SG_DMA_LENGTH
+	def_bool y
+
 config GENERIC_ISA_DMA
 	bool
 	depends on PPC64 || POWER4 || 6xx && !CPM2
diff --git a/arch/powerpc/include/asm/scatterlist.h b/arch/powerpc/include/asm/scatterlist.h
index 912bf59..34cc78f 100644
--- a/arch/powerpc/include/asm/scatterlist.h
+++ b/arch/powerpc/include/asm/scatterlist.h
@@ -9,38 +9,12 @@
  * 2 of the License, or (at your option) any later version.
  */
 
-#ifdef __KERNEL__
-#include <linux/types.h>
 #include <asm/dma.h>
-
-struct scatterlist {
-#ifdef CONFIG_DEBUG_SG
-	unsigned long sg_magic;
-#endif
-	unsigned long page_link;
-	unsigned int offset;
-	unsigned int length;
-
-	/* For TCE or SWIOTLB support */
-	dma_addr_t dma_address;
-	u32 dma_length;
-};
-
-/*
- * These macros should be used after a dma_map_sg call has been done
- * to get bus addresses of each of the SG entries and their lengths.
- * You should only work with the number of sg entries pci_map_sg
- * returns, or alternatively stop on the first sg_dma_len(sg) which
- * is 0.
- */
-#define sg_dma_address(sg)	((sg)->dma_address)
-#define sg_dma_len(sg)		((sg)->dma_length)
+#include <asm-generic/scatterlist.h>
 
 #ifdef __powerpc64__
 #define ISA_DMA_THRESHOLD	(~0UL)
 #endif
-
 #define ARCH_HAS_SG_CHAIN
 
-#endif /* __KERNEL__ */
 #endif /* _ASM_POWERPC_SCATTERLIST_H */
diff --git a/arch/powerpc/kernel/dma-swiotlb.c b/arch/powerpc/kernel/dma-swiotlb.c
index 4ff4da2c..e7fe218 100644
--- a/arch/powerpc/kernel/dma-swiotlb.c
+++ b/arch/powerpc/kernel/dma-swiotlb.c
@@ -39,8 +39,8 @@
 	.dma_supported = swiotlb_dma_supported,
 	.map_page = swiotlb_map_page,
 	.unmap_page = swiotlb_unmap_page,
-	.sync_single_range_for_cpu = swiotlb_sync_single_range_for_cpu,
-	.sync_single_range_for_device = swiotlb_sync_single_range_for_device,
+	.sync_single_for_cpu = swiotlb_sync_single_for_cpu,
+	.sync_single_for_device = swiotlb_sync_single_for_device,
 	.sync_sg_for_cpu = swiotlb_sync_sg_for_cpu,
 	.sync_sg_for_device = swiotlb_sync_sg_for_device,
 	.mapping_error = swiotlb_dma_mapping_error,
diff --git a/arch/powerpc/kernel/dma.c b/arch/powerpc/kernel/dma.c
index 6c1df57..8d1de6f 100644
--- a/arch/powerpc/kernel/dma.c
+++ b/arch/powerpc/kernel/dma.c
@@ -127,11 +127,11 @@
 		__dma_sync_page(sg_page(sg), sg->offset, sg->length, direction);
 }
 
-static inline void dma_direct_sync_single_range(struct device *dev,
-		dma_addr_t dma_handle, unsigned long offset, size_t size,
-		enum dma_data_direction direction)
+static inline void dma_direct_sync_single(struct device *dev,
+					  dma_addr_t dma_handle, size_t size,
+					  enum dma_data_direction direction)
 {
-	__dma_sync(bus_to_virt(dma_handle+offset), size, direction);
+	__dma_sync(bus_to_virt(dma_handle), size, direction);
 }
 #endif
 
@@ -144,8 +144,8 @@
 	.map_page	= dma_direct_map_page,
 	.unmap_page	= dma_direct_unmap_page,
 #ifdef CONFIG_NOT_COHERENT_CACHE
-	.sync_single_range_for_cpu 	= dma_direct_sync_single_range,
-	.sync_single_range_for_device 	= dma_direct_sync_single_range,
+	.sync_single_for_cpu 		= dma_direct_sync_single,
+	.sync_single_for_device 	= dma_direct_sync_single,
 	.sync_sg_for_cpu 		= dma_direct_sync_sg,
 	.sync_sg_for_device 		= dma_direct_sync_sg,
 #endif
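
The per-bus sync_single_range ops can be dropped because the generic
dma-mapping wrapper folds the offset into the DMA handle and reuses the
plain sync_single op. Roughly (a sketch of the wrapper in
<asm-generic/dma-mapping-common.h>; exact code may differ):

	static inline void
	dma_sync_single_range_for_cpu(struct device *dev, dma_addr_t addr,
				      unsigned long offset, size_t size,
				      enum dma_data_direction dir)
	{
		const struct dma_map_ops *ops = get_dma_ops(dev);

		if (ops->sync_single_for_cpu)
			ops->sync_single_for_cpu(dev, addr + offset, size, dir);
	}
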
diff --git a/arch/powerpc/sysdev/fsl_rio.c b/arch/powerpc/sysdev/fsl_rio.c
index 6a1fde0..cd37e49 100644
--- a/arch/powerpc/sysdev/fsl_rio.c
+++ b/arch/powerpc/sysdev/fsl_rio.c
@@ -1,6 +1,15 @@
 /*
  * Freescale MPC85xx/MPC86xx RapidIO support
  *
+ * Copyright 2009 Sysgo AG
+ * Thomas Moll <thomas.moll@sysgo.com>
+ * - fixed maintenance access routines, check for aligned access
+ *
+ * Copyright 2009 Integrated Device Technology, Inc.
+ * Alex Bounine <alexandre.bounine@idt.com>
+ * - Added Port-Write message handling
+ * - Added Machine Check exception handling
+ *
  * Copyright (C) 2007, 2008 Freescale Semiconductor, Inc.
  * Zhang Wei <wei.zhang@freescale.com>
  *
@@ -24,19 +33,30 @@
 #include <linux/of_platform.h>
 #include <linux/delay.h>
 #include <linux/slab.h>
+#include <linux/kfifo.h>
 
 #include <asm/io.h>
+#include <asm/machdep.h>
+#include <asm/uaccess.h>
+
+#undef DEBUG_PW	/* Port-Write debugging */
 
 /* RapidIO definition irq, which read from OF-tree */
 #define IRQ_RIO_BELL(m)		(((struct rio_priv *)(m->priv))->bellirq)
 #define IRQ_RIO_TX(m)		(((struct rio_priv *)(m->priv))->txirq)
 #define IRQ_RIO_RX(m)		(((struct rio_priv *)(m->priv))->rxirq)
+#define IRQ_RIO_PW(m)		(((struct rio_priv *)(m->priv))->pwirq)
 
 #define RIO_ATMU_REGS_OFFSET	0x10c00
 #define RIO_P_MSG_REGS_OFFSET	0x11000
 #define RIO_S_MSG_REGS_OFFSET	0x13000
 #define RIO_ESCSR		0x158
 #define RIO_CCSR		0x15c
+#define RIO_LTLEDCSR		0x0608
+#define  RIO_LTLEDCSR_IER	0x80000000
+#define  RIO_LTLEDCSR_PRT	0x01000000
+#define RIO_LTLEECSR		0x060c
+#define RIO_EPWISR		0x10010
 #define RIO_ISR_AACR		0x10120
 #define RIO_ISR_AACR_AA		0x1	/* Accept All ID */
 #define RIO_MAINT_WIN_SIZE	0x400000
@@ -55,6 +75,18 @@
 #define RIO_MSG_ISR_QFI		0x00000010
 #define RIO_MSG_ISR_DIQI	0x00000001
 
+#define RIO_IPWMR_SEN		0x00100000
+#define RIO_IPWMR_QFIE		0x00000100
+#define RIO_IPWMR_EIE		0x00000020
+#define RIO_IPWMR_CQ		0x00000002
+#define RIO_IPWMR_PWE		0x00000001
+
+#define RIO_IPWSR_QF		0x00100000
+#define RIO_IPWSR_TE		0x00000080
+#define RIO_IPWSR_QFI		0x00000010
+#define RIO_IPWSR_PWD		0x00000008
+#define RIO_IPWSR_PWB		0x00000004
+
 #define RIO_MSG_DESC_SIZE	32
 #define RIO_MSG_BUFFER_SIZE	4096
 #define RIO_MIN_TX_RING_SIZE	2
@@ -121,7 +153,7 @@
 	u32 pad10[26];
 	u32 pwmr;
 	u32 pwsr;
-	u32 pad11;
+	u32 epwqbar;
 	u32 pwqbar;
 };
 
@@ -160,6 +192,14 @@
 	void *dev_id;
 };
 
+struct rio_port_write_msg {
+	void *virt;
+	dma_addr_t phys;
+	u32 msg_count;
+	u32 err_count;
+	u32 discard_count;
+};
+
 struct rio_priv {
 	struct device *dev;
 	void __iomem *regs_win;
@@ -172,11 +212,64 @@
 	struct rio_dbell_ring dbell_ring;
 	struct rio_msg_tx_ring msg_tx_ring;
 	struct rio_msg_rx_ring msg_rx_ring;
+	struct rio_port_write_msg port_write_msg;
 	int bellirq;
 	int txirq;
 	int rxirq;
+	int pwirq;
+	struct work_struct pw_work;
+	struct kfifo pw_fifo;
+	spinlock_t pw_fifo_lock;
 };
 
+#define __fsl_read_rio_config(x, addr, err, op)		\
+	__asm__ __volatile__(				\
+		"1:	"op" %1,0(%2)\n"		\
+		"	eieio\n"			\
+		"2:\n"					\
+		".section .fixup,\"ax\"\n"		\
+		"3:	li %1,-1\n"			\
+		"	li %0,%3\n"			\
+		"	b 2b\n"				\
+		".section __ex_table,\"a\"\n"		\
+		"	.align 2\n"			\
+		"	.long 1b,3b\n"			\
+		".text"					\
+		: "=r" (err), "=r" (x)			\
+		: "b" (addr), "i" (-EFAULT), "0" (err))
+
+static void __iomem *rio_regs_win;
+
+static int (*saved_mcheck_exception)(struct pt_regs *regs);
+
+static int fsl_rio_mcheck_exception(struct pt_regs *regs)
+{
+	const struct exception_table_entry *entry = NULL;
+	unsigned long reason = (mfspr(SPRN_MCSR) & MCSR_MASK);
+
+	if (reason & MCSR_BUS_RBERR) {
+		reason = in_be32((u32 *)(rio_regs_win + RIO_LTLEDCSR));
+		if (reason & (RIO_LTLEDCSR_IER | RIO_LTLEDCSR_PRT)) {
+			/* Check if we are prepared to handle this fault */
+			entry = search_exception_tables(regs->nip);
+			if (entry) {
+				pr_debug("RIO: %s - MC Exception handled\n",
+					 __func__);
+				out_be32((u32 *)(rio_regs_win + RIO_LTLEDCSR),
+					 0);
+				regs->msr |= MSR_RI;
+				regs->nip = entry->fixup;
+				return 1;
+			}
+		}
+	}
+
+	if (saved_mcheck_exception)
+		return saved_mcheck_exception(regs);
+	else
+		return cur_cpu_spec->machine_check(regs);
+}
+
 /**
  * fsl_rio_doorbell_send - Send a MPC85xx doorbell message
  * @mport: RapidIO master port info
@@ -277,27 +370,44 @@
 {
 	struct rio_priv *priv = mport->priv;
 	u8 *data;
+	u32 rval, err = 0;
 
 	pr_debug
 	    ("fsl_rio_config_read: index %d destid %d hopcount %d offset %8.8x len %d\n",
 	     index, destid, hopcount, offset, len);
-	out_be32(&priv->maint_atmu_regs->rowtar,
-		 (destid << 22) | (hopcount << 12) | ((offset & ~0x3) >> 9));
 
-	data = (u8 *) priv->maint_win + offset;
+	/* 16MB maintenance window possible */
+	/* allow only aligned access to maintenance registers */
+	if (offset > (0x1000000 - len) || !IS_ALIGNED(offset, len))
+		return -EINVAL;
+
+	out_be32(&priv->maint_atmu_regs->rowtar,
+		 (destid << 22) | (hopcount << 12) | (offset >> 12));
+	out_be32(&priv->maint_atmu_regs->rowtear,  (destid >> 10));
+
+	data = (u8 *) priv->maint_win + (offset & (RIO_MAINT_WIN_SIZE - 1));
 	switch (len) {
 	case 1:
-		*val = in_8((u8 *) data);
+		__fsl_read_rio_config(rval, data, err, "lbz");
 		break;
 	case 2:
-		*val = in_be16((u16 *) data);
+		__fsl_read_rio_config(rval, data, err, "lhz");
+		break;
+	case 4:
+		__fsl_read_rio_config(rval, data, err, "lwz");
 		break;
 	default:
-		*val = in_be32((u32 *) data);
-		break;
+		return -EINVAL;
 	}
 
-	return 0;
+	if (err) {
+		pr_debug("RIO: cfg_read error %d for %x:%x:%x\n",
+			 err, destid, hopcount, offset);
+	}
+
+	*val = rval;
+
+	return err;
 }
 
 /**
@@ -322,10 +432,17 @@
 	pr_debug
 	    ("fsl_rio_config_write: index %d destid %d hopcount %d offset %8.8x len %d val %8.8x\n",
 	     index, destid, hopcount, offset, len, val);
-	out_be32(&priv->maint_atmu_regs->rowtar,
-		 (destid << 22) | (hopcount << 12) | ((offset & ~0x3) >> 9));
 
-	data = (u8 *) priv->maint_win + offset;
+	/* 16MB maintenance window possible */
+	/* allow only aligned access to maintenance registers */
+	if (offset > (0x1000000 - len) || !IS_ALIGNED(offset, len))
+		return -EINVAL;
+
+	out_be32(&priv->maint_atmu_regs->rowtar,
+		 (destid << 22) | (hopcount << 12) | (offset >> 12));
+	out_be32(&priv->maint_atmu_regs->rowtear,  (destid >> 10));
+
+	data = (u8 *) priv->maint_win + (offset & (RIO_MAINT_WIN_SIZE - 1));
 	switch (len) {
 	case 1:
 		out_8((u8 *) data, val);
@@ -333,9 +450,11 @@
 	case 2:
 		out_be16((u16 *) data, val);
 		break;
-	default:
+	case 4:
 		out_be32((u32 *) data, val);
 		break;
+	default:
+		return -EINVAL;
 	}
 
 	return 0;
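
The alignment check added above relies on the IS_ALIGNED() helper from
<linux/kernel.h>:

	#define IS_ALIGNED(x, a)	(((x) & ((typeof(x))(a) - 1)) == 0)

so e.g. a 4-byte access at offset 0x1004 passes, while the same access at
offset 0x1002 is rejected with -EINVAL.
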
@@ -930,6 +1049,223 @@
 	return rc;
 }
 
+/**
+ * fsl_rio_port_write_handler - MPC85xx port write interrupt handler
+ * @irq: Linux interrupt number
+ * @dev_instance: Pointer to interrupt-specific data
+ *
+ * Handles port write interrupts. Queues the inbound port-write message
+ * to a FIFO and schedules deferred processing by a work queue.
+ */
+static irqreturn_t
+fsl_rio_port_write_handler(int irq, void *dev_instance)
+{
+	u32 ipwmr, ipwsr;
+	struct rio_mport *port = (struct rio_mport *)dev_instance;
+	struct rio_priv *priv = port->priv;
+	u32 epwisr, tmp;
+
+	ipwmr = in_be32(&priv->msg_regs->pwmr);
+	ipwsr = in_be32(&priv->msg_regs->pwsr);
+
+	epwisr = in_be32(priv->regs_win + RIO_EPWISR);
+	if (epwisr & 0x80000000) {
+		tmp = in_be32(priv->regs_win + RIO_LTLEDCSR);
+		pr_info("RIO_LTLEDCSR = 0x%x\n", tmp);
+		out_be32(priv->regs_win + RIO_LTLEDCSR, 0);
+	}
+
+	if (!(epwisr & 0x00000001))
+		return IRQ_HANDLED;
+
+#ifdef DEBUG_PW
+	pr_debug("PW Int->IPWMR: 0x%08x IPWSR: 0x%08x (", ipwmr, ipwsr);
+	if (ipwsr & RIO_IPWSR_QF)
+		pr_debug(" QF");
+	if (ipwsr & RIO_IPWSR_TE)
+		pr_debug(" TE");
+	if (ipwsr & RIO_IPWSR_QFI)
+		pr_debug(" QFI");
+	if (ipwsr & RIO_IPWSR_PWD)
+		pr_debug(" PWD");
+	if (ipwsr & RIO_IPWSR_PWB)
+		pr_debug(" PWB");
+	pr_debug(" )\n");
+#endif
+	out_be32(&priv->msg_regs->pwsr,
+		 ipwsr & (RIO_IPWSR_TE | RIO_IPWSR_QFI | RIO_IPWSR_PWD));
+
+	if ((ipwmr & RIO_IPWMR_EIE) && (ipwsr & RIO_IPWSR_TE)) {
+		priv->port_write_msg.err_count++;
+		pr_info("RIO: Port-Write Transaction Err (%d)\n",
+			 priv->port_write_msg.err_count);
+	}
+	if (ipwsr & RIO_IPWSR_PWD) {
+		priv->port_write_msg.discard_count++;
+		pr_info("RIO: Port Discarded Port-Write Msg(s) (%d)\n",
+			 priv->port_write_msg.discard_count);
+	}
+
+	/* Schedule deferred processing if PW was received */
+	if (ipwsr & RIO_IPWSR_QFI) {
+		/* Save PW message (if there is room in FIFO),
+		 * otherwise discard it.
+		 */
+		if (kfifo_avail(&priv->pw_fifo) >= RIO_PW_MSG_SIZE) {
+			priv->port_write_msg.msg_count++;
+			kfifo_in(&priv->pw_fifo, priv->port_write_msg.virt,
+				 RIO_PW_MSG_SIZE);
+		} else {
+			priv->port_write_msg.discard_count++;
+			pr_info("RIO: ISR Discarded Port-Write Msg(s) (%d)\n",
+				 priv->port_write_msg.discard_count);
+		}
+		schedule_work(&priv->pw_work);
+	}
+
+	/* Issue Clear Queue command. This allows another
+	 * port-write to be received.
+	 */
+	out_be32(&priv->msg_regs->pwmr, ipwmr | RIO_IPWMR_CQ);
+
+	return IRQ_HANDLED;
+}
+
+static void fsl_pw_dpc(struct work_struct *work)
+{
+	struct rio_priv *priv = container_of(work, struct rio_priv, pw_work);
+	unsigned long flags;
+	u32 msg_buffer[RIO_PW_MSG_SIZE/sizeof(u32)];
+
+	/*
+	 * Process port-write messages
+	 */
+	spin_lock_irqsave(&priv->pw_fifo_lock, flags);
+	while (kfifo_out(&priv->pw_fifo, (unsigned char *)msg_buffer,
+			 RIO_PW_MSG_SIZE)) {
+		/* Process one message */
+		spin_unlock_irqrestore(&priv->pw_fifo_lock, flags);
+#ifdef DEBUG_PW
+		{
+		u32 i;
+		pr_debug("%s : Port-Write Message:", __func__);
+		for (i = 0; i < RIO_PW_MSG_SIZE/sizeof(u32); i++) {
+			if ((i%4) == 0)
+				pr_debug("\n0x%02x: 0x%08x", i*4,
+					 msg_buffer[i]);
+			else
+				pr_debug(" 0x%08x", msg_buffer[i]);
+		}
+		pr_debug("\n");
+		}
+#endif
+		/* Pass the port-write message to RIO core for processing */
+		rio_inb_pwrite_handler((union rio_pw_msg *)msg_buffer);
+		spin_lock_irqsave(&priv->pw_fifo_lock, flags);
+	}
+	spin_unlock_irqrestore(&priv->pw_fifo_lock, flags);
+}
+
+/**
+ * fsl_rio_pw_enable - enable/disable port-write interface
+ * @mport: Master port implementing the port write unit
+ * @enable:    1=enable; 0=disable port-write message handling
+ */
+static int fsl_rio_pw_enable(struct rio_mport *mport, int enable)
+{
+	struct rio_priv *priv = mport->priv;
+	u32 rval;
+
+	rval = in_be32(&priv->msg_regs->pwmr);
+
+	if (enable)
+		rval |= RIO_IPWMR_PWE;
+	else
+		rval &= ~RIO_IPWMR_PWE;
+
+	out_be32(&priv->msg_regs->pwmr, rval);
+
+	return 0;
+}
+
+/**
+ * fsl_rio_port_write_init - MPC85xx port write interface init
+ * @mport: Master port implementing the port write unit
+ *
+ * Initializes port write unit hardware and DMA buffer
+ * ring. Called from fsl_rio_setup(). Returns %0 on success
+ * or %-ENOMEM on failure.
+ */
+static int fsl_rio_port_write_init(struct rio_mport *mport)
+{
+	struct rio_priv *priv = mport->priv;
+	int rc = 0;
+
+	/* The following configuration requires a disabled port write controller */
+	out_be32(&priv->msg_regs->pwmr,
+		 in_be32(&priv->msg_regs->pwmr) & ~RIO_IPWMR_PWE);
+
+	/* Initialize port write */
+	priv->port_write_msg.virt = dma_alloc_coherent(priv->dev,
+					RIO_PW_MSG_SIZE,
+					&priv->port_write_msg.phys, GFP_KERNEL);
+	if (!priv->port_write_msg.virt) {
+		pr_err("RIO: unable to allocate port write queue\n");
+		return -ENOMEM;
+	}
+
+	priv->port_write_msg.err_count = 0;
+	priv->port_write_msg.discard_count = 0;
+
+	/* Point dequeue/enqueue pointers at first entry */
+	out_be32(&priv->msg_regs->epwqbar, 0);
+	out_be32(&priv->msg_regs->pwqbar, (u32) priv->port_write_msg.phys);
+
+	pr_debug("EIPWQBAR: 0x%08x IPWQBAR: 0x%08x\n",
+		 in_be32(&priv->msg_regs->epwqbar),
+		 in_be32(&priv->msg_regs->pwqbar));
+
+	/* Clear interrupt status IPWSR */
+	out_be32(&priv->msg_regs->pwsr,
+		 (RIO_IPWSR_TE | RIO_IPWSR_QFI | RIO_IPWSR_PWD));
+
+	/* Configure the port write controller: enable snooping and all
+	   reporting, clear queue full */
+	out_be32(&priv->msg_regs->pwmr,
+		 RIO_IPWMR_SEN | RIO_IPWMR_QFIE | RIO_IPWMR_EIE | RIO_IPWMR_CQ);
+
+	/* Hook up port-write handler */
+	rc = request_irq(IRQ_RIO_PW(mport), fsl_rio_port_write_handler, 0,
+			 "port-write", (void *)mport);
+	if (rc < 0) {
+		pr_err("MPC85xx RIO: unable to request port-write irq\n");
+		goto err_out;
+	}
+
+	INIT_WORK(&priv->pw_work, fsl_pw_dpc);
+	spin_lock_init(&priv->pw_fifo_lock);
+	if (kfifo_alloc(&priv->pw_fifo, RIO_PW_MSG_SIZE * 32, GFP_KERNEL)) {
+		pr_err("FIFO allocation failed\n");
+		rc = -ENOMEM;
+		goto err_out_irq;
+	}
+
+	pr_debug("IPWMR: 0x%08x IPWSR: 0x%08x\n",
+		 in_be32(&priv->msg_regs->pwmr),
+		 in_be32(&priv->msg_regs->pwsr));
+
+	return rc;
+
+err_out_irq:
+	free_irq(IRQ_RIO_PW(mport), (void *)mport);
+err_out:
+	dma_free_coherent(priv->dev, RIO_PW_MSG_SIZE,
+			  priv->port_write_msg.virt,
+			  priv->port_write_msg.phys);
+	return rc;
+}
+
 static char *cmdline = NULL;
 
 static int fsl_rio_get_hdid(int index)
@@ -1057,7 +1393,7 @@
 	dev_info(&dev->dev, "LAW start 0x%016llx, size 0x%016llx.\n",
 			law_start, law_size);
 
-	ops = kmalloc(sizeof(struct rio_ops), GFP_KERNEL);
+	ops = kzalloc(sizeof(struct rio_ops), GFP_KERNEL);
 	if (!ops) {
 		rc = -ENOMEM;
 		goto err_ops;
@@ -1067,6 +1403,7 @@
 	ops->cread = fsl_rio_config_read;
 	ops->cwrite = fsl_rio_config_write;
 	ops->dsend = fsl_rio_doorbell_send;
+	ops->pwenable = fsl_rio_pw_enable;
 
 	port = kzalloc(sizeof(struct rio_mport), GFP_KERNEL);
 	if (!port) {
@@ -1089,11 +1426,12 @@
 	port->iores.flags = IORESOURCE_MEM;
 	port->iores.name = "rio_io_win";
 
+	priv->pwirq   = irq_of_parse_and_map(dev->dev.of_node, 0);
 	priv->bellirq = irq_of_parse_and_map(dev->dev.of_node, 2);
 	priv->txirq = irq_of_parse_and_map(dev->dev.of_node, 3);
 	priv->rxirq = irq_of_parse_and_map(dev->dev.of_node, 4);
-	dev_info(&dev->dev, "bellirq: %d, txirq: %d, rxirq %d\n", priv->bellirq,
-				priv->txirq, priv->rxirq);
+	dev_info(&dev->dev, "pwirq: %d, bellirq: %d, txirq: %d, rxirq %d\n",
+		 priv->pwirq, priv->bellirq, priv->txirq, priv->rxirq);
 
 	rio_init_dbell_res(&port->riores[RIO_DOORBELL_RESOURCE], 0, 0xffff);
 	rio_init_mbox_res(&port->riores[RIO_INB_MBOX_RESOURCE], 0, 0);
@@ -1109,6 +1447,7 @@
 	rio_register_mport(port);
 
 	priv->regs_win = ioremap(regs.start, regs.end - regs.start + 1);
+	rio_regs_win = priv->regs_win;
 
 	/* Probe the master port phy type */
 	ccsr = in_be32(priv->regs_win + RIO_CCSR);
@@ -1166,7 +1505,8 @@
 
 	/* Configure maintenance transaction window */
 	out_be32(&priv->maint_atmu_regs->rowbar, law_start >> 12);
-	out_be32(&priv->maint_atmu_regs->rowar, 0x80077015);	/* 4M */
+	out_be32(&priv->maint_atmu_regs->rowar,
+		 0x80077000 | (ilog2(RIO_MAINT_WIN_SIZE) - 1));
 
 	priv->maint_win = ioremap(law_start, RIO_MAINT_WIN_SIZE);
 
@@ -1175,6 +1515,12 @@
 			(law_start + RIO_MAINT_WIN_SIZE) >> 12);
 	out_be32(&priv->dbell_atmu_regs->rowar, 0x8004200b);	/* 4k */
 	fsl_rio_doorbell_init(port);
+	fsl_rio_port_write_init(port);
+
+	saved_mcheck_exception = ppc_md.machine_check_exception;
+	ppc_md.machine_check_exception = fsl_rio_mcheck_exception;
+	/* Ensure that RFXE is set */
+	mtspr(SPRN_HID1, (mfspr(SPRN_HID1) | 0x20000));
 
 	return 0;
 err:
diff --git a/arch/s390/include/asm/scatterlist.h b/arch/s390/include/asm/scatterlist.h
index 35d786f..be44d94 100644
--- a/arch/s390/include/asm/scatterlist.h
+++ b/arch/s390/include/asm/scatterlist.h
@@ -1 +1,3 @@
+#define ISA_DMA_THRESHOLD	(~0UL)
+
 #include <asm-generic/scatterlist.h>
diff --git a/arch/s390/kernel/smp.c b/arch/s390/kernel/smp.c
index e4d98de..541053e 100644
--- a/arch/s390/kernel/smp.c
+++ b/arch/s390/kernel/smp.c
@@ -944,21 +944,21 @@
 	struct cpu *c = &per_cpu(cpu_devices, cpu);
 	struct sys_device *s = &c->sysdev;
 	struct s390_idle_data *idle;
+	int err = 0;
 
 	switch (action) {
 	case CPU_ONLINE:
 	case CPU_ONLINE_FROZEN:
 		idle = &per_cpu(s390_idle, cpu);
 		memset(idle, 0, sizeof(struct s390_idle_data));
-		if (sysfs_create_group(&s->kobj, &cpu_online_attr_group))
-			return NOTIFY_BAD;
+		err = sysfs_create_group(&s->kobj, &cpu_online_attr_group);
 		break;
 	case CPU_DEAD:
 	case CPU_DEAD_FROZEN:
 		sysfs_remove_group(&s->kobj, &cpu_online_attr_group);
 		break;
 	}
-	return NOTIFY_OK;
+	return notifier_from_errno(err);
 }
 
 static struct notifier_block __cpuinitdata smp_cpu_nb = {
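
notifier_from_errno(), used here and in the x86 and topology notifier
conversions below, encodes an errno into the notifier return value so
callers can recover it with notifier_to_errno(); 0 still maps to NOTIFY_OK.
Its <linux/notifier.h> definition is essentially:

	static inline int notifier_from_errno(int err)
	{
		if (err)
			return NOTIFY_STOP_MASK | (NOTIFY_OK - err);

		return NOTIFY_OK;
	}
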
diff --git a/arch/score/include/asm/scatterlist.h b/arch/score/include/asm/scatterlist.h
index 9f533b8..4fa1a66 100644
--- a/arch/score/include/asm/scatterlist.h
+++ b/arch/score/include/asm/scatterlist.h
@@ -1,6 +1,8 @@
 #ifndef _ASM_SCORE_SCATTERLIST_H
 #define _ASM_SCORE_SCATTERLIST_H
 
+#define ISA_DMA_THRESHOLD	(~0UL)
+
 #include <asm-generic/scatterlist.h>
 
 #endif /* _ASM_SCORE_SCATTERLIST_H */
diff --git a/arch/sh/Kconfig b/arch/sh/Kconfig
index 0e318c9..c5ee4ce 100644
--- a/arch/sh/Kconfig
+++ b/arch/sh/Kconfig
@@ -186,6 +186,9 @@
 config NEED_DMA_MAP_STATE
 	def_bool DMA_NONCOHERENT
 
+config NEED_SG_DMA_LENGTH
+	def_bool y
+
 source "init/Kconfig"
 
 source "kernel/Kconfig.freezer"
diff --git a/arch/sh/kernel/ptrace_32.c b/arch/sh/kernel/ptrace_32.c
index d4104ce..6c4bbba 100644
--- a/arch/sh/kernel/ptrace_32.c
+++ b/arch/sh/kernel/ptrace_32.c
@@ -436,29 +436,6 @@
 					     0, sizeof(struct pt_dspregs),
 					     (const void __user *)data);
 #endif
-#ifdef CONFIG_BINFMT_ELF_FDPIC
-	case PTRACE_GETFDPIC: {
-		unsigned long tmp = 0;
-
-		switch (addr) {
-		case PTRACE_GETFDPIC_EXEC:
-			tmp = child->mm->context.exec_fdpic_loadmap;
-			break;
-		case PTRACE_GETFDPIC_INTERP:
-			tmp = child->mm->context.interp_fdpic_loadmap;
-			break;
-		default:
-			break;
-		}
-
-		ret = 0;
-		if (put_user(tmp, datap)) {
-			ret = -EFAULT;
-			break;
-		}
-		break;
-	}
-#endif
 	default:
 		ret = ptrace_request(child, request, addr, data);
 		break;
diff --git a/arch/sparc/Kconfig b/arch/sparc/Kconfig
index d6781ce..6f1470b 100644
--- a/arch/sparc/Kconfig
+++ b/arch/sparc/Kconfig
@@ -133,6 +133,9 @@
 config NEED_DMA_MAP_STATE
 	def_bool y
 
+config NEED_SG_DMA_LENGTH
+	def_bool y
+
 config GENERIC_ISA_DMA
 	bool
 	default y if SPARC32
diff --git a/arch/sparc/include/asm/scatterlist.h b/arch/sparc/include/asm/scatterlist.h
index d112025..433e45f 100644
--- a/arch/sparc/include/asm/scatterlist.h
+++ b/arch/sparc/include/asm/scatterlist.h
@@ -1,8 +1,9 @@
 #ifndef _SPARC_SCATTERLIST_H
 #define _SPARC_SCATTERLIST_H
 
-#define sg_dma_len(sg)     	((sg)->dma_length)
-
 #include <asm-generic/scatterlist.h>
 
+#define ISA_DMA_THRESHOLD	(~0UL)
+#define ARCH_HAS_SG_CHAIN
+
 #endif /* !(_SPARC_SCATTERLIST_H) */
diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
index e0c619c..dcb0593 100644
--- a/arch/x86/Kconfig
+++ b/arch/x86/Kconfig
@@ -109,6 +109,9 @@
 config NEED_DMA_MAP_STATE
        def_bool (X86_64 || DMAR || DMA_API_DEBUG)
 
+config NEED_SG_DMA_LENGTH
+	def_bool y
+
 config GENERIC_ISA_DMA
 	def_bool y
 
@@ -1703,6 +1706,10 @@
 	def_bool X86_64
 	depends on NUMA
 
+config USE_PERCPU_NUMA_NODE_ID
+	def_bool X86_64
+	depends on NUMA
+
 menu "Power management and ACPI options"
 
 config ARCH_HIBERNATION_HEADER
diff --git a/arch/x86/include/asm/scatterlist.h b/arch/x86/include/asm/scatterlist.h
index 75af592..fb0b187 100644
--- a/arch/x86/include/asm/scatterlist.h
+++ b/arch/x86/include/asm/scatterlist.h
@@ -1,8 +1,9 @@
 #ifndef _ASM_X86_SCATTERLIST_H
 #define _ASM_X86_SCATTERLIST_H
 
-#define ISA_DMA_THRESHOLD (0x00ffffff)
-
 #include <asm-generic/scatterlist.h>
 
+#define ISA_DMA_THRESHOLD (0x00ffffff)
+#define ARCH_HAS_SG_CHAIN
+
 #endif /* _ASM_X86_SCATTERLIST_H */
diff --git a/arch/x86/include/asm/topology.h b/arch/x86/include/asm/topology.h
index c5087d7..21899cc3 100644
--- a/arch/x86/include/asm/topology.h
+++ b/arch/x86/include/asm/topology.h
@@ -53,33 +53,29 @@
 extern int cpu_to_node_map[];
 
 /* Returns the number of the node containing CPU 'cpu' */
-static inline int cpu_to_node(int cpu)
+static inline int __cpu_to_node(int cpu)
 {
 	return cpu_to_node_map[cpu];
 }
-#define early_cpu_to_node(cpu)	cpu_to_node(cpu)
+#define early_cpu_to_node __cpu_to_node
+#define cpu_to_node __cpu_to_node
 
 #else /* CONFIG_X86_64 */
 
 /* Mappings between logical cpu number and node number */
 DECLARE_EARLY_PER_CPU(int, x86_cpu_to_node_map);
 
-/* Returns the number of the current Node. */
-DECLARE_PER_CPU(int, node_number);
-#define numa_node_id()		percpu_read(node_number)
-
 #ifdef CONFIG_DEBUG_PER_CPU_MAPS
-extern int cpu_to_node(int cpu);
+/*
+ * override generic percpu implementation of cpu_to_node
+ */
+extern int __cpu_to_node(int cpu);
+#define cpu_to_node __cpu_to_node
+
 extern int early_cpu_to_node(int cpu);
 
 #else	/* !CONFIG_DEBUG_PER_CPU_MAPS */
 
-/* Returns the number of the node containing CPU 'cpu' */
-static inline int cpu_to_node(int cpu)
-{
-	return per_cpu(x86_cpu_to_node_map, cpu);
-}
-
 /* Same function but used if called before per_cpu areas are setup */
 static inline int early_cpu_to_node(int cpu)
 {
@@ -170,6 +166,10 @@
 {
 	return 0;
 }
+/*
+ * indicate override:
+ */
+#define numa_node_id numa_node_id
 
 static inline int early_cpu_to_node(int cpu)
 {
diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c
index cc83a00..68e4a6f 100644
--- a/arch/x86/kernel/cpu/common.c
+++ b/arch/x86/kernel/cpu/common.c
@@ -1121,9 +1121,9 @@
 	oist = &per_cpu(orig_ist, cpu);
 
 #ifdef CONFIG_NUMA
-	if (cpu != 0 && percpu_read(node_number) == 0 &&
-	    cpu_to_node(cpu) != NUMA_NO_NODE)
-		percpu_write(node_number, cpu_to_node(cpu));
+	if (cpu != 0 && percpu_read(numa_node) == 0 &&
+	    early_cpu_to_node(cpu) != NUMA_NO_NODE)
+		set_numa_node(early_cpu_to_node(cpu));
 #endif
 
 	me = current;
diff --git a/arch/x86/kernel/cpu/mcheck/therm_throt.c b/arch/x86/kernel/cpu/mcheck/therm_throt.c
index 81c499e..e1a0a3b 100644
--- a/arch/x86/kernel/cpu/mcheck/therm_throt.c
+++ b/arch/x86/kernel/cpu/mcheck/therm_throt.c
@@ -190,7 +190,7 @@
 		mutex_unlock(&therm_cpu_lock);
 		break;
 	}
-	return err ? NOTIFY_BAD : NOTIFY_OK;
+	return notifier_from_errno(err);
 }
 
 static struct notifier_block thermal_throttle_cpu_notifier __cpuinitdata =
diff --git a/arch/x86/kernel/cpuid.c b/arch/x86/kernel/cpuid.c
index 8b862d59..1b7b31a 100644
--- a/arch/x86/kernel/cpuid.c
+++ b/arch/x86/kernel/cpuid.c
@@ -170,7 +170,7 @@
 		cpuid_device_destroy(cpu);
 		break;
 	}
-	return err ? NOTIFY_BAD : NOTIFY_OK;
+	return notifier_from_errno(err);
 }
 
 static struct notifier_block __refdata cpuid_class_cpu_notifier =
diff --git a/arch/x86/kernel/msr.c b/arch/x86/kernel/msr.c
index 4d4468e..7bf2dc4 100644
--- a/arch/x86/kernel/msr.c
+++ b/arch/x86/kernel/msr.c
@@ -230,7 +230,7 @@
 		msr_device_destroy(cpu);
 		break;
 	}
-	return err ? NOTIFY_BAD : NOTIFY_OK;
+	return notifier_from_errno(err);
 }
 
 static struct notifier_block __refdata msr_class_cpu_notifier = {
diff --git a/arch/x86/kernel/pci-swiotlb.c b/arch/x86/kernel/pci-swiotlb.c
index 7d2829d..a5bc528 100644
--- a/arch/x86/kernel/pci-swiotlb.c
+++ b/arch/x86/kernel/pci-swiotlb.c
@@ -31,8 +31,6 @@
 	.free_coherent = swiotlb_free_coherent,
 	.sync_single_for_cpu = swiotlb_sync_single_for_cpu,
 	.sync_single_for_device = swiotlb_sync_single_for_device,
-	.sync_single_range_for_cpu = swiotlb_sync_single_range_for_cpu,
-	.sync_single_range_for_device = swiotlb_sync_single_range_for_device,
 	.sync_sg_for_cpu = swiotlb_sync_sg_for_cpu,
 	.sync_sg_for_device = swiotlb_sync_sg_for_device,
 	.map_sg = swiotlb_map_sg_attrs,
diff --git a/arch/x86/kernel/setup_percpu.c b/arch/x86/kernel/setup_percpu.c
index ef6370b..a867940 100644
--- a/arch/x86/kernel/setup_percpu.c
+++ b/arch/x86/kernel/setup_percpu.c
@@ -265,10 +265,10 @@
 
 #if defined(CONFIG_X86_64) && defined(CONFIG_NUMA)
 	/*
-	 * make sure boot cpu node_number is right, when boot cpu is on the
+	 * make sure boot cpu numa_node is right, when boot cpu is on the
 	 * node that doesn't have mem installed
 	 */
-	per_cpu(node_number, boot_cpu_id) = cpu_to_node(boot_cpu_id);
+	set_cpu_numa_node(boot_cpu_id, early_cpu_to_node(boot_cpu_id));
 #endif
 
 	/* Setup node to cpumask map */
diff --git a/arch/x86/mm/numa.c b/arch/x86/mm/numa.c
index 550df48..10c27bb 100644
--- a/arch/x86/mm/numa.c
+++ b/arch/x86/mm/numa.c
@@ -2,6 +2,7 @@
 #include <linux/topology.h>
 #include <linux/module.h>
 #include <linux/bootmem.h>
+#include <linux/random.h>
 
 #ifdef CONFIG_DEBUG_PER_CPU_MAPS
 # define DBG(x...) printk(KERN_DEBUG x)
@@ -65,3 +66,19 @@
 }
 EXPORT_SYMBOL(cpumask_of_node);
 #endif
+
+/*
+ * Return the bit number of a random bit set in the nodemask.
+ *   (returns -1 if nodemask is empty)
+ */
+int __node_random(const nodemask_t *maskp)
+{
+	int w, bit = -1;
+
+	w = nodes_weight(*maskp);
+	if (w)
+		bit = bitmap_ord_to_pos(maskp->bits,
+			get_random_int() % w, MAX_NUMNODES);
+	return bit;
+}
+EXPORT_SYMBOL(__node_random);
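
A hypothetical use of the new helper (illustrative only, not from this
patch; node_states and numa_node_id() are the usual NUMA APIs):

	/* pick a random node that has memory, falling back to the local node */
	int nid = __node_random(&node_states[N_HIGH_MEMORY]);

	if (nid < 0)
		nid = numa_node_id();
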
diff --git a/arch/x86/mm/numa_64.c b/arch/x86/mm/numa_64.c
index 8948f47..a7bcc23 100644
--- a/arch/x86/mm/numa_64.c
+++ b/arch/x86/mm/numa_64.c
@@ -33,9 +33,6 @@
 static unsigned long __initdata nodemap_addr;
 static unsigned long __initdata nodemap_size;
 
-DEFINE_PER_CPU(int, node_number) = 0;
-EXPORT_PER_CPU_SYMBOL(node_number);
-
 /*
  * Map cpu index to node index
  */
@@ -809,7 +806,7 @@
 	per_cpu(x86_cpu_to_node_map, cpu) = node;
 
 	if (node != NUMA_NO_NODE)
-		per_cpu(node_number, cpu) = node;
+		set_cpu_numa_node(cpu, node);
 }
 
 void __cpuinit numa_clear_node(int cpu)
@@ -867,7 +864,7 @@
 	numa_set_cpumask(cpu, 0);
 }
 
-int cpu_to_node(int cpu)
+int __cpu_to_node(int cpu)
 {
 	if (early_per_cpu_ptr(x86_cpu_to_node_map)) {
 		printk(KERN_WARNING
@@ -877,7 +874,7 @@
 	}
 	return per_cpu(x86_cpu_to_node_map, cpu);
 }
-EXPORT_SYMBOL(cpu_to_node);
+EXPORT_SYMBOL(__cpu_to_node);
 
 /*
  * Same function as cpu_to_node() but used if called before the
diff --git a/arch/xtensa/include/asm/scatterlist.h b/arch/xtensa/include/asm/scatterlist.h
index 810080b..b1f9fdc 100644
--- a/arch/xtensa/include/asm/scatterlist.h
+++ b/arch/xtensa/include/asm/scatterlist.h
@@ -11,28 +11,7 @@
 #ifndef _XTENSA_SCATTERLIST_H
 #define _XTENSA_SCATTERLIST_H
 
-#include <asm/types.h>
-
-struct scatterlist {
-#ifdef CONFIG_DEBUG_SG
-	unsigned long	sg_magic;
-#endif
-	unsigned long	page_link;
-	unsigned int	offset;
-	dma_addr_t	dma_address;
-	unsigned int	length;
-};
-
-/*
- * These macros should be used after a pci_map_sg call has been done
- * to get bus addresses of each of the SG entries and their lengths.
- * You should only work with the number of sg entries pci_map_sg
- * returns, or alternatively stop on the first sg_dma_len(sg) which
- * is 0.
- */
-#define sg_dma_address(sg)      ((sg)->dma_address)
-#define sg_dma_len(sg)          ((sg)->length)
-
+#include <asm-generic/scatterlist.h>
 
 #define ISA_DMA_THRESHOLD (~0UL)
 
diff --git a/drivers/base/topology.c b/drivers/base/topology.c
index bf6b132..9fc630c 100644
--- a/drivers/base/topology.c
+++ b/drivers/base/topology.c
@@ -162,7 +162,7 @@
 		topology_remove_dev(cpu);
 		break;
 	}
-	return rc ? NOTIFY_BAD : NOTIFY_OK;
+	return notifier_from_errno(rc);
 }
 
 static int __cpuinit topology_sysfs_init(void)
diff --git a/drivers/char/Kconfig b/drivers/char/Kconfig
index e21175b..f09fc0e 100644
--- a/drivers/char/Kconfig
+++ b/drivers/char/Kconfig
@@ -1121,5 +1121,12 @@
 
 source "drivers/s390/char/Kconfig"
 
+config RAMOOPS
+	tristate "Log panic/oops to a RAM buffer"
+	default n
+	help
+	  This enables panic and oops messages to be logged to a circular
+	  buffer in RAM where they can be read back at some later point.
+
 endmenu
 
diff --git a/drivers/char/Makefile b/drivers/char/Makefile
index d39be4c..88d6eac 100644
--- a/drivers/char/Makefile
+++ b/drivers/char/Makefile
@@ -108,6 +108,7 @@
 obj-$(CONFIG_TCG_TPM)		+= tpm/
 
 obj-$(CONFIG_PS3_FLASH)		+= ps3flash.o
+obj-$(CONFIG_RAMOOPS)		+= ramoops.o
 
 obj-$(CONFIG_JS_RTC)		+= js-rtc.o
 js-rtc-y = rtc.o
diff --git a/drivers/char/agp/amd64-agp.c b/drivers/char/agp/amd64-agp.c
index 67ea3a6..70312da 100644
--- a/drivers/char/agp/amd64-agp.c
+++ b/drivers/char/agp/amd64-agp.c
@@ -384,7 +384,7 @@
 {
 	u32 httfea,baseaddr,enuscr;
 	struct pci_dev *dev1;
-	int i;
+	int i, ret;
 	unsigned size = amd64_fetch_size();
 
 	dev_info(&pdev->dev, "setting up ULi AGP\n");
@@ -400,15 +400,18 @@
 
 	if (i == ARRAY_SIZE(uli_sizes)) {
 		dev_info(&pdev->dev, "no ULi size found for %d\n", size);
-		return -ENODEV;
+		ret = -ENODEV;
+		goto put;
 	}
 
 	/* shadow x86-64 registers into ULi registers */
 	pci_read_config_dword (k8_northbridges[0], AMD64_GARTAPERTUREBASE, &httfea);
 
 	/* if x86-64 aperture base is beyond 4G, exit here */
-	if ((httfea & 0x7fff) >> (32 - 25))
-		return -ENODEV;
+	if ((httfea & 0x7fff) >> (32 - 25)) {
+		ret = -ENODEV;
+		goto put;
+	}
 
 	httfea = (httfea& 0x7fff) << 25;
 
@@ -420,9 +423,10 @@
 	enuscr= httfea+ (size * 1024 * 1024) - 1;
 	pci_write_config_dword(dev1, ULI_X86_64_HTT_FEA_REG, httfea);
 	pci_write_config_dword(dev1, ULI_X86_64_ENU_SCR_REG, enuscr);
-
+	ret = 0;
+put:
 	pci_dev_put(dev1);
-	return 0;
+	return ret;
 }
 
 
@@ -441,7 +445,7 @@
 {
 	u32 tmp, apbase, apbar, aplimit;
 	struct pci_dev *dev1;
-	int i;
+	int i, ret;
 	unsigned size = amd64_fetch_size();
 
 	dev_info(&pdev->dev, "setting up Nforce3 AGP\n");
@@ -458,7 +462,8 @@
 
 	if (i == ARRAY_SIZE(nforce3_sizes)) {
 		dev_info(&pdev->dev, "no NForce3 size found for %d\n", size);
-		return -ENODEV;
+		ret = -ENODEV;
+		goto put;
 	}
 
 	pci_read_config_dword(dev1, NVIDIA_X86_64_1_APSIZE, &tmp);
@@ -472,7 +477,8 @@
 	/* if x86-64 aperture base is beyond 4G, exit here */
 	if ( (apbase & 0x7fff) >> (32 - 25) ) {
 		dev_info(&pdev->dev, "aperture base > 4G\n");
-		return -ENODEV;
+		ret = -ENODEV;
+		goto put;
 	}
 
 	apbase = (apbase & 0x7fff) << 25;
@@ -488,9 +494,11 @@
 	pci_write_config_dword(dev1, NVIDIA_X86_64_1_APBASE2, apbase);
 	pci_write_config_dword(dev1, NVIDIA_X86_64_1_APLIMIT2, aplimit);
 
+	ret = 0;
+put:
 	pci_dev_put(dev1);
 
-	return 0;
+	return ret;
 }
 
 static int __devinit agp_amd64_probe(struct pci_dev *pdev,
diff --git a/drivers/char/applicom.c b/drivers/char/applicom.c
index 63313a3..f4ae0e0 100644
--- a/drivers/char/applicom.c
+++ b/drivers/char/applicom.c
@@ -703,14 +703,9 @@
 	/* In general, the device is only openable by root anyway, so we're not
 	   particularly concerned that bogus ioctls can flood the console. */
 
-	adgl = kmalloc(sizeof(struct st_ram_io), GFP_KERNEL);
-	if (!adgl)
-		return -ENOMEM;
-
-	if (copy_from_user(adgl, argp, sizeof(struct st_ram_io))) {
-		kfree(adgl);
-		return -EFAULT;
-	}
+	adgl = memdup_user(argp, sizeof(struct st_ram_io));
+	if (IS_ERR(adgl))
+		return PTR_ERR(adgl);
 
 	lock_kernel();	
 	IndexCard = adgl->num_card-1;
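
memdup_user() collapses the kmalloc + copy_from_user + error-unwind
sequence removed above into one call; note the caller now checks
IS_ERR()/PTR_ERR() instead of NULL. Its mm/util.c implementation is
essentially:

	void *memdup_user(const void __user *src, size_t len)
	{
		void *p = kmalloc(len, GFP_KERNEL);

		if (!p)
			return ERR_PTR(-ENOMEM);

		if (copy_from_user(p, src, len)) {
			kfree(p);
			return ERR_PTR(-EFAULT);
		}

		return p;
	}
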
diff --git a/drivers/char/ipmi/ipmi_msghandler.c b/drivers/char/ipmi/ipmi_msghandler.c
index c6ad423..4f3f8c9 100644
--- a/drivers/char/ipmi/ipmi_msghandler.c
+++ b/drivers/char/ipmi/ipmi_msghandler.c
@@ -2505,12 +2505,11 @@
 			return rv;
 		}
 
-		printk(KERN_INFO
-		       "ipmi: Found new BMC (man_id: 0x%6.6x, "
-		       " prod_id: 0x%4.4x, dev_id: 0x%2.2x)\n",
-		       bmc->id.manufacturer_id,
-		       bmc->id.product_id,
-		       bmc->id.device_id);
+		dev_info(intf->si_dev, "Found new BMC (man_id: 0x%6.6x, "
+			 "prod_id: 0x%4.4x, dev_id: 0x%2.2x)\n",
+			 bmc->id.manufacturer_id,
+			 bmc->id.product_id,
+			 bmc->id.device_id);
 	}
 
 	/*
@@ -4037,8 +4036,8 @@
 
 static struct timer_list ipmi_timer;
 
-/* Call every ~100 ms. */
-#define IPMI_TIMEOUT_TIME	100
+/* Call every ~1000 ms. */
+#define IPMI_TIMEOUT_TIME	1000
 
 /* How many jiffies does it take to get to the timeout time. */
 #define IPMI_TIMEOUT_JIFFIES	((IPMI_TIMEOUT_TIME * HZ) / 1000)
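
A quick sanity check of the ms-to-jiffies conversion above with the new
1000 ms period (values for two common HZ settings; illustrative):

	/* IPMI_TIMEOUT_JIFFIES = (IPMI_TIMEOUT_TIME * HZ) / 1000
	 *   HZ = 1000: (1000 * 1000) / 1000 = 1000 jiffies = 1 s
	 *   HZ = 250:  (1000 *  250) / 1000 =  250 jiffies = 1 s
	 */
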
diff --git a/drivers/char/ipmi/ipmi_si_intf.c b/drivers/char/ipmi/ipmi_si_intf.c
index 47ffe4a..35603dd 100644
--- a/drivers/char/ipmi/ipmi_si_intf.c
+++ b/drivers/char/ipmi/ipmi_si_intf.c
@@ -107,6 +107,14 @@
 };
 static char *si_to_str[] = { "kcs", "smic", "bt" };
 
+enum ipmi_addr_src {
+	SI_INVALID = 0, SI_HOTMOD, SI_HARDCODED, SI_SPMI, SI_ACPI, SI_SMBIOS,
+	SI_PCI,	SI_DEVICETREE, SI_DEFAULT
+};
+static char *ipmi_addr_src_to_str[] = { NULL, "hotmod", "hardcoded", "SPMI",
+					"ACPI", "SMBIOS", "PCI",
+					"device-tree", "default" };
+
 #define DEVICE_NAME "ipmi_si"
 
 static struct platform_driver ipmi_driver = {
@@ -188,7 +196,7 @@
 	int (*irq_setup)(struct smi_info *info);
 	void (*irq_cleanup)(struct smi_info *info);
 	unsigned int io_size;
-	char *addr_source; /* ACPI, PCI, SMBIOS, hardcode, default. */
+	enum ipmi_addr_src addr_source; /* ACPI, PCI, SMBIOS, hardcode, etc. */
 	void (*addr_source_cleanup)(struct smi_info *info);
 	void *addr_source_data;
 
@@ -300,6 +308,7 @@
 
 static int unload_when_empty = 1;
 
+static int add_smi(struct smi_info *smi);
 static int try_smi_init(struct smi_info *smi);
 static void cleanup_one_si(struct smi_info *to_clean);
 
@@ -314,9 +323,14 @@
 {
 	/* Deliver the message to the upper layer with the lock
 	   released. */
-	spin_unlock(&(smi_info->si_lock));
-	ipmi_smi_msg_received(smi_info->intf, msg);
-	spin_lock(&(smi_info->si_lock));
+
+	if (smi_info->run_to_completion) {
+		ipmi_smi_msg_received(smi_info->intf, msg);
+	} else {
+		spin_unlock(&(smi_info->si_lock));
+		ipmi_smi_msg_received(smi_info->intf, msg);
+		spin_lock(&(smi_info->si_lock));
+	}
 }
 
 static void return_hosed_msg(struct smi_info *smi_info, int cCode)
@@ -445,6 +459,9 @@
 	if ((smi_info->irq) && (!smi_info->interrupt_disabled)) {
 		start_disable_irq(smi_info);
 		smi_info->interrupt_disabled = 1;
+		if (!atomic_read(&smi_info->stop_operation))
+			mod_timer(&smi_info->si_timer,
+				  jiffies + SI_TIMEOUT_JIFFIES);
 	}
 }
 
@@ -576,9 +593,8 @@
 		smi_info->handlers->get_result(smi_info->si_sm, msg, 3);
 		if (msg[2] != 0) {
 			/* Error clearing flags */
-			printk(KERN_WARNING
-			       "ipmi_si: Error clearing flags: %2.2x\n",
-			       msg[2]);
+			dev_warn(smi_info->dev,
+				 "Error clearing flags: %2.2x\n", msg[2]);
 		}
 		if (smi_info->si_state == SI_CLEARING_FLAGS_THEN_SET_IRQ)
 			start_enable_irq(smi_info);
@@ -670,9 +686,8 @@
 		/* We got the flags from the SMI, now handle them. */
 		smi_info->handlers->get_result(smi_info->si_sm, msg, 4);
 		if (msg[2] != 0) {
-			printk(KERN_WARNING
-			       "ipmi_si: Could not enable interrupts"
-			       ", failed get, using polled mode.\n");
+			dev_warn(smi_info->dev, "Could not enable interrupts"
+				 ", failed get, using polled mode.\n");
 			smi_info->si_state = SI_NORMAL;
 		} else {
 			msg[0] = (IPMI_NETFN_APP_REQUEST << 2);
@@ -693,11 +708,11 @@
 
 		/* We got the flags from the SMI, now handle them. */
 		smi_info->handlers->get_result(smi_info->si_sm, msg, 4);
-		if (msg[2] != 0) {
-			printk(KERN_WARNING
-			       "ipmi_si: Could not enable interrupts"
-			       ", failed set, using polled mode.\n");
-		}
+		if (msg[2] != 0)
+			dev_warn(smi_info->dev, "Could not enable interrupts"
+				 ", failed set, using polled mode.\n");
+		else
+			smi_info->interrupt_disabled = 0;
 		smi_info->si_state = SI_NORMAL;
 		break;
 	}
@@ -709,9 +724,8 @@
 		/* We got the flags from the SMI, now handle them. */
 		smi_info->handlers->get_result(smi_info->si_sm, msg, 4);
 		if (msg[2] != 0) {
-			printk(KERN_WARNING
-			       "ipmi_si: Could not disable interrupts"
-			       ", failed get.\n");
+			dev_warn(smi_info->dev, "Could not disable interrupts"
+				 ", failed get.\n");
 			smi_info->si_state = SI_NORMAL;
 		} else {
 			msg[0] = (IPMI_NETFN_APP_REQUEST << 2);
@@ -733,9 +747,8 @@
 		/* We got the flags from the SMI, now handle them. */
 		smi_info->handlers->get_result(smi_info->si_sm, msg, 4);
 		if (msg[2] != 0) {
-			printk(KERN_WARNING
-			       "ipmi_si: Could not disable interrupts"
-			       ", failed set.\n");
+			dev_warn(smi_info->dev, "Could not disable interrupts"
+				 ", failed set.\n");
 		}
 		smi_info->si_state = SI_NORMAL;
 		break;
@@ -877,6 +890,11 @@
 	printk("**Enqueue: %d.%9.9d\n", t.tv_sec, t.tv_usec);
 #endif
 
+	mod_timer(&smi_info->si_timer, jiffies + SI_TIMEOUT_JIFFIES);
+
+	if (smi_info->thread)
+		wake_up_process(smi_info->thread);
+
 	if (smi_info->run_to_completion) {
 		/*
 		 * If we are running to completion, then throw it in
@@ -997,6 +1015,8 @@
 			; /* do nothing */
 		else if (smi_result == SI_SM_CALL_WITH_DELAY && busy_wait)
 			schedule();
+		else if (smi_result == SI_SM_IDLE)
+			schedule_timeout_interruptible(100);
 		else
 			schedule_timeout_interruptible(0);
 	}
@@ -1039,6 +1059,7 @@
 	unsigned long     flags;
 	unsigned long     jiffies_now;
 	long              time_diff;
+	long		  timeout;
 #ifdef DEBUG_TIMING
 	struct timeval    t;
 #endif
@@ -1059,9 +1080,9 @@
 
 	if ((smi_info->irq) && (!smi_info->interrupt_disabled)) {
 		/* Running with interrupts, only do long timeouts. */
-		smi_info->si_timer.expires = jiffies + SI_TIMEOUT_JIFFIES;
+		timeout = jiffies + SI_TIMEOUT_JIFFIES;
 		smi_inc_stat(smi_info, long_timeouts);
-		goto do_add_timer;
+		goto do_mod_timer;
 	}
 
 	/*
@@ -1070,14 +1091,15 @@
 	 */
 	if (smi_result == SI_SM_CALL_WITH_DELAY) {
 		smi_inc_stat(smi_info, short_timeouts);
-		smi_info->si_timer.expires = jiffies + 1;
+		timeout = jiffies + 1;
 	} else {
 		smi_inc_stat(smi_info, long_timeouts);
-		smi_info->si_timer.expires = jiffies + SI_TIMEOUT_JIFFIES;
+		timeout = jiffies + SI_TIMEOUT_JIFFIES;
 	}
 
- do_add_timer:
-	add_timer(&(smi_info->si_timer));
+ do_mod_timer:
+	if (smi_result != SI_SM_IDLE)
+		mod_timer(&(smi_info->si_timer), timeout);
 }
 
 static irqreturn_t si_irq_handler(int irq, void *data)
@@ -1144,10 +1166,10 @@
 		new_smi->thread = kthread_run(ipmi_thread, new_smi,
 					      "kipmi%d", new_smi->intf_num);
 		if (IS_ERR(new_smi->thread)) {
-			printk(KERN_NOTICE "ipmi_si_intf: Could not start"
-			       " kernel thread due to error %ld, only using"
-			       " timers to drive the interface\n",
-			       PTR_ERR(new_smi->thread));
+			dev_notice(new_smi->dev, "Could not start"
+				   " kernel thread due to error %ld, only using"
+				   " timers to drive the interface\n",
+				   PTR_ERR(new_smi->thread));
 			new_smi->thread = NULL;
 		}
 	}
@@ -1308,14 +1330,13 @@
 				 DEVICE_NAME,
 				 info);
 	if (rv) {
-		printk(KERN_WARNING
-		       "ipmi_si: %s unable to claim interrupt %d,"
-		       " running polled\n",
-		       DEVICE_NAME, info->irq);
+		dev_warn(info->dev, "%s unable to claim interrupt %d,"
+			 " running polled\n",
+			 DEVICE_NAME, info->irq);
 		info->irq = 0;
 	} else {
 		info->irq_cleanup = std_irq_cleanup;
-		printk("  Using irq %d\n", info->irq);
+		dev_info(info->dev, "Using irq %d\n", info->irq);
 	}
 
 	return rv;
@@ -1406,8 +1427,8 @@
 		info->io.outputb = port_outl;
 		break;
 	default:
-		printk(KERN_WARNING "ipmi_si: Invalid register size: %d\n",
-		       info->io.regsize);
+		dev_warn(info->dev, "Invalid register size: %d\n",
+			 info->io.regsize);
 		return -EINVAL;
 	}
 
@@ -1529,8 +1550,8 @@
 		break;
 #endif
 	default:
-		printk(KERN_WARNING "ipmi_si: Invalid register size: %d\n",
-		       info->io.regsize);
+		dev_warn(info->dev, "Invalid register size: %d\n",
+			 info->io.regsize);
 		return -EINVAL;
 	}
 
@@ -1755,7 +1776,7 @@
 				goto out;
 			}
 
-			info->addr_source = "hotmod";
+			info->addr_source = SI_HOTMOD;
 			info->si_type = si_type;
 			info->io.addr_data = addr;
 			info->io.addr_type = addr_space;
@@ -1777,7 +1798,9 @@
 				info->irq_setup = std_irq_setup;
 			info->slave_addr = ipmb;
 
-			try_smi_init(info);
+			if (!add_smi(info))
+				if (try_smi_init(info))
+					cleanup_one_si(info);
 		} else {
 			/* remove */
 			struct smi_info *e, *tmp_e;
@@ -1813,7 +1836,8 @@
 		if (!info)
 			return;
 
-		info->addr_source = "hardcoded";
+		info->addr_source = SI_HARDCODED;
+		printk(KERN_INFO PFX "probing via hardcoded address\n");
 
 		if (!si_type[i] || strcmp(si_type[i], "kcs") == 0) {
 			info->si_type = SI_KCS;
@@ -1822,8 +1846,7 @@
 		} else if (strcmp(si_type[i], "bt") == 0) {
 			info->si_type = SI_BT;
 		} else {
-			printk(KERN_WARNING
-			       "ipmi_si: Interface type specified "
+			printk(KERN_WARNING PFX "Interface type specified "
 			       "for interface %d, was invalid: %s\n",
 			       i, si_type[i]);
 			kfree(info);
@@ -1841,11 +1864,9 @@
 			info->io.addr_data = addrs[i];
 			info->io.addr_type = IPMI_MEM_ADDR_SPACE;
 		} else {
-			printk(KERN_WARNING
-			       "ipmi_si: Interface type specified "
-			       "for interface %d, "
-			       "but port and address were not set or "
-			       "set to zero.\n", i);
+			printk(KERN_WARNING PFX "Interface type specified "
+			       "for interface %d, but port and address were "
+			       "not set or set to zero.\n", i);
 			kfree(info);
 			continue;
 		}
@@ -1863,7 +1884,9 @@
 			info->irq_setup = std_irq_setup;
 		info->slave_addr = slave_addrs[i];
 
-		try_smi_init(info);
+		if (!add_smi(info))
+			if (try_smi_init(info))
+				cleanup_one_si(info);
 	}
 }
 
@@ -1923,15 +1946,13 @@
 					  &ipmi_acpi_gpe,
 					  info);
 	if (status != AE_OK) {
-		printk(KERN_WARNING
-		       "ipmi_si: %s unable to claim ACPI GPE %d,"
-		       " running polled\n",
-		       DEVICE_NAME, info->irq);
+		dev_warn(info->dev, "%s unable to claim ACPI GPE %d,"
+			 " running polled\n", DEVICE_NAME, info->irq);
 		info->irq = 0;
 		return -EINVAL;
 	} else {
 		info->irq_cleanup = acpi_gpe_irq_cleanup;
-		printk("  Using ACPI GPE %d\n", info->irq);
+		dev_info(info->dev, "Using ACPI GPE %d\n", info->irq);
 		return 0;
 	}
 }
@@ -1989,8 +2010,8 @@
 	u8 		 addr_space;
 
 	if (spmi->IPMIlegacy != 1) {
-	    printk(KERN_INFO "IPMI: Bad SPMI legacy %d\n", spmi->IPMIlegacy);
-	    return -ENODEV;
+		printk(KERN_INFO PFX "Bad SPMI legacy %d\n", spmi->IPMIlegacy);
+		return -ENODEV;
 	}
 
 	if (spmi->addr.space_id == ACPI_ADR_SPACE_SYSTEM_MEMORY)
@@ -2000,11 +2021,12 @@
 
 	info = kzalloc(sizeof(*info), GFP_KERNEL);
 	if (!info) {
-		printk(KERN_ERR "ipmi_si: Could not allocate SI data (3)\n");
+		printk(KERN_ERR PFX "Could not allocate SI data (3)\n");
 		return -ENOMEM;
 	}
 
-	info->addr_source = "SPMI";
+	info->addr_source = SI_SPMI;
+	printk(KERN_INFO PFX "probing via SPMI\n");
 
 	/* Figure out the interface type. */
 	switch (spmi->InterfaceType) {
@@ -2018,8 +2040,8 @@
 		info->si_type = SI_BT;
 		break;
 	default:
-		printk(KERN_INFO "ipmi_si: Unknown ACPI/SPMI SI type %d\n",
-			spmi->InterfaceType);
+		printk(KERN_INFO PFX "Unknown ACPI/SPMI SI type %d\n",
+		       spmi->InterfaceType);
 		kfree(info);
 		return -EIO;
 	}
@@ -2055,13 +2077,12 @@
 		info->io.addr_type = IPMI_IO_ADDR_SPACE;
 	} else {
 		kfree(info);
-		printk(KERN_WARNING
-		       "ipmi_si: Unknown ACPI I/O Address type\n");
+		printk(KERN_WARNING PFX "Unknown ACPI I/O Address type\n");
 		return -EIO;
 	}
 	info->io.addr_data = spmi->addr.address;
 
-	try_smi_init(info);
+	add_smi(info);
 
 	return 0;
 }
@@ -2093,6 +2114,7 @@
 {
 	struct acpi_device *acpi_dev;
 	struct smi_info *info;
+	struct resource *res;
 	acpi_handle handle;
 	acpi_status status;
 	unsigned long long tmp;
@@ -2105,7 +2127,8 @@
 	if (!info)
 		return -ENOMEM;
 
-	info->addr_source = "ACPI";
+	info->addr_source = SI_ACPI;
+	printk(KERN_INFO PFX "probing via ACPI\n");
 
 	handle = acpi_dev->handle;
 
@@ -2125,22 +2148,26 @@
 		info->si_type = SI_BT;
 		break;
 	default:
-		dev_info(&dev->dev, "unknown interface type %lld\n", tmp);
+		dev_info(&dev->dev, "unknown IPMI type %lld\n", tmp);
 		goto err_free;
 	}
 
-	if (pnp_port_valid(dev, 0)) {
+	res = pnp_get_resource(dev, IORESOURCE_IO, 0);
+	if (res) {
 		info->io_setup = port_setup;
 		info->io.addr_type = IPMI_IO_ADDR_SPACE;
-		info->io.addr_data = pnp_port_start(dev, 0);
-	} else if (pnp_mem_valid(dev, 0)) {
-		info->io_setup = mem_setup;
-		info->io.addr_type = IPMI_MEM_ADDR_SPACE;
-		info->io.addr_data = pnp_mem_start(dev, 0);
 	} else {
+		res = pnp_get_resource(dev, IORESOURCE_MEM, 0);
+		if (res) {
+			info->io_setup = mem_setup;
+			info->io.addr_type = IPMI_MEM_ADDR_SPACE;
+		}
+	}
+	if (!res) {
 		dev_err(&dev->dev, "no I/O or memory address\n");
 		goto err_free;
 	}
+	info->io.addr_data = res->start;
 
 	info->io.regspacing = DEFAULT_REGSPACING;
 	info->io.regsize = DEFAULT_REGSPACING;
@@ -2156,10 +2183,14 @@
 		info->irq_setup = std_irq_setup;
 	}
 
-	info->dev = &acpi_dev->dev;
+	info->dev = &dev->dev;
 	pnp_set_drvdata(dev, info);
 
-	return try_smi_init(info);
+	dev_info(info->dev, "%pR regsize %d spacing %d irq %d\n",
+		 res, info->io.regsize, info->io.regspacing,
+		 info->irq);
+
+	return add_smi(info);
 
 err_free:
 	kfree(info);
@@ -2264,12 +2295,12 @@
 
 	info = kzalloc(sizeof(*info), GFP_KERNEL);
 	if (!info) {
-		printk(KERN_ERR
-		       "ipmi_si: Could not allocate SI data\n");
+		printk(KERN_ERR PFX "Could not allocate SI data\n");
 		return;
 	}
 
-	info->addr_source = "SMBIOS";
+	info->addr_source = SI_SMBIOS;
+	printk(KERN_INFO PFX "probing via SMBIOS\n");
 
 	switch (ipmi_data->type) {
 	case 0x01: /* KCS */
@@ -2299,8 +2330,7 @@
 
 	default:
 		kfree(info);
-		printk(KERN_WARNING
-		       "ipmi_si: Unknown SMBIOS I/O Address type: %d.\n",
+		printk(KERN_WARNING PFX "Unknown SMBIOS I/O Address type: %d\n",
 		       ipmi_data->addr_space);
 		return;
 	}
@@ -2318,7 +2348,7 @@
 	if (info->irq)
 		info->irq_setup = std_irq_setup;
 
-	try_smi_init(info);
+	add_smi(info);
 }
 
 static void __devinit dmi_find_bmc(void)
@@ -2368,7 +2398,8 @@
 	if (!info)
 		return -ENOMEM;
 
-	info->addr_source = "PCI";
+	info->addr_source = SI_PCI;
+	dev_info(&pdev->dev, "probing via PCI");
 
 	switch (class_type) {
 	case PCI_ERMC_CLASSCODE_TYPE_SMIC:
@@ -2385,15 +2416,13 @@
 
 	default:
 		kfree(info);
-		printk(KERN_INFO "ipmi_si: %s: Unknown IPMI type: %d\n",
-		       pci_name(pdev), class_type);
+		dev_info(&pdev->dev, "Unknown IPMI type: %d\n", class_type);
 		return -ENOMEM;
 	}
 
 	rv = pci_enable_device(pdev);
 	if (rv) {
-		printk(KERN_ERR "ipmi_si: %s: couldn't enable PCI device\n",
-		       pci_name(pdev));
+		dev_err(&pdev->dev, "couldn't enable PCI device\n");
 		kfree(info);
 		return rv;
 	}
@@ -2421,7 +2450,11 @@
 	info->dev = &pdev->dev;
 	pci_set_drvdata(pdev, info);
 
-	return try_smi_init(info);
+	dev_info(&pdev->dev, "%pR regsize %d spacing %d irq %d\n",
+		&pdev->resource[0], info->io.regsize, info->io.regspacing,
+		info->irq);
+
+	return add_smi(info);
 }
 
 static void __devexit ipmi_pci_remove(struct pci_dev *pdev)
@@ -2473,7 +2506,7 @@
 	int ret;
 	int proplen;
 
-	dev_info(&dev->dev, PFX "probing via device tree\n");
+	dev_info(&dev->dev, "probing via device tree\n");
 
 	ret = of_address_to_resource(np, 0, &resource);
 	if (ret) {
@@ -2503,12 +2536,12 @@
 
 	if (!info) {
 		dev_err(&dev->dev,
-			PFX "could not allocate memory for OF probe\n");
+			"could not allocate memory for OF probe\n");
 		return -ENOMEM;
 	}
 
 	info->si_type		= (enum si_type) match->data;
-	info->addr_source	= "device-tree";
+	info->addr_source	= SI_DEVICETREE;
 	info->irq_setup		= std_irq_setup;
 
 	if (resource.flags & IORESOURCE_IO) {
@@ -2528,13 +2561,13 @@
 	info->irq		= irq_of_parse_and_map(dev->dev.of_node, 0);
 	info->dev		= &dev->dev;
 
-	dev_dbg(&dev->dev, "addr 0x%lx regsize %d spacing %d irq %x\n",
+	dev_dbg(&dev->dev, "addr 0x%lx regsize %d spacing %d irq %d\n",
 		info->io.addr_data, info->io.regsize, info->io.regspacing,
 		info->irq);
 
 	dev_set_drvdata(&dev->dev, info);
 
-	return try_smi_init(info);
+	return add_smi(info);
 }
 
 static int __devexit ipmi_of_remove(struct of_device *dev)
@@ -2643,9 +2676,8 @@
 
 	rv = wait_for_msg_done(smi_info);
 	if (rv) {
-		printk(KERN_WARNING
-		       "ipmi_si: Error getting response from get global,"
-		       " enables command, the event buffer is not"
+		printk(KERN_WARNING PFX "Error getting response from get"
+		       " global enables command, the event buffer is not"
 		       " enabled.\n");
 		goto out;
 	}
@@ -2657,10 +2689,8 @@
 			resp[0] != (IPMI_NETFN_APP_REQUEST | 1) << 2 ||
 			resp[1] != IPMI_GET_BMC_GLOBAL_ENABLES_CMD   ||
 			resp[2] != 0) {
-		printk(KERN_WARNING
-		       "ipmi_si: Invalid return from get global"
-		       " enables command, cannot enable the event"
-		       " buffer.\n");
+		printk(KERN_WARNING PFX "Invalid return from get global"
+		       " enables command, cannot enable the event buffer.\n");
 		rv = -EINVAL;
 		goto out;
 	}
@@ -2676,9 +2706,8 @@
 
 	rv = wait_for_msg_done(smi_info);
 	if (rv) {
-		printk(KERN_WARNING
-		       "ipmi_si: Error getting response from set global,"
-		       " enables command, the event buffer is not"
+		printk(KERN_WARNING PFX "Error getting response from set"
+		       " global, enables command, the event buffer is not"
 		       " enabled.\n");
 		goto out;
 	}
@@ -2689,10 +2718,8 @@
 	if (resp_len < 3 ||
 			resp[0] != (IPMI_NETFN_APP_REQUEST | 1) << 2 ||
 			resp[1] != IPMI_SET_BMC_GLOBAL_ENABLES_CMD) {
-		printk(KERN_WARNING
-		       "ipmi_si: Invalid return from get global,"
-		       "enables command, not enable the event"
-		       " buffer.\n");
+		printk(KERN_WARNING PFX "Invalid return from get global,"
+		       "enables command, not enable the event buffer.\n");
 		rv = -EINVAL;
 		goto out;
 	}
@@ -2951,7 +2978,7 @@
 		if (!info)
 			return;
 
-		info->addr_source = NULL;
+		info->addr_source = SI_DEFAULT;
 
 		info->si_type = ipmi_defaults[i].type;
 		info->io_setup = port_setup;
@@ -2963,14 +2990,16 @@
 		info->io.regsize = DEFAULT_REGSPACING;
 		info->io.regshift = 0;
 
-		if (try_smi_init(info) == 0) {
-			/* Found one... */
-			printk(KERN_INFO "ipmi_si: Found default %s state"
-			       " machine at %s address 0x%lx\n",
-			       si_to_str[info->si_type],
-			       addr_space_to_str[info->io.addr_type],
-			       info->io.addr_data);
-			return;
+		if (add_smi(info) == 0) {
+			if (try_smi_init(info) == 0) {
+				/* Found one... */
+				printk(KERN_INFO PFX "Found default %s"
+				       " state machine at %s address 0x%lx\n",
+				       si_to_str[info->si_type],
+				       addr_space_to_str[info->io.addr_type],
+				       info->io.addr_data);
+			} else
+				cleanup_one_si(info);
 		}
 	}
 }
@@ -2989,34 +3018,48 @@
 	return 1;
 }
 
-static int try_smi_init(struct smi_info *new_smi)
+static int add_smi(struct smi_info *new_smi)
 {
-	int rv;
-	int i;
+	int rv = 0;
 
-	if (new_smi->addr_source) {
-		printk(KERN_INFO "ipmi_si: Trying %s-specified %s state"
-		       " machine at %s address 0x%lx, slave address 0x%x,"
-		       " irq %d\n",
-		       new_smi->addr_source,
-		       si_to_str[new_smi->si_type],
-		       addr_space_to_str[new_smi->io.addr_type],
-		       new_smi->io.addr_data,
-		       new_smi->slave_addr, new_smi->irq);
-	}
-
+	printk(KERN_INFO PFX "Adding %s-specified %s state machine",
+			ipmi_addr_src_to_str[new_smi->addr_source],
+			si_to_str[new_smi->si_type]);
 	mutex_lock(&smi_infos_lock);
 	if (!is_new_interface(new_smi)) {
-		printk(KERN_WARNING "ipmi_si: duplicate interface\n");
+		printk(KERN_CONT " duplicate interface\n");
 		rv = -EBUSY;
 		goto out_err;
 	}
 
+	printk(KERN_CONT "\n");
+
 	/* So we know not to free it unless we have allocated one. */
 	new_smi->intf = NULL;
 	new_smi->si_sm = NULL;
 	new_smi->handlers = NULL;
 
+	list_add_tail(&new_smi->link, &smi_infos);
+
+out_err:
+	mutex_unlock(&smi_infos_lock);
+	return rv;
+}
+
+static int try_smi_init(struct smi_info *new_smi)
+{
+	int rv = 0;
+	int i;
+
+	printk(KERN_INFO PFX "Trying %s-specified %s state"
+	       " machine at %s address 0x%lx, slave address 0x%x,"
+	       " irq %d\n",
+	       ipmi_addr_src_to_str[new_smi->addr_source],
+	       si_to_str[new_smi->si_type],
+	       addr_space_to_str[new_smi->io.addr_type],
+	       new_smi->io.addr_data,
+	       new_smi->slave_addr, new_smi->irq);
+
 	switch (new_smi->si_type) {
 	case SI_KCS:
 		new_smi->handlers = &kcs_smi_handlers;
@@ -3039,7 +3082,8 @@
 	/* Allocate the state machine's data and initialize it. */
 	new_smi->si_sm = kmalloc(new_smi->handlers->size(), GFP_KERNEL);
 	if (!new_smi->si_sm) {
-		printk(KERN_ERR "Could not allocate state machine memory\n");
+		printk(KERN_ERR PFX
+		       "Could not allocate state machine memory\n");
 		rv = -ENOMEM;
 		goto out_err;
 	}
@@ -3049,7 +3093,7 @@
 	/* Now that we know the I/O size, we can set up the I/O. */
 	rv = new_smi->io_setup(new_smi);
 	if (rv) {
-		printk(KERN_ERR "Could not set up I/O space\n");
+		printk(KERN_ERR PFX "Could not set up I/O space\n");
 		goto out_err;
 	}
 
@@ -3059,8 +3103,7 @@
 	/* Do low-level detection first. */
 	if (new_smi->handlers->detect(new_smi->si_sm)) {
 		if (new_smi->addr_source)
-			printk(KERN_INFO "ipmi_si: Interface detection"
-			       " failed\n");
+			printk(KERN_INFO PFX "Interface detection failed\n");
 		rv = -ENODEV;
 		goto out_err;
 	}
@@ -3072,7 +3115,7 @@
 	rv = try_get_dev_id(new_smi);
 	if (rv) {
 		if (new_smi->addr_source)
-			printk(KERN_INFO "ipmi_si: There appears to be no BMC"
+			printk(KERN_INFO PFX "There appears to be no BMC"
 			       " at this location\n");
 		goto out_err;
 	}
@@ -3088,7 +3131,7 @@
 	for (i = 0; i < SI_NUM_STATS; i++)
 		atomic_set(&new_smi->stats[i], 0);
 
-	new_smi->interrupt_disabled = 0;
+	new_smi->interrupt_disabled = 1;
 	atomic_set(&new_smi->stop_operation, 0);
 	new_smi->intf_num = smi_num;
 	smi_num++;
@@ -3114,9 +3157,8 @@
 		new_smi->pdev = platform_device_alloc("ipmi_si",
 						      new_smi->intf_num);
 		if (!new_smi->pdev) {
-			printk(KERN_ERR
-			       "ipmi_si_intf:"
-			       " Unable to allocate platform device\n");
+			printk(KERN_ERR PFX
+			       "Unable to allocate platform device\n");
 			goto out_err;
 		}
 		new_smi->dev = &new_smi->pdev->dev;
@@ -3124,9 +3166,8 @@
 
 		rv = platform_device_add(new_smi->pdev);
 		if (rv) {
-			printk(KERN_ERR
-			       "ipmi_si_intf:"
-			       " Unable to register system interface device:"
+			printk(KERN_ERR PFX
+			       "Unable to register system interface device:"
 			       " %d\n",
 			       rv);
 			goto out_err;
@@ -3141,9 +3182,8 @@
 			       "bmc",
 			       new_smi->slave_addr);
 	if (rv) {
-		printk(KERN_ERR
-		       "ipmi_si: Unable to register device: error %d\n",
-		       rv);
+		dev_err(new_smi->dev, "Unable to register device: error %d\n",
+			rv);
 		goto out_err_stop_timer;
 	}
 
@@ -3151,9 +3191,7 @@
 				     type_file_read_proc,
 				     new_smi);
 	if (rv) {
-		printk(KERN_ERR
-		       "ipmi_si: Unable to create proc entry: %d\n",
-		       rv);
+		dev_err(new_smi->dev, "Unable to create proc entry: %d\n", rv);
 		goto out_err_stop_timer;
 	}
 
@@ -3161,9 +3199,7 @@
 				     stat_file_read_proc,
 				     new_smi);
 	if (rv) {
-		printk(KERN_ERR
-		       "ipmi_si: Unable to create proc entry: %d\n",
-		       rv);
+		dev_err(new_smi->dev, "Unable to create proc entry: %d\n", rv);
 		goto out_err_stop_timer;
 	}
 
@@ -3171,18 +3207,12 @@
 				     param_read_proc,
 				     new_smi);
 	if (rv) {
-		printk(KERN_ERR
-		       "ipmi_si: Unable to create proc entry: %d\n",
-		       rv);
+		dev_err(new_smi->dev, "Unable to create proc entry: %d\n", rv);
 		goto out_err_stop_timer;
 	}
 
-	list_add_tail(&new_smi->link, &smi_infos);
-
-	mutex_unlock(&smi_infos_lock);
-
-	printk(KERN_INFO "IPMI %s interface initialized\n",
-	       si_to_str[new_smi->si_type]);
+	dev_info(new_smi->dev, "IPMI %s interface initialized\n",
+		 si_to_str[new_smi->si_type]);
 
 	return 0;
 
@@ -3191,11 +3221,17 @@
 	wait_for_timer_and_thread(new_smi);
 
  out_err:
-	if (new_smi->intf)
-		ipmi_unregister_smi(new_smi->intf);
+	new_smi->interrupt_disabled = 1;
 
-	if (new_smi->irq_cleanup)
+	if (new_smi->intf) {
+		ipmi_unregister_smi(new_smi->intf);
+		new_smi->intf = NULL;
+	}
+
+	if (new_smi->irq_cleanup) {
 		new_smi->irq_cleanup(new_smi);
+		new_smi->irq_cleanup = NULL;
+	}
 
 	/*
 	 * Wait until we know that we are out of any interrupt
@@ -3208,18 +3244,21 @@
 		if (new_smi->handlers)
 			new_smi->handlers->cleanup(new_smi->si_sm);
 		kfree(new_smi->si_sm);
+		new_smi->si_sm = NULL;
 	}
-	if (new_smi->addr_source_cleanup)
+	if (new_smi->addr_source_cleanup) {
 		new_smi->addr_source_cleanup(new_smi);
-	if (new_smi->io_cleanup)
+		new_smi->addr_source_cleanup = NULL;
+	}
+	if (new_smi->io_cleanup) {
 		new_smi->io_cleanup(new_smi);
+		new_smi->io_cleanup = NULL;
+	}
 
-	if (new_smi->dev_registered)
+	if (new_smi->dev_registered) {
 		platform_device_unregister(new_smi->pdev);
-
-	kfree(new_smi);
-
-	mutex_unlock(&smi_infos_lock);
+		new_smi->dev_registered = 0;
+	}
 
 	return rv;
 }
@@ -3229,6 +3268,8 @@
 	int  i;
 	char *str;
 	int  rv;
+	struct smi_info *e;
+	enum ipmi_addr_src type = SI_INVALID;
 
 	if (initialized)
 		return 0;
@@ -3237,9 +3278,7 @@
 	/* Register the device drivers. */
 	rv = driver_register(&ipmi_driver.driver);
 	if (rv) {
-		printk(KERN_ERR
-		       "init_ipmi_si: Unable to register driver: %d\n",
-		       rv);
+		printk(KERN_ERR PFX "Unable to register driver: %d\n", rv);
 		return rv;
 	}
 
@@ -3263,6 +3302,24 @@
 
 	hardcode_find_bmc();
 
+	/* If the user gave us a device, they presumably want us to use it */
+	mutex_lock(&smi_infos_lock);
+	if (!list_empty(&smi_infos)) {
+		mutex_unlock(&smi_infos_lock);
+		return 0;
+	}
+	mutex_unlock(&smi_infos_lock);
+
+#ifdef CONFIG_PCI
+	rv = pci_register_driver(&ipmi_pci_driver);
+	if (rv)
+		printk(KERN_ERR PFX "Unable to register PCI driver: %d\n", rv);
+#endif
+
+#ifdef CONFIG_ACPI
+	pnp_register_driver(&ipmi_pnp_driver);
+#endif
+
 #ifdef CONFIG_DMI
 	dmi_find_bmc();
 #endif
@@ -3270,31 +3327,56 @@
 #ifdef CONFIG_ACPI
 	spmi_find_bmc();
 #endif
-#ifdef CONFIG_ACPI
-	pnp_register_driver(&ipmi_pnp_driver);
-#endif
-
-#ifdef CONFIG_PCI
-	rv = pci_register_driver(&ipmi_pci_driver);
-	if (rv)
-		printk(KERN_ERR
-		       "init_ipmi_si: Unable to register PCI driver: %d\n",
-		       rv);
-#endif
 
 #ifdef CONFIG_PPC_OF
 	of_register_platform_driver(&ipmi_of_platform_driver);
 #endif
 
+	/*
+	 * We prefer devices with interrupts, but in the case of a machine
+	 * with multiple BMCs we assume that there will be several instances
+	 * of a given type, so if we succeed in registering a type then we
+	 * also try to register everything else of the same type.
+	 */
+
+	mutex_lock(&smi_infos_lock);
+	list_for_each_entry(e, &smi_infos, link) {
+		/*
+		 * Try to register a device if it has an IRQ and we either
+		 * haven't successfully registered a device yet or this
+		 * device has the same type as one we successfully registered.
+		 */
+		if (e->irq && (!type || e->addr_source == type)) {
+			if (!try_smi_init(e)) {
+				type = e->addr_source;
+			}
+		}
+	}
+
+	/* type will only have been set if we successfully registered an si */
+	if (type) {
+		mutex_unlock(&smi_infos_lock);
+		return 0;
+	}
+
+	/* Fall back to devices without an IRQ */
+
+	list_for_each_entry(e, &smi_infos, link) {
+		if (!e->irq && (!type || e->addr_source == type)) {
+			if (!try_smi_init(e)) {
+				type = e->addr_source;
+			}
+		}
+	}
+	mutex_unlock(&smi_infos_lock);
+
+	if (type)
+		return 0;
+
 	if (si_trydefaults) {
 		mutex_lock(&smi_infos_lock);
 		if (list_empty(&smi_infos)) {
 			/* No BMC was found, try defaults. */
 			mutex_unlock(&smi_infos_lock);
 			default_find_bmc();
-		} else {
+		} else
 			mutex_unlock(&smi_infos_lock);
-		}
 	}
 
 	mutex_lock(&smi_infos_lock);
@@ -3308,8 +3390,8 @@
 		of_unregister_platform_driver(&ipmi_of_platform_driver);
 #endif
 		driver_unregister(&ipmi_driver.driver);
-		printk(KERN_WARNING
-		       "ipmi_si: Unable to find any System Interface(s)\n");
+		printk(KERN_WARNING PFX
+		       "Unable to find any System Interface(s)\n");
 		return -ENODEV;
 	} else {
 		mutex_unlock(&smi_infos_lock);
@@ -3320,7 +3402,7 @@
 
 static void cleanup_one_si(struct smi_info *to_clean)
 {
-	int           rv;
+	int           rv = 0;
 	unsigned long flags;
 
 	if (!to_clean)
@@ -3364,14 +3446,16 @@
 		schedule_timeout_uninterruptible(1);
 	}
 
-	rv = ipmi_unregister_smi(to_clean->intf);
+	if (to_clean->intf)
+		rv = ipmi_unregister_smi(to_clean->intf);
+
 	if (rv) {
-		printk(KERN_ERR
-		       "ipmi_si: Unable to unregister device: errno=%d\n",
+		printk(KERN_ERR PFX "Unable to unregister device: errno=%d\n",
 		       rv);
 	}
 
-	to_clean->handlers->cleanup(to_clean->si_sm);
+	if (to_clean->handlers)
+		to_clean->handlers->cleanup(to_clean->si_sm);
 
 	kfree(to_clean->si_sm);
 
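The ipmi_si churn above is one logical change: discovery no longer initializes interfaces on the spot. Each probe path calls add_smi() to queue the interface on smi_infos (hardcoded/hotmod interfaces, which the user explicitly asked for, still init immediately and get cleaned up on failure), and init_ipmi_si() then decides which queued entries to bring up. A condensed sketch of its two-pass policy, using only names from the diff (SI_INVALID is the zero value of enum ipmi_addr_src, so !type means "nothing registered yet"):

	enum ipmi_addr_src type = SI_INVALID;
	struct smi_info *e;

	/* Pass 1: prefer interfaces that come with an IRQ. */
	list_for_each_entry(e, &smi_infos, link)
		if (e->irq && (!type || e->addr_source == type))
			if (!try_smi_init(e))
				type = e->addr_source;

	/* Pass 2: only if pass 1 found nothing, settle for polled ones. */
	if (!type)
		list_for_each_entry(e, &smi_infos, link)
			if (!e->irq)
				if (!try_smi_init(e))
					type = e->addr_source;

Once one address source succeeds, later candidates are restricted to that same source, on the assumption that a machine with multiple BMCs reports all of them the same way.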
diff --git a/drivers/char/ppdev.c b/drivers/char/ppdev.c
index fdd3754..02abfdd 100644
--- a/drivers/char/ppdev.c
+++ b/drivers/char/ppdev.c
@@ -287,12 +287,10 @@
 	char *name;
 	int fl;
 
-	name = kmalloc (strlen (CHRDEV) + 3, GFP_KERNEL);
+	name = kasprintf(GFP_KERNEL, CHRDEV "%x", minor);
 	if (name == NULL)
 		return -ENOMEM;
 
-	sprintf (name, CHRDEV "%x", minor);
-
 	port = parport_find_number (minor);
 	if (!port) {
 		printk (KERN_WARNING "%s: no associated port!\n", name);
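For reference, kasprintf() is the kernel's one-step replacement for the kmalloc()+sprintf() pattern this hunk deletes: it sizes the buffer itself, so the caller only checks for NULL and later kfree()s the result. A minimal sketch:

	char *name = kasprintf(GFP_KERNEL, CHRDEV "%x", minor);

	if (!name)
		return -ENOMEM;
	/* ... use name ... */
	kfree(name);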
diff --git a/drivers/char/ramoops.c b/drivers/char/ramoops.c
new file mode 100644
index 0000000..74f00b5
--- /dev/null
+++ b/drivers/char/ramoops.c
@@ -0,0 +1,162 @@
+/*
+ * RAM Oops/Panic logger
+ *
+ * Copyright (C) 2010 Marco Stornelli <marco.stornelli@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
+ * 02110-1301 USA
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/kmsg_dump.h>
+#include <linux/time.h>
+#include <linux/io.h>
+#include <linux/ioport.h>
+
+#define RAMOOPS_KERNMSG_HDR "===="
+#define RAMOOPS_HEADER_SIZE   (5 + sizeof(struct timeval))
+
+#define RECORD_SIZE 4096
+
+static ulong mem_address;
+module_param(mem_address, ulong, 0400);
+MODULE_PARM_DESC(mem_address,
+		"start of reserved RAM used to store oops/panic logs");
+
+static ulong mem_size;
+module_param(mem_size, ulong, 0400);
+MODULE_PARM_DESC(mem_size,
+		"size of reserved RAM used to store oops/panic logs");
+
+static int dump_oops = 1;
+module_param(dump_oops, int, 0600);
+MODULE_PARM_DESC(dump_oops,
+		"set to 1 to dump oopses, 0 to only dump panics (default 1)");
+
+static struct ramoops_context {
+	struct kmsg_dumper dump;
+	void *virt_addr;
+	phys_addr_t phys_addr;
+	unsigned long size;
+	int count;
+	int max_count;
+} oops_cxt;
+
+static void ramoops_do_dump(struct kmsg_dumper *dumper,
+		enum kmsg_dump_reason reason, const char *s1, unsigned long l1,
+		const char *s2, unsigned long l2)
+{
+	struct ramoops_context *cxt = container_of(dumper,
+			struct ramoops_context, dump);
+	unsigned long s1_start, s2_start;
+	unsigned long l1_cpy, l2_cpy;
+	int res;
+	char *buf;
+	struct timeval timestamp;
+
+	/* Only dump oopses if dump_oops is set */
+	if (reason == KMSG_DUMP_OOPS && !dump_oops)
+		return;
+
+	buf = (char *)(cxt->virt_addr + (cxt->count * RECORD_SIZE));
+	memset(buf, '\0', RECORD_SIZE);
+	res = sprintf(buf, "%s", RAMOOPS_KERNMSG_HDR);
+	buf += res;
+	do_gettimeofday(&timestamp);
+	res = sprintf(buf, "%lu.%lu\n",
+			(unsigned long)timestamp.tv_sec,
+			(unsigned long)timestamp.tv_usec);
+	buf += res;
+
+	l2_cpy = min(l2, (unsigned long)(RECORD_SIZE - RAMOOPS_HEADER_SIZE));
+	l1_cpy = min(l1, (unsigned long)(RECORD_SIZE - RAMOOPS_HEADER_SIZE) - l2_cpy);
+
+	s2_start = l2 - l2_cpy;
+	s1_start = l1 - l1_cpy;
+
+	memcpy(buf, s1 + s1_start, l1_cpy);
+	memcpy(buf + l1_cpy, s2 + s2_start, l2_cpy);
+
+	cxt->count = (cxt->count + 1) % cxt->max_count;
+}
+
+static int __init ramoops_init(void)
+{
+	struct ramoops_context *cxt = &oops_cxt;
+	int err = -EINVAL;
+
+	if (!mem_size) {
+		printk(KERN_ERR "ramoops: invalid size specification");
+		goto fail3;
+	}
+
+	mem_size = rounddown_pow_of_two(mem_size);
+
+	if (mem_size < RECORD_SIZE) {
+		printk(KERN_ERR "ramoops: size too small");
+		goto fail3;
+	}
+
+	cxt->max_count = mem_size / RECORD_SIZE;
+	cxt->count = 0;
+	cxt->size = mem_size;
+	cxt->phys_addr = mem_address;
+
+	if (!request_mem_region(cxt->phys_addr, cxt->size, "ramoops")) {
+		printk(KERN_ERR "ramoops: request mem region failed");
+		err = -EINVAL;
+		goto fail3;
+	}
+
+	cxt->virt_addr = ioremap(cxt->phys_addr, cxt->size);
+	if (!cxt->virt_addr) {
+		printk(KERN_ERR "ramoops: ioremap failed");
+		goto fail2;
+	}
+
+	cxt->dump.dump = ramoops_do_dump;
+	err = kmsg_dump_register(&cxt->dump);
+	if (err) {
+		printk(KERN_ERR "ramoops: registering kmsg dumper failed");
+		goto fail1;
+	}
+
+	return 0;
+
+fail1:
+	iounmap(cxt->virt_addr);
+fail2:
+	release_mem_region(cxt->phys_addr, cxt->size);
+fail3:
+	return err;
+}
+
+static void __exit ramoops_exit(void)
+{
+	struct ramoops_context *cxt = &oops_cxt;
+
+	if (kmsg_dump_unregister(&cxt->dump) < 0)
+		printk(KERN_WARNING "ramoops: could not unregister kmsg_dumper");
+
+	iounmap(cxt->virt_addr);
+	release_mem_region(cxt->phys_addr, cxt->size);
+}
+
+
+module_init(ramoops_init);
+module_exit(ramoops_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Marco Stornelli <marco.stornelli@gmail.com>");
+MODULE_DESCRIPTION("RAM Oops/Panic logger/driver");
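To make the ring layout concrete: the reserved region is carved into fixed RECORD_SIZE slots used round-robin, so it always holds the most recent max_count dumps. A worked example with an assumed (not from the patch) 64 KiB region:

	/*
	 * mem_size = 64 KiB, RECORD_SIZE = 4096
	 *	=> max_count = 65536 / 4096 = 16 slots
	 * Each dump writes "====<secs>.<usecs>\n" plus the tail of the
	 * kernel log into slot 'count', then advances:
	 *	count = (count + 1) % max_count;
	 * so the 17th dump overwrites the oldest slot.
	 */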
diff --git a/drivers/char/vt.c b/drivers/char/vt.c
index bd1d116..7cdb6ee 100644
--- a/drivers/char/vt.c
+++ b/drivers/char/vt.c
@@ -3967,13 +3967,9 @@
 	font.charcount = op->charcount;
 	font.height = op->height;
 	font.width = op->width;
-	font.data = kmalloc(size, GFP_KERNEL);
-	if (!font.data)
-		return -ENOMEM;
-	if (copy_from_user(font.data, op->data, size)) {
-		kfree(font.data);
-		return -EFAULT;
-	}
+	font.data = memdup_user(op->data, size);
+	if (IS_ERR(font.data))
+		return PTR_ERR(font.data);
 	acquire_console_sem();
 	if (vc->vc_sw->con_font_set)
 		rc = vc->vc_sw->con_font_set(vc, &font, op->flags);
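memdup_user() bundles exactly the kmalloc()/copy_from_user()/unwind sequence the vt hunk removes; the one caller-visible difference is that failure comes back as an ERR_PTR (-ENOMEM or -EFAULT) rather than NULL. A sketch, with uptr/len standing in for any user pointer and length:

	void *buf = memdup_user(uptr, len);

	if (IS_ERR(buf))
		return PTR_ERR(buf);
	/* ... use buf ... */
	kfree(buf);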
diff --git a/drivers/edac/i5000_edac.c b/drivers/edac/i5000_edac.c
index adc10a2..996c1bd 100644
--- a/drivers/edac/i5000_edac.c
+++ b/drivers/edac/i5000_edac.c
@@ -774,7 +774,7 @@
 static void i5000_check_error(struct mem_ctl_info *mci)
 {
 	struct i5000_error_info info;
-	debugf4("MC%d: " __FILE__ ": %s()\n", mci->mc_idx, __func__);
+	debugf4("MC%d: %s: %s()\n", mci->mc_idx, __FILE__, __func__);
 	i5000_get_error_info(mci, &info);
 	i5000_process_error_info(mci, &info, 1);
 }
@@ -1353,8 +1353,8 @@
 	int num_dimms_per_channel;
 	int num_csrows;
 
-	debugf0("MC: " __FILE__ ": %s(), pdev bus %u dev=0x%x fn=0x%x\n",
-		__func__,
+	debugf0("MC: %s: %s(), pdev bus %u dev=0x%x fn=0x%x\n",
+		__FILE__, __func__,
 		pdev->bus->number,
 		PCI_SLOT(pdev->devfn), PCI_FUNC(pdev->devfn));
 
@@ -1389,7 +1389,7 @@
 		return -ENOMEM;
 
 	kobject_get(&mci->edac_mci_kobj);
-	debugf0("MC: " __FILE__ ": %s(): mci = %p\n", __func__, mci);
+	debugf0("MC: %s: %s(): mci = %p\n", __FILE__, __func__, mci);
 
 	mci->dev = &pdev->dev;	/* record ptr  to the generic device */
 
@@ -1432,8 +1432,8 @@
 
 	/* add this new MC control structure to EDAC's list of MCs */
 	if (edac_mc_add_mc(mci)) {
-		debugf0("MC: " __FILE__
-			": %s(): failed edac_mc_add_mc()\n", __func__);
+		debugf0("MC: %s: %s(): failed edac_mc_add_mc()\n",
+			__FILE__, __func__);
 		/* FIXME: perhaps some code should go here that disables error
 		 * reporting if we just enabled it
 		 */
@@ -1478,7 +1478,7 @@
 {
 	int rc;
 
-	debugf0("MC: " __FILE__ ": %s()\n", __func__);
+	debugf0("MC: %s: %s()\n", __FILE__, __func__);
 
 	/* wake up device */
 	rc = pci_enable_device(pdev);
@@ -1497,7 +1497,7 @@
 {
 	struct mem_ctl_info *mci;
 
-	debugf0(__FILE__ ": %s()\n", __func__);
+	debugf0("%s: %s()\n", __FILE__, __func__);
 
 	if (i5000_pci)
 		edac_pci_release_generic_ctl(i5000_pci);
@@ -1544,7 +1544,7 @@
 {
 	int pci_rc;
 
-	debugf2("MC: " __FILE__ ": %s()\n", __func__);
+	debugf2("MC: %s: %s()\n", __FILE__, __func__);
 
        /* Ensure that the OPSTATE is set correctly for POLL or NMI */
        opstate_init();
@@ -1560,7 +1560,7 @@
  */
 static void __exit i5000_exit(void)
 {
-	debugf2("MC: " __FILE__ ": %s()\n", __func__);
+	debugf2("MC: %s: %s()\n", __FILE__, __func__);
 	pci_unregister_driver(&i5000_driver);
 }
 
diff --git a/drivers/edac/i5400_edac.c b/drivers/edac/i5400_edac.c
index f99d106..010c1d6 100644
--- a/drivers/edac/i5400_edac.c
+++ b/drivers/edac/i5400_edac.c
@@ -694,7 +694,7 @@
 static void i5400_check_error(struct mem_ctl_info *mci)
 {
 	struct i5400_error_info info;
-	debugf4("MC%d: " __FILE__ ": %s()\n", mci->mc_idx, __func__);
+	debugf4("MC%d: %s: %s()\n", mci->mc_idx, __FILE__, __func__);
 	i5400_get_error_info(mci, &info);
 	i5400_process_error_info(mci, &info);
 }
@@ -1227,8 +1227,8 @@
 	if (dev_idx >= ARRAY_SIZE(i5400_devs))
 		return -EINVAL;
 
-	debugf0("MC: " __FILE__ ": %s(), pdev bus %u dev=0x%x fn=0x%x\n",
-		__func__,
+	debugf0("MC: %s: %s(), pdev bus %u dev=0x%x fn=0x%x\n",
+		__FILE__, __func__,
 		pdev->bus->number,
 		PCI_SLOT(pdev->devfn), PCI_FUNC(pdev->devfn));
 
@@ -1256,7 +1256,7 @@
 	if (mci == NULL)
 		return -ENOMEM;
 
-	debugf0("MC: " __FILE__ ": %s(): mci = %p\n", __func__, mci);
+	debugf0("MC: %s: %s(): mci = %p\n", __FILE__, __func__, mci);
 
 	mci->dev = &pdev->dev;	/* record ptr  to the generic device */
 
@@ -1299,8 +1299,8 @@
 
 	/* add this new MC control structure to EDAC's list of MCs */
 	if (edac_mc_add_mc(mci)) {
-		debugf0("MC: " __FILE__
-			": %s(): failed edac_mc_add_mc()\n", __func__);
+		debugf0("MC: %s: %s(): failed edac_mc_add_mc()\n",
+			__FILE__, __func__);
 		/* FIXME: perhaps some code should go here that disables error
 		 * reporting if we just enabled it
 		 */
@@ -1344,7 +1344,7 @@
 {
 	int rc;
 
-	debugf0("MC: " __FILE__ ": %s()\n", __func__);
+	debugf0("MC: %s: %s()\n", __FILE__, __func__);
 
 	/* wake up device */
 	rc = pci_enable_device(pdev);
@@ -1363,7 +1363,7 @@
 {
 	struct mem_ctl_info *mci;
 
-	debugf0(__FILE__ ": %s()\n", __func__);
+	debugf0("%s: %s()\n", __FILE__, __func__);
 
 	if (i5400_pci)
 		edac_pci_release_generic_ctl(i5400_pci);
@@ -1409,7 +1409,7 @@
 {
 	int pci_rc;
 
-	debugf2("MC: " __FILE__ ": %s()\n", __func__);
+	debugf2("MC: %s: %s()\n", __FILE__, __func__);
 
 	/* Ensure that the OPSTATE is set correctly for POLL or NMI */
 	opstate_init();
@@ -1425,7 +1425,7 @@
  */
 static void __exit i5400_exit(void)
 {
-	debugf2("MC: " __FILE__ ": %s()\n", __func__);
+	debugf2("MC: %s: %s()\n", __FILE__, __func__);
 	pci_unregister_driver(&i5400_driver);
 }
 
diff --git a/drivers/edac/i82443bxgx_edac.c b/drivers/edac/i82443bxgx_edac.c
index 2bf2c50..a2fa1fee 100644
--- a/drivers/edac/i82443bxgx_edac.c
+++ b/drivers/edac/i82443bxgx_edac.c
@@ -178,7 +178,7 @@
 {
 	struct i82443bxgx_edacmc_error_info info;
 
-	debugf1("MC%d: " __FILE__ ": %s()\n", mci->mc_idx, __func__);
+	debugf1("MC%d: %s: %s()\n", mci->mc_idx, __FILE__, __func__);
 	i82443bxgx_edacmc_get_error_info(mci, &info);
 	i82443bxgx_edacmc_process_error_info(mci, &info, 1);
 }
@@ -198,13 +198,13 @@
 	for (index = 0; index < mci->nr_csrows; index++) {
 		csrow = &mci->csrows[index];
 		pci_read_config_byte(pdev, I82443BXGX_DRB + index, &drbar);
-		debugf1("MC%d: " __FILE__ ": %s() Row=%d DRB = %#0x\n",
-			mci->mc_idx, __func__, index, drbar);
+		debugf1("MC%d: %s: %s() Row=%d DRB = %#0x\n",
+			mci->mc_idx, __FILE__, __func__, index, drbar);
 		row_high_limit = ((u32) drbar << 23);
 		/* find the DRAM Chip Select Base address and mask */
-		debugf1("MC%d: " __FILE__ ": %s() Row=%d, "
-			"Boundry Address=%#0x, Last = %#0x \n",
-			mci->mc_idx, __func__, index, row_high_limit,
+		debugf1("MC%d: %s: %s() Row=%d, "
+			"Boundry Address=%#0x, Last = %#0x\n",
+			mci->mc_idx, __FILE__, __func__, index, row_high_limit,
 			row_high_limit_last);
 
 		/* 440GX goes to 2GB, represented with a DRB of 0. */
@@ -237,7 +237,7 @@
 	enum mem_type mtype;
 	enum edac_type edac_mode;
 
-	debugf0("MC: " __FILE__ ": %s()\n", __func__);
+	debugf0("MC: %s: %s()\n", __FILE__, __func__);
 
 	/* Something is really hosed if PCI config space reads from
 	 * the MC aren't working.
@@ -250,7 +250,7 @@
 	if (mci == NULL)
 		return -ENOMEM;
 
-	debugf0("MC: " __FILE__ ": %s(): mci = %p\n", __func__, mci);
+	debugf0("MC: %s: %s(): mci = %p\n", __FILE__, __func__, mci);
 	mci->dev = &pdev->dev;
 	mci->mtype_cap = MEM_FLAG_EDO | MEM_FLAG_SDR | MEM_FLAG_RDR;
 	mci->edac_ctl_cap = EDAC_FLAG_NONE | EDAC_FLAG_EC | EDAC_FLAG_SECDED;
@@ -336,7 +336,7 @@
 			__func__);
 	}
 
-	debugf3("MC: " __FILE__ ": %s(): success\n", __func__);
+	debugf3("MC: %s: %s(): success\n", __FILE__, __func__);
 	return 0;
 
 fail:
@@ -352,7 +352,7 @@
 {
 	int rc;
 
-	debugf0("MC: " __FILE__ ": %s()\n", __func__);
+	debugf0("MC: %s: %s()\n", __FILE__, __func__);
 
 	/* don't need to call pci_enable_device() */
 	rc = i82443bxgx_edacmc_probe1(pdev, ent->driver_data);
@@ -367,7 +367,7 @@
 {
 	struct mem_ctl_info *mci;
 
-	debugf0(__FILE__ ": %s()\n", __func__);
+	debugf0("%s: %s()\n", __FILE__, __func__);
 
 	if (i82443bxgx_pci)
 		edac_pci_release_generic_ctl(i82443bxgx_pci);
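The i5000/i5400/i82443bxgx hunks are all the same mechanical conversion: stop pasting __FILE__ into the format string and pass it as a %s argument. Presumably the win is that the format literal becomes identical across files and call sites (letting the toolchain merge the strings) while the emitted message stays the same:

	debugf0("MC: " __FILE__ ": %s()\n", __func__);	/* before: unique literal per file */
	debugf0("MC: %s: %s()\n", __FILE__, __func__);	/* after: one shared format */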
diff --git a/drivers/gpio/Kconfig b/drivers/gpio/Kconfig
index fee678f..4fd0f27 100644
--- a/drivers/gpio/Kconfig
+++ b/drivers/gpio/Kconfig
@@ -139,6 +139,13 @@
 	  Board setup code must specify the model to use, and the start
 	  number for these GPIOs.
 
+config GPIO_MAX732X_IRQ
+	bool "Interrupt controller support for MAX732x"
+	depends on GPIO_MAX732X=y && GENERIC_HARDIRQS
+	help
+	  Say yes here to enable the max732x to be used as an interrupt
+	  controller. It requires the driver to be built into the kernel.
+
 config GPIO_PCA953X
 	tristate "PCA953x, PCA955x, TCA64xx, and MAX7310 I/O ports"
 	depends on I2C
@@ -264,10 +271,10 @@
 	  If unsure, say N.
 
 config GPIO_LANGWELL
-	bool "Intel Moorestown Platform Langwell GPIO support"
+	bool "Intel Langwell/Penwell GPIO support"
 	depends on PCI
 	help
-	  Say Y here to support Intel Moorestown platform GPIO.
+	  Say Y here to support Intel Langwell/Penwell GPIO.
 
 config GPIO_TIMBERDALE
 	bool "Support for timberdale GPIO IP"
diff --git a/drivers/gpio/cs5535-gpio.c b/drivers/gpio/cs5535-gpio.c
index 0c3c498..f73a155 100644
--- a/drivers/gpio/cs5535-gpio.c
+++ b/drivers/gpio/cs5535-gpio.c
@@ -197,7 +197,7 @@
 	return 0;
 }
 
-static char *cs5535_gpio_names[] = {
+static const char * const cs5535_gpio_names[] = {
 	"GPIO0", "GPIO1", "GPIO2", "GPIO3",
 	"GPIO4", "GPIO5", "GPIO6", "GPIO7",
 	"GPIO8", "GPIO9", "GPIO10", "GPIO11",
diff --git a/drivers/gpio/gpiolib.c b/drivers/gpio/gpiolib.c
index cae1b8c..3ca3654 100644
--- a/drivers/gpio/gpiolib.c
+++ b/drivers/gpio/gpiolib.c
@@ -722,7 +722,7 @@
 	unsigned long		flags;
 	struct gpio_desc	*desc;
 	int			status = -EINVAL;
-	char			*ioname = NULL;
+	const char		*ioname = NULL;
 
 	/* can't export until sysfs is available ... */
 	if (!gpio_class.p) {
@@ -753,7 +753,7 @@
 		struct device	*dev;
 
 		dev = device_create(&gpio_class, desc->chip->dev, MKDEV(0, 0),
-				desc, ioname ? ioname : "gpio%d", gpio);
+				desc, ioname ? ioname : "gpio%u", gpio);
 		if (!IS_ERR(dev)) {
 			status = sysfs_create_group(&dev->kobj,
 						&gpio_attr_group);
@@ -1106,7 +1106,7 @@
 fail:
 	/* failures here can mean systems won't boot... */
 	if (status)
-		pr_err("gpiochip_add: gpios %d..%d (%s) not registered\n",
+		pr_err("gpiochip_add: gpios %d..%d (%s) failed to register\n",
 			chip->base, chip->base + chip->ngpio - 1,
 			chip->label ? : "generic");
 	return status;
@@ -1447,6 +1447,49 @@
 }
 EXPORT_SYMBOL_GPL(gpio_direction_output);
 
+/**
+ * gpio_set_debounce - sets @debounce time for a @gpio
+ * @gpio: the gpio to set debounce time
+ * @debounce: debounce time in microseconds
+ */
+int gpio_set_debounce(unsigned gpio, unsigned debounce)
+{
+	unsigned long		flags;
+	struct gpio_chip	*chip;
+	struct gpio_desc	*desc = &gpio_desc[gpio];
+	int			status = -EINVAL;
+
+	spin_lock_irqsave(&gpio_lock, flags);
+
+	if (!gpio_is_valid(gpio))
+		goto fail;
+	chip = desc->chip;
+	if (!chip || !chip->set || !chip->set_debounce)
+		goto fail;
+	gpio -= chip->base;
+	if (gpio >= chip->ngpio)
+		goto fail;
+	status = gpio_ensure_requested(desc, gpio);
+	if (status < 0)
+		goto fail;
+
+	/* now we know the gpio is valid and chip won't vanish */
+
+	spin_unlock_irqrestore(&gpio_lock, flags);
+
+	might_sleep_if(extra_checks && chip->can_sleep);
+
+	return chip->set_debounce(chip, gpio, debounce);
+
+fail:
+	spin_unlock_irqrestore(&gpio_lock, flags);
+	if (status)
+		pr_debug("%s: gpio-%d status %d\n",
+			__func__, gpio, status);
+
+	return status;
+}
+EXPORT_SYMBOL_GPL(gpio_set_debounce);
 
 /* I/O calls are only valid after configuration completed; the relevant
  * "is this a valid GPIO" error checks should already have been done.
diff --git a/drivers/gpio/it8761e_gpio.c b/drivers/gpio/it8761e_gpio.c
index 41a9388..48fc43c 100644
--- a/drivers/gpio/it8761e_gpio.c
+++ b/drivers/gpio/it8761e_gpio.c
@@ -217,7 +217,10 @@
 static void __exit it8761e_gpio_exit(void)
 {
 	if (gpio_ba) {
-		gpiochip_remove(&it8761e_gpio_chip);
+		int ret = gpiochip_remove(&it8761e_gpio_chip);
+
+		WARN(ret, "%s(): gpiochip_remove() failed, ret=%d\n",
+				__func__, ret);
 
 		release_region(gpio_ba, GPIO_IOSIZE);
 		gpio_ba = 0;
diff --git a/drivers/gpio/langwell_gpio.c b/drivers/gpio/langwell_gpio.c
index 00c3a14..8383a8d 100644
--- a/drivers/gpio/langwell_gpio.c
+++ b/drivers/gpio/langwell_gpio.c
@@ -17,6 +17,7 @@
 
 /* Supports:
  * Moorestown platform Langwell chip.
+ * Medfield platform Penwell chip.
  */
 
 #include <linux/module.h>
@@ -31,44 +32,65 @@
 #include <linux/gpio.h>
 #include <linux/slab.h>
 
-struct lnw_gpio_register {
-	u32	GPLR[2];
-	u32	GPDR[2];
-	u32	GPSR[2];
-	u32	GPCR[2];
-	u32	GRER[2];
-	u32	GFER[2];
-	u32	GEDR[2];
+/*
+ * The Langwell chip has 64 pins, so two 32-bit registers control each
+ * feature, while the Penwell chip has 96 pins per block and needs three
+ * 32-bit registers per feature. So we only define the register order
+ * here instead of a structure; to get the bit offset for a pin (using
+ * GPDR as an example):
+ *
+ * nreg = ngpio / 32;
+ * reg = offset / 32;
+ * bit = offset % 32;
+ * reg_addr = reg_base + GPDR * nreg * 4 + reg * 4;
+ *
+ * so that bit of the register at reg_addr controls the GPDR feature of
+ * pin "offset".
+ */
+
+enum GPIO_REG {
+	GPLR = 0,	/* pin level read-only */
+	GPDR,		/* pin direction */
+	GPSR,		/* pin set */
+	GPCR,		/* pin clear */
+	GRER,		/* rising edge detect */
+	GFER,		/* falling edge detect */
+	GEDR,		/* edge detect result */
 };
 
 struct lnw_gpio {
 	struct gpio_chip		chip;
-	struct lnw_gpio_register 	*reg_base;
+	void				*reg_base;
 	spinlock_t			lock;
 	unsigned			irq_base;
 };
 
-static int lnw_gpio_get(struct gpio_chip *chip, unsigned offset)
+static void __iomem *gpio_reg(struct gpio_chip *chip, unsigned offset,
+			enum GPIO_REG reg_type)
 {
 	struct lnw_gpio *lnw = container_of(chip, struct lnw_gpio, chip);
+	unsigned nreg = chip->ngpio / 32;
 	u8 reg = offset / 32;
-	void __iomem *gplr;
+	void __iomem *ptr;
 
-	gplr = (void __iomem *)(&lnw->reg_base->GPLR[reg]);
+	ptr = (void __iomem *)(lnw->reg_base + reg_type * nreg * 4 + reg * 4);
+	return ptr;
+}
+
+static int lnw_gpio_get(struct gpio_chip *chip, unsigned offset)
+{
+	void __iomem *gplr = gpio_reg(chip, offset, GPLR);
+
 	return readl(gplr) & BIT(offset % 32);
 }
 
 static void lnw_gpio_set(struct gpio_chip *chip, unsigned offset, int value)
 {
-	struct lnw_gpio *lnw = container_of(chip, struct lnw_gpio, chip);
-	u8 reg = offset / 32;
 	void __iomem *gpsr, *gpcr;
 
 	if (value) {
-		gpsr = (void __iomem *)(&lnw->reg_base->GPSR[reg]);
+		gpsr = gpio_reg(chip, offset, GPSR);
 		writel(BIT(offset % 32), gpsr);
 	} else {
-		gpcr = (void __iomem *)(&lnw->reg_base->GPCR[reg]);
+		gpcr = gpio_reg(chip, offset, GPCR);
 		writel(BIT(offset % 32), gpcr);
 	}
 }
@@ -76,12 +98,10 @@
 static int lnw_gpio_direction_input(struct gpio_chip *chip, unsigned offset)
 {
 	struct lnw_gpio *lnw = container_of(chip, struct lnw_gpio, chip);
-	u8 reg = offset / 32;
+	void __iomem *gpdr = gpio_reg(chip, offset, GPDR);
 	u32 value;
 	unsigned long flags;
-	void __iomem *gpdr;
 
-	gpdr = (void __iomem *)(&lnw->reg_base->GPDR[reg]);
 	spin_lock_irqsave(&lnw->lock, flags);
 	value = readl(gpdr);
 	value &= ~BIT(offset % 32);
@@ -94,12 +114,10 @@
 			unsigned offset, int value)
 {
 	struct lnw_gpio *lnw = container_of(chip, struct lnw_gpio, chip);
-	u8 reg = offset / 32;
+	void __iomem *gpdr = gpio_reg(chip, offset, GPDR);
 	unsigned long flags;
-	void __iomem *gpdr;
 
 	lnw_gpio_set(chip, offset, value);
-	gpdr = (void __iomem *)(&lnw->reg_base->GPDR[reg]);
 	spin_lock_irqsave(&lnw->lock, flags);
 	value = readl(gpdr);
 	value |= BIT(offset % 32);
@@ -118,11 +136,10 @@
 {
 	struct lnw_gpio *lnw = get_irq_chip_data(irq);
 	u32 gpio = irq - lnw->irq_base;
-	u8 reg = gpio / 32;
 	unsigned long flags;
 	u32 value;
-	void __iomem *grer = (void __iomem *)(&lnw->reg_base->GRER[reg]);
-	void __iomem *gfer = (void __iomem *)(&lnw->reg_base->GFER[reg]);
+	void __iomem *grer = gpio_reg(&lnw->chip, gpio, GRER);
+	void __iomem *gfer = gpio_reg(&lnw->chip, gpio, GFER);
 
 	if (gpio >= lnw->chip.ngpio)
 		return -EINVAL;
@@ -158,8 +175,10 @@
 	.set_type	= lnw_irq_type,
 };
 
-static struct pci_device_id lnw_gpio_ids[] = {
-	{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x080f) },
+static DEFINE_PCI_DEVICE_TABLE(lnw_gpio_ids) = {   /* pin number */
+	{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x080f), .driver_data = 64 },
+	{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x081f), .driver_data = 96 },
+	{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x081a), .driver_data = 96 },
 	{ 0, }
 };
 MODULE_DEVICE_TABLE(pci, lnw_gpio_ids);
@@ -167,17 +186,17 @@
 static void lnw_irq_handler(unsigned irq, struct irq_desc *desc)
 {
 	struct lnw_gpio *lnw = (struct lnw_gpio *)get_irq_data(irq);
-	u32 reg, gpio;
+	u32 base, gpio;
 	void __iomem *gedr;
 	u32 gedr_v;
 
 	/* check GPIO controller to check which pin triggered the interrupt */
-	for (reg = 0; reg < lnw->chip.ngpio / 32; reg++) {
-		gedr = (void __iomem *)(&lnw->reg_base->GEDR[reg]);
+	for (base = 0; base < lnw->chip.ngpio; base += 32) {
+		gedr = gpio_reg(&lnw->chip, base, GEDR);
 		gedr_v = readl(gedr);
 		if (!gedr_v)
 			continue;
-		for (gpio = reg*32; gpio < reg*32+32; gpio++)
+		for (gpio = base; gpio < base + 32; gpio++)
 			if (gedr_v & BIT(gpio % 32)) {
 				pr_debug("pin %d triggered\n", gpio);
 				generic_handle_irq(lnw->irq_base + gpio);
@@ -245,7 +264,7 @@
 	lnw->chip.set = lnw_gpio_set;
 	lnw->chip.to_irq = lnw_gpio_to_irq;
 	lnw->chip.base = gpio_base;
-	lnw->chip.ngpio = 64;
+	lnw->chip.ngpio = id->driver_data;
 	lnw->chip.can_sleep = 0;
 	pci_set_drvdata(pdev, lnw);
 	retval = gpiochip_add(&lnw->chip);
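Working gpio_reg() through a concrete pin makes the flattened layout easier to see; take a 96-pin Penwell block and ask for pin 70's direction register (GPDR is 1 in the enum above):

	/*
	 * nreg = 96 / 32 = 3;	three 32-bit registers per feature
	 * reg  = 70 / 32 = 2;	pin 70 sits in the third register
	 * bit  = 70 % 32 = 6;
	 * addr = reg_base + GPDR * nreg * 4 + reg * 4
	 *	= reg_base + 1*3*4 + 2*4 = reg_base + 0x14;
	 * so bit 6 of the register at reg_base + 0x14 is pin 70's GPDR bit.
	 */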
diff --git a/drivers/gpio/max732x.c b/drivers/gpio/max732x.c
index f786824..9cad60f 100644
--- a/drivers/gpio/max732x.c
+++ b/drivers/gpio/max732x.c
@@ -17,7 +17,8 @@
 #include <linux/slab.h>
 #include <linux/string.h>
 #include <linux/gpio.h>
-
+#include <linux/interrupt.h>
+#include <linux/irq.h>
 #include <linux/i2c.h>
 #include <linux/i2c/max732x.h>
 
@@ -31,7 +32,8 @@
  *   - Open Drain I/O
  *
  * designated by 'O', 'I' and 'P' individually according to MAXIM's
- * datasheets.
+ * datasheets. 'I' and 'P' ports are interrupt capable, some with
+ * a dedicated interrupt mask.
  *
  * There are two groups of I/O ports, each group usually includes
  * up to 8 I/O ports, and is accessed by a specific I2C address:
@@ -44,7 +46,8 @@
  *
  * Within each group of ports, there are five known combinations of
  * I/O ports: 4I4O, 4P4O, 8I, 8P, 8O, see the definitions below for
- * the detailed organization of these ports.
+ * the detailed organization of these ports. Only Group A is interrupt
+ * capable.
  *
  * GPIO numbers start from 'gpio_base + 0' to 'gpio_base + 8/16',
  * and GPIOs from GROUP_A are numbered before those from GROUP_B
@@ -68,16 +71,47 @@
 #define GROUP_A(x)	((x) & 0xffff)	/* I2C Addr: 0b'110xxxx */
 #define GROUP_B(x)	((x) << 16)	/* I2C Addr: 0b'101xxxx */
 
+#define INT_NONE	0x0	/* No interrupt capability */
+#define INT_NO_MASK	0x1	/* Has interrupts, no mask */
+#define INT_INDEP_MASK	0x2	/* Has interrupts, independent mask */
+#define INT_MERGED_MASK 0x3	/* Has interrupts, merged mask */
+
+#define INT_CAPS(x)	(((uint64_t)(x)) << 32)
+
+enum {
+	MAX7319,
+	MAX7320,
+	MAX7321,
+	MAX7322,
+	MAX7323,
+	MAX7324,
+	MAX7325,
+	MAX7326,
+	MAX7327,
+};
+
+static uint64_t max732x_features[] = {
+	[MAX7319] = GROUP_A(IO_8I) | INT_CAPS(INT_MERGED_MASK),
+	[MAX7320] = GROUP_B(IO_8O),
+	[MAX7321] = GROUP_A(IO_8P) | INT_CAPS(INT_NO_MASK),
+	[MAX7322] = GROUP_A(IO_4I4O) | INT_CAPS(INT_MERGED_MASK),
+	[MAX7323] = GROUP_A(IO_4P4O) | INT_CAPS(INT_INDEP_MASK),
+	[MAX7324] = GROUP_A(IO_8I) | GROUP_B(IO_8O) | INT_CAPS(INT_MERGED_MASK),
+	[MAX7325] = GROUP_A(IO_8P) | GROUP_B(IO_8O) | INT_CAPS(INT_NO_MASK),
+	[MAX7326] = GROUP_A(IO_4I4O) | GROUP_B(IO_8O) | INT_CAPS(INT_MERGED_MASK),
+	[MAX7327] = GROUP_A(IO_4P4O) | GROUP_B(IO_8O) | INT_CAPS(INT_NO_MASK),
+};
+
 static const struct i2c_device_id max732x_id[] = {
-	{ "max7319", GROUP_A(IO_8I) },
-	{ "max7320", GROUP_B(IO_8O) },
-	{ "max7321", GROUP_A(IO_8P) },
-	{ "max7322", GROUP_A(IO_4I4O) },
-	{ "max7323", GROUP_A(IO_4P4O) },
-	{ "max7324", GROUP_A(IO_8I) | GROUP_B(IO_8O) },
-	{ "max7325", GROUP_A(IO_8P) | GROUP_B(IO_8O) },
-	{ "max7326", GROUP_A(IO_4I4O) | GROUP_B(IO_8O) },
-	{ "max7327", GROUP_A(IO_4P4O) | GROUP_B(IO_8O) },
+	{ "max7319", MAX7319 },
+	{ "max7320", MAX7320 },
+	{ "max7321", MAX7321 },
+	{ "max7322", MAX7322 },
+	{ "max7323", MAX7323 },
+	{ "max7324", MAX7324 },
+	{ "max7325", MAX7325 },
+	{ "max7326", MAX7326 },
+	{ "max7327", MAX7327 },
 	{ },
 };
 MODULE_DEVICE_TABLE(i2c, max732x_id);
@@ -96,9 +130,19 @@
 
 	struct mutex	lock;
 	uint8_t		reg_out[2];
+
+#ifdef CONFIG_GPIO_MAX732X_IRQ
+	struct mutex	irq_lock;
+	int		irq_base;
+	uint8_t		irq_mask;
+	uint8_t		irq_mask_cur;
+	uint8_t		irq_trig_raise;
+	uint8_t		irq_trig_fall;
+	uint8_t		irq_features;
+#endif
 };
 
-static int max732x_write(struct max732x_chip *chip, int group_a, uint8_t val)
+static int max732x_writeb(struct max732x_chip *chip, int group_a, uint8_t val)
 {
 	struct i2c_client *client;
 	int ret;
@@ -113,7 +157,7 @@
 	return 0;
 }
 
-static int max732x_read(struct max732x_chip *chip, int group_a, uint8_t *val)
+static int max732x_readb(struct max732x_chip *chip, int group_a, uint8_t *val)
 {
 	struct i2c_client *client;
 	int ret;
@@ -142,7 +186,7 @@
 
 	chip = container_of(gc, struct max732x_chip, gpio_chip);
 
-	ret = max732x_read(chip, is_group_a(chip, off), &reg_val);
+	ret = max732x_readb(chip, is_group_a(chip, off), &reg_val);
 	if (ret < 0)
 		return 0;
 
@@ -162,7 +206,7 @@
 	reg_out = (off > 7) ? chip->reg_out[1] : chip->reg_out[0];
 	reg_out = (val) ? reg_out | mask : reg_out & ~mask;
 
-	ret = max732x_write(chip, is_group_a(chip, off), reg_out);
+	ret = max732x_writeb(chip, is_group_a(chip, off), reg_out);
 	if (ret < 0)
 		goto out;
 
@@ -188,6 +232,13 @@
 		return -EACCES;
 	}
 
+	/*
+	 * Open-drain pins must be set to high impedance (which is
+	 * equivalent to output-high) to be turned into an input.
+	 */
+	if ((mask & chip->dir_output))
+		max732x_gpio_set_value(gc, off, 1);
+
 	return 0;
 }
 
@@ -209,12 +260,278 @@
 	return 0;
 }
 
+#ifdef CONFIG_GPIO_MAX732X_IRQ
+static int max732x_writew(struct max732x_chip *chip, uint16_t val)
+{
+	int ret;
+
+	val = cpu_to_le16(val);
+
+	ret = i2c_master_send(chip->client_group_a, (char *)&val, 2);
+	if (ret < 0) {
+		dev_err(&chip->client_group_a->dev, "failed writing\n");
+		return ret;
+	}
+
+	return 0;
+}
+
+static int max732x_readw(struct max732x_chip *chip, uint16_t *val)
+{
+	int ret;
+
+	ret = i2c_master_recv(chip->client_group_a, (char *)val, 2);
+	if (ret < 0) {
+		dev_err(&chip->client_group_a->dev, "failed reading\n");
+		return ret;
+	}
+
+	*val = le16_to_cpu(*val);
+	return 0;
+}
+
+static void max732x_irq_update_mask(struct max732x_chip *chip)
+{
+	uint16_t msg;
+
+	if (chip->irq_mask == chip->irq_mask_cur)
+		return;
+
+	chip->irq_mask = chip->irq_mask_cur;
+
+	if (chip->irq_features == INT_NO_MASK)
+		return;
+
+	mutex_lock(&chip->lock);
+
+	switch (chip->irq_features) {
+	case INT_INDEP_MASK:
+		msg = (chip->irq_mask << 8) | chip->reg_out[0];
+		max732x_writew(chip, msg);
+		break;
+
+	case INT_MERGED_MASK:
+		msg = chip->irq_mask | chip->reg_out[0];
+		max732x_writeb(chip, 1, (uint8_t)msg);
+		break;
+	}
+
+	mutex_unlock(&chip->lock);
+}
+
+static int max732x_gpio_to_irq(struct gpio_chip *gc, unsigned off)
+{
+	struct max732x_chip *chip;
+
+	chip = container_of(gc, struct max732x_chip, gpio_chip);
+	return chip->irq_base + off;
+}
+
+static void max732x_irq_mask(unsigned int irq)
+{
+	struct max732x_chip *chip = get_irq_chip_data(irq);
+
+	chip->irq_mask_cur &= ~(1 << (irq - chip->irq_base));
+}
+
+static void max732x_irq_unmask(unsigned int irq)
+{
+	struct max732x_chip *chip = get_irq_chip_data(irq);
+
+	chip->irq_mask_cur |= 1 << (irq - chip->irq_base);
+}
+
+static void max732x_irq_bus_lock(unsigned int irq)
+{
+	struct max732x_chip *chip = get_irq_chip_data(irq);
+
+	mutex_lock(&chip->irq_lock);
+	chip->irq_mask_cur = chip->irq_mask;
+}
+
+static void max732x_irq_bus_sync_unlock(unsigned int irq)
+{
+	struct max732x_chip *chip = get_irq_chip_data(irq);
+
+	max732x_irq_update_mask(chip);
+	mutex_unlock(&chip->irq_lock);
+}
+
+static int max732x_irq_set_type(unsigned int irq, unsigned int type)
+{
+	struct max732x_chip *chip = get_irq_chip_data(irq);
+	uint16_t off = irq - chip->irq_base;
+	uint16_t mask = 1 << off;
+
+	if (!(mask & chip->dir_input)) {
+		dev_dbg(&chip->client->dev, "%s port %d is output only\n",
+			chip->client->name, off);
+		return -EACCES;
+	}
+
+	if (!(type & IRQ_TYPE_EDGE_BOTH)) {
+		dev_err(&chip->client->dev, "irq %d: unsupported type %d\n",
+			irq, type);
+		return -EINVAL;
+	}
+
+	if (type & IRQ_TYPE_EDGE_FALLING)
+		chip->irq_trig_fall |= mask;
+	else
+		chip->irq_trig_fall &= ~mask;
+
+	if (type & IRQ_TYPE_EDGE_RISING)
+		chip->irq_trig_raise |= mask;
+	else
+		chip->irq_trig_raise &= ~mask;
+
+	return max732x_gpio_direction_input(&chip->gpio_chip, off);
+}
+
+static struct irq_chip max732x_irq_chip = {
+	.name			= "max732x",
+	.mask			= max732x_irq_mask,
+	.unmask			= max732x_irq_unmask,
+	.bus_lock		= max732x_irq_bus_lock,
+	.bus_sync_unlock	= max732x_irq_bus_sync_unlock,
+	.set_type		= max732x_irq_set_type,
+};
+
+static uint8_t max732x_irq_pending(struct max732x_chip *chip)
+{
+	uint8_t cur_stat;
+	uint8_t old_stat;
+	uint8_t trigger;
+	uint8_t pending;
+	uint16_t status;
+	int ret;
+
+	ret = max732x_readw(chip, &status);
+	if (ret)
+		return 0;
+
+	trigger = status >> 8;
+	trigger &= chip->irq_mask;
+
+	if (!trigger)
+		return 0;
+
+	cur_stat = status & 0xFF;
+	cur_stat &= chip->irq_mask;
+
+	old_stat = cur_stat ^ trigger;
+
+	pending = (old_stat & chip->irq_trig_fall) |
+		  (cur_stat & chip->irq_trig_raise);
+	pending &= trigger;
+
+	return pending;
+}
+
+static irqreturn_t max732x_irq_handler(int irq, void *devid)
+{
+	struct max732x_chip *chip = devid;
+	uint8_t pending;
+	uint8_t level;
+
+	pending = max732x_irq_pending(chip);
+
+	if (!pending)
+		return IRQ_HANDLED;
+
+	do {
+		level = __ffs(pending);
+		handle_nested_irq(level + chip->irq_base);
+
+		pending &= ~(1 << level);
+	} while (pending);
+
+	return IRQ_HANDLED;
+}
+
+static int max732x_irq_setup(struct max732x_chip *chip,
+			     const struct i2c_device_id *id)
+{
+	struct i2c_client *client = chip->client;
+	struct max732x_platform_data *pdata = client->dev.platform_data;
+	int has_irq = max732x_features[id->driver_data] >> 32;
+	int ret;
+
+	if (pdata->irq_base && has_irq != INT_NONE) {
+		int lvl;
+
+		chip->irq_base = pdata->irq_base;
+		chip->irq_features = has_irq;
+		mutex_init(&chip->irq_lock);
+
+		for (lvl = 0; lvl < chip->gpio_chip.ngpio; lvl++) {
+			int irq = lvl + chip->irq_base;
+
+			if (!(chip->dir_input & (1 << lvl)))
+				continue;
+
+			set_irq_chip_data(irq, chip);
+			set_irq_chip_and_handler(irq, &max732x_irq_chip,
+						 handle_edge_irq);
+			set_irq_nested_thread(irq, 1);
+#ifdef CONFIG_ARM
+			set_irq_flags(irq, IRQF_VALID);
+#else
+			set_irq_noprobe(irq);
+#endif
+		}
+
+		ret = request_threaded_irq(client->irq,
+					   NULL,
+					   max732x_irq_handler,
+					   IRQF_TRIGGER_FALLING | IRQF_ONESHOT,
+					   dev_name(&client->dev), chip);
+		if (ret) {
+			dev_err(&client->dev, "failed to request irq %d\n",
+				client->irq);
+			goto out_failed;
+		}
+
+		chip->gpio_chip.to_irq = max732x_gpio_to_irq;
+	}
+
+	return 0;
+
+out_failed:
+	chip->irq_base = 0;
+	return ret;
+}
+
+static void max732x_irq_teardown(struct max732x_chip *chip)
+{
+	if (chip->irq_base)
+		free_irq(chip->client->irq, chip);
+}
+#else /* CONFIG_GPIO_MAX732X_IRQ */
+static int max732x_irq_setup(struct max732x_chip *chip,
+			     const struct i2c_device_id *id)
+{
+	struct i2c_client *client = chip->client;
+	struct max732x_platform_data *pdata = client->dev.platform_data;
+	int has_irq = max732x_features[id->driver_data] >> 32;
+
+	if (pdata->irq_base && has_irq != INT_NONE)
+		dev_warn(&client->dev, "interrupt support not compiled in\n");
+
+	return 0;
+}
+
+static void max732x_irq_teardown(struct max732x_chip *chip)
+{
+}
+#endif
+
 static int __devinit max732x_setup_gpio(struct max732x_chip *chip,
 					const struct i2c_device_id *id,
 					unsigned gpio_start)
 {
 	struct gpio_chip *gc = &chip->gpio_chip;
-	uint32_t id_data = id->driver_data;
+	uint32_t id_data = (uint32_t)max732x_features[id->driver_data];
 	int i, port = 0;
 
 	for (i = 0; i < 16; i++, id_data >>= 2) {
@@ -285,14 +602,14 @@
 	switch (client->addr & 0x70) {
 	case 0x60:
 		chip->client_group_a = client;
-		if (nr_port > 7) {
+		if (nr_port > 8) {
 			c = i2c_new_dummy(client->adapter, addr_b);
 			chip->client_group_b = chip->client_dummy = c;
 		}
 		break;
 	case 0x50:
 		chip->client_group_b = client;
-		if (nr_port > 7) {
+		if (nr_port > 8) {
 			c = i2c_new_dummy(client->adapter, addr_a);
 			chip->client_group_a = chip->client_dummy = c;
 		}
@@ -306,9 +623,13 @@
 
 	mutex_init(&chip->lock);
 
-	max732x_read(chip, is_group_a(chip, 0), &chip->reg_out[0]);
-	if (nr_port > 7)
-		max732x_read(chip, is_group_a(chip, 8), &chip->reg_out[1]);
+	max732x_readb(chip, is_group_a(chip, 0), &chip->reg_out[0]);
+	if (nr_port > 8)
+		max732x_readb(chip, is_group_a(chip, 8), &chip->reg_out[1]);
+
+	ret = max732x_irq_setup(chip, id);
+	if (ret)
+		goto out_failed;
 
 	ret = gpiochip_add(&chip->gpio_chip);
 	if (ret)
@@ -325,6 +646,7 @@
 	return 0;
 
 out_failed:
+	max732x_irq_teardown(chip);
 	kfree(chip);
 	return ret;
 }
@@ -352,6 +674,8 @@
 		return ret;
 	}
 
+	max732x_irq_teardown(chip);
+
 	/* unregister any dummy i2c_client */
 	if (chip->client_dummy)
 		i2c_unregister_device(chip->client_dummy);
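Board code opts into the new max732x interrupt path by filling in irq_base in the platform data; when it is zero, or the part has no interrupt capability, max732x_irq_setup() does nothing (or just warns if the support isn't compiled in). A sketch with invented numbers -- the GPIO/IRQ bases, the SoC IRQ, and the I2C address are assumptions, not values from the patch:

	static struct max732x_platform_data max7325_pdata = {
		.gpio_base	= 200,	/* first expander GPIO number */
		.irq_base	= 320,	/* first virtual IRQ for input pins */
	};

	static struct i2c_board_info board_i2c_devs[] __initdata = {
		{
			I2C_BOARD_INFO("max7325", 0x6d),	/* group A address */
			.irq		= 17,	/* hypothetical SoC IRQ wired to INT# */
			.platform_data	= &max7325_pdata,
		},
	};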
diff --git a/drivers/gpio/pca953x.c b/drivers/gpio/pca953x.c
index f156ab3..a2b12aa 100644
--- a/drivers/gpio/pca953x.c
+++ b/drivers/gpio/pca953x.c
@@ -73,7 +73,7 @@
 	struct i2c_client *client;
 	struct pca953x_platform_data *dyn_pdata;
 	struct gpio_chip gpio_chip;
-	char **names;
+	const char *const *names;
 };
 
 static int pca953x_write_reg(struct pca953x_chip *chip, int reg, uint16_t val)
diff --git a/drivers/gpio/pl061.c b/drivers/gpio/pl061.c
index 105701a..ee568c8 100644
--- a/drivers/gpio/pl061.c
+++ b/drivers/gpio/pl061.c
@@ -164,7 +164,7 @@
 	unsigned long flags;
 	u8 gpiois, gpioibe, gpioiev;
 
-	if (offset < 0 || offset > PL061_GPIO_NR)
+	if (offset < 0 || offset >= PL061_GPIO_NR)
 		return -EINVAL;
 
 	spin_lock_irqsave(&chip->irq_lock, flags);
diff --git a/drivers/gpu/drm/drm_edid.c b/drivers/gpu/drm/drm_edid.c
index f569ae8..c198186 100644
--- a/drivers/gpu/drm/drm_edid.c
+++ b/drivers/gpu/drm/drm_edid.c
@@ -147,7 +147,10 @@
 		csum += raw_edid[i];
 	if (csum) {
 		DRM_ERROR("EDID checksum is invalid, remainder is %d\n", csum);
-		goto bad;
+
+		/* allow CEA to slide through, switches mangle this */
+		if (raw_edid[0] != 0x02)
+			goto bad;
 	}
 
 	/* per-block-type checks */
diff --git a/drivers/gpu/drm/nouveau/nouveau_connector.c b/drivers/gpu/drm/nouveau/nouveau_connector.c
index 7e663a7..266b0ff 100644
--- a/drivers/gpu/drm/nouveau/nouveau_connector.c
+++ b/drivers/gpu/drm/nouveau/nouveau_connector.c
@@ -241,7 +241,8 @@
 	if (nv_encoder && nv_connector->native_mode) {
 		unsigned status = connector_status_connected;
 
-#ifdef CONFIG_ACPI
+#if defined(CONFIG_ACPI_BUTTON) || \
+	(defined(CONFIG_ACPI_BUTTON_MODULE) && defined(MODULE))
 		if (!nouveau_ignorelid && !acpi_lid_open())
 			status = connector_status_unknown;
 #endif
diff --git a/drivers/gpu/drm/nouveau/nv40_graph.c b/drivers/gpu/drm/nouveau/nv40_graph.c
index 0616c96..704a25d 100644
--- a/drivers/gpu/drm/nouveau/nv40_graph.c
+++ b/drivers/gpu/drm/nouveau/nv40_graph.c
@@ -253,7 +253,11 @@
 
 	if (!dev_priv->engine.graph.ctxprog) {
 		struct nouveau_grctx ctx = {};
-		uint32_t cp[256];
+		uint32_t *cp;
+
+		cp = kmalloc(sizeof(*cp) * 256, GFP_KERNEL);
+		if (!cp)
+			return -ENOMEM;
 
 		ctx.dev = dev;
 		ctx.mode = NOUVEAU_GRCTX_PROG;
@@ -265,6 +269,8 @@
 		nv_wr32(dev, NV40_PGRAPH_CTXCTL_UCODE_INDEX, 0);
 		for (i = 0; i < ctx.ctxprog_len; i++)
 			nv_wr32(dev, NV40_PGRAPH_CTXCTL_UCODE_DATA, cp[i]);
+
+		kfree(cp);
 	}
 
 	/* No context present currently */
diff --git a/drivers/gpu/drm/radeon/atombios_crtc.c b/drivers/gpu/drm/radeon/atombios_crtc.c
index 03dd6c4..f3f2827 100644
--- a/drivers/gpu/drm/radeon/atombios_crtc.c
+++ b/drivers/gpu/drm/radeon/atombios_crtc.c
@@ -707,6 +707,7 @@
 		break;
 	case ATOM_DCPLL:
 	case ATOM_PPLL_INVALID:
+	default:
 		pll = &rdev->clock.dcpll;
 		break;
 	}
diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h
index 66a37fb..669feb6 100644
--- a/drivers/gpu/drm/radeon/radeon.h
+++ b/drivers/gpu/drm/radeon/radeon.h
@@ -576,6 +576,7 @@
  */
 int radeon_agp_init(struct radeon_device *rdev);
 void radeon_agp_resume(struct radeon_device *rdev);
+void radeon_agp_suspend(struct radeon_device *rdev);
 void radeon_agp_fini(struct radeon_device *rdev);
 
 
diff --git a/drivers/gpu/drm/radeon/radeon_agp.c b/drivers/gpu/drm/radeon/radeon_agp.c
index 28e473f..f40dfb7 100644
--- a/drivers/gpu/drm/radeon/radeon_agp.c
+++ b/drivers/gpu/drm/radeon/radeon_agp.c
@@ -270,3 +270,8 @@
 	}
 #endif
 }
+
+void radeon_agp_suspend(struct radeon_device *rdev)
+{
+	radeon_agp_fini(rdev);
+}
diff --git a/drivers/gpu/drm/radeon/radeon_atombios.c b/drivers/gpu/drm/radeon/radeon_atombios.c
index 6e733fd..24ea683 100644
--- a/drivers/gpu/drm/radeon/radeon_atombios.c
+++ b/drivers/gpu/drm/radeon/radeon_atombios.c
@@ -680,11 +680,19 @@
 	uint8_t dac;
 	union atom_supported_devices *supported_devices;
 	int i, j, max_device;
-	struct bios_connector bios_connectors[ATOM_MAX_SUPPORTED_DEVICE];
+	struct bios_connector *bios_connectors;
+	size_t bc_size = sizeof(*bios_connectors) * ATOM_MAX_SUPPORTED_DEVICE;
 
-	if (!atom_parse_data_header(ctx, index, &size, &frev, &crev, &data_offset))
+	bios_connectors = kzalloc(bc_size, GFP_KERNEL);
+	if (!bios_connectors)
 		return false;
 
+	if (!atom_parse_data_header(ctx, index, &size, &frev, &crev,
+				    &data_offset)) {
+		kfree(bios_connectors);
+		return false;
+	}
+
 	supported_devices =
 	    (union atom_supported_devices *)(ctx->bios + data_offset);
 
@@ -851,6 +859,7 @@
 
 	radeon_link_encoder_connector(dev);
 
+	kfree(bios_connectors);
 	return true;
 }
 
diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c
index a20b612..fdc3fdf7 100644
--- a/drivers/gpu/drm/radeon/radeon_device.c
+++ b/drivers/gpu/drm/radeon/radeon_device.c
@@ -754,6 +754,8 @@
 	/* evict remaining vram memory */
 	radeon_bo_evict_vram(rdev);
 
+	radeon_agp_suspend(rdev);
+
 	pci_save_state(dev->pdev);
 	if (state.event == PM_EVENT_SUSPEND) {
 		/* Shut down the device */
diff --git a/drivers/hid/hid-debug.c b/drivers/hid/hid-debug.c
index 56f314f..c940267 100644
--- a/drivers/hid/hid-debug.c
+++ b/drivers/hid/hid-debug.c
@@ -811,7 +811,7 @@
 	[REL_WHEEL] = "Wheel",		[REL_MISC] = "Misc",
 };
 
-static const char *absolutes[ABS_MAX + 1] = {
+static const char *absolutes[ABS_CNT] = {
 	[ABS_X] = "X",			[ABS_Y] = "Y",
 	[ABS_Z] = "Z",			[ABS_RX] = "Rx",
 	[ABS_RY] = "Ry",		[ABS_RZ] = "Rz",
diff --git a/drivers/infiniband/hw/ehca/ehca_irq.c b/drivers/infiniband/hw/ehca/ehca_irq.c
index 07cae55..e571e60 100644
--- a/drivers/infiniband/hw/ehca/ehca_irq.c
+++ b/drivers/infiniband/hw/ehca/ehca_irq.c
@@ -847,7 +847,7 @@
 		ehca_gen_dbg("CPU: %x (CPU_PREPARE)", cpu);
 		if (!create_comp_task(pool, cpu)) {
 			ehca_gen_err("Can't create comp_task for cpu: %x", cpu);
-			return NOTIFY_BAD;
+			return notifier_from_errno(-ENOMEM);
 		}
 		break;
 	case CPU_UP_CANCELED:
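
notifier_from_errno(), used here and in the raid5 hunk below, folds a
-errno value into the notifier return code so the CPU-hotplug core can
recover it with notifier_to_errno(); the bare NOTIFY_BAD carries no
error information. Paraphrasing include/linux/notifier.h, the helper is
roughly:

	static inline int notifier_from_errno(int err)
	{
		if (err)
			return NOTIFY_STOP_MASK | (NOTIFY_OK - err);

		return NOTIFY_OK;
	}
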
diff --git a/drivers/input/joydev.c b/drivers/input/joydev.c
index 423e0e6..34157bb 100644
--- a/drivers/input/joydev.c
+++ b/drivers/input/joydev.c
@@ -47,15 +47,15 @@
 	struct mutex mutex;
 	struct device dev;
 
-	struct js_corr corr[ABS_MAX + 1];
+	struct js_corr corr[ABS_CNT];
 	struct JS_DATA_SAVE_TYPE glue;
 	int nabs;
 	int nkey;
 	__u16 keymap[KEY_MAX - BTN_MISC + 1];
 	__u16 keypam[KEY_MAX - BTN_MISC + 1];
-	__u8 absmap[ABS_MAX + 1];
-	__u8 abspam[ABS_MAX + 1];
-	__s16 abs[ABS_MAX + 1];
+	__u8 absmap[ABS_CNT];
+	__u8 abspam[ABS_CNT];
+	__s16 abs[ABS_CNT];
 };
 
 struct joydev_client {
@@ -826,7 +826,7 @@
 	joydev->handle.handler = handler;
 	joydev->handle.private = joydev;
 
-	for (i = 0; i < ABS_MAX + 1; i++)
+	for (i = 0; i < ABS_CNT; i++)
 		if (test_bit(i, dev->absbit)) {
 			joydev->absmap[i] = joydev->nabs;
 			joydev->abspam[joydev->nabs] = i;
diff --git a/drivers/input/misc/Kconfig b/drivers/input/misc/Kconfig
index 48cdabe..c44b9ea 100644
--- a/drivers/input/misc/Kconfig
+++ b/drivers/input/misc/Kconfig
@@ -80,6 +80,16 @@
 	tristate "M68k Beeper support"
 	depends on M68K
 
+config INPUT_MAX8925_ONKEY
+	tristate "MAX8925 ONKEY support"
+	depends on MFD_MAX8925
+	help
+	  Support the ONKEY of MAX8925 PMICs as an input device
+	  reporting power button status.
+
+	  To compile this driver as a module, choose M here: the module
+	  will be called max8925_onkey.
+
 config INPUT_APANEL
 	tristate "Fujitsu Lifebook Application Panel buttons"
 	depends on X86 && I2C && LEDS_CLASS
diff --git a/drivers/input/misc/Makefile b/drivers/input/misc/Makefile
index f9f5770..71fe57d 100644
--- a/drivers/input/misc/Makefile
+++ b/drivers/input/misc/Makefile
@@ -20,6 +20,7 @@
 obj-$(CONFIG_INPUT_IXP4XX_BEEPER)	+= ixp4xx-beeper.o
 obj-$(CONFIG_INPUT_KEYSPAN_REMOTE)	+= keyspan_remote.o
 obj-$(CONFIG_INPUT_M68K_BEEP)		+= m68kspkr.o
+obj-$(CONFIG_INPUT_MAX8925_ONKEY)	+= max8925_onkey.o
 obj-$(CONFIG_INPUT_PCAP)		+= pcap_keys.o
 obj-$(CONFIG_INPUT_PCF50633_PMU)	+= pcf50633-input.o
 obj-$(CONFIG_INPUT_PCF8574)		+= pcf8574_keypad.o
diff --git a/drivers/input/misc/max8925_onkey.c b/drivers/input/misc/max8925_onkey.c
new file mode 100644
index 0000000..80af446
--- /dev/null
+++ b/drivers/input/misc/max8925_onkey.c
@@ -0,0 +1,148 @@
+/*
+ * max8925_onkey.c - MAX8925 ONKEY driver
+ *
+ * Copyright (C) 2009 Marvell International Ltd.
+ *      Haojian Zhuang <haojian.zhuang@marvell.com>
+ *
+ * This file is subject to the terms and conditions of the GNU General
+ * Public License. See the file "COPYING" in the main directory of this
+ * archive for more details.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/i2c.h>
+#include <linux/input.h>
+#include <linux/interrupt.h>
+#include <linux/mfd/max8925.h>
+#include <linux/slab.h>
+
+#define HARDRESET_EN		(1 << 7)
+#define PWREN_EN		(1 << 7)
+
+struct max8925_onkey_info {
+	struct input_dev	*idev;
+	struct i2c_client	*i2c;
+	int			irq;
+};
+
+/*
+ * MAX8925 gives us an interrupt when ONKEY is held for 3 seconds.
+ * max8925_set_bits() accesses the I2C bus and may sleep, so do the
+ * work in a threaded IRQ handler.
+ */
+static irqreturn_t max8925_onkey_handler(int irq, void *data)
+{
+	struct max8925_onkey_info *info = data;
+
+	input_report_key(info->idev, KEY_POWER, 1);
+	input_sync(info->idev);
+
+	/* Enable hard-reset to halt if the system isn't shut down in time */
+	max8925_set_bits(info->i2c, MAX8925_SYSENSEL,
+			 HARDRESET_EN, HARDRESET_EN);
+
+	return IRQ_HANDLED;
+}
+
+static int __devinit max8925_onkey_probe(struct platform_device *pdev)
+{
+	struct max8925_chip *chip = dev_get_drvdata(pdev->dev.parent);
+	struct max8925_onkey_info *info;
+	int error;
+
+	info = kzalloc(sizeof(struct max8925_onkey_info), GFP_KERNEL);
+	if (!info)
+		return -ENOMEM;
+
+	info->i2c = chip->i2c;
+	info->irq = chip->irq_base + MAX8925_IRQ_GPM_SW_3SEC;
+
+	info->idev = input_allocate_device();
+	if (!info->idev) {
+		dev_err(chip->dev, "Failed to allocate input dev\n");
+		error = -ENOMEM;
+		goto out_input;
+	}
+
+	info->idev->name = "max8925_on";
+	info->idev->phys = "max8925_on/input0";
+	info->idev->id.bustype = BUS_I2C;
+	info->idev->dev.parent = &pdev->dev;
+	info->idev->evbit[0] = BIT_MASK(EV_KEY);
+	info->idev->keybit[BIT_WORD(KEY_POWER)] = BIT_MASK(KEY_POWER);
+
+	error = request_threaded_irq(info->irq, NULL, max8925_onkey_handler,
+				     IRQF_ONESHOT, "onkey", info);
+	if (error < 0) {
+		dev_err(chip->dev, "Failed to request IRQ: #%d: %d\n",
+			info->irq, error);
+		goto out_irq;
+	}
+
+	error = input_register_device(info->idev);
+	if (error) {
+		dev_err(chip->dev, "Can't register input device: %d\n", error);
+		goto out;
+	}
+
+	platform_set_drvdata(pdev, info);
+
+	return 0;
+
+out:
+	free_irq(info->irq, info);
+out_irq:
+	input_free_device(info->idev);
+out_input:
+	kfree(info);
+	return error;
+}
+
+static int __devexit max8925_onkey_remove(struct platform_device *pdev)
+{
+	struct max8925_onkey_info *info = platform_get_drvdata(pdev);
+
+	free_irq(info->irq, info);
+	input_unregister_device(info->idev);
+	kfree(info);
+
+	platform_set_drvdata(pdev, NULL);
+
+	return 0;
+}
+
+static struct platform_driver max8925_onkey_driver = {
+	.driver		= {
+		.name	= "max8925-onkey",
+		.owner	= THIS_MODULE,
+	},
+	.probe		= max8925_onkey_probe,
+	.remove		= __devexit_p(max8925_onkey_remove),
+};
+
+static int __init max8925_onkey_init(void)
+{
+	return platform_driver_register(&max8925_onkey_driver);
+}
+module_init(max8925_onkey_init);
+
+static void __exit max8925_onkey_exit(void)
+{
+	platform_driver_unregister(&max8925_onkey_driver);
+}
+module_exit(max8925_onkey_exit);
+
+MODULE_DESCRIPTION("Maxim MAX8925 ONKEY driver");
+MODULE_AUTHOR("Haojian Zhuang <haojian.zhuang@marvell.com>");
+MODULE_LICENSE("GPL");
diff --git a/drivers/input/misc/twl4030-vibra.c b/drivers/input/misc/twl4030-vibra.c
index fee9eac..4f9b2af 100644
--- a/drivers/input/misc/twl4030-vibra.c
+++ b/drivers/input/misc/twl4030-vibra.c
@@ -90,8 +90,8 @@
 	twl_i2c_write_u8(TWL4030_MODULE_AUDIO_VOICE,
 			 (reg & ~TWL4030_VIBRA_EN), TWL4030_REG_VIBRA_CTL);
 
-	twl4030_codec_disable_resource(TWL4030_CODEC_RES_POWER);
 	twl4030_codec_disable_resource(TWL4030_CODEC_RES_APLL);
+	twl4030_codec_disable_resource(TWL4030_CODEC_RES_POWER);
 
 	info->enabled = false;
 }
diff --git a/drivers/input/misc/uinput.c b/drivers/input/misc/uinput.c
index 1477466..b71eb55 100644
--- a/drivers/input/misc/uinput.c
+++ b/drivers/input/misc/uinput.c
@@ -300,7 +300,7 @@
 	unsigned int cnt;
 	int retval = 0;
 
-	for (cnt = 0; cnt < ABS_MAX + 1; cnt++) {
+	for (cnt = 0; cnt < ABS_CNT; cnt++) {
 		if (!test_bit(cnt, dev->absbit))
 			continue;
 
@@ -387,7 +387,7 @@
 	dev->id.product	= user_dev->id.product;
 	dev->id.version	= user_dev->id.version;
 
-	size = sizeof(int) * (ABS_MAX + 1);
+	size = sizeof(int) * ABS_CNT;
 	memcpy(dev->absmax, user_dev->absmax, size);
 	memcpy(dev->absmin, user_dev->absmin, size);
 	memcpy(dev->absfuzz, user_dev->absfuzz, size);
diff --git a/drivers/input/touchscreen/ads7846.c b/drivers/input/touchscreen/ads7846.c
index 532279c..634f6f6 100644
--- a/drivers/input/touchscreen/ads7846.c
+++ b/drivers/input/touchscreen/ads7846.c
@@ -1163,8 +1163,8 @@
 
 	ts->reg = regulator_get(&spi->dev, "vcc");
 	if (IS_ERR(ts->reg)) {
-		dev_err(&spi->dev, "unable to get regulator: %ld\n",
-			PTR_ERR(ts->reg));
+		err = PTR_ERR(ts->reg);
+		dev_err(&spi->dev, "unable to get regulator: %d\n", err);
 		goto err_free_gpio;
 	}
 
diff --git a/drivers/input/touchscreen/s3c2410_ts.c b/drivers/input/touchscreen/s3c2410_ts.c
index e0b7c83..ac5d0f9 100644
--- a/drivers/input/touchscreen/s3c2410_ts.c
+++ b/drivers/input/touchscreen/s3c2410_ts.c
@@ -413,6 +413,8 @@
 #endif
 
 static struct platform_device_id s3cts_driver_ids[] = {
+	{ "s3c2410-ts", 0 },
+	{ "s3c2440-ts", 0 },
 	{ "s3c64xx-ts", FEAT_PEN_IRQ },
 	{ }
 };
diff --git a/drivers/input/touchscreen/usbtouchscreen.c b/drivers/input/touchscreen/usbtouchscreen.c
index 29a8bbf..567d572 100644
--- a/drivers/input/touchscreen/usbtouchscreen.c
+++ b/drivers/input/touchscreen/usbtouchscreen.c
@@ -857,6 +857,11 @@
 	if ((pkt[0] & 0xe0) != 0xe0)
 		return 0;
 
+	if (be16_to_cpu(packet->data_len) > 0xff)
+		packet->data_len = cpu_to_be16(be16_to_cpu(packet->data_len) - 0x100);
+	if (be16_to_cpu(packet->x_len) > 0xff)
+		packet->x_len = cpu_to_be16(be16_to_cpu(packet->x_len) - 0x80);
+
 	/* send ACK */
 	ret = usb_submit_urb(priv->ack, GFP_ATOMIC);
 
@@ -1112,7 +1117,7 @@
 
 #ifdef CONFIG_TOUCHSCREEN_USB_NEXIO
 	[DEVTYPE_NEXIO] = {
-		.rept_size	= 128,
+		.rept_size	= 1024,
 		.irq_always	= true,
 		.read_data	= nexio_read_data,
 		.init		= nexio_init,
diff --git a/drivers/isdn/mISDN/timerdev.c b/drivers/isdn/mISDN/timerdev.c
index c3243c9..81048b8 100644
--- a/drivers/isdn/mISDN/timerdev.c
+++ b/drivers/isdn/mISDN/timerdev.c
@@ -98,8 +98,6 @@
 	if (*debug & DEBUG_TIMER)
 		printk(KERN_DEBUG "%s(%p, %p, %d, %p)\n", __func__,
 			filep, buf, (int)count, off);
-	if (*off != filep->f_pos)
-		return -ESPIPE;
 
 	if (list_empty(&dev->expired) && (dev->work == 0)) {
 		if (filep->f_flags & O_NONBLOCK)
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
index 9ea17d6..d2c0f94 100644
--- a/drivers/md/raid5.c
+++ b/drivers/md/raid5.c
@@ -4645,7 +4645,7 @@
 			kfree(percpu->scribble);
 			pr_err("%s: failed memory allocation for cpu%ld\n",
 			       __func__, cpu);
-			return NOTIFY_BAD;
+			return notifier_from_errno(-ENOMEM);
 		}
 		break;
 	case CPU_DEAD:
diff --git a/drivers/message/i2o/i2o_config.c b/drivers/message/i2o/i2o_config.c
index d33693c..c4b117f 100644
--- a/drivers/message/i2o/i2o_config.c
+++ b/drivers/message/i2o/i2o_config.c
@@ -186,14 +186,9 @@
 	if (!dev)
 		return -ENXIO;
 
-	ops = kmalloc(kcmd.oplen, GFP_KERNEL);
-	if (!ops)
-		return -ENOMEM;
-
-	if (copy_from_user(ops, kcmd.opbuf, kcmd.oplen)) {
-		kfree(ops);
-		return -EFAULT;
-	}
+	ops = memdup_user(kcmd.opbuf, kcmd.oplen);
+	if (IS_ERR(ops))
+		return PTR_ERR(ops);
 
 	/*
 	 * It's possible to have a _very_ large table
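
memdup_user() bundles the kmalloc() plus copy_from_user() pair and
returns an ERR_PTR on failure, so open-coded copies like the one removed
above collapse to a few lines. A sketch of the consuming pattern, with
hypothetical names:

	static int handle_user_buf(void __user *ubuf, size_t len)
	{
		void *buf;

		buf = memdup_user(ubuf, len);
		if (IS_ERR(buf))
			return PTR_ERR(buf);	/* -ENOMEM or -EFAULT */

		/* ... operate on buf ... */

		kfree(buf);
		return 0;
	}
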
diff --git a/drivers/misc/lkdtm.c b/drivers/misc/lkdtm.c
index 31a9911..5bfb2a2 100644
--- a/drivers/misc/lkdtm.c
+++ b/drivers/misc/lkdtm.c
@@ -75,6 +75,9 @@
 	UNALIGNED_LOAD_STORE_WRITE,
 	OVERWRITE_ALLOCATION,
 	WRITE_AFTER_FREE,
+	SOFTLOCKUP,
+	HARDLOCKUP,
+	HUNG_TASK,
 };
 
 static char* cp_name[] = {
@@ -99,6 +102,9 @@
 	"UNALIGNED_LOAD_STORE_WRITE",
 	"OVERWRITE_ALLOCATION",
 	"WRITE_AFTER_FREE",
+	"SOFTLOCKUP",
+	"HARDLOCKUP",
+	"HUNG_TASK",
 };
 
 static struct jprobe lkdtm;
@@ -320,6 +326,20 @@
 		memset(data, 0x78, len);
 		break;
 	}
+	case SOFTLOCKUP:
+		preempt_disable();
+		for (;;)
+			cpu_relax();
+		break;
+	case HARDLOCKUP:
+		local_irq_disable();
+		for (;;)
+			cpu_relax();
+		break;
+	case HUNG_TASK:
+		set_current_state(TASK_UNINTERRUPTIBLE);
+		schedule();
+		break;
 	case NONE:
 	default:
 		break;
diff --git a/drivers/mmc/core/core.c b/drivers/mmc/core/core.c
index 3168ebd..569e94d 100644
--- a/drivers/mmc/core/core.c
+++ b/drivers/mmc/core/core.c
@@ -1252,9 +1252,8 @@
 /**
  *	mmc_suspend_host - suspend a host
  *	@host: mmc host
- *	@state: suspend mode (PM_SUSPEND_xxx)
  */
-int mmc_suspend_host(struct mmc_host *host, pm_message_t state)
+int mmc_suspend_host(struct mmc_host *host)
 {
 	int err = 0;
 
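
The many host-driver hunks that follow are the mechanical fallout of
this signature change; the state argument was never used by the MMC
core. In a hypothetical driver suspend hook:

	/* before */
	ret = mmc_suspend_host(host->mmc, state);

	/* after: the unused pm_message_t argument is gone */
	ret = mmc_suspend_host(host->mmc);
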
diff --git a/drivers/mmc/core/sd_ops.c b/drivers/mmc/core/sd_ops.c
index 0d96080..63772e7 100644
--- a/drivers/mmc/core/sd_ops.c
+++ b/drivers/mmc/core/sd_ops.c
@@ -79,8 +79,6 @@
 	 * we cannot use the retries field in mmc_command.
 	 */
 	for (i = 0;i <= retries;i++) {
-		memset(&mrq, 0, sizeof(struct mmc_request));
-
 		err = mmc_app_cmd(host, card);
 		if (err) {
 			/* no point in retrying; no APP commands allowed */
diff --git a/drivers/mmc/core/sdio_io.c b/drivers/mmc/core/sdio_io.c
index ff27c8c..0f687cd 100644
--- a/drivers/mmc/core/sdio_io.c
+++ b/drivers/mmc/core/sdio_io.c
@@ -406,6 +406,36 @@
 EXPORT_SYMBOL_GPL(sdio_writeb);
 
 /**
+ *	sdio_writeb_readb - write and read a byte from SDIO function
+ *	@func: SDIO function to access
+ *	@write_byte: byte to write
+ *	@addr: address to write to
+ *	@err_ret: optional status value from transfer
+ *
+ *	Performs a RAW (Read after Write) operation as defined by the SDIO
+ *	spec: a single byte is written to the address space of a given SDIO
+ *	function and the response is read back from the same address, both
+ *	in a single request.
+ *	If there is a problem with the operation, 0xff is returned and
+ *	@err_ret will contain the error code.
+ */
+u8 sdio_writeb_readb(struct sdio_func *func, u8 write_byte,
+	unsigned int addr, int *err_ret)
+{
+	int ret;
+	u8 val;
+
+	ret = mmc_io_rw_direct(func->card, 1, func->num, addr,
+			write_byte, &val);
+	if (err_ret)
+		*err_ret = ret;
+	if (ret)
+		val = 0xff;
+
+	return val;
+}
+EXPORT_SYMBOL_GPL(sdio_writeb_readb);
+
+/**
  *	sdio_memcpy_fromio - read a chunk of memory from a SDIO function
  *	@func: SDIO function to access
  *	@dst: buffer to store the data
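
A hypothetical caller of the new sdio_writeb_readb() helper (the
function name and the echo check are ours); as with the other
single-byte accessors, the host must be claimed around the call:

	static int func_reg_echo_test(struct sdio_func *func, unsigned int reg)
	{
		int err;
		u8 echoed;

		sdio_claim_host(func);
		echoed = sdio_writeb_readb(func, 0xa5, reg, &err);
		sdio_release_host(func);

		if (err)
			return err;

		return echoed == 0xa5 ? 0 : -EIO;
	}
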
diff --git a/drivers/mmc/host/Kconfig b/drivers/mmc/host/Kconfig
index 2e13b94..e171e77 100644
--- a/drivers/mmc/host/Kconfig
+++ b/drivers/mmc/host/Kconfig
@@ -136,6 +136,18 @@
 
 	  If unsure, say N.
 
+config MMC_SDHCI_SPEAR
+	tristate "SDHCI support on ST SPEAr platform"
+	depends on MMC_SDHCI && PLAT_SPEAR
+	help
+	  This selects the Secure Digital Host Controller Interface (SDHCI),
+	  often referred to as the HSMMC block, in some of the ST SPEAr range
+	  of SoCs.
+
+	  If you have a controller with this interface, say Y or M here.
+
+	  If unsure, say N.
+
 config MMC_SDHCI_S3C_DMA
 	bool "DMA support on S3C SDHCI"
 	depends on MMC_SDHCI_S3C && EXPERIMENTAL
@@ -412,3 +424,11 @@
 	depends on SDH_BFIN
 	help
 	  If you say yes here SD-Cards may work on the EZkit.
+
+config MMC_SH_MMCIF
+	tristate "SuperH Internal MMCIF support"
+	depends on MMC_BLOCK && (SUPERH || ARCH_SHMOBILE)
+	help
+	  This selects the MMC Host Interface controller (MMCIF).
+
+	  This driver supports MMCIF in sh7724/sh7757/sh7372.
diff --git a/drivers/mmc/host/Makefile b/drivers/mmc/host/Makefile
index f480397..e30c2ee 100644
--- a/drivers/mmc/host/Makefile
+++ b/drivers/mmc/host/Makefile
@@ -14,6 +14,7 @@
 obj-$(CONFIG_MMC_SDHCI_PCI)	+= sdhci-pci.o
 obj-$(CONFIG_MMC_SDHCI_PLTFM)	+= sdhci-pltfm.o
 obj-$(CONFIG_MMC_SDHCI_S3C)	+= sdhci-s3c.o
+obj-$(CONFIG_MMC_SDHCI_SPEAR)	+= sdhci-spear.o
 obj-$(CONFIG_MMC_WBSD)		+= wbsd.o
 obj-$(CONFIG_MMC_AU1X)		+= au1xmmc.o
 obj-$(CONFIG_MMC_OMAP)		+= omap.o
@@ -34,6 +35,7 @@
 obj-$(CONFIG_MMC_CB710)	+= cb710-mmc.o
 obj-$(CONFIG_MMC_VIA_SDMMC)	+= via-sdmmc.o
 obj-$(CONFIG_SDH_BFIN)		+= bfin_sdh.o
+obj-$(CONFIG_MMC_SH_MMCIF)	+= sh_mmcif.o
 
 obj-$(CONFIG_MMC_SDHCI_OF)	+= sdhci-of.o
 sdhci-of-y				:= sdhci-of-core.o
diff --git a/drivers/mmc/host/at91_mci.c b/drivers/mmc/host/at91_mci.c
index 336d9f5..5f3a599 100644
--- a/drivers/mmc/host/at91_mci.c
+++ b/drivers/mmc/host/at91_mci.c
@@ -1157,7 +1157,7 @@
 		enable_irq_wake(host->board->det_pin);
 
 	if (mmc)
-		ret = mmc_suspend_host(mmc, state);
+		ret = mmc_suspend_host(mmc);
 
 	return ret;
 }
diff --git a/drivers/mmc/host/atmel-mci.c b/drivers/mmc/host/atmel-mci.c
index df0e8a8..95ef864 100644
--- a/drivers/mmc/host/atmel-mci.c
+++ b/drivers/mmc/host/atmel-mci.c
@@ -173,6 +173,7 @@
  * @mmc: The mmc_host representing this slot.
  * @host: The MMC controller this slot is using.
  * @sdc_reg: Value of SDCR to be written before using this slot.
+ * @sdio_irq: SDIO irq mask for this slot.
  * @mrq: mmc_request currently being processed or waiting to be
  *	processed, or NULL when the slot is idle.
  * @queue_node: List node for placing this node in the @queue list of
@@ -191,6 +192,7 @@
 	struct atmel_mci	*host;
 
 	u32			sdc_reg;
+	u32			sdio_irq;
 
 	struct mmc_request	*mrq;
 	struct list_head	queue_node;
@@ -792,7 +794,7 @@
 	mci_writel(host, SDCR, slot->sdc_reg);
 
 	iflags = mci_readl(host, IMR);
-	if (iflags)
+	if (iflags & ~(MCI_SDIOIRQA | MCI_SDIOIRQB))
 		dev_warn(&slot->mmc->class_dev, "WARNING: IMR=0x%08x\n",
 				iflags);
 
@@ -952,10 +954,21 @@
 		if (mci_has_rwproof())
 			host->mode_reg |= (MCI_MR_WRPROOF | MCI_MR_RDPROOF);
 
-		if (list_empty(&host->queue))
+		if (atmci_is_mci2()) {
+			/* set up High Speed mode according to the card's capability */
+			if (ios->timing == MMC_TIMING_SD_HS)
+				host->cfg_reg |= MCI_CFG_HSMODE;
+			else
+				host->cfg_reg &= ~MCI_CFG_HSMODE;
+		}
+
+		if (list_empty(&host->queue)) {
 			mci_writel(host, MR, host->mode_reg);
-		else
+			if (atmci_is_mci2())
+				mci_writel(host, CFG, host->cfg_reg);
+		} else {
 			host->need_clock_update = true;
+		}
 
 		spin_unlock_bh(&host->lock);
 	} else {
@@ -1030,11 +1043,23 @@
 	return present;
 }
 
+static void atmci_enable_sdio_irq(struct mmc_host *mmc, int enable)
+{
+	struct atmel_mci_slot	*slot = mmc_priv(mmc);
+	struct atmel_mci	*host = slot->host;
+
+	if (enable)
+		mci_writel(host, IER, slot->sdio_irq);
+	else
+		mci_writel(host, IDR, slot->sdio_irq);
+}
+
 static const struct mmc_host_ops atmci_ops = {
 	.request	= atmci_request,
 	.set_ios	= atmci_set_ios,
 	.get_ro		= atmci_get_ro,
 	.get_cd		= atmci_get_cd,
+	.enable_sdio_irq = atmci_enable_sdio_irq,
 };
 
 /* Called with host->lock held */
@@ -1052,8 +1077,11 @@
 	 * necessary if set_ios() is called when a different slot is
 	 * busy transfering data.
 	 */
-	if (host->need_clock_update)
+	if (host->need_clock_update) {
 		mci_writel(host, MR, host->mode_reg);
+		if (atmci_is_mci2())
+			mci_writel(host, CFG, host->cfg_reg);
+	}
 
 	host->cur_slot->mrq = NULL;
 	host->mrq = NULL;
@@ -1483,6 +1511,19 @@
 	tasklet_schedule(&host->tasklet);
 }
 
+static void atmci_sdio_interrupt(struct atmel_mci *host, u32 status)
+{
+	int	i;
+
+	for (i = 0; i < ATMEL_MCI_MAX_NR_SLOTS; i++) {
+		struct atmel_mci_slot *slot = host->slot[i];
+		if (slot && (status & slot->sdio_irq)) {
+			mmc_signal_sdio_irq(slot->mmc);
+		}
+	}
+}
+
 static irqreturn_t atmci_interrupt(int irq, void *dev_id)
 {
 	struct atmel_mci	*host = dev_id;
@@ -1522,6 +1563,10 @@
 
 		if (pending & MCI_CMDRDY)
 			atmci_cmd_interrupt(host, status);
+
+		if (pending & (MCI_SDIOIRQA | MCI_SDIOIRQB))
+			atmci_sdio_interrupt(host, status);
+
 	} while (pass_count++ < 5);
 
 	return pass_count ? IRQ_HANDLED : IRQ_NONE;
@@ -1544,7 +1589,7 @@
 
 static int __init atmci_init_slot(struct atmel_mci *host,
 		struct mci_slot_pdata *slot_data, unsigned int id,
-		u32 sdc_reg)
+		u32 sdc_reg, u32 sdio_irq)
 {
 	struct mmc_host			*mmc;
 	struct atmel_mci_slot		*slot;
@@ -1560,11 +1605,16 @@
 	slot->wp_pin = slot_data->wp_pin;
 	slot->detect_is_active_high = slot_data->detect_is_active_high;
 	slot->sdc_reg = sdc_reg;
+	slot->sdio_irq = sdio_irq;
 
 	mmc->ops = &atmci_ops;
 	mmc->f_min = DIV_ROUND_UP(host->bus_hz, 512);
 	mmc->f_max = host->bus_hz / 2;
 	mmc->ocr_avail	= MMC_VDD_32_33 | MMC_VDD_33_34;
+	if (sdio_irq)
+		mmc->caps |= MMC_CAP_SDIO_IRQ;
+	if (atmci_is_mci2())
+		mmc->caps |= MMC_CAP_SD_HIGHSPEED;
 	if (slot_data->bus_width >= 4)
 		mmc->caps |= MMC_CAP_4_BIT_DATA;
 
@@ -1753,13 +1803,13 @@
 	ret = -ENODEV;
 	if (pdata->slot[0].bus_width) {
 		ret = atmci_init_slot(host, &pdata->slot[0],
-				0, MCI_SDCSEL_SLOT_A);
+				0, MCI_SDCSEL_SLOT_A, MCI_SDIOIRQA);
 		if (!ret)
 			nr_slots++;
 	}
 	if (pdata->slot[1].bus_width) {
 		ret = atmci_init_slot(host, &pdata->slot[1],
-				1, MCI_SDCSEL_SLOT_B);
+				1, MCI_SDCSEL_SLOT_B, MCI_SDIOIRQB);
 		if (!ret)
 			nr_slots++;
 	}
diff --git a/drivers/mmc/host/au1xmmc.c b/drivers/mmc/host/au1xmmc.c
index f583444..c8da5d3 100644
--- a/drivers/mmc/host/au1xmmc.c
+++ b/drivers/mmc/host/au1xmmc.c
@@ -1142,7 +1142,7 @@
 	struct au1xmmc_host *host = platform_get_drvdata(pdev);
 	int ret;
 
-	ret = mmc_suspend_host(host->mmc, state);
+	ret = mmc_suspend_host(host->mmc);
 	if (ret)
 		return ret;
 
diff --git a/drivers/mmc/host/bfin_sdh.c b/drivers/mmc/host/bfin_sdh.c
index 6919e84..4b0e677 100644
--- a/drivers/mmc/host/bfin_sdh.c
+++ b/drivers/mmc/host/bfin_sdh.c
@@ -576,7 +576,7 @@
 	int ret = 0;
 
 	if (mmc)
-		ret = mmc_suspend_host(mmc, state);
+		ret = mmc_suspend_host(mmc);
 
 	bfin_write_SDH_PWR_CTL(bfin_read_SDH_PWR_CTL() & ~PWR_ON);
 	peripheral_free_list(drv_data->pin_req);
diff --git a/drivers/mmc/host/cb710-mmc.c b/drivers/mmc/host/cb710-mmc.c
index 92a324f7..ca3bdc8 100644
--- a/drivers/mmc/host/cb710-mmc.c
+++ b/drivers/mmc/host/cb710-mmc.c
@@ -675,7 +675,7 @@
 	struct mmc_host *mmc = cb710_slot_to_mmc(slot);
 	int err;
 
-	err = mmc_suspend_host(mmc, state);
+	err = mmc_suspend_host(mmc);
 	if (err)
 		return err;
 
diff --git a/drivers/mmc/host/davinci_mmc.c b/drivers/mmc/host/davinci_mmc.c
index 3bd0ba2..33d9f1b 100644
--- a/drivers/mmc/host/davinci_mmc.c
+++ b/drivers/mmc/host/davinci_mmc.c
@@ -137,15 +137,15 @@
 
 /*
  * One scatterlist dma "segment" is at most MAX_CCNT rw_threshold units,
- * and we handle up to NR_SG segments.  MMC_BLOCK_BOUNCE kicks in only
+ * and we handle up to MAX_NR_SG segments.  MMC_BLOCK_BOUNCE kicks in only
  * for drivers with max_hw_segs == 1, making the segments bigger (64KB)
- * than the page or two that's otherwise typical.  NR_SG == 16 gives at
- * least the same throughput boost, using EDMA transfer linkage instead
- * of spending CPU time copying pages.
+ * than the page or two that's otherwise typical. nr_sg (passed from
+ * platform data) == 16 gives at least the same throughput boost, using
+ * EDMA transfer linkage instead of spending CPU time copying pages.
  */
 #define MAX_CCNT	((1 << 16) - 1)
 
-#define NR_SG		16
+#define MAX_NR_SG	16
 
 static unsigned rw_threshold = 32;
 module_param(rw_threshold, uint, S_IRUGO);
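
A hypothetical board-file fragment showing how a platform caps the
segment count through the new nr_sg field of davinci_mmc_config (the
other field values are illustrative only):

	static struct davinci_mmc_config board_mmc_config = {
		.wires		= 4,
		.max_freq	= 50000000,
		.nr_sg		= 16,	/* clamped to MAX_NR_SG by the driver */
	};
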
@@ -171,6 +171,7 @@
 #define DAVINCI_MMC_DATADIR_READ	1
 #define DAVINCI_MMC_DATADIR_WRITE	2
 	unsigned char data_dir;
+	unsigned char suspended;
 
 	/* buffer is used during PIO of one scatterlist segment, and
 	 * is updated along with buffer_bytes_left.  bytes_left applies
@@ -192,7 +193,7 @@
 	struct edmacc_param	tx_template;
 	struct edmacc_param	rx_template;
 	unsigned		n_link;
-	u32			links[NR_SG - 1];
+	u32			links[MAX_NR_SG - 1];
 
 	/* For PIO we walk scatterlists one segment at a time. */
 	unsigned int		sg_len;
@@ -202,6 +203,8 @@
 	u8 version;
 	/* for ns in one cycle calculation */
 	unsigned ns_in_one_cycle;
+	/* Number of sg segments */
+	u8 nr_sg;
 #ifdef CONFIG_CPU_FREQ
 	struct notifier_block	freq_transition;
 #endif
@@ -568,6 +571,7 @@
 
 static int __init davinci_acquire_dma_channels(struct mmc_davinci_host *host)
 {
+	u32 link_size;
 	int r, i;
 
 	/* Acquire master DMA write channel */
@@ -593,7 +597,8 @@
 	/* Allocate parameter RAM slots, which will later be bound to a
 	 * channel as needed to handle a scatterlist.
 	 */
-	for (i = 0; i < ARRAY_SIZE(host->links); i++) {
+	link_size = min_t(unsigned, host->nr_sg, ARRAY_SIZE(host->links));
+	for (i = 0; i < link_size; i++) {
 		r = edma_alloc_slot(EDMA_CTLR(host->txdma), EDMA_SLOT_ANY);
 		if (r < 0) {
 			dev_dbg(mmc_dev(host->mmc), "dma PaRAM alloc --> %d\n",
@@ -905,19 +910,26 @@
 	}
 }
 
-static void
-davinci_abort_data(struct mmc_davinci_host *host, struct mmc_data *data)
+static inline void mmc_davinci_reset_ctrl(struct mmc_davinci_host *host,
+								int val)
 {
 	u32 temp;
 
-	/* reset command and data state machines */
 	temp = readl(host->base + DAVINCI_MMCCTL);
-	writel(temp | MMCCTL_CMDRST | MMCCTL_DATRST,
-		host->base + DAVINCI_MMCCTL);
+	if (val)	/* reset */
+		temp |= MMCCTL_CMDRST | MMCCTL_DATRST;
+	else		/* enable */
+		temp &= ~(MMCCTL_CMDRST | MMCCTL_DATRST);
 
-	temp &= ~(MMCCTL_CMDRST | MMCCTL_DATRST);
-	udelay(10);
 	writel(temp, host->base + DAVINCI_MMCCTL);
+	udelay(10);
+}
+
+static void
+davinci_abort_data(struct mmc_davinci_host *host, struct mmc_data *data)
+{
+	mmc_davinci_reset_ctrl(host, 1);
+	mmc_davinci_reset_ctrl(host, 0);
 }
 
 static irqreturn_t mmc_davinci_irq(int irq, void *dev_id)
@@ -1121,15 +1133,8 @@
 #endif
 static void __init init_mmcsd_host(struct mmc_davinci_host *host)
 {
-	/* DAT line portion is diabled and in reset state */
-	writel(readl(host->base + DAVINCI_MMCCTL) | MMCCTL_DATRST,
-		host->base + DAVINCI_MMCCTL);
 
-	/* CMD line portion is diabled and in reset state */
-	writel(readl(host->base + DAVINCI_MMCCTL) | MMCCTL_CMDRST,
-		host->base + DAVINCI_MMCCTL);
-
-	udelay(10);
+	mmc_davinci_reset_ctrl(host, 1);
 
 	writel(0, host->base + DAVINCI_MMCCLK);
 	writel(MMCCLK_CLKEN, host->base + DAVINCI_MMCCLK);
@@ -1137,12 +1142,7 @@
 	writel(0x1FFF, host->base + DAVINCI_MMCTOR);
 	writel(0xFFFF, host->base + DAVINCI_MMCTOD);
 
-	writel(readl(host->base + DAVINCI_MMCCTL) & ~MMCCTL_DATRST,
-		host->base + DAVINCI_MMCCTL);
-	writel(readl(host->base + DAVINCI_MMCCTL) & ~MMCCTL_CMDRST,
-		host->base + DAVINCI_MMCCTL);
-
-	udelay(10);
+	mmc_davinci_reset_ctrl(host, 0);
 }
 
 static int __init davinci_mmcsd_probe(struct platform_device *pdev)
@@ -1202,6 +1202,12 @@
 
 	init_mmcsd_host(host);
 
+	if (pdata->nr_sg)
+		host->nr_sg = pdata->nr_sg - 1;
+
+	if (host->nr_sg > MAX_NR_SG || !host->nr_sg)
+		host->nr_sg = MAX_NR_SG;
+
 	host->use_dma = use_dma;
 	host->irq = irq;
 
@@ -1327,32 +1333,65 @@
 }
 
 #ifdef CONFIG_PM
-static int davinci_mmcsd_suspend(struct platform_device *pdev, pm_message_t msg)
+static int davinci_mmcsd_suspend(struct device *dev)
 {
+	struct platform_device *pdev = to_platform_device(dev);
 	struct mmc_davinci_host *host = platform_get_drvdata(pdev);
+	int ret;
 
-	return mmc_suspend_host(host->mmc, msg);
+	mmc_host_enable(host->mmc);
+	ret = mmc_suspend_host(host->mmc);
+	if (!ret) {
+		writel(0, host->base + DAVINCI_MMCIM);
+		mmc_davinci_reset_ctrl(host, 1);
+		mmc_host_disable(host->mmc);
+		clk_disable(host->clk);
+		host->suspended = 1;
+	} else {
+		host->suspended = 0;
+		mmc_host_disable(host->mmc);
+	}
+
+	return ret;
 }
 
-static int davinci_mmcsd_resume(struct platform_device *pdev)
+static int davinci_mmcsd_resume(struct device *dev)
 {
+	struct platform_device *pdev = to_platform_device(dev);
 	struct mmc_davinci_host *host = platform_get_drvdata(pdev);
+	int ret;
 
-	return mmc_resume_host(host->mmc);
+	if (!host->suspended)
+		return 0;
+
+	clk_enable(host->clk);
+	mmc_host_enable(host->mmc);
+
+	mmc_davinci_reset_ctrl(host, 0);
+	ret = mmc_resume_host(host->mmc);
+	if (!ret)
+		host->suspended = 0;
+
+	return ret;
 }
+
+static const struct dev_pm_ops davinci_mmcsd_pm = {
+	.suspend        = davinci_mmcsd_suspend,
+	.resume         = davinci_mmcsd_resume,
+};
+
+#define davinci_mmcsd_pm_ops (&davinci_mmcsd_pm)
 #else
-#define davinci_mmcsd_suspend	NULL
-#define davinci_mmcsd_resume	NULL
+#define davinci_mmcsd_pm_ops NULL
 #endif
 
 static struct platform_driver davinci_mmcsd_driver = {
 	.driver		= {
 		.name	= "davinci_mmc",
 		.owner	= THIS_MODULE,
+		.pm	= davinci_mmcsd_pm_ops,
 	},
 	.remove		= __exit_p(davinci_mmcsd_remove),
-	.suspend	= davinci_mmcsd_suspend,
-	.resume		= davinci_mmcsd_resume,
 };
 
 static int __init davinci_mmcsd_init(void)
diff --git a/drivers/mmc/host/imxmmc.c b/drivers/mmc/host/imxmmc.c
index bf98d7c..9a68ff4 100644
--- a/drivers/mmc/host/imxmmc.c
+++ b/drivers/mmc/host/imxmmc.c
@@ -1115,7 +1115,7 @@
 	int ret = 0;
 
 	if (mmc)
-		ret = mmc_suspend_host(mmc, state);
+		ret = mmc_suspend_host(mmc);
 
 	return ret;
 }
diff --git a/drivers/mmc/host/mmci.c b/drivers/mmc/host/mmci.c
index ff115d9..4917af9 100644
--- a/drivers/mmc/host/mmci.c
+++ b/drivers/mmc/host/mmci.c
@@ -824,7 +824,7 @@
 	if (mmc) {
 		struct mmci_host *host = mmc_priv(mmc);
 
-		ret = mmc_suspend_host(mmc, state);
+		ret = mmc_suspend_host(mmc);
 		if (ret == 0)
 			writel(0, host->base + MMCIMASK0);
 	}
diff --git a/drivers/mmc/host/msm_sdcc.c b/drivers/mmc/host/msm_sdcc.c
index 61f1d27..24e0945 100644
--- a/drivers/mmc/host/msm_sdcc.c
+++ b/drivers/mmc/host/msm_sdcc.c
@@ -1327,7 +1327,7 @@
 			disable_irq(host->stat_irq);
 
 		if (mmc->card && mmc->card->type != MMC_TYPE_SDIO)
-			rc = mmc_suspend_host(mmc, state);
+			rc = mmc_suspend_host(mmc);
 		if (!rc)
 			msmsdcc_writel(host, 0, MMCIMASK0);
 		if (host->clks_on)
diff --git a/drivers/mmc/host/mvsdio.c b/drivers/mmc/host/mvsdio.c
index 34e2348..366eefa 100644
--- a/drivers/mmc/host/mvsdio.c
+++ b/drivers/mmc/host/mvsdio.c
@@ -865,7 +865,7 @@
 	int ret = 0;
 
 	if (mmc)
-		ret = mmc_suspend_host(mmc, state);
+		ret = mmc_suspend_host(mmc);
 
 	return ret;
 }
diff --git a/drivers/mmc/host/mxcmmc.c b/drivers/mmc/host/mxcmmc.c
index ec18e3b..d9d4a72 100644
--- a/drivers/mmc/host/mxcmmc.c
+++ b/drivers/mmc/host/mxcmmc.c
@@ -932,7 +932,7 @@
 	int ret = 0;
 
 	if (mmc)
-		ret = mmc_suspend_host(mmc, state);
+		ret = mmc_suspend_host(mmc);
 
 	return ret;
 }
diff --git a/drivers/mmc/host/omap.c b/drivers/mmc/host/omap.c
index 84d2804..2b28168 100644
--- a/drivers/mmc/host/omap.c
+++ b/drivers/mmc/host/omap.c
@@ -39,30 +39,30 @@
 #include <plat/fpga.h>
 
 #define	OMAP_MMC_REG_CMD	0x00
-#define	OMAP_MMC_REG_ARGL	0x04
-#define	OMAP_MMC_REG_ARGH	0x08
-#define	OMAP_MMC_REG_CON	0x0c
-#define	OMAP_MMC_REG_STAT	0x10
-#define	OMAP_MMC_REG_IE		0x14
-#define	OMAP_MMC_REG_CTO	0x18
-#define	OMAP_MMC_REG_DTO	0x1c
-#define	OMAP_MMC_REG_DATA	0x20
-#define	OMAP_MMC_REG_BLEN	0x24
-#define	OMAP_MMC_REG_NBLK	0x28
-#define	OMAP_MMC_REG_BUF	0x2c
-#define OMAP_MMC_REG_SDIO	0x34
-#define	OMAP_MMC_REG_REV	0x3c
-#define	OMAP_MMC_REG_RSP0	0x40
-#define	OMAP_MMC_REG_RSP1	0x44
-#define	OMAP_MMC_REG_RSP2	0x48
-#define	OMAP_MMC_REG_RSP3	0x4c
-#define	OMAP_MMC_REG_RSP4	0x50
-#define	OMAP_MMC_REG_RSP5	0x54
-#define	OMAP_MMC_REG_RSP6	0x58
-#define	OMAP_MMC_REG_RSP7	0x5c
-#define	OMAP_MMC_REG_IOSR	0x60
-#define	OMAP_MMC_REG_SYSC	0x64
-#define	OMAP_MMC_REG_SYSS	0x68
+#define	OMAP_MMC_REG_ARGL	0x01
+#define	OMAP_MMC_REG_ARGH	0x02
+#define	OMAP_MMC_REG_CON	0x03
+#define	OMAP_MMC_REG_STAT	0x04
+#define	OMAP_MMC_REG_IE		0x05
+#define	OMAP_MMC_REG_CTO	0x06
+#define	OMAP_MMC_REG_DTO	0x07
+#define	OMAP_MMC_REG_DATA	0x08
+#define	OMAP_MMC_REG_BLEN	0x09
+#define	OMAP_MMC_REG_NBLK	0x0a
+#define	OMAP_MMC_REG_BUF	0x0b
+#define	OMAP_MMC_REG_SDIO	0x0d
+#define	OMAP_MMC_REG_REV	0x0f
+#define	OMAP_MMC_REG_RSP0	0x10
+#define	OMAP_MMC_REG_RSP1	0x11
+#define	OMAP_MMC_REG_RSP2	0x12
+#define	OMAP_MMC_REG_RSP3	0x13
+#define	OMAP_MMC_REG_RSP4	0x14
+#define	OMAP_MMC_REG_RSP5	0x15
+#define	OMAP_MMC_REG_RSP6	0x16
+#define	OMAP_MMC_REG_RSP7	0x17
+#define	OMAP_MMC_REG_IOSR	0x18
+#define	OMAP_MMC_REG_SYSC	0x19
+#define	OMAP_MMC_REG_SYSS	0x1a
 
 #define	OMAP_MMC_STAT_CARD_ERR		(1 << 14)
 #define	OMAP_MMC_STAT_CARD_IRQ		(1 << 13)
@@ -78,8 +78,9 @@
 #define	OMAP_MMC_STAT_CARD_BUSY		(1 <<  2)
 #define	OMAP_MMC_STAT_END_OF_CMD	(1 <<  0)
 
-#define OMAP_MMC_READ(host, reg)	__raw_readw((host)->virt_base + OMAP_MMC_REG_##reg)
-#define OMAP_MMC_WRITE(host, reg, val)	__raw_writew((val), (host)->virt_base + OMAP_MMC_REG_##reg)
+#define OMAP_MMC_REG(host, reg)		(OMAP_MMC_REG_##reg << (host)->reg_shift)
+#define OMAP_MMC_READ(host, reg)	__raw_readw((host)->virt_base + OMAP_MMC_REG(host, reg))
+#define OMAP_MMC_WRITE(host, reg, val)	__raw_writew((val), (host)->virt_base + OMAP_MMC_REG(host, reg))
 
 /*
  * Command types
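
Worked example of the new addressing scheme: the OMAP_MMC_REG_* values
are now register indices, and OMAP_MMC_REG() shifts them by reg_shift,
which is 1 on OMAP7xx (16-bit register spacing) and 2 elsewhere (32-bit
spacing). STAT, index 0x04, therefore resolves to the old byte offset:

	/* reg_shift = 2 (most OMAPs):  0x04 << 2 = 0x10 (the old offset) */
	/* reg_shift = 1 (OMAP7xx):     0x04 << 1 = 0x08 */
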
@@ -133,6 +134,7 @@
 	int			irq;
 	unsigned char		bus_mode;
 	unsigned char		hw_bus_mode;
+	unsigned int		reg_shift;
 
 	struct work_struct	cmd_abort_work;
 	unsigned		abort:1;
@@ -680,9 +682,9 @@
 	host->data->bytes_xfered += n;
 
 	if (write) {
-		__raw_writesw(host->virt_base + OMAP_MMC_REG_DATA, host->buffer, n);
+		__raw_writesw(host->virt_base + OMAP_MMC_REG(host, DATA), host->buffer, n);
 	} else {
-		__raw_readsw(host->virt_base + OMAP_MMC_REG_DATA, host->buffer, n);
+		__raw_readsw(host->virt_base + OMAP_MMC_REG(host, DATA), host->buffer, n);
 	}
 }
 
@@ -900,7 +902,7 @@
 	int dst_port = 0;
 	int sync_dev = 0;
 
-	data_addr = host->phys_base + OMAP_MMC_REG_DATA;
+	data_addr = host->phys_base + OMAP_MMC_REG(host, DATA);
 	frame = data->blksz;
 	count = sg_dma_len(sg);
 
@@ -1493,6 +1495,8 @@
 		}
 	}
 
+	host->reg_shift = (cpu_is_omap7xx() ? 1 : 2);
+
 	return 0;
 
 err_plat_cleanup:
@@ -1557,7 +1561,7 @@
 		struct mmc_omap_slot *slot;
 
 		slot = host->slots[i];
-		ret = mmc_suspend_host(slot->mmc, mesg);
+		ret = mmc_suspend_host(slot->mmc);
 		if (ret < 0) {
 			while (--i >= 0) {
 				slot = host->slots[i];
diff --git a/drivers/mmc/host/omap_hsmmc.c b/drivers/mmc/host/omap_hsmmc.c
index e9caf69..b032828 100644
--- a/drivers/mmc/host/omap_hsmmc.c
+++ b/drivers/mmc/host/omap_hsmmc.c
@@ -157,12 +157,10 @@
 	 */
 	struct	regulator	*vcc;
 	struct	regulator	*vcc_aux;
-	struct	semaphore	sem;
 	struct	work_struct	mmc_carddetect_work;
 	void	__iomem		*base;
 	resource_size_t		mapbase;
 	spinlock_t		irq_lock; /* Prevent races with irq handler */
-	unsigned long		flags;
 	unsigned int		id;
 	unsigned int		dma_len;
 	unsigned int		dma_sg_idx;
@@ -183,6 +181,7 @@
 	int			protect_card;
 	int			reqs_blocked;
 	int			use_reg;
+	int			req_in_progress;
 
 	struct	omap_mmc_platform_data	*pdata;
 };
@@ -524,6 +523,27 @@
 		dev_dbg(mmc_dev(host->mmc), "MMC Clock is not stoped\n");
 }
 
+static void omap_hsmmc_enable_irq(struct omap_hsmmc_host *host)
+{
+	unsigned int irq_mask;
+
+	if (host->use_dma)
+		irq_mask = INT_EN_MASK & ~(BRR_ENABLE | BWR_ENABLE);
+	else
+		irq_mask = INT_EN_MASK;
+
+	OMAP_HSMMC_WRITE(host->base, STAT, STAT_CLEAR);
+	OMAP_HSMMC_WRITE(host->base, ISE, irq_mask);
+	OMAP_HSMMC_WRITE(host->base, IE, irq_mask);
+}
+
+static void omap_hsmmc_disable_irq(struct omap_hsmmc_host *host)
+{
+	OMAP_HSMMC_WRITE(host->base, ISE, 0);
+	OMAP_HSMMC_WRITE(host->base, IE, 0);
+	OMAP_HSMMC_WRITE(host->base, STAT, STAT_CLEAR);
+}
+
 #ifdef CONFIG_PM
 
 /*
@@ -592,9 +612,7 @@
 		&& time_before(jiffies, timeout))
 		;
 
-	OMAP_HSMMC_WRITE(host->base, STAT, STAT_CLEAR);
-	OMAP_HSMMC_WRITE(host->base, ISE, INT_EN_MASK);
-	OMAP_HSMMC_WRITE(host->base, IE, INT_EN_MASK);
+	omap_hsmmc_disable_irq(host);
 
 	/* Do not initialize card-specific things if the power is off */
 	if (host->power_mode == MMC_POWER_OFF)
@@ -697,6 +715,8 @@
 		return;
 
 	disable_irq(host->irq);
+
+	OMAP_HSMMC_WRITE(host->base, IE, INT_EN_MASK);
 	OMAP_HSMMC_WRITE(host->base, CON,
 		OMAP_HSMMC_READ(host->base, CON) | INIT_STREAM);
 	OMAP_HSMMC_WRITE(host->base, CMD, INIT_STREAM_CMD);
@@ -762,17 +782,7 @@
 		mmc_hostname(host->mmc), cmd->opcode, cmd->arg);
 	host->cmd = cmd;
 
-	/*
-	 * Clear status bits and enable interrupts
-	 */
-	OMAP_HSMMC_WRITE(host->base, STAT, STAT_CLEAR);
-	OMAP_HSMMC_WRITE(host->base, ISE, INT_EN_MASK);
-
-	if (host->use_dma)
-		OMAP_HSMMC_WRITE(host->base, IE,
-				 INT_EN_MASK & ~(BRR_ENABLE | BWR_ENABLE));
-	else
-		OMAP_HSMMC_WRITE(host->base, IE, INT_EN_MASK);
+	omap_hsmmc_enable_irq(host);
 
 	host->response_busy = 0;
 	if (cmd->flags & MMC_RSP_PRESENT) {
@@ -806,13 +816,7 @@
 	if (host->use_dma)
 		cmdreg |= DMA_EN;
 
-	/*
-	 * In an interrupt context (i.e. STOP command), the spinlock is unlocked
-	 * by the interrupt handler, otherwise (i.e. for a new request) it is
-	 * unlocked here.
-	 */
-	if (!in_interrupt())
-		spin_unlock_irqrestore(&host->irq_lock, host->flags);
+	host->req_in_progress = 1;
 
 	OMAP_HSMMC_WRITE(host->base, ARG, cmd->arg);
 	OMAP_HSMMC_WRITE(host->base, CMD, cmdreg);
@@ -827,6 +831,23 @@
 		return DMA_FROM_DEVICE;
 }
 
+static void omap_hsmmc_request_done(struct omap_hsmmc_host *host, struct mmc_request *mrq)
+{
+	int dma_ch;
+
+	spin_lock(&host->irq_lock);
+	host->req_in_progress = 0;
+	dma_ch = host->dma_ch;
+	spin_unlock(&host->irq_lock);
+
+	omap_hsmmc_disable_irq(host);
+	/* Do not complete the request if DMA is still in progress */
+	if (mrq->data && host->use_dma && dma_ch != -1)
+		return;
+	host->mrq = NULL;
+	mmc_request_done(host->mmc, mrq);
+}
+
 /*
  * Notify the transfer complete to MMC core
  */
@@ -843,25 +864,19 @@
 			return;
 		}
 
-		host->mrq = NULL;
-		mmc_request_done(host->mmc, mrq);
+		omap_hsmmc_request_done(host, mrq);
 		return;
 	}
 
 	host->data = NULL;
 
-	if (host->use_dma && host->dma_ch != -1)
-		dma_unmap_sg(mmc_dev(host->mmc), data->sg, host->dma_len,
-			omap_hsmmc_get_dma_dir(host, data));
-
 	if (!data->error)
 		data->bytes_xfered += data->blocks * (data->blksz);
 	else
 		data->bytes_xfered = 0;
 
 	if (!data->stop) {
-		host->mrq = NULL;
-		mmc_request_done(host->mmc, data->mrq);
+		omap_hsmmc_request_done(host, data->mrq);
 		return;
 	}
 	omap_hsmmc_start_command(host, data->stop, NULL);
@@ -887,10 +902,8 @@
 			cmd->resp[0] = OMAP_HSMMC_READ(host->base, RSP10);
 		}
 	}
-	if ((host->data == NULL && !host->response_busy) || cmd->error) {
-		host->mrq = NULL;
-		mmc_request_done(host->mmc, cmd->mrq);
-	}
+	if ((host->data == NULL && !host->response_busy) || cmd->error)
+		omap_hsmmc_request_done(host, cmd->mrq);
 }
 
 /*
@@ -898,14 +911,19 @@
  */
 static void omap_hsmmc_dma_cleanup(struct omap_hsmmc_host *host, int errno)
 {
+	int dma_ch;
+
 	host->data->error = errno;
 
-	if (host->use_dma && host->dma_ch != -1) {
+	spin_lock(&host->irq_lock);
+	dma_ch = host->dma_ch;
+	host->dma_ch = -1;
+	spin_unlock(&host->irq_lock);
+
+	if (host->use_dma && dma_ch != -1) {
 		dma_unmap_sg(mmc_dev(host->mmc), host->data->sg, host->dma_len,
 			omap_hsmmc_get_dma_dir(host, host->data));
-		omap_free_dma(host->dma_ch);
-		host->dma_ch = -1;
-		up(&host->sem);
+		omap_free_dma(dma_ch);
 	}
 	host->data = NULL;
 }
@@ -967,28 +985,21 @@
 			__func__);
 }
 
-/*
- * MMC controller IRQ handler
- */
-static irqreturn_t omap_hsmmc_irq(int irq, void *dev_id)
+static void omap_hsmmc_do_irq(struct omap_hsmmc_host *host, int status)
 {
-	struct omap_hsmmc_host *host = dev_id;
 	struct mmc_data *data;
-	int end_cmd = 0, end_trans = 0, status;
+	int end_cmd = 0, end_trans = 0;
 
-	spin_lock(&host->irq_lock);
-
-	if (host->mrq == NULL) {
-		OMAP_HSMMC_WRITE(host->base, STAT,
-			OMAP_HSMMC_READ(host->base, STAT));
-		/* Flush posted write */
-		OMAP_HSMMC_READ(host->base, STAT);
-		spin_unlock(&host->irq_lock);
-		return IRQ_HANDLED;
+	if (!host->req_in_progress) {
+		do {
+			OMAP_HSMMC_WRITE(host->base, STAT, status);
+			/* Flush posted write */
+			status = OMAP_HSMMC_READ(host->base, STAT);
+		} while (status & INT_EN_MASK);
+		return;
 	}
 
 	data = host->data;
-	status = OMAP_HSMMC_READ(host->base, STAT);
 	dev_dbg(mmc_dev(host->mmc), "IRQ Status is %x\n", status);
 
 	if (status & ERR) {
@@ -1041,15 +1052,27 @@
 	}
 
 	OMAP_HSMMC_WRITE(host->base, STAT, status);
-	/* Flush posted write */
-	OMAP_HSMMC_READ(host->base, STAT);
 
 	if (end_cmd || ((status & CC) && host->cmd))
 		omap_hsmmc_cmd_done(host, host->cmd);
 	if ((end_trans || (status & TC)) && host->mrq)
 		omap_hsmmc_xfer_done(host, data);
+}
 
-	spin_unlock(&host->irq_lock);
+/*
+ * MMC controller IRQ handler
+ */
+static irqreturn_t omap_hsmmc_irq(int irq, void *dev_id)
+{
+	struct omap_hsmmc_host *host = dev_id;
+	int status;
+
+	status = OMAP_HSMMC_READ(host->base, STAT);
+	do {
+		omap_hsmmc_do_irq(host, status);
+		/* Flush posted write */
+		status = OMAP_HSMMC_READ(host->base, STAT);
+	} while (status & INT_EN_MASK);
 
 	return IRQ_HANDLED;
 }
@@ -1244,31 +1267,47 @@
 /*
  * DMA call back function
  */
-static void omap_hsmmc_dma_cb(int lch, u16 ch_status, void *data)
+static void omap_hsmmc_dma_cb(int lch, u16 ch_status, void *cb_data)
 {
-	struct omap_hsmmc_host *host = data;
+	struct omap_hsmmc_host *host = cb_data;
+	struct mmc_data *data = host->mrq->data;
+	int dma_ch, req_in_progress;
 
 	if (ch_status & OMAP2_DMA_MISALIGNED_ERR_IRQ)
 		dev_dbg(mmc_dev(host->mmc), "MISALIGNED_ADRS_ERR\n");
 
-	if (host->dma_ch < 0)
+	spin_lock(&host->irq_lock);
+	if (host->dma_ch < 0) {
+		spin_unlock(&host->irq_lock);
 		return;
+	}
 
 	host->dma_sg_idx++;
 	if (host->dma_sg_idx < host->dma_len) {
 		/* Fire up the next transfer. */
-		omap_hsmmc_config_dma_params(host, host->data,
-					   host->data->sg + host->dma_sg_idx);
+		omap_hsmmc_config_dma_params(host, data,
+					   data->sg + host->dma_sg_idx);
+		spin_unlock(&host->irq_lock);
 		return;
 	}
 
-	omap_free_dma(host->dma_ch);
+	dma_unmap_sg(mmc_dev(host->mmc), data->sg, host->dma_len,
+		omap_hsmmc_get_dma_dir(host, data));
+
+	req_in_progress = host->req_in_progress;
+	dma_ch = host->dma_ch;
 	host->dma_ch = -1;
-	/*
-	 * DMA Callback: run in interrupt context.
-	 * mutex_unlock will throw a kernel warning if used.
-	 */
-	up(&host->sem);
+	spin_unlock(&host->irq_lock);
+
+	omap_free_dma(dma_ch);
+
+	/* If DMA has finished after TC, complete the request */
+	if (!req_in_progress) {
+		struct mmc_request *mrq = host->mrq;
+
+		host->mrq = NULL;
+		mmc_request_done(host->mmc, mrq);
+	}
 }
 
 /*
@@ -1277,7 +1316,7 @@
 static int omap_hsmmc_start_dma_transfer(struct omap_hsmmc_host *host,
 					struct mmc_request *req)
 {
-	int dma_ch = 0, ret = 0, err = 1, i;
+	int dma_ch = 0, ret = 0, i;
 	struct mmc_data *data = req->data;
 
 	/* Sanity check: all the SG entries must be aligned by block size. */
@@ -1294,23 +1333,7 @@
 		 */
 		return -EINVAL;
 
-	/*
-	 * If for some reason the DMA transfer is still active,
-	 * we wait for timeout period and free the dma
-	 */
-	if (host->dma_ch != -1) {
-		set_current_state(TASK_UNINTERRUPTIBLE);
-		schedule_timeout(100);
-		if (down_trylock(&host->sem)) {
-			omap_free_dma(host->dma_ch);
-			host->dma_ch = -1;
-			up(&host->sem);
-			return err;
-		}
-	} else {
-		if (down_trylock(&host->sem))
-			return err;
-	}
+	BUG_ON(host->dma_ch != -1);
 
 	ret = omap_request_dma(omap_hsmmc_get_dma_sync_dev(host, data),
 			       "MMC/SD", omap_hsmmc_dma_cb, host, &dma_ch);
@@ -1410,37 +1433,27 @@
 	struct omap_hsmmc_host *host = mmc_priv(mmc);
 	int err;
 
-	/*
-	 * Prevent races with the interrupt handler because of unexpected
-	 * interrupts, but not if we are already in interrupt context i.e.
-	 * retries.
-	 */
-	if (!in_interrupt()) {
-		spin_lock_irqsave(&host->irq_lock, host->flags);
-		/*
-		 * Protect the card from I/O if there is a possibility
-		 * it can be removed.
-		 */
-		if (host->protect_card) {
-			if (host->reqs_blocked < 3) {
-				/*
-				 * Ensure the controller is left in a consistent
-				 * state by resetting the command and data state
-				 * machines.
-				 */
-				omap_hsmmc_reset_controller_fsm(host, SRD);
-				omap_hsmmc_reset_controller_fsm(host, SRC);
-				host->reqs_blocked += 1;
-			}
-			req->cmd->error = -EBADF;
-			if (req->data)
-				req->data->error = -EBADF;
-			spin_unlock_irqrestore(&host->irq_lock, host->flags);
-			mmc_request_done(mmc, req);
-			return;
-		} else if (host->reqs_blocked)
-			host->reqs_blocked = 0;
-	}
+	BUG_ON(host->req_in_progress);
+	BUG_ON(host->dma_ch != -1);
+	if (host->protect_card) {
+		if (host->reqs_blocked < 3) {
+			/*
+			 * Ensure the controller is left in a consistent
+			 * state by resetting the command and data state
+			 * machines.
+			 */
+			omap_hsmmc_reset_controller_fsm(host, SRD);
+			omap_hsmmc_reset_controller_fsm(host, SRC);
+			host->reqs_blocked += 1;
+		}
+		req->cmd->error = -EBADF;
+		if (req->data)
+			req->data->error = -EBADF;
+		req->cmd->retries = 0;
+		mmc_request_done(mmc, req);
+		return;
+	} else if (host->reqs_blocked)
+		host->reqs_blocked = 0;
 	WARN_ON(host->mrq != NULL);
 	host->mrq = req;
 	err = omap_hsmmc_prepare_data(host, req);
@@ -1449,8 +1462,6 @@
 		if (req->data)
 			req->data->error = err;
 		host->mrq = NULL;
-		if (!in_interrupt())
-			spin_unlock_irqrestore(&host->irq_lock, host->flags);
 		mmc_request_done(mmc, req);
 		return;
 	}
@@ -2019,7 +2030,6 @@
 	mmc->f_min	= 400000;
 	mmc->f_max	= 52000000;
 
-	sema_init(&host->sem, 1);
 	spin_lock_init(&host->irq_lock);
 
 	host->iclk = clk_get(&pdev->dev, "ick");
@@ -2162,8 +2172,7 @@
 		}
 	}
 
-	OMAP_HSMMC_WRITE(host->base, ISE, INT_EN_MASK);
-	OMAP_HSMMC_WRITE(host->base, IE, INT_EN_MASK);
+	omap_hsmmc_disable_irq(host);
 
 	mmc_host_lazy_disable(host->mmc);
 
@@ -2258,10 +2267,12 @@
 }
 
 #ifdef CONFIG_PM
-static int omap_hsmmc_suspend(struct platform_device *pdev, pm_message_t state)
+static int omap_hsmmc_suspend(struct device *dev)
 {
 	int ret = 0;
+	struct platform_device *pdev = to_platform_device(dev);
 	struct omap_hsmmc_host *host = platform_get_drvdata(pdev);
+	pm_message_t state = PMSG_SUSPEND; /* unused by MMC core */
 
 	if (host && host->suspended)
 		return 0;
@@ -2281,12 +2292,9 @@
 		}
 		cancel_work_sync(&host->mmc_carddetect_work);
 		mmc_host_enable(host->mmc);
-		ret = mmc_suspend_host(host->mmc, state);
+		ret = mmc_suspend_host(host->mmc);
 		if (ret == 0) {
-			OMAP_HSMMC_WRITE(host->base, ISE, 0);
-			OMAP_HSMMC_WRITE(host->base, IE, 0);
-
-
+			omap_hsmmc_disable_irq(host);
 			OMAP_HSMMC_WRITE(host->base, HCTL,
 				OMAP_HSMMC_READ(host->base, HCTL) & ~SDBP);
 			mmc_host_disable(host->mmc);
@@ -2310,9 +2318,10 @@
 }
 
 /* Routine to resume the MMC device */
-static int omap_hsmmc_resume(struct platform_device *pdev)
+static int omap_hsmmc_resume(struct device *dev)
 {
 	int ret = 0;
+	struct platform_device *pdev = to_platform_device(dev);
 	struct omap_hsmmc_host *host = platform_get_drvdata(pdev);
 
 	if (host && !host->suspended)
@@ -2363,13 +2372,17 @@
 #define omap_hsmmc_resume		NULL
 #endif
 
-static struct platform_driver omap_hsmmc_driver = {
-	.remove		= omap_hsmmc_remove,
+static struct dev_pm_ops omap_hsmmc_dev_pm_ops = {
 	.suspend	= omap_hsmmc_suspend,
 	.resume		= omap_hsmmc_resume,
+};
+
+static struct platform_driver omap_hsmmc_driver = {
+	.remove		= omap_hsmmc_remove,
 	.driver		= {
 		.name = DRIVER_NAME,
 		.owner = THIS_MODULE,
+		.pm = &omap_hsmmc_dev_pm_ops,
 	},
 };
 
diff --git a/drivers/mmc/host/pxamci.c b/drivers/mmc/host/pxamci.c
index e4f00e7..0a4e43f 100644
--- a/drivers/mmc/host/pxamci.c
+++ b/drivers/mmc/host/pxamci.c
@@ -813,7 +813,7 @@
 	int ret = 0;
 
 	if (mmc)
-		ret = mmc_suspend_host(mmc, PMSG_SUSPEND);
+		ret = mmc_suspend_host(mmc);
 
 	return ret;
 }
diff --git a/drivers/mmc/host/s3cmci.c b/drivers/mmc/host/s3cmci.c
index 2fdf768..2e16e0a 100644
--- a/drivers/mmc/host/s3cmci.c
+++ b/drivers/mmc/host/s3cmci.c
@@ -1881,9 +1881,8 @@
 static int s3cmci_suspend(struct device *dev)
 {
 	struct mmc_host *mmc = platform_get_drvdata(to_platform_device(dev));
-	struct pm_message event = { PM_EVENT_SUSPEND };
 
-	return mmc_suspend_host(mmc, event);
+	return mmc_suspend_host(mmc);
 }
 
 static int s3cmci_resume(struct device *dev)
diff --git a/drivers/mmc/host/sdhci-of-core.c b/drivers/mmc/host/sdhci-of-core.c
index 7802a54..a2e9820 100644
--- a/drivers/mmc/host/sdhci-of-core.c
+++ b/drivers/mmc/host/sdhci-of-core.c
@@ -89,7 +89,7 @@
 {
 	struct sdhci_host *host = dev_get_drvdata(&ofdev->dev);
 
-	return mmc_suspend_host(host->mmc, state);
+	return mmc_suspend_host(host->mmc);
 }
 
 static int sdhci_of_resume(struct of_device *ofdev)
diff --git a/drivers/mmc/host/sdhci-of-esdhc.c b/drivers/mmc/host/sdhci-of-esdhc.c
index d5b11a1..c8623de 100644
--- a/drivers/mmc/host/sdhci-of-esdhc.c
+++ b/drivers/mmc/host/sdhci-of-esdhc.c
@@ -129,12 +129,12 @@
 		  SDHCI_QUIRK_RESTORE_IRQS_AFTER_RESET |
 		  SDHCI_QUIRK_NO_CARD_NO_RESET,
 	.ops = {
-		.readl = sdhci_be32bs_readl,
-		.readw = esdhc_readw,
-		.readb = sdhci_be32bs_readb,
-		.writel = sdhci_be32bs_writel,
-		.writew = esdhc_writew,
-		.writeb = esdhc_writeb,
+		.read_l = sdhci_be32bs_readl,
+		.read_w = esdhc_readw,
+		.read_b = sdhci_be32bs_readb,
+		.write_l = sdhci_be32bs_writel,
+		.write_w = esdhc_writew,
+		.write_b = esdhc_writeb,
 		.set_clock = esdhc_set_clock,
 		.enable_dma = esdhc_enable_dma,
 		.get_max_clock = esdhc_get_max_clock,
diff --git a/drivers/mmc/host/sdhci-of-hlwd.c b/drivers/mmc/host/sdhci-of-hlwd.c
index 35117f3..68ddb75 100644
--- a/drivers/mmc/host/sdhci-of-hlwd.c
+++ b/drivers/mmc/host/sdhci-of-hlwd.c
@@ -55,11 +55,11 @@
 	.quirks = SDHCI_QUIRK_32BIT_DMA_ADDR |
 		  SDHCI_QUIRK_32BIT_DMA_SIZE,
 	.ops = {
-		.readl = sdhci_be32bs_readl,
-		.readw = sdhci_be32bs_readw,
-		.readb = sdhci_be32bs_readb,
-		.writel = sdhci_hlwd_writel,
-		.writew = sdhci_hlwd_writew,
-		.writeb = sdhci_hlwd_writeb,
+		.read_l = sdhci_be32bs_readl,
+		.read_w = sdhci_be32bs_readw,
+		.read_b = sdhci_be32bs_readb,
+		.write_l = sdhci_hlwd_writel,
+		.write_w = sdhci_hlwd_writew,
+		.write_b = sdhci_hlwd_writeb,
 	},
 };
diff --git a/drivers/mmc/host/sdhci-pci.c b/drivers/mmc/host/sdhci-pci.c
index 6701af6..65483fd 100644
--- a/drivers/mmc/host/sdhci-pci.c
+++ b/drivers/mmc/host/sdhci-pci.c
@@ -628,7 +628,7 @@
 	host = sdhci_alloc_host(&pdev->dev, sizeof(struct sdhci_pci_slot));
 	if (IS_ERR(host)) {
 		dev_err(&pdev->dev, "cannot allocate host\n");
-		return ERR_PTR(PTR_ERR(host));
+		return ERR_CAST(host);
 	}
 
 	slot = sdhci_priv(host);
diff --git a/drivers/mmc/host/sdhci-pltfm.c b/drivers/mmc/host/sdhci-pltfm.c
index 297f40a..b6ee0d7 100644
--- a/drivers/mmc/host/sdhci-pltfm.c
+++ b/drivers/mmc/host/sdhci-pltfm.c
@@ -29,6 +29,7 @@
 #include <linux/mmc/host.h>
 
 #include <linux/io.h>
+#include <linux/sdhci-pltfm.h>
 
 #include "sdhci.h"
 
@@ -49,19 +50,18 @@
 
 static int __devinit sdhci_pltfm_probe(struct platform_device *pdev)
 {
+	struct sdhci_pltfm_data *pdata = pdev->dev.platform_data;
 	struct sdhci_host *host;
 	struct resource *iomem;
 	int ret;
 
-	BUG_ON(pdev == NULL);
-
 	iomem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
 	if (!iomem) {
 		ret = -ENOMEM;
 		goto err;
 	}
 
-	if (resource_size(iomem) != 0x100)
+	if (resource_size(iomem) < 0x100)
 		dev_err(&pdev->dev, "Invalid iomem size. You may "
 			"experience problems.\n");
 
@@ -76,7 +76,12 @@
 	}
 
 	host->hw_name = "platform";
-	host->ops = &sdhci_pltfm_ops;
+	if (pdata && pdata->ops)
+		host->ops = pdata->ops;
+	else
+		host->ops = &sdhci_pltfm_ops;
+	if (pdata)
+		host->quirks = pdata->quirks;
 	host->irq = platform_get_irq(pdev, 0);
 
 	if (!request_mem_region(iomem->start, resource_size(iomem),
@@ -93,6 +98,12 @@
 		goto err_remap;
 	}
 
+	if (pdata && pdata->init) {
+		ret = pdata->init(host);
+		if (ret)
+			goto err_plat_init;
+	}
+
 	ret = sdhci_add_host(host);
 	if (ret)
 		goto err_add_host;
@@ -102,6 +113,9 @@
 	return 0;
 
 err_add_host:
+	if (pdata && pdata->exit)
+		pdata->exit(host);
+err_plat_init:
 	iounmap(host->ioaddr);
 err_remap:
 	release_mem_region(iomem->start, resource_size(iomem));
@@ -114,6 +128,7 @@
 
 static int __devexit sdhci_pltfm_remove(struct platform_device *pdev)
 {
+	struct sdhci_pltfm_data *pdata = pdev->dev.platform_data;
 	struct sdhci_host *host = platform_get_drvdata(pdev);
 	struct resource *iomem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
 	int dead;
@@ -125,6 +140,8 @@
 		dead = 1;
 
 	sdhci_remove_host(host, dead);
+	if (pdata && pdata->exit)
+		pdata->exit(host);
 	iounmap(host->ioaddr);
 	release_mem_region(iomem->start, resource_size(iomem));
 	sdhci_free_host(host);
@@ -165,4 +182,3 @@
 MODULE_AUTHOR("Mocean Laboratories <info@mocean-labs.com>");
 MODULE_LICENSE("GPL v2");
 MODULE_ALIAS("platform:sdhci");
-
diff --git a/drivers/mmc/host/sdhci-s3c.c b/drivers/mmc/host/sdhci-s3c.c
index 2136794..af21792 100644
--- a/drivers/mmc/host/sdhci-s3c.c
+++ b/drivers/mmc/host/sdhci-s3c.c
@@ -317,12 +317,7 @@
 	host->irq = irq;
 
 	/* Setup quirks for the controller */
-
-	/* Currently with ADMA enabled we are getting some length
-	 * interrupts that are not being dealt with, do disable
-	 * ADMA until this is sorted out. */
-	host->quirks |= SDHCI_QUIRK_BROKEN_ADMA;
-	host->quirks |= SDHCI_QUIRK_32BIT_ADMA_SIZE;
+	host->quirks |= SDHCI_QUIRK_NO_ENDATTR_IN_NOPDESC;
 
 #ifndef CONFIG_MMC_SDHCI_S3C_DMA
 
@@ -330,9 +325,6 @@
 	 * support as well. */
 	host->quirks |= SDHCI_QUIRK_BROKEN_DMA;
 
-	/* PIO currently has problems with multi-block IO */
-	host->quirks |= SDHCI_QUIRK_NO_MULTIBLOCK;
-
 #endif /* CONFIG_MMC_SDHCI_S3C_DMA */
 
 	/* It seems we do not get an DATA transfer complete on non-busy
diff --git a/drivers/mmc/host/sdhci-spear.c b/drivers/mmc/host/sdhci-spear.c
new file mode 100644
index 0000000..d70c54c
--- /dev/null
+++ b/drivers/mmc/host/sdhci-spear.c
@@ -0,0 +1,298 @@
+/*
+ * drivers/mmc/host/sdhci-spear.c
+ *
+ * Support for SDHCI platform devices for the ST SPEAr SoC family
+ *
+ * Copyright (C) 2010 ST Microelectronics
+ * Viresh Kumar<viresh.kumar@st.com>
+ *
+ * Inspired by sdhci-pltfm.c
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2. This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ */
+
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/gpio.h>
+#include <linux/highmem.h>
+#include <linux/interrupt.h>
+#include <linux/irq.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <linux/mmc/host.h>
+#include <linux/mmc/sdhci-spear.h>
+#include <linux/io.h>
+#include "sdhci.h"
+
+struct spear_sdhci {
+	struct clk *clk;
+	struct sdhci_plat_data *data;
+};
+
+/* sdhci ops */
+static struct sdhci_ops sdhci_pltfm_ops = {
+	/* Nothing to do for now. */
+};
+
+/* gpio card detection interrupt handler */
+static irqreturn_t sdhci_gpio_irq(int irq, void *dev_id)
+{
+	struct platform_device *pdev = dev_id;
+	struct sdhci_host *host = platform_get_drvdata(pdev);
+	struct spear_sdhci *sdhci = dev_get_platdata(&pdev->dev);
+	unsigned long gpio_irq_type;
+	int val;
+
+	val = gpio_get_value(sdhci->data->card_int_gpio);
+
+	/*
+	 * val == 1 -> card removed, val == 0 -> card inserted.
+	 * If the card was removed, re-arm the irq for a low level
+	 * (insertion); otherwise for a high level (removal).
+	 */
+	gpio_irq_type = val ? IRQF_TRIGGER_LOW : IRQF_TRIGGER_HIGH;
+	set_irq_type(irq, gpio_irq_type);
+
+	if (sdhci->data->card_power_gpio >= 0) {
+		if (!sdhci->data->power_always_enb) {
+			/* if card inserted, give power, otherwise remove it */
+			val = sdhci->data->power_active_high ? !val : val;
+			gpio_set_value(sdhci->data->card_power_gpio, val);
+		}
+	}
+
+	/* inform sdhci driver about card insertion/removal */
+	tasklet_schedule(&host->card_tasklet);
+
+	return IRQ_HANDLED;
+}
+
+static int __devinit sdhci_probe(struct platform_device *pdev)
+{
+	struct sdhci_host *host;
+	struct resource *iomem;
+	struct spear_sdhci *sdhci;
+	int ret;
+
+	BUG_ON(pdev == NULL);
+
+	iomem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	if (!iomem) {
+		ret = -ENOMEM;
+		dev_dbg(&pdev->dev, "memory resource not defined\n");
+		goto err;
+	}
+
+	if (!request_mem_region(iomem->start, resource_size(iomem),
+				"spear-sdhci")) {
+		ret = -EBUSY;
+		dev_dbg(&pdev->dev, "cannot request region\n");
+		goto err;
+	}
+
+	sdhci = kzalloc(sizeof(*sdhci), GFP_KERNEL);
+	if (!sdhci) {
+		ret = -ENOMEM;
+		dev_dbg(&pdev->dev, "cannot allocate memory for sdhci\n");
+		goto err_kzalloc;
+	}
+
+	/* clk enable */
+	sdhci->clk = clk_get(&pdev->dev, NULL);
+	if (IS_ERR(sdhci->clk)) {
+		ret = PTR_ERR(sdhci->clk);
+		dev_dbg(&pdev->dev, "Error getting clock\n");
+		goto err_clk_get;
+	}
+
+	ret = clk_enable(sdhci->clk);
+	if (ret) {
+		dev_dbg(&pdev->dev, "Error enabling clock\n");
+		goto err_clk_enb;
+	}
+
+	/* overwrite platform_data */
+	sdhci->data = dev_get_platdata(&pdev->dev);
+	pdev->dev.platform_data = sdhci;
+
+	if (pdev->dev.parent)
+		host = sdhci_alloc_host(pdev->dev.parent, 0);
+	else
+		host = sdhci_alloc_host(&pdev->dev, 0);
+
+	if (IS_ERR(host)) {
+		ret = PTR_ERR(host);
+		dev_dbg(&pdev->dev, "error allocating host\n");
+		goto err_alloc_host;
+	}
+
+	host->hw_name = "sdhci";
+	host->ops = &sdhci_pltfm_ops;
+	host->irq = platform_get_irq(pdev, 0);
+	host->quirks = SDHCI_QUIRK_BROKEN_ADMA;
+
+	host->ioaddr = ioremap(iomem->start, resource_size(iomem));
+	if (!host->ioaddr) {
+		ret = -ENOMEM;
+		dev_dbg(&pdev->dev, "failed to remap registers\n");
+		goto err_ioremap;
+	}
+
+	ret = sdhci_add_host(host);
+	if (ret) {
+		dev_dbg(&pdev->dev, "error adding host\n");
+		goto err_add_host;
+	}
+
+	platform_set_drvdata(pdev, host);
+
+	/*
+	 * Using GPIOs for sdhci power control and card interrupt detection
+	 * is optional. If sdhci->data is NULL, the native sdhci lines are
+	 * used; otherwise the GPIO lines are.
+	 * If a GPIO is selected for power control, power is disabled after
+	 * card removal and re-enabled when the card insertion interrupt
+	 * occurs.
+	 */
+	if (!sdhci->data)
+		return 0;
+
+	if (sdhci->data->card_power_gpio >= 0) {
+		int val = 0;
+
+		ret = gpio_request(sdhci->data->card_power_gpio, "sdhci");
+		if (ret < 0) {
+			dev_dbg(&pdev->dev, "gpio request fail: %d\n",
+					sdhci->data->card_power_gpio);
+			goto err_pgpio_request;
+		}
+
+		if (sdhci->data->power_always_enb)
+			val = sdhci->data->power_active_high;
+		else
+			val = !sdhci->data->power_active_high;
+
+		ret = gpio_direction_output(sdhci->data->card_power_gpio, val);
+		if (ret) {
+			dev_dbg(&pdev->dev, "gpio set direction fail: %d\n",
+					sdhci->data->card_power_gpio);
+			goto err_pgpio_direction;
+		}
+
+		gpio_set_value(sdhci->data->card_power_gpio, 1);
+	}
+
+	if (sdhci->data->card_int_gpio >= 0) {
+		ret = gpio_request(sdhci->data->card_int_gpio, "sdhci");
+		if (ret < 0) {
+			dev_dbg(&pdev->dev, "gpio request fail: %d\n",
+					sdhci->data->card_int_gpio);
+			goto err_igpio_request;
+		}
+
+		ret = gpio_direction_input(sdhci->data->card_int_gpio);
+		if (ret) {
+			dev_dbg(&pdev->dev, "gpio set direction fail: %d\n",
+					sdhci->data->card_int_gpio);
+			goto err_igpio_direction;
+		}
+		ret = request_irq(gpio_to_irq(sdhci->data->card_int_gpio),
+				sdhci_gpio_irq, IRQF_TRIGGER_LOW,
+				mmc_hostname(host->mmc), pdev);
+		if (ret) {
+			dev_dbg(&pdev->dev, "gpio request irq fail: %d\n",
+					sdhci->data->card_int_gpio);
+			goto err_igpio_request_irq;
+		}
+
+	}
+
+	return 0;
+
+err_igpio_request_irq:
+err_igpio_direction:
+	if (sdhci->data->card_int_gpio >= 0)
+		gpio_free(sdhci->data->card_int_gpio);
+err_igpio_request:
+err_pgpio_direction:
+	if (sdhci->data->card_power_gpio >= 0)
+		gpio_free(sdhci->data->card_power_gpio);
+err_pgpio_request:
+	platform_set_drvdata(pdev, NULL);
+	sdhci_remove_host(host, 1);
+err_add_host:
+	iounmap(host->ioaddr);
+err_ioremap:
+	sdhci_free_host(host);
+err_alloc_host:
+	clk_disable(sdhci->clk);
+err_clk_enb:
+	clk_put(sdhci->clk);
+err_clk_get:
+	kfree(sdhci);
+err_kzalloc:
+	release_mem_region(iomem->start, resource_size(iomem));
+err:
+	dev_err(&pdev->dev, "spear-sdhci probe failed: %d\n", ret);
+	return ret;
+}
+
+static int __devexit sdhci_remove(struct platform_device *pdev)
+{
+	struct sdhci_host *host = platform_get_drvdata(pdev);
+	struct resource *iomem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	struct spear_sdhci *sdhci = dev_get_platdata(&pdev->dev);
+	int dead;
+	u32 scratch;
+
+	if (sdhci->data) {
+		if (sdhci->data->card_int_gpio >= 0) {
+			free_irq(gpio_to_irq(sdhci->data->card_int_gpio), pdev);
+			gpio_free(sdhci->data->card_int_gpio);
+		}
+
+		if (sdhci->data->card_power_gpio >= 0)
+			gpio_free(sdhci->data->card_power_gpio);
+	}
+
+	platform_set_drvdata(pdev, NULL);
+	dead = 0;
+	scratch = readl(host->ioaddr + SDHCI_INT_STATUS);
+	if (scratch == (u32)-1)
+		dead = 1;
+
+	sdhci_remove_host(host, dead);
+	iounmap(host->ioaddr);
+	sdhci_free_host(host);
+	clk_disable(sdhci->clk);
+	clk_put(sdhci->clk);
+	kfree(sdhci);
+	if (iomem)
+		release_mem_region(iomem->start, resource_size(iomem));
+
+	return 0;
+}
+
+static struct platform_driver sdhci_driver = {
+	.driver = {
+		.name	= "sdhci",
+		.owner	= THIS_MODULE,
+	},
+	.probe		= sdhci_probe,
+	.remove		= __devexit_p(sdhci_remove),
+};
+
+static int __init sdhci_init(void)
+{
+	return platform_driver_register(&sdhci_driver);
+}
+module_init(sdhci_init);
+
+static void __exit sdhci_exit(void)
+{
+	platform_driver_unregister(&sdhci_driver);
+}
+module_exit(sdhci_exit);
+
+MODULE_DESCRIPTION("SPEAr Secure Digital Host Controller Interface driver");
+MODULE_AUTHOR("Viresh Kumar <viresh.kumar@st.com>");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/mmc/host/sdhci.c b/drivers/mmc/host/sdhci.c
index 9d4fdfa..c6d1bd8 100644
--- a/drivers/mmc/host/sdhci.c
+++ b/drivers/mmc/host/sdhci.c
@@ -496,12 +496,22 @@
 		WARN_ON((desc - host->adma_desc) > (128 * 2 + 1) * 4);
 	}
 
-	/*
-	 * Add a terminating entry.
-	 */
+	if (host->quirks & SDHCI_QUIRK_NO_ENDATTR_IN_NOPDESC) {
+		/*
+		 * Mark the last descriptor as the terminating descriptor.
+		 */
+		if (desc != host->adma_desc) {
+			desc -= 8;
+			desc[0] |= 0x2; /* end */
+		}
+	} else {
+		/*
+		 * Add a terminating entry.
+		 */
 
-	/* nop, end, valid */
-	sdhci_set_adma_desc(desc, 0, 0, 0x3);
+		/* nop, end, valid */
+		sdhci_set_adma_desc(desc, 0, 0, 0x3);
+	}
 
 	/*
 	 * Resync align buffer as we might have changed it.
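Some background on this hunk: an ADMA2 descriptor is 8 bytes, with the valid bit (0x1) and end bit (0x2) in the low attribute byte. Controllers that honor the end attribute only on transfer descriptors, not on a trailing nop, get the new quirk. A sketch of both terminations, assuming the byte layout used by sdhci_set_adma_desc() (the helper below is illustrative, not the driver's code):

#include <linux/string.h>
#include <linux/types.h>

#define ADMA2_VALID	0x1	/* bit 0: descriptor valid */
#define ADMA2_END	0x2	/* bit 1: last descriptor in the table */

static void adma2_terminate(u8 *table, u8 *next_desc, bool no_nop_end_attr)
{
	if (no_nop_end_attr) {
		/* Quirky controller: flag the last real descriptor instead. */
		if (next_desc != table)
			next_desc[-8] |= ADMA2_END;
	} else {
		/* Spec-conformant: append a nop descriptor, valid + end. */
		next_desc[0] = ADMA2_VALID | ADMA2_END;
		next_desc[1] = 0;		/* attribute high byte */
		memset(next_desc + 2, 0, 6);	/* length and address */
	}
}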
@@ -1587,7 +1597,7 @@
 
 	sdhci_disable_card_detection(host);
 
-	ret = mmc_suspend_host(host->mmc, state);
+	ret = mmc_suspend_host(host->mmc);
 	if (ret)
 		return ret;
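This hunk, like the matching ones in sdricoh_cs, tifm_sd, tmio_mmc, via-sdmmc and wbsd further down, tracks a core API change: mmc_suspend_host() no longer takes a pm_message_t. After the change, a host driver's suspend hook looks roughly like this (device and function names are placeholders):

#include <linux/mmc/host.h>
#include <linux/platform_device.h>

static int foo_mmc_suspend(struct platform_device *pdev, pm_message_t state)
{
	struct mmc_host *mmc = platform_get_drvdata(pdev);

	/* The pm_message_t stays at the bus level; the core call drops it. */
	return mmc_suspend_host(mmc);
}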
 
@@ -1744,7 +1754,8 @@
 	host->max_clk =
 		(caps & SDHCI_CLOCK_BASE_MASK) >> SDHCI_CLOCK_BASE_SHIFT;
 	host->max_clk *= 1000000;
-	if (host->max_clk == 0) {
+	if (host->max_clk == 0 || host->quirks &
+			SDHCI_QUIRK_CAP_CLOCK_BASE_BROKEN) {
 		if (!host->ops->get_max_clock) {
 			printk(KERN_ERR
 			       "%s: Hardware doesn't specify base clock "
diff --git a/drivers/mmc/host/sdhci.h b/drivers/mmc/host/sdhci.h
index 842f46f..c846813 100644
--- a/drivers/mmc/host/sdhci.h
+++ b/drivers/mmc/host/sdhci.h
@@ -127,7 +127,7 @@
 #define  SDHCI_INT_DATA_MASK	(SDHCI_INT_DATA_END | SDHCI_INT_DMA_END | \
 		SDHCI_INT_DATA_AVAIL | SDHCI_INT_SPACE_AVAIL | \
 		SDHCI_INT_DATA_TIMEOUT | SDHCI_INT_DATA_CRC | \
-		SDHCI_INT_DATA_END_BIT | SDHCI_ADMA_ERROR)
+		SDHCI_INT_DATA_END_BIT | SDHCI_INT_ADMA_ERROR)
 #define SDHCI_INT_ALL_MASK	((unsigned int)-1)
 
 #define SDHCI_ACMD12_ERR	0x3C
@@ -236,6 +236,10 @@
 #define SDHCI_QUIRK_DELAY_AFTER_POWER			(1<<23)
 /* Controller uses SDCLK instead of TMCLK for data timeouts */
 #define SDHCI_QUIRK_DATA_TIMEOUT_USES_SDCLK		(1<<24)
+/* Controller reports wrong base clock capability */
+#define SDHCI_QUIRK_CAP_CLOCK_BASE_BROKEN		(1<<25)
+/* Controller cannot support End Attribute in NOP ADMA descriptor */
+#define SDHCI_QUIRK_NO_ENDATTR_IN_NOPDESC		(1<<26)
 
 	int			irq;		/* Device IRQ */
 	void __iomem *		ioaddr;		/* Mapped address */
@@ -294,12 +298,12 @@
 
 struct sdhci_ops {
 #ifdef CONFIG_MMC_SDHCI_IO_ACCESSORS
-	u32		(*readl)(struct sdhci_host *host, int reg);
-	u16		(*readw)(struct sdhci_host *host, int reg);
-	u8		(*readb)(struct sdhci_host *host, int reg);
-	void		(*writel)(struct sdhci_host *host, u32 val, int reg);
-	void		(*writew)(struct sdhci_host *host, u16 val, int reg);
-	void		(*writeb)(struct sdhci_host *host, u8 val, int reg);
+	u32		(*read_l)(struct sdhci_host *host, int reg);
+	u16		(*read_w)(struct sdhci_host *host, int reg);
+	u8		(*read_b)(struct sdhci_host *host, int reg);
+	void		(*write_l)(struct sdhci_host *host, u32 val, int reg);
+	void		(*write_w)(struct sdhci_host *host, u16 val, int reg);
+	void		(*write_b)(struct sdhci_host *host, u8 val, int reg);
 #endif
 
 	void	(*set_clock)(struct sdhci_host *host, unsigned int clock);
@@ -314,48 +318,48 @@
 
 static inline void sdhci_writel(struct sdhci_host *host, u32 val, int reg)
 {
-	if (unlikely(host->ops->writel))
-		host->ops->writel(host, val, reg);
+	if (unlikely(host->ops->write_l))
+		host->ops->write_l(host, val, reg);
 	else
 		writel(val, host->ioaddr + reg);
 }
 
 static inline void sdhci_writew(struct sdhci_host *host, u16 val, int reg)
 {
-	if (unlikely(host->ops->writew))
-		host->ops->writew(host, val, reg);
+	if (unlikely(host->ops->write_w))
+		host->ops->write_w(host, val, reg);
 	else
 		writew(val, host->ioaddr + reg);
 }
 
 static inline void sdhci_writeb(struct sdhci_host *host, u8 val, int reg)
 {
-	if (unlikely(host->ops->writeb))
-		host->ops->writeb(host, val, reg);
+	if (unlikely(host->ops->write_b))
+		host->ops->write_b(host, val, reg);
 	else
 		writeb(val, host->ioaddr + reg);
 }
 
 static inline u32 sdhci_readl(struct sdhci_host *host, int reg)
 {
-	if (unlikely(host->ops->readl))
-		return host->ops->readl(host, reg);
+	if (unlikely(host->ops->read_l))
+		return host->ops->read_l(host, reg);
 	else
 		return readl(host->ioaddr + reg);
 }
 
 static inline u16 sdhci_readw(struct sdhci_host *host, int reg)
 {
-	if (unlikely(host->ops->readw))
-		return host->ops->readw(host, reg);
+	if (unlikely(host->ops->read_w))
+		return host->ops->read_w(host, reg);
 	else
 		return readw(host->ioaddr + reg);
 }
 
 static inline u8 sdhci_readb(struct sdhci_host *host, int reg)
 {
-	if (unlikely(host->ops->readb))
-		return host->ops->readb(host, reg);
+	if (unlikely(host->ops->read_b))
+		return host->ops->read_b(host, reg);
 	else
 		return readb(host->ioaddr + reg);
 }
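The read_l/write_l spellings avoid colliding with readl/writel, which are macros on some architectures and could shadow the old member names. These hooks (under CONFIG_MMC_SDHCI_IO_ACCESSORS) exist for controllers with odd register decoding; a hypothetical example that emulates 16-bit accesses on a 32-bit-only bus:

#include <linux/io.h>
#include "sdhci.h"

static u16 foo_read_w(struct sdhci_host *host, int reg)
{
	u32 word = readl(host->ioaddr + (reg & ~3));

	return word >> ((reg & 3) * 8);
}

static void foo_write_w(struct sdhci_host *host, u16 val, int reg)
{
	int shift = (reg & 3) * 8;
	u32 word = readl(host->ioaddr + (reg & ~3));

	word &= ~(0xffffu << shift);
	word |= (u32)val << shift;
	writel(word, host->ioaddr + (reg & ~3));
}

static struct sdhci_ops foo_ops = {
	.read_w		= foo_read_w,
	.write_w	= foo_write_w,
};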
diff --git a/drivers/mmc/host/sdricoh_cs.c b/drivers/mmc/host/sdricoh_cs.c
index cb41e9c..e7507af 100644
--- a/drivers/mmc/host/sdricoh_cs.c
+++ b/drivers/mmc/host/sdricoh_cs.c
@@ -519,7 +519,7 @@
 {
 	struct mmc_host *mmc = link->priv;
 	dev_dbg(&link->dev, "suspend\n");
-	mmc_suspend_host(mmc, PMSG_SUSPEND);
+	mmc_suspend_host(mmc);
 	return 0;
 }
 
diff --git a/drivers/mmc/host/sh_mmcif.c b/drivers/mmc/host/sh_mmcif.c
new file mode 100644
index 0000000..eb97830
--- /dev/null
+++ b/drivers/mmc/host/sh_mmcif.c
@@ -0,0 +1,965 @@
+/*
+ * MMCIF eMMC driver.
+ *
+ * Copyright (C) 2010 Renesas Solutions Corp.
+ * Yusuke Goda <yusuke.goda.sx@renesas.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License.
+ *
+ *
+ * TODO
+ *  1. DMA
+ *  2. Power management
+ *  3. Handle MMC errors better
+ *
+ */
+
+#include <linux/dma-mapping.h>
+#include <linux/mmc/host.h>
+#include <linux/mmc/card.h>
+#include <linux/mmc/core.h>
+#include <linux/mmc/mmc.h>
+#include <linux/mmc/sdio.h>
+#include <linux/delay.h>
+#include <linux/platform_device.h>
+#include <linux/clk.h>
+#include <linux/mmc/sh_mmcif.h>
+
+#define DRIVER_NAME	"sh_mmcif"
+#define DRIVER_VERSION	"2010-04-28"
+
+#define MMCIF_CE_CMD_SET	0x00000000
+#define MMCIF_CE_ARG		0x00000008
+#define MMCIF_CE_ARG_CMD12	0x0000000C
+#define MMCIF_CE_CMD_CTRL	0x00000010
+#define MMCIF_CE_BLOCK_SET	0x00000014
+#define MMCIF_CE_CLK_CTRL	0x00000018
+#define MMCIF_CE_BUF_ACC	0x0000001C
+#define MMCIF_CE_RESP3		0x00000020
+#define MMCIF_CE_RESP2		0x00000024
+#define MMCIF_CE_RESP1		0x00000028
+#define MMCIF_CE_RESP0		0x0000002C
+#define MMCIF_CE_RESP_CMD12	0x00000030
+#define MMCIF_CE_DATA		0x00000034
+#define MMCIF_CE_INT		0x00000040
+#define MMCIF_CE_INT_MASK	0x00000044
+#define MMCIF_CE_HOST_STS1	0x00000048
+#define MMCIF_CE_HOST_STS2	0x0000004C
+#define MMCIF_CE_VERSION	0x0000007C
+
+/* CE_CMD_SET */
+#define CMD_MASK		0x3f000000
+#define CMD_SET_RTYP_NO		((0 << 23) | (0 << 22))
+#define CMD_SET_RTYP_6B		((0 << 23) | (1 << 22)) /* R1/R1b/R3/R4/R5 */
+#define CMD_SET_RTYP_17B	((1 << 23) | (0 << 22)) /* R2 */
+#define CMD_SET_RBSY		(1 << 21) /* R1b */
+#define CMD_SET_CCSEN		(1 << 20)
+#define CMD_SET_WDAT		(1 << 19) /* 1: on data, 0: no data */
+#define CMD_SET_DWEN		(1 << 18) /* 1: write, 0: read */
+#define CMD_SET_CMLTE		(1 << 17) /* 1: multi block trans, 0: single */
+#define CMD_SET_CMD12EN		(1 << 16) /* 1: CMD12 auto issue */
+#define CMD_SET_RIDXC_INDEX	((0 << 15) | (0 << 14)) /* index check */
+#define CMD_SET_RIDXC_BITS	((0 << 15) | (1 << 14)) /* check bits check */
+#define CMD_SET_RIDXC_NO	((1 << 15) | (0 << 14)) /* no check */
+#define CMD_SET_CRC7C		((0 << 13) | (0 << 12)) /* CRC7 check */
+#define CMD_SET_CRC7C_BITS	((0 << 13) | (1 << 12)) /* check bits check */
+#define CMD_SET_CRC7C_INTERNAL	((1 << 13) | (0 << 12)) /* internal CRC7 check */
+#define CMD_SET_CRC16C		(1 << 10) /* 0: CRC16 check */
+#define CMD_SET_CRCSTE		(1 << 8) /* 1: not receive CRC status */
+#define CMD_SET_TBIT		(1 << 7) /* 1: transmission bit "Low" */
+#define CMD_SET_OPDM		(1 << 6) /* 1: open/drain */
+#define CMD_SET_CCSH		(1 << 5)
+#define CMD_SET_DATW_1		((0 << 1) | (0 << 0)) /* 1bit */
+#define CMD_SET_DATW_4		((0 << 1) | (1 << 0)) /* 4bit */
+#define CMD_SET_DATW_8		((1 << 1) | (0 << 0)) /* 8bit */
+
+/* CE_CMD_CTRL */
+#define CMD_CTRL_BREAK		(1 << 0)
+
+/* CE_BLOCK_SET */
+#define BLOCK_SIZE_MASK		0x0000ffff
+
+/* CE_CLK_CTRL */
+#define CLK_ENABLE		(1 << 24) /* 1: output mmc clock */
+#define CLK_CLEAR		((1 << 19) | (1 << 18) | (1 << 17) | (1 << 16))
+#define CLK_SUP_PCLK		((1 << 19) | (1 << 18) | (1 << 17) | (1 << 16))
+#define SRSPTO_256		((1 << 13) | (0 << 12)) /* resp timeout */
+#define SRBSYTO_29		((1 << 11) | (1 << 10) |	\
+				 (1 << 9) | (1 << 8)) /* resp busy timeout */
+#define SRWDTO_29		((1 << 7) | (1 << 6) |		\
+				 (1 << 5) | (1 << 4)) /* read/write timeout */
+#define SCCSTO_29		((1 << 3) | (1 << 2) |		\
+				 (1 << 1) | (1 << 0)) /* ccs timeout */
+
+/* CE_BUF_ACC */
+#define BUF_ACC_DMAWEN		(1 << 25)
+#define BUF_ACC_DMAREN		(1 << 24)
+#define BUF_ACC_BUSW_32		(0 << 17)
+#define BUF_ACC_BUSW_16		(1 << 17)
+#define BUF_ACC_ATYP		(1 << 16)
+
+/* CE_INT */
+#define INT_CCSDE		(1 << 29)
+#define INT_CMD12DRE		(1 << 26)
+#define INT_CMD12RBE		(1 << 25)
+#define INT_CMD12CRE		(1 << 24)
+#define INT_DTRANE		(1 << 23)
+#define INT_BUFRE		(1 << 22)
+#define INT_BUFWEN		(1 << 21)
+#define INT_BUFREN		(1 << 20)
+#define INT_CCSRCV		(1 << 19)
+#define INT_RBSYE		(1 << 17)
+#define INT_CRSPE		(1 << 16)
+#define INT_CMDVIO		(1 << 15)
+#define INT_BUFVIO		(1 << 14)
+#define INT_WDATERR		(1 << 11)
+#define INT_RDATERR		(1 << 10)
+#define INT_RIDXERR		(1 << 9)
+#define INT_RSPERR		(1 << 8)
+#define INT_CCSTO		(1 << 5)
+#define INT_CRCSTO		(1 << 4)
+#define INT_WDATTO		(1 << 3)
+#define INT_RDATTO		(1 << 2)
+#define INT_RBSYTO		(1 << 1)
+#define INT_RSPTO		(1 << 0)
+#define INT_ERR_STS		(INT_CMDVIO | INT_BUFVIO | INT_WDATERR |  \
+				 INT_RDATERR | INT_RIDXERR | INT_RSPERR | \
+				 INT_CCSTO | INT_CRCSTO | INT_WDATTO |	  \
+				 INT_RDATTO | INT_RBSYTO | INT_RSPTO)
+
+/* CE_INT_MASK */
+#define MASK_ALL		0x00000000
+#define MASK_MCCSDE		(1 << 29)
+#define MASK_MCMD12DRE		(1 << 26)
+#define MASK_MCMD12RBE		(1 << 25)
+#define MASK_MCMD12CRE		(1 << 24)
+#define MASK_MDTRANE		(1 << 23)
+#define MASK_MBUFRE		(1 << 22)
+#define MASK_MBUFWEN		(1 << 21)
+#define MASK_MBUFREN		(1 << 20)
+#define MASK_MCCSRCV		(1 << 19)
+#define MASK_MRBSYE		(1 << 17)
+#define MASK_MCRSPE		(1 << 16)
+#define MASK_MCMDVIO		(1 << 15)
+#define MASK_MBUFVIO		(1 << 14)
+#define MASK_MWDATERR		(1 << 11)
+#define MASK_MRDATERR		(1 << 10)
+#define MASK_MRIDXERR		(1 << 9)
+#define MASK_MRSPERR		(1 << 8)
+#define MASK_MCCSTO		(1 << 5)
+#define MASK_MCRCSTO		(1 << 4)
+#define MASK_MWDATTO		(1 << 3)
+#define MASK_MRDATTO		(1 << 2)
+#define MASK_MRBSYTO		(1 << 1)
+#define MASK_MRSPTO		(1 << 0)
+
+/* CE_HOST_STS1 */
+#define STS1_CMDSEQ		(1 << 31)
+
+/* CE_HOST_STS2 */
+#define STS2_CRCSTE		(1 << 31)
+#define STS2_CRC16E		(1 << 30)
+#define STS2_AC12CRCE		(1 << 29)
+#define STS2_RSPCRC7E		(1 << 28)
+#define STS2_CRCSTEBE		(1 << 27)
+#define STS2_RDATEBE		(1 << 26)
+#define STS2_AC12REBE		(1 << 25)
+#define STS2_RSPEBE		(1 << 24)
+#define STS2_AC12IDXE		(1 << 23)
+#define STS2_RSPIDXE		(1 << 22)
+#define STS2_CCSTO		(1 << 15)
+#define STS2_RDATTO		(1 << 14)
+#define STS2_DATBSYTO		(1 << 13)
+#define STS2_CRCSTTO		(1 << 12)
+#define STS2_AC12BSYTO		(1 << 11)
+#define STS2_RSPBSYTO		(1 << 10)
+#define STS2_AC12RSPTO		(1 << 9)
+#define STS2_RSPTO		(1 << 8)
+#define STS2_CRC_ERR		(STS2_CRCSTE | STS2_CRC16E |		\
+				 STS2_AC12CRCE | STS2_RSPCRC7E | STS2_CRCSTEBE)
+#define STS2_TIMEOUT_ERR	(STS2_CCSTO | STS2_RDATTO |		\
+				 STS2_DATBSYTO | STS2_CRCSTTO |		\
+				 STS2_AC12BSYTO | STS2_RSPBSYTO |	\
+				 STS2_AC12RSPTO | STS2_RSPTO)
+
+/* CE_VERSION */
+#define SOFT_RST_ON		(1 << 31)
+#define SOFT_RST_OFF		(0 << 31)
+
+#define CLKDEV_EMMC_DATA	52000000 /* 52MHz */
+#define CLKDEV_MMC_DATA		20000000 /* 20MHz */
+#define CLKDEV_INIT		400000   /* 400 KHz */
+
+struct sh_mmcif_host {
+	struct mmc_host *mmc;
+	struct mmc_data *data;
+	struct mmc_command *cmd;
+	struct platform_device *pd;
+	struct clk *hclk;
+	unsigned int clk;
+	int bus_width;
+	u16 wait_int;
+	u16 sd_error;
+	long timeout;
+	void __iomem *addr;
+	wait_queue_head_t intr_wait;
+};
+
+static inline u32 sh_mmcif_readl(struct sh_mmcif_host *host, unsigned int reg)
+{
+	return readl(host->addr + reg);
+}
+
+static inline void sh_mmcif_writel(struct sh_mmcif_host *host,
+					unsigned int reg, u32 val)
+{
+	writel(val, host->addr + reg);
+}
+
+static inline void sh_mmcif_bitset(struct sh_mmcif_host *host,
+					unsigned int reg, u32 val)
+{
+	writel(val | sh_mmcif_readl(host, reg), host->addr + reg);
+}
+
+static inline void sh_mmcif_bitclr(struct sh_mmcif_host *host,
+					unsigned int reg, u32 val)
+{
+	writel(~val & sh_mmcif_readl(host, reg), host->addr + reg);
+}
+
+static void sh_mmcif_clock_control(struct sh_mmcif_host *host, unsigned int clk)
+{
+	struct sh_mmcif_plat_data *p = host->pd->dev.platform_data;
+
+	sh_mmcif_bitclr(host, MMCIF_CE_CLK_CTRL, CLK_ENABLE);
+	sh_mmcif_bitclr(host, MMCIF_CE_CLK_CTRL, CLK_CLEAR);
+
+	if (!clk)
+		return;
+	if (p->sup_pclk && clk == host->clk)
+		sh_mmcif_bitset(host, MMCIF_CE_CLK_CTRL, CLK_SUP_PCLK);
+	else
+		sh_mmcif_bitset(host, MMCIF_CE_CLK_CTRL, CLK_CLEAR &
+			(ilog2(__rounddown_pow_of_two(host->clk / clk)) << 16));
+
+	sh_mmcif_bitset(host, MMCIF_CE_CLK_CTRL, CLK_ENABLE);
+}
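The divider written here is the log2 of the largest power of two not exceeding host->clk / clk. A worked example, assuming a 100 MHz interface clock and the 400 kHz initialization rate:

/* Worked example (illustrative rates): host->clk = 100 MHz, clk = 400 kHz
 *
 *	host->clk / clk			= 250
 *	__rounddown_pow_of_two(250)	= 128
 *	ilog2(128)			= 7
 *	CLK_CLEAR & (7 << 16)		= 0x00070000
 *
 * so divider field 7 lands in bits 19:16 of CE_CLK_CTRL. Assuming the
 * hardware divides by 2^(field + 1), the card clock becomes
 * 100 MHz / 256 = ~390 kHz, which lines up with the driver's later
 * f_min = f_max / 256 choice for clocks in this range.
 */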
+
+static void sh_mmcif_sync_reset(struct sh_mmcif_host *host)
+{
+	u32 tmp;
+
+	tmp = 0x010f0000 & sh_mmcif_readl(host, MMCIF_CE_CLK_CTRL);
+
+	sh_mmcif_writel(host, MMCIF_CE_VERSION, SOFT_RST_ON);
+	sh_mmcif_writel(host, MMCIF_CE_VERSION, SOFT_RST_OFF);
+	sh_mmcif_bitset(host, MMCIF_CE_CLK_CTRL, tmp |
+		SRSPTO_256 | SRBSYTO_29 | SRWDTO_29 | SCCSTO_29);
+	/* byte swap on */
+	sh_mmcif_bitset(host, MMCIF_CE_BUF_ACC, BUF_ACC_ATYP);
+}
+
+static int sh_mmcif_error_manage(struct sh_mmcif_host *host)
+{
+	u32 state1, state2;
+	int ret, timeout = 10000000;
+
+	host->sd_error = 0;
+	host->wait_int = 0;
+
+	state1 = sh_mmcif_readl(host, MMCIF_CE_HOST_STS1);
+	state2 = sh_mmcif_readl(host, MMCIF_CE_HOST_STS2);
+	pr_debug("%s: ERR HOST_STS1 = %08x\n", \
+			DRIVER_NAME, sh_mmcif_readl(host, MMCIF_CE_HOST_STS1));
+	pr_debug("%s: ERR HOST_STS2 = %08x\n", \
+			DRIVER_NAME, sh_mmcif_readl(host, MMCIF_CE_HOST_STS2));
+
+	if (state1 & STS1_CMDSEQ) {
+		sh_mmcif_bitset(host, MMCIF_CE_CMD_CTRL, CMD_CTRL_BREAK);
+		sh_mmcif_bitset(host, MMCIF_CE_CMD_CTRL, ~CMD_CTRL_BREAK);
+		while (1) {
+			timeout--;
+			if (timeout < 0) {
+				pr_err(DRIVER_NAME": Forceed end of " \
+					"command sequence timeout err\n");
+				return -EIO;
+			}
+			if (!(sh_mmcif_readl(host, MMCIF_CE_HOST_STS1)
+								& STS1_CMDSEQ))
+				break;
+			mdelay(1);
+		}
+		sh_mmcif_sync_reset(host);
+		pr_debug(DRIVER_NAME": Forced end of command sequence\n");
+		return -EIO;
+	}
+
+	if (state2 & STS2_CRC_ERR) {
+		pr_debug(DRIVER_NAME": Happened CRC error\n");
+		ret = -EIO;
+	} else if (state2 & STS2_TIMEOUT_ERR) {
+		pr_debug(DRIVER_NAME": Happened Timeout error\n");
+		ret = -ETIMEDOUT;
+	} else {
+		pr_debug(DRIVER_NAME": Happened End/Index error\n");
+		ret = -EIO;
+	}
+	return ret;
+}
+
+static int sh_mmcif_single_read(struct sh_mmcif_host *host,
+					struct mmc_request *mrq)
+{
+	struct mmc_data *data = mrq->data;
+	long time;
+	u32 blocksize, i, *p = sg_virt(data->sg);
+
+	host->wait_int = 0;
+
+	/* buf read enable */
+	sh_mmcif_bitset(host, MMCIF_CE_INT_MASK, MASK_MBUFREN);
+	time = wait_event_interruptible_timeout(host->intr_wait,
+			host->wait_int == 1 ||
+			host->sd_error == 1, host->timeout);
+	if (host->wait_int != 1 && (time == 0 || host->sd_error != 0))
+		return sh_mmcif_error_manage(host);
+
+	host->wait_int = 0;
+	blocksize = (BLOCK_SIZE_MASK &
+			sh_mmcif_readl(host, MMCIF_CE_BLOCK_SET)) + 3;
+	for (i = 0; i < blocksize / 4; i++)
+		*p++ = sh_mmcif_readl(host, MMCIF_CE_DATA);
+
+	/* buffer read end */
+	sh_mmcif_bitset(host, MMCIF_CE_INT_MASK, MASK_MBUFRE);
+	time = wait_event_interruptible_timeout(host->intr_wait,
+			host->wait_int == 1 ||
+			host->sd_error == 1, host->timeout);
+	if (host->wait_int != 1 && (time == 0 || host->sd_error != 0))
+		return sh_mmcif_error_manage(host);
+
+	host->wait_int = 0;
+	return 0;
+}
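Every PIO path in this driver repeats one handshake: unmask a single completion interrupt, sleep on intr_wait until the ISR sets wait_int (or sd_error), and treat a timeout or error flag as failure. Distilled into one helper, reusing the driver's own fields and functions (the helper itself is an illustration, not driver code):

/* Returns 0 on the expected interrupt, negative on timeout or error. */
static int mmcif_wait_for_irq(struct sh_mmcif_host *host, u32 mask_bit)
{
	long time;

	sh_mmcif_bitset(host, MMCIF_CE_INT_MASK, mask_bit);
	time = wait_event_interruptible_timeout(host->intr_wait,
			host->wait_int == 1 || host->sd_error == 1,
			host->timeout);
	if (host->wait_int != 1 && (time == 0 || host->sd_error != 0))
		return sh_mmcif_error_manage(host);

	host->wait_int = 0;
	return 0;
}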
+
+static int sh_mmcif_multi_read(struct sh_mmcif_host *host,
+					struct mmc_request *mrq)
+{
+	struct mmc_data *data = mrq->data;
+	long time;
+	u32 blocksize, i, j, sec, *p;
+
+	blocksize = BLOCK_SIZE_MASK & sh_mmcif_readl(host, MMCIF_CE_BLOCK_SET);
+	for (j = 0; j < data->sg_len; j++) {
+		p = sg_virt(data->sg);
+		host->wait_int = 0;
+		for (sec = 0; sec < data->sg->length / blocksize; sec++) {
+			sh_mmcif_bitset(host, MMCIF_CE_INT_MASK, MASK_MBUFREN);
+			/* buf read enable */
+			time = wait_event_interruptible_timeout(host->intr_wait,
+				host->wait_int == 1 ||
+				host->sd_error == 1, host->timeout);
+
+			if (host->wait_int != 1 &&
+			    (time == 0 || host->sd_error != 0))
+				return sh_mmcif_error_manage(host);
+
+			host->wait_int = 0;
+			for (i = 0; i < blocksize / 4; i++)
+				*p++ = sh_mmcif_readl(host, MMCIF_CE_DATA);
+		}
+		if (j < data->sg_len - 1)
+			data->sg++;
+	}
+	return 0;
+}
+
+static int sh_mmcif_single_write(struct sh_mmcif_host *host,
+					struct mmc_request *mrq)
+{
+	struct mmc_data *data = mrq->data;
+	long time;
+	u32 blocksize, i, *p = sg_virt(data->sg);
+
+	host->wait_int = 0;
+	sh_mmcif_bitset(host, MMCIF_CE_INT_MASK, MASK_MBUFWEN);
+
+	/* buf write enable */
+	time = wait_event_interruptible_timeout(host->intr_wait,
+			host->wait_int == 1 ||
+			host->sd_error == 1, host->timeout);
+	if (host->wait_int != 1 && (time == 0 || host->sd_error != 0))
+		return sh_mmcif_error_manage(host);
+
+	host->wait_int = 0;
+	blocksize = (BLOCK_SIZE_MASK &
+			sh_mmcif_readl(host, MMCIF_CE_BLOCK_SET)) + 3;
+	for (i = 0; i < blocksize / 4; i++)
+		sh_mmcif_writel(host, MMCIF_CE_DATA, *p++);
+
+	/* buffer write end */
+	sh_mmcif_bitset(host, MMCIF_CE_INT_MASK, MASK_MDTRANE);
+
+	time = wait_event_interruptible_timeout(host->intr_wait,
+			host->wait_int == 1 ||
+			host->sd_error == 1, host->timeout);
+	if (host->wait_int != 1 && (time == 0 || host->sd_error != 0))
+		return sh_mmcif_error_manage(host);
+
+	host->wait_int = 0;
+	return 0;
+}
+
+static int sh_mmcif_multi_write(struct sh_mmcif_host *host,
+						struct mmc_request *mrq)
+{
+	struct mmc_data *data = mrq->data;
+	long time;
+	u32 i, sec, j, blocksize, *p;
+
+	blocksize = BLOCK_SIZE_MASK & sh_mmcif_readl(host, MMCIF_CE_BLOCK_SET);
+
+	for (j = 0; j < data->sg_len; j++) {
+		p = sg_virt(data->sg);
+		host->wait_int = 0;
+		for (sec = 0; sec < data->sg->length / blocksize; sec++) {
+			sh_mmcif_bitset(host, MMCIF_CE_INT_MASK, MASK_MBUFWEN);
+			/* buf write enable*/
+			time = wait_event_interruptible_timeout(host->intr_wait,
+				host->wait_int == 1 ||
+				host->sd_error == 1, host->timeout);
+
+			if (host->wait_int != 1 &&
+			    (time == 0 || host->sd_error != 0))
+				return sh_mmcif_error_manage(host);
+
+			host->wait_int = 0;
+			for (i = 0; i < blocksize / 4; i++)
+				sh_mmcif_writel(host, MMCIF_CE_DATA, *p++);
+		}
+		if (j < data->sg_len - 1)
+			data->sg++;
+	}
+	return 0;
+}
+
+static void sh_mmcif_get_response(struct sh_mmcif_host *host,
+						struct mmc_command *cmd)
+{
+	if (cmd->flags & MMC_RSP_136) {
+		cmd->resp[0] = sh_mmcif_readl(host, MMCIF_CE_RESP3);
+		cmd->resp[1] = sh_mmcif_readl(host, MMCIF_CE_RESP2);
+		cmd->resp[2] = sh_mmcif_readl(host, MMCIF_CE_RESP1);
+		cmd->resp[3] = sh_mmcif_readl(host, MMCIF_CE_RESP0);
+	} else
+		cmd->resp[0] = sh_mmcif_readl(host, MMCIF_CE_RESP0);
+}
+
+static void sh_mmcif_get_cmd12response(struct sh_mmcif_host *host,
+						struct mmc_command *cmd)
+{
+	cmd->resp[0] = sh_mmcif_readl(host, MMCIF_CE_RESP_CMD12);
+}
+
+static u32 sh_mmcif_set_cmd(struct sh_mmcif_host *host,
+		struct mmc_request *mrq, struct mmc_command *cmd, u32 opc)
+{
+	u32 tmp = 0;
+
+	/* Response Type check */
+	switch (mmc_resp_type(cmd)) {
+	case MMC_RSP_NONE:
+		tmp |= CMD_SET_RTYP_NO;
+		break;
+	case MMC_RSP_R1:
+	case MMC_RSP_R1B:
+	case MMC_RSP_R3:
+		tmp |= CMD_SET_RTYP_6B;
+		break;
+	case MMC_RSP_R2:
+		tmp |= CMD_SET_RTYP_17B;
+		break;
+	default:
+		pr_err(DRIVER_NAME": Not support type response.\n");
+		break;
+	}
+	switch (opc) {
+	/* RBSY */
+	case MMC_SWITCH:
+	case MMC_STOP_TRANSMISSION:
+	case MMC_SET_WRITE_PROT:
+	case MMC_CLR_WRITE_PROT:
+	case MMC_ERASE:
+	case MMC_GEN_CMD:
+		tmp |= CMD_SET_RBSY;
+		break;
+	}
+	/* WDAT / DATW */
+	if (host->data) {
+		tmp |= CMD_SET_WDAT;
+		switch (host->bus_width) {
+		case MMC_BUS_WIDTH_1:
+			tmp |= CMD_SET_DATW_1;
+			break;
+		case MMC_BUS_WIDTH_4:
+			tmp |= CMD_SET_DATW_4;
+			break;
+		case MMC_BUS_WIDTH_8:
+			tmp |= CMD_SET_DATW_8;
+			break;
+		default:
+			pr_err(DRIVER_NAME": Not support bus width.\n");
+			break;
+		}
+	}
+	/* DWEN */
+	if (opc == MMC_WRITE_BLOCK || opc == MMC_WRITE_MULTIPLE_BLOCK)
+		tmp |= CMD_SET_DWEN;
+	/* CMLTE/CMD12EN */
+	if (opc == MMC_READ_MULTIPLE_BLOCK || opc == MMC_WRITE_MULTIPLE_BLOCK) {
+		tmp |= CMD_SET_CMLTE | CMD_SET_CMD12EN;
+		sh_mmcif_bitset(host, MMCIF_CE_BLOCK_SET,
+					mrq->data->blocks << 16);
+	}
+	/* RIDXC[1:0] check bits */
+	if (opc == MMC_SEND_OP_COND || opc == MMC_ALL_SEND_CID ||
+	    opc == MMC_SEND_CSD || opc == MMC_SEND_CID)
+		tmp |= CMD_SET_RIDXC_BITS;
+	/* RCRC7C[1:0] check bits */
+	if (opc == MMC_SEND_OP_COND)
+		tmp |= CMD_SET_CRC7C_BITS;
+	/* RCRC7C[1:0] internal CRC7 */
+	if (opc == MMC_ALL_SEND_CID ||
+		opc == MMC_SEND_CSD || opc == MMC_SEND_CID)
+		tmp |= CMD_SET_CRC7C_INTERNAL;
+
+	return (opc << 24) | tmp;
+}
+
+static u32 sh_mmcif_data_trans(struct sh_mmcif_host *host,
+				struct mmc_request *mrq, u32 opc)
+{
+	u32 ret;
+
+	switch (opc) {
+	case MMC_READ_MULTIPLE_BLOCK:
+		ret = sh_mmcif_multi_read(host, mrq);
+		break;
+	case MMC_WRITE_MULTIPLE_BLOCK:
+		ret = sh_mmcif_multi_write(host, mrq);
+		break;
+	case MMC_WRITE_BLOCK:
+		ret = sh_mmcif_single_write(host, mrq);
+		break;
+	case MMC_READ_SINGLE_BLOCK:
+	case MMC_SEND_EXT_CSD:
+		ret = sh_mmcif_single_read(host, mrq);
+		break;
+	default:
+		pr_err(DRIVER_NAME": NOT SUPPORT CMD = d'%08d\n", opc);
+		ret = -EINVAL;
+		break;
+	}
+	return ret;
+}
+
+static void sh_mmcif_start_cmd(struct sh_mmcif_host *host,
+			struct mmc_request *mrq, struct mmc_command *cmd)
+{
+	long time;
+	int ret = 0, mask = 0;
+	u32 opc = cmd->opcode;
+
+	host->cmd = cmd;
+
+	switch (opc) {
+	/* response busy check */
+	case MMC_SWITCH:
+	case MMC_STOP_TRANSMISSION:
+	case MMC_SET_WRITE_PROT:
+	case MMC_CLR_WRITE_PROT:
+	case MMC_ERASE:
+	case MMC_GEN_CMD:
+		mask = MASK_MRBSYE;
+		break;
+	default:
+		mask = MASK_MCRSPE;
+		break;
+	}
+	mask |=	MASK_MCMDVIO | MASK_MBUFVIO | MASK_MWDATERR |
+		MASK_MRDATERR | MASK_MRIDXERR | MASK_MRSPERR |
+		MASK_MCCSTO | MASK_MCRCSTO | MASK_MWDATTO |
+		MASK_MRDATTO | MASK_MRBSYTO | MASK_MRSPTO;
+
+	if (host->data) {
+		sh_mmcif_writel(host, MMCIF_CE_BLOCK_SET, 0);
+		sh_mmcif_writel(host, MMCIF_CE_BLOCK_SET, mrq->data->blksz);
+	}
+	opc = sh_mmcif_set_cmd(host, mrq, cmd, opc);
+
+	sh_mmcif_writel(host, MMCIF_CE_INT, 0xD80430C0);
+	sh_mmcif_writel(host, MMCIF_CE_INT_MASK, mask);
+	/* set arg */
+	sh_mmcif_writel(host, MMCIF_CE_ARG, cmd->arg);
+	host->wait_int = 0;
+	/* set cmd */
+	sh_mmcif_writel(host, MMCIF_CE_CMD_SET, opc);
+
+	time = wait_event_interruptible_timeout(host->intr_wait,
+		host->wait_int == 1 || host->sd_error == 1, host->timeout);
+	if (host->wait_int != 1 && time == 0) {
+		cmd->error = sh_mmcif_error_manage(host);
+		return;
+	}
+	if (host->sd_error) {
+		switch (cmd->opcode) {
+		case MMC_ALL_SEND_CID:
+		case MMC_SELECT_CARD:
+		case MMC_APP_CMD:
+			cmd->error = -ETIMEDOUT;
+			break;
+		default:
+			pr_debug("%s: Cmd(d'%d) err\n",
+					DRIVER_NAME, cmd->opcode);
+			cmd->error = sh_mmcif_error_manage(host);
+			break;
+		}
+		host->sd_error = 0;
+		host->wait_int = 0;
+		return;
+	}
+	if (!(cmd->flags & MMC_RSP_PRESENT)) {
+		cmd->error = ret;
+		host->wait_int = 0;
+		return;
+	}
+	if (host->wait_int == 1) {
+		sh_mmcif_get_response(host, cmd);
+		host->wait_int = 0;
+	}
+	if (host->data) {
+		ret = sh_mmcif_data_trans(host, mrq, cmd->opcode);
+		if (ret < 0)
+			mrq->data->bytes_xfered = 0;
+		else
+			mrq->data->bytes_xfered =
+				mrq->data->blocks * mrq->data->blksz;
+	}
+	cmd->error = ret;
+}
+
+static void sh_mmcif_stop_cmd(struct sh_mmcif_host *host,
+		struct mmc_request *mrq, struct mmc_command *cmd)
+{
+	long time;
+
+	if (mrq->cmd->opcode == MMC_READ_MULTIPLE_BLOCK)
+		sh_mmcif_bitset(host, MMCIF_CE_INT_MASK, MASK_MCMD12DRE);
+	else if (mrq->cmd->opcode == MMC_WRITE_MULTIPLE_BLOCK)
+		sh_mmcif_bitset(host, MMCIF_CE_INT_MASK, MASK_MCMD12RBE);
+	else {
+		pr_err(DRIVER_NAME": not support stop cmd\n");
+		cmd->error = sh_mmcif_error_manage(host);
+		return;
+	}
+
+	time = wait_event_interruptible_timeout(host->intr_wait,
+			host->wait_int == 1 ||
+			host->sd_error == 1, host->timeout);
+	if (host->wait_int != 1 && (time == 0 || host->sd_error != 0)) {
+		cmd->error = sh_mmcif_error_manage(host);
+		return;
+	}
+	sh_mmcif_get_cmd12response(host, cmd);
+	host->wait_int = 0;
+	cmd->error = 0;
+}
+
+static void sh_mmcif_request(struct mmc_host *mmc, struct mmc_request *mrq)
+{
+	struct sh_mmcif_host *host = mmc_priv(mmc);
+
+	switch (mrq->cmd->opcode) {
+	/* MMCIF does not support SD/SDIO commands */
+	case SD_IO_SEND_OP_COND:
+	case MMC_APP_CMD:
+		mrq->cmd->error = -ETIMEDOUT;
+		mmc_request_done(mmc, mrq);
+		return;
+	case MMC_SEND_EXT_CSD: /* = SD_SEND_IF_COND (8) */
+		if (!mrq->data) {
+			/* send_if_cond cmd (not supported) */
+			mrq->cmd->error = -ETIMEDOUT;
+			mmc_request_done(mmc, mrq);
+			return;
+		}
+		break;
+	default:
+		break;
+	}
+	host->data = mrq->data;
+	sh_mmcif_start_cmd(host, mrq, mrq->cmd);
+	host->data = NULL;
+
+	if (mrq->cmd->error != 0) {
+		mmc_request_done(mmc, mrq);
+		return;
+	}
+	if (mrq->stop)
+		sh_mmcif_stop_cmd(host, mrq, mrq->stop);
+	mmc_request_done(mmc, mrq);
+}
+
+static void sh_mmcif_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
+{
+	struct sh_mmcif_host *host = mmc_priv(mmc);
+	struct sh_mmcif_plat_data *p = host->pd->dev.platform_data;
+
+	if (ios->power_mode == MMC_POWER_OFF) {
+		/* clock stop */
+		sh_mmcif_clock_control(host, 0);
+		if (p->down_pwr)
+			p->down_pwr(host->pd);
+		return;
+	} else if (ios->power_mode == MMC_POWER_UP) {
+		if (p->set_pwr)
+			p->set_pwr(host->pd, ios->power_mode);
+	}
+
+	if (ios->clock)
+		sh_mmcif_clock_control(host, ios->clock);
+
+	host->bus_width = ios->bus_width;
+}
+
+static struct mmc_host_ops sh_mmcif_ops = {
+	.request	= sh_mmcif_request,
+	.set_ios	= sh_mmcif_set_ios,
+};
+
+static void sh_mmcif_detect(struct mmc_host *mmc)
+{
+	mmc_detect_change(mmc, 0);
+}
+
+static irqreturn_t sh_mmcif_intr(int irq, void *dev_id)
+{
+	struct sh_mmcif_host *host = dev_id;
+	u32 state = 0;
+	int err = 0;
+
+	state = sh_mmcif_readl(host, MMCIF_CE_INT);
+
+	if (state & INT_RBSYE) {
+		sh_mmcif_writel(host, MMCIF_CE_INT, ~(INT_RBSYE | INT_CRSPE));
+		sh_mmcif_bitclr(host, MMCIF_CE_INT_MASK, MASK_MRBSYE);
+	} else if (state & INT_CRSPE) {
+		sh_mmcif_writel(host, MMCIF_CE_INT, ~INT_CRSPE);
+		sh_mmcif_bitclr(host, MMCIF_CE_INT_MASK, MASK_MCRSPE);
+	} else if (state & INT_BUFREN) {
+		sh_mmcif_writel(host, MMCIF_CE_INT, ~INT_BUFREN);
+		sh_mmcif_bitclr(host, MMCIF_CE_INT_MASK, MASK_MBUFREN);
+	} else if (state & INT_BUFWEN) {
+		sh_mmcif_writel(host, MMCIF_CE_INT, ~INT_BUFWEN);
+		sh_mmcif_bitclr(host, MMCIF_CE_INT_MASK, MASK_MBUFWEN);
+	} else if (state & INT_CMD12DRE) {
+		sh_mmcif_writel(host, MMCIF_CE_INT,
+			~(INT_CMD12DRE | INT_CMD12RBE |
+			  INT_CMD12CRE | INT_BUFRE));
+		sh_mmcif_bitclr(host, MMCIF_CE_INT_MASK, MASK_MCMD12DRE);
+	} else if (state & INT_BUFRE) {
+		sh_mmcif_writel(host, MMCIF_CE_INT, ~INT_BUFRE);
+		sh_mmcif_bitclr(host, MMCIF_CE_INT_MASK, MASK_MBUFRE);
+	} else if (state & INT_DTRANE) {
+		sh_mmcif_writel(host, MMCIF_CE_INT, ~INT_DTRANE);
+		sh_mmcif_bitclr(host, MMCIF_CE_INT_MASK, MASK_MDTRANE);
+	} else if (state & INT_CMD12RBE) {
+		sh_mmcif_writel(host, MMCIF_CE_INT,
+				~(INT_CMD12RBE | INT_CMD12CRE));
+		sh_mmcif_bitclr(host, MMCIF_CE_INT_MASK, MASK_MCMD12RBE);
+	} else if (state & INT_ERR_STS) {
+		/* err interrupts */
+		sh_mmcif_writel(host, MMCIF_CE_INT, ~state);
+		sh_mmcif_bitclr(host, MMCIF_CE_INT_MASK, state);
+		err = 1;
+	} else {
+		pr_debug("%s: Not support int\n", DRIVER_NAME);
+		sh_mmcif_writel(host, MMCIF_CE_INT, ~state);
+		sh_mmcif_bitclr(host, MMCIF_CE_INT_MASK, state);
+		err = 1;
+	}
+	if (err) {
+		host->sd_error = 1;
+		pr_debug("%s: int err state = %08x\n", DRIVER_NAME, state);
+	}
+	host->wait_int = 1;
+	wake_up(&host->intr_wait);
+
+	return IRQ_HANDLED;
+}
+
+static int __devinit sh_mmcif_probe(struct platform_device *pdev)
+{
+	int ret = 0, irq[2];
+	struct mmc_host *mmc;
+	struct sh_mmcif_host *host = NULL;
+	struct sh_mmcif_plat_data *pd = NULL;
+	struct resource *res;
+	void __iomem *reg;
+	char clk_name[8];
+
+	irq[0] = platform_get_irq(pdev, 0);
+	irq[1] = platform_get_irq(pdev, 1);
+	if (irq[0] < 0 || irq[1] < 0) {
+		pr_err(DRIVER_NAME": Get irq error\n");
+		return -ENXIO;
+	}
+	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	if (!res) {
+		dev_err(&pdev->dev, "platform_get_resource error.\n");
+		return -ENXIO;
+	}
+	reg = ioremap(res->start, resource_size(res));
+	if (!reg) {
+		dev_err(&pdev->dev, "ioremap error.\n");
+		return -ENOMEM;
+	}
+	pd = (struct sh_mmcif_plat_data *)(pdev->dev.platform_data);
+	if (!pd) {
+		dev_err(&pdev->dev, "sh_mmcif plat data error.\n");
+		ret = -ENXIO;
+		goto clean_up;
+	}
+	mmc = mmc_alloc_host(sizeof(struct sh_mmcif_host), &pdev->dev);
+	if (!mmc) {
+		ret = -ENOMEM;
+		goto clean_up;
+	}
+	host		= mmc_priv(mmc);
+	host->mmc	= mmc;
+	host->addr	= reg;
+	host->timeout	= 1000;
+
+	snprintf(clk_name, sizeof(clk_name), "mmc%d", pdev->id);
+	host->hclk = clk_get(&pdev->dev, clk_name);
+	if (IS_ERR(host->hclk)) {
+		dev_err(&pdev->dev, "cannot get clock \"%s\"\n", clk_name);
+		ret = PTR_ERR(host->hclk);
+		goto clean_up1;
+	}
+	clk_enable(host->hclk);
+	host->clk = clk_get_rate(host->hclk);
+	host->pd = pdev;
+
+	init_waitqueue_head(&host->intr_wait);
+
+	mmc->ops = &sh_mmcif_ops;
+	mmc->f_max = host->clk;
+	/* close to 400KHz */
+	if (mmc->f_max < 51200000)
+		mmc->f_min = mmc->f_max / 128;
+	else if (mmc->f_max < 102400000)
+		mmc->f_min = mmc->f_max / 256;
+	else
+		mmc->f_min = mmc->f_max / 512;
+	if (pd->ocr)
+		mmc->ocr_avail = pd->ocr;
+	mmc->caps = MMC_CAP_MMC_HIGHSPEED;
+	if (pd->caps)
+		mmc->caps |= pd->caps;
+	mmc->max_phys_segs = 128;
+	mmc->max_hw_segs = 128;
+	mmc->max_blk_size = 512;
+	mmc->max_blk_count = 65535;
+	mmc->max_req_size = mmc->max_blk_size * mmc->max_blk_count;
+	mmc->max_seg_size = mmc->max_req_size;
+
+	sh_mmcif_sync_reset(host);
+	platform_set_drvdata(pdev, host);
+	mmc_add_host(mmc);
+
+	ret = request_irq(irq[0], sh_mmcif_intr, 0, "sh_mmc:error", host);
+	if (ret) {
+		pr_err(DRIVER_NAME": request_irq error (sh_mmc:error)\n");
+		goto clean_up2;
+	}
+	ret = request_irq(irq[1], sh_mmcif_intr, 0, "sh_mmc:int", host);
+	if (ret) {
+		free_irq(irq[0], host);
+		pr_err(DRIVER_NAME": request_irq error (sh_mmc:int)\n");
+		goto clean_up2;
+	}
+
+	sh_mmcif_writel(host, MMCIF_CE_INT_MASK, MASK_ALL);
+	sh_mmcif_detect(host->mmc);
+
+	pr_info("%s: driver version %s\n", DRIVER_NAME, DRIVER_VERSION);
+	pr_debug("%s: chip ver H'%04x\n", DRIVER_NAME,
+			sh_mmcif_readl(host, MMCIF_CE_VERSION) & 0x0000ffff);
+	return ret;
+
+clean_up2:
+	clk_disable(host->hclk);
+clean_up1:
+	mmc_free_host(mmc);
+clean_up:
+	if (reg)
+		iounmap(reg);
+	return ret;
+}
+
+static int __devexit sh_mmcif_remove(struct platform_device *pdev)
+{
+	struct sh_mmcif_host *host = platform_get_drvdata(pdev);
+	int irq[2];
+
+	sh_mmcif_writel(host, MMCIF_CE_INT_MASK, MASK_ALL);
+
+	irq[0] = platform_get_irq(pdev, 0);
+	irq[1] = platform_get_irq(pdev, 1);
+
+	if (host->addr)
+		iounmap(host->addr);
+
+	platform_set_drvdata(pdev, NULL);
+	mmc_remove_host(host->mmc);
+
+	free_irq(irq[0], host);
+	free_irq(irq[1], host);
+
+	clk_disable(host->hclk);
+	mmc_free_host(host->mmc);
+
+	return 0;
+}
+
+static struct platform_driver sh_mmcif_driver = {
+	.probe		= sh_mmcif_probe,
+	.remove		= sh_mmcif_remove,
+	.driver		= {
+		.name	= DRIVER_NAME,
+	},
+};
+
+static int __init sh_mmcif_init(void)
+{
+	return platform_driver_register(&sh_mmcif_driver);
+}
+
+static void __exit sh_mmcif_exit(void)
+{
+	platform_driver_unregister(&sh_mmcif_driver);
+}
+
+module_init(sh_mmcif_init);
+module_exit(sh_mmcif_exit);
+
+MODULE_DESCRIPTION("SuperH on-chip MMC/eMMC interface driver");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS(DRIVER_NAME);
+MODULE_AUTHOR("Yusuke Goda <yusuke.goda.sx@renesas.com>");
diff --git a/drivers/mmc/host/tifm_sd.c b/drivers/mmc/host/tifm_sd.c
index 82554dd..cec9995 100644
--- a/drivers/mmc/host/tifm_sd.c
+++ b/drivers/mmc/host/tifm_sd.c
@@ -1032,7 +1032,7 @@
 
 static int tifm_sd_suspend(struct tifm_dev *sock, pm_message_t state)
 {
-	return mmc_suspend_host(tifm_get_drvdata(sock), state);
+	return mmc_suspend_host(tifm_get_drvdata(sock));
 }
 
 static int tifm_sd_resume(struct tifm_dev *sock)
diff --git a/drivers/mmc/host/tmio_mmc.c b/drivers/mmc/host/tmio_mmc.c
index 883fcac..ee7d0a5 100644
--- a/drivers/mmc/host/tmio_mmc.c
+++ b/drivers/mmc/host/tmio_mmc.c
@@ -768,7 +768,7 @@
 	struct mmc_host *mmc = platform_get_drvdata(dev);
 	int ret;
 
-	ret = mmc_suspend_host(mmc, state);
+	ret = mmc_suspend_host(mmc);
 
 	/* Tell MFD core it can disable us now.*/
 	if (!ret && cell->disable)
diff --git a/drivers/mmc/host/via-sdmmc.c b/drivers/mmc/host/via-sdmmc.c
index 632858a..19f2d72 100644
--- a/drivers/mmc/host/via-sdmmc.c
+++ b/drivers/mmc/host/via-sdmmc.c
@@ -1280,7 +1280,7 @@
 	via_save_pcictrlreg(host);
 	via_save_sdcreg(host);
 
-	ret = mmc_suspend_host(host->mmc, state);
+	ret = mmc_suspend_host(host->mmc);
 
 	pci_save_state(pcidev);
 	pci_enable_wake(pcidev, pci_choose_state(pcidev, state), 0);
diff --git a/drivers/mmc/host/wbsd.c b/drivers/mmc/host/wbsd.c
index 69efe01..0012f5d 100644
--- a/drivers/mmc/host/wbsd.c
+++ b/drivers/mmc/host/wbsd.c
@@ -1819,7 +1819,7 @@
 {
 	BUG_ON(host == NULL);
 
-	return mmc_suspend_host(host->mmc, state);
+	return mmc_suspend_host(host->mmc);
 }
 
 static int wbsd_resume(struct wbsd_host *host)
diff --git a/drivers/rapidio/Kconfig b/drivers/rapidio/Kconfig
index c32822a..070211a 100644
--- a/drivers/rapidio/Kconfig
+++ b/drivers/rapidio/Kconfig
@@ -8,3 +8,27 @@
 	---help---
 	  Amount of time a discovery node waits for a host to complete
 	  enumeration before giving up.
+
+config RAPIDIO_ENABLE_RX_TX_PORTS
+	bool "Enable RapidIO Input/Output Ports"
+	depends on RAPIDIO
+	---help---
+	  The RapidIO specification describes an Output port transmit
+	  enable and an Input port receive enable. The recommended state
+	  for Input and Output ports is disabled. When this switch is
+	  set, the RapidIO subsystem will enable all ports for
+	  Input/Output operation, allowing traffic other than
+	  Maintenance transfers.
+
+source "drivers/rapidio/switches/Kconfig"
+
+config RAPIDIO_DEBUG
+	bool "RapidIO subsystem debug messages"
+	depends on RAPIDIO
+	help
+	  Say Y here if you want the RapidIO subsystem to produce a bunch of
+	  debug messages to the system log. Select this if you are having a
+	  problem with the RapidIO subsystem and want to see more of what is
+	  going on.
+
+	  If you are unsure about this, say N here.
diff --git a/drivers/rapidio/Makefile b/drivers/rapidio/Makefile
index 7c0e181..b6139fe 100644
--- a/drivers/rapidio/Makefile
+++ b/drivers/rapidio/Makefile
@@ -4,3 +4,7 @@
 obj-y += rio.o rio-access.o rio-driver.o rio-scan.o rio-sysfs.o
 
 obj-$(CONFIG_RAPIDIO)		+= switches/
+
+ifeq ($(CONFIG_RAPIDIO_DEBUG),y)
+EXTRA_CFLAGS += -DDEBUG
+endif
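The -DDEBUG added here is what makes the pr_debug() calls throughout rio-scan.c produce output; without it (and without dynamic debug) they compile away. The effect, sketched:

#define DEBUG	/* what EXTRA_CFLAGS += -DDEBUG amounts to per file */

#include <linux/kernel.h>

static void rio_debug_example(void)
{
	/* A real printk(KERN_DEBUG ...) in a debug build; a no-op
	 * (barring dynamic debug) otherwise. */
	pr_debug("RIO: discovery detail visible only in debug builds\n");
}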
diff --git a/drivers/rapidio/rio-scan.c b/drivers/rapidio/rio-scan.c
index 4541509..5664321 100644
--- a/drivers/rapidio/rio-scan.c
+++ b/drivers/rapidio/rio-scan.c
@@ -4,6 +4,14 @@
  * Copyright 2005 MontaVista Software, Inc.
  * Matt Porter <mporter@kernel.crashing.org>
  *
+ * Copyright 2009 Integrated Device Technology, Inc.
+ * Alex Bounine <alexandre.bounine@idt.com>
+ * - Added Port-Write/Error Management initialization and handling
+ *
+ * Copyright 2009 Sysgo AG
+ * Thomas Moll <thomas.moll@sysgo.com>
+ * - Added Input- Output- enable functionality, to allow full communication
+ *
  * This program is free software; you can redistribute  it and/or modify it
  * under  the terms of  the GNU General  Public License as published by the
  * Free Software Foundation;  either version 2 of the  License, or (at your
@@ -31,15 +39,16 @@
 LIST_HEAD(rio_devices);
 static LIST_HEAD(rio_switches);
 
-#define RIO_ENUM_CMPL_MAGIC	0xdeadbeef
-
 static void rio_enum_timeout(unsigned long);
 
+static void rio_init_em(struct rio_dev *rdev);
+
 DEFINE_SPINLOCK(rio_global_list_lock);
 
 static int next_destid = 0;
 static int next_switchid = 0;
 static int next_net = 0;
+static int next_comptag;
 
 static struct timer_list rio_enum_timer =
 TIMER_INITIALIZER(rio_enum_timeout, 0, 0);
@@ -52,12 +61,6 @@
 	-1,
 };
 
-static int rio_sport_phys_table[] = {
-	RIO_EFB_PAR_EP_FREE_ID,
-	RIO_EFB_SER_EP_FREE_ID,
-	-1,
-};
-
 /**
  * rio_get_device_id - Get the base/extended device id for a device
  * @port: RIO master port
@@ -118,12 +121,26 @@
 	u32 result;
 	int ret = 0;
 
-	/* Write component tag CSR magic complete value */
-	rio_local_write_config_32(port, RIO_COMPONENT_TAG_CSR,
-				  RIO_ENUM_CMPL_MAGIC);
-	list_for_each_entry(rdev, &rio_devices, global_list)
-	    rio_write_config_32(rdev, RIO_COMPONENT_TAG_CSR,
-				RIO_ENUM_CMPL_MAGIC);
+	/* Assign component tag to all devices */
+	next_comptag = 1;
+	rio_local_write_config_32(port, RIO_COMPONENT_TAG_CSR, next_comptag++);
+
+	list_for_each_entry(rdev, &rio_devices, global_list) {
+		/* Mark device as discovered */
+		rio_read_config_32(rdev,
+				   rdev->phys_efptr + RIO_PORT_GEN_CTL_CSR,
+				   &result);
+		rio_write_config_32(rdev,
+				    rdev->phys_efptr + RIO_PORT_GEN_CTL_CSR,
+				    result | RIO_PORT_GEN_DISCOVERED);
+
+		rio_write_config_32(rdev, RIO_COMPONENT_TAG_CSR, next_comptag);
+		rdev->comp_tag = next_comptag++;
+		if (next_comptag >= 0x10000) {
+			pr_err("RIO: Component Tag Counter Overflow\n");
+			break;
+		}
+	}
 
 	/* Release host device id locks */
 	rio_local_write_config_32(port, RIO_HOST_DID_LOCK_CSR,
@@ -229,27 +246,37 @@
 }
 
 /**
- * rio_route_set_ops- Sets routing operations for a particular vendor switch
+ * rio_switch_init - Sets switch operations for a particular vendor switch
  * @rdev: RIO device
+ * @do_enum: Enumeration/Discovery mode flag
  *
- * Searches the RIO route ops table for known switch types. If the vid
- * and did match a switch table entry, then set the add_entry() and
- * get_entry() ops to the table entry values.
+ * Searches the RIO switch ops table for known switch types. If the vid
+ * and did match a switch table entry, then call switch initialization
+ * routine to setup switch-specific routines.
  */
-static void rio_route_set_ops(struct rio_dev *rdev)
+static void rio_switch_init(struct rio_dev *rdev, int do_enum)
 {
-	struct rio_route_ops *cur = __start_rio_route_ops;
-	struct rio_route_ops *end = __end_rio_route_ops;
+	struct rio_switch_ops *cur = __start_rio_switch_ops;
+	struct rio_switch_ops *end = __end_rio_switch_ops;
 
 	while (cur < end) {
 		if ((cur->vid == rdev->vid) && (cur->did == rdev->did)) {
-			pr_debug("RIO: adding routing ops for %s\n", rio_name(rdev));
-			rdev->rswitch->add_entry = cur->add_hook;
-			rdev->rswitch->get_entry = cur->get_hook;
+			pr_debug("RIO: calling init routine for %s\n",
+				 rio_name(rdev));
+			cur->init_hook(rdev, do_enum);
+			break;
 		}
 		cur++;
 	}
 
+	if ((cur >= end) && (rdev->pef & RIO_PEF_STD_RT)) {
+		pr_debug("RIO: adding STD routing ops for %s\n",
+			rio_name(rdev));
+		rdev->rswitch->add_entry = rio_std_route_add_entry;
+		rdev->rswitch->get_entry = rio_std_route_get_entry;
+		rdev->rswitch->clr_table = rio_std_route_clr_table;
+	}
+
 	if (!rdev->rswitch->add_entry || !rdev->rswitch->get_entry)
 		printk(KERN_ERR "RIO: missing routing ops for %s\n",
 		       rio_name(rdev));
@@ -281,6 +308,65 @@
 }
 
 /**
+ * rio_enable_rx_tx_port - enable the input receiver and output transmitter
+ * of a given port
+ * @port: Master port associated with the RIO network
+ * @local: local=1 selects the local port, otherwise a remote device is reached
+ * @destid: Destination ID of the device to check host bit
+ * @hopcount: Number of hops to reach the target
+ * @port_num: Port number (on a switch) to enable on a far-end device
+ *
+ * Returns 0 on success, or -EIO if a config access to a remote device fails.
+ * Operates on the Port n Control CSR (EXT_PTR+0x3C).
+ */
+inline int rio_enable_rx_tx_port(struct rio_mport *port,
+				 int local, u16 destid,
+				 u8 hopcount, u8 port_num) {
+#ifdef CONFIG_RAPIDIO_ENABLE_RX_TX_PORTS
+	u32 regval;
+	u32 ext_ftr_ptr;
+
+	/*
+	 * Enable rx input / tx output port.
+	 */
+	pr_debug("rio_enable_rx_tx_port(local = %d, destid = %d, hopcount = "
+		 "%d, port_num = %d)\n", local, destid, hopcount, port_num);
+
+	ext_ftr_ptr = rio_mport_get_physefb(port, local, destid, hopcount);
+
+	if (local) {
+		rio_local_read_config_32(port, ext_ftr_ptr +
+				RIO_PORT_N_CTL_CSR(0),
+				&regval);
+	} else {
+		if (rio_mport_read_config_32(port, destid, hopcount,
+		ext_ftr_ptr + RIO_PORT_N_CTL_CSR(port_num), &regval) < 0)
+			return -EIO;
+	}
+
+	if (regval & RIO_PORT_N_CTL_P_TYP_SER) {
+		/* serial */
+		regval = regval | RIO_PORT_N_CTL_EN_RX_SER
+				| RIO_PORT_N_CTL_EN_TX_SER;
+	} else {
+		/* parallel */
+		regval = regval | RIO_PORT_N_CTL_EN_RX_PAR
+				| RIO_PORT_N_CTL_EN_TX_PAR;
+	}
+
+	if (local) {
+		rio_local_write_config_32(port, ext_ftr_ptr +
+					  RIO_PORT_N_CTL_CSR(0), regval);
+	} else {
+		if (rio_mport_write_config_32(port, destid, hopcount,
+		    ext_ftr_ptr + RIO_PORT_N_CTL_CSR(port_num), regval) < 0)
+			return -EIO;
+	}
+#endif
+	return 0;
+}
+
+/**
  * rio_setup_device- Allocates and sets up a RIO device
  * @net: RIO network
  * @port: Master port to send transactions
@@ -325,8 +411,14 @@
 	rdev->asm_rev = result >> 16;
 	rio_mport_read_config_32(port, destid, hopcount, RIO_PEF_CAR,
 				 &rdev->pef);
-	if (rdev->pef & RIO_PEF_EXT_FEATURES)
+	if (rdev->pef & RIO_PEF_EXT_FEATURES) {
 		rdev->efptr = result & 0xffff;
+		rdev->phys_efptr = rio_mport_get_physefb(port, 0, destid,
+							 hopcount);
+
+		rdev->em_efptr = rio_mport_get_feature(port, 0, destid,
+						hopcount, RIO_EFB_ERR_MGMNT);
+	}
 
 	rio_mport_read_config_32(port, destid, hopcount, RIO_SRC_OPS_CAR,
 				 &rdev->src_ops);
@@ -349,12 +441,13 @@
 	if (rio_is_switch(rdev)) {
 		rio_mport_read_config_32(port, destid, hopcount,
 					 RIO_SWP_INFO_CAR, &rdev->swpinfo);
-		rswitch = kmalloc(sizeof(struct rio_switch), GFP_KERNEL);
+		rswitch = kzalloc(sizeof(struct rio_switch), GFP_KERNEL);
 		if (!rswitch)
 			goto cleanup;
 		rswitch->switchid = next_switchid;
 		rswitch->hopcount = hopcount;
 		rswitch->destid = destid;
+		rswitch->port_ok = 0;
 		rswitch->route_table = kzalloc(sizeof(u8)*
 					RIO_MAX_ROUTE_ENTRIES(port->sys_size),
 					GFP_KERNEL);
@@ -367,13 +460,22 @@
 		rdev->rswitch = rswitch;
 		dev_set_name(&rdev->dev, "%02x:s:%04x", rdev->net->id,
 			     rdev->rswitch->switchid);
-		rio_route_set_ops(rdev);
+		rio_switch_init(rdev, do_enum);
+
+		if (do_enum && rdev->rswitch->clr_table)
+			rdev->rswitch->clr_table(port, destid, hopcount,
+						 RIO_GLOBAL_TABLE);
 
 		list_add_tail(&rswitch->node, &rio_switches);
 
-	} else
+	} else {
+		if (do_enum)
+			/* Enable Input/Output port (transmitter/receiver) */
+			rio_enable_rx_tx_port(port, 0, destid, hopcount, 0);
+
 		dev_set_name(&rdev->dev, "%02x:e:%04x", rdev->net->id,
 			     rdev->destid);
+	}
 
 	rdev->dev.bus = &rio_bus_type;
 
@@ -414,23 +516,29 @@
  *
  * Reads the port error status CSR for a particular switch port to
  * determine if the port has an active link.  Returns
- * %PORT_N_ERR_STS_PORT_OK if the port is active or %0 if it is
+ * %RIO_PORT_N_ERR_STS_PORT_OK if the port is active or %0 if it is
  * inactive.
  */
 static int
 rio_sport_is_active(struct rio_mport *port, u16 destid, u8 hopcount, int sport)
 {
-	u32 result;
+	u32 result = 0;
 	u32 ext_ftr_ptr;
 
-	int *entry = rio_sport_phys_table;
+	ext_ftr_ptr = rio_mport_get_efb(port, 0, destid, hopcount, 0);
 
-	do {
-		if ((ext_ftr_ptr =
-		     rio_mport_get_feature(port, 0, destid, hopcount, *entry)))
-
+	while (ext_ftr_ptr) {
+		rio_mport_read_config_32(port, destid, hopcount,
+					 ext_ftr_ptr, &result);
+		result = RIO_GET_BLOCK_ID(result);
+		if ((result == RIO_EFB_SER_EP_FREE_ID) ||
+		    (result == RIO_EFB_SER_EP_FREE_ID_V13P) ||
+		    (result == RIO_EFB_SER_EP_FREC_ID))
 			break;
-	} while (*++entry >= 0);
+
+		ext_ftr_ptr = rio_mport_get_efb(port, 0, destid, hopcount,
+						ext_ftr_ptr);
+	}
 
 	if (ext_ftr_ptr)
 		rio_mport_read_config_32(port, destid, hopcount,
@@ -438,7 +546,81 @@
 					 RIO_PORT_N_ERR_STS_CSR(sport),
 					 &result);
 
-	return (result & PORT_N_ERR_STS_PORT_OK);
+	return result & RIO_PORT_N_ERR_STS_PORT_OK;
+}
+
+/**
+ * rio_lock_device - Acquires host device lock for specified device
+ * @port: Master port to send transaction
+ * @destid: Destination ID for device/switch
+ * @hopcount: Hopcount to reach switch
+ * @wait_ms: Max wait time in msec (0 = no timeout)
+ *
+ * Attempts to acquire the host device lock for the specified device.
+ * Returns 0 if the device lock is acquired or -EINVAL if the timeout expires.
+ */
+static int
+rio_lock_device(struct rio_mport *port, u16 destid, u8 hopcount, int wait_ms)
+{
+	u32 result;
+	int tcnt = 0;
+
+	/* Attempt to acquire device lock */
+	rio_mport_write_config_32(port, destid, hopcount,
+				  RIO_HOST_DID_LOCK_CSR, port->host_deviceid);
+	rio_mport_read_config_32(port, destid, hopcount,
+				 RIO_HOST_DID_LOCK_CSR, &result);
+
+	while (result != port->host_deviceid) {
+		if (wait_ms != 0 && tcnt == wait_ms) {
+			pr_debug("RIO: timeout when locking device %x:%x\n",
+				destid, hopcount);
+			return -EINVAL;
+		}
+
+		/* Delay a bit */
+		mdelay(1);
+		tcnt++;
+		/* Try to acquire device lock again */
+		rio_mport_write_config_32(port, destid,
+			hopcount,
+			RIO_HOST_DID_LOCK_CSR,
+			port->host_deviceid);
+		rio_mport_read_config_32(port, destid,
+			hopcount,
+			RIO_HOST_DID_LOCK_CSR, &result);
+	}
+
+	return 0;
+}
+
+/**
+ * rio_unlock_device - Releases host device lock for specified device
+ * @port: Master port to send transaction
+ * @destid: Destination ID for device/switch
+ * @hopcount: Hopcount to reach switch
+ *
+ * Returns 0 if the device lock is released or -EINVAL on failure.
+ */
+static int
+rio_unlock_device(struct rio_mport *port, u16 destid, u8 hopcount)
+{
+	u32 result;
+
+	/* Release device lock */
+	rio_mport_write_config_32(port, destid,
+				  hopcount,
+				  RIO_HOST_DID_LOCK_CSR,
+				  port->host_deviceid);
+	rio_mport_read_config_32(port, destid, hopcount,
+		RIO_HOST_DID_LOCK_CSR, &result);
+	if ((result & 0xffff) != 0xffff) {
+		pr_debug("RIO: badness when releasing device lock %x:%x\n",
+			 destid, hopcount);
+		return -EINVAL;
+	}
+
+	return 0;
 }
 
 /**
@@ -448,6 +630,7 @@
  * @table: Routing table ID
  * @route_destid: Destination ID to be routed
  * @route_port: Port number to be routed
+ * @lock: lock switch device flag
  *
  * Calls the switch specific add_entry() method to add a route entry
  * on a switch. The route table can be specified using the @table
@@ -456,12 +639,26 @@
  * %RIO_GLOBAL_TABLE in @table. Returns %0 on success or %-EINVAL
  * on failure.
  */
-static int rio_route_add_entry(struct rio_mport *mport, struct rio_switch *rswitch,
-			       u16 table, u16 route_destid, u8 route_port)
+static int
+rio_route_add_entry(struct rio_mport *mport, struct rio_switch *rswitch,
+		    u16 table, u16 route_destid, u8 route_port, int lock)
 {
-	return rswitch->add_entry(mport, rswitch->destid,
+	int rc;
+
+	if (lock) {
+		rc = rio_lock_device(mport, rswitch->destid,
+				     rswitch->hopcount, 1000);
+		if (rc)
+			return rc;
+	}
+
+	rc = rswitch->add_entry(mport, rswitch->destid,
 					rswitch->hopcount, table,
 					route_destid, route_port);
+	if (lock)
+		rio_unlock_device(mport, rswitch->destid, rswitch->hopcount);
+
+	return rc;
 }
 
 /**
@@ -471,6 +668,7 @@
  * @table: Routing table ID
  * @route_destid: Destination ID to be routed
  * @route_port: Pointer to read port number into
+ * @lock: lock switch device flag
  *
  * Calls the switch specific get_entry() method to read a route entry
  * in a switch. The route table can be specified using the @table
@@ -481,11 +679,24 @@
  */
 static int
 rio_route_get_entry(struct rio_mport *mport, struct rio_switch *rswitch, u16 table,
-		    u16 route_destid, u8 * route_port)
+		    u16 route_destid, u8 *route_port, int lock)
 {
-	return rswitch->get_entry(mport, rswitch->destid,
+	int rc;
+
+	if (lock) {
+		rc = rio_lock_device(mport, rswitch->destid,
+				     rswitch->hopcount, 1000);
+		if (rc)
+			return rc;
+	}
+
+	rc = rswitch->get_entry(mport, rswitch->destid,
 					rswitch->hopcount, table,
 					route_destid, route_port);
+	if (lock)
+		rio_unlock_device(mport, rswitch->destid, rswitch->hopcount);
+
+	return rc;
 }
 
 /**
@@ -625,14 +836,14 @@
 		sw_inport = rio_get_swpinfo_inport(port,
 				RIO_ANY_DESTID(port->sys_size), hopcount);
 		rio_route_add_entry(port, rdev->rswitch, RIO_GLOBAL_TABLE,
-				    port->host_deviceid, sw_inport);
+				    port->host_deviceid, sw_inport, 0);
 		rdev->rswitch->route_table[port->host_deviceid] = sw_inport;
 
 		for (destid = 0; destid < next_destid; destid++) {
 			if (destid == port->host_deviceid)
 				continue;
 			rio_route_add_entry(port, rdev->rswitch, RIO_GLOBAL_TABLE,
-					    destid, sw_inport);
+					    destid, sw_inport, 0);
 			rdev->rswitch->route_table[destid] = sw_inport;
 		}
 
@@ -644,8 +855,15 @@
 		    rio_name(rdev), rdev->vid, rdev->did, num_ports);
 		sw_destid = next_destid;
 		for (port_num = 0; port_num < num_ports; port_num++) {
-			if (sw_inport == port_num)
+			/* Enable Input/Output port (transmitter/receiver) */
+			rio_enable_rx_tx_port(port, 0,
+					      RIO_ANY_DESTID(port->sys_size),
+					      hopcount, port_num);
+
+			if (sw_inport == port_num) {
+				rdev->rswitch->port_ok |= (1 << port_num);
 				continue;
+			}
 
 			cur_destid = next_destid;
 
@@ -655,10 +873,11 @@
 				pr_debug(
 				    "RIO: scanning device on port %d\n",
 				    port_num);
+				rdev->rswitch->port_ok |= (1 << port_num);
 				rio_route_add_entry(port, rdev->rswitch,
 						RIO_GLOBAL_TABLE,
 						RIO_ANY_DESTID(port->sys_size),
-						port_num);
+						port_num, 0);
 
 				if (rio_enum_peer(net, port, hopcount + 1) < 0)
 					return -1;
@@ -672,15 +891,35 @@
 						rio_route_add_entry(port, rdev->rswitch,
 								    RIO_GLOBAL_TABLE,
 								    destid,
-								    port_num);
+								    port_num,
+								    0);
 						rdev->rswitch->
 						    route_table[destid] =
 						    port_num;
 					}
 				}
+			} else {
+				/* If switch supports Error Management,
+				 * set PORT_LOCKOUT bit for unused port
+				 */
+				if (rdev->em_efptr)
+					rio_set_port_lockout(rdev, port_num, 1);
+
+				rdev->rswitch->port_ok &= ~(1 << port_num);
 			}
 		}
 
+		/* Direct Port-write messages to the enumerating host */
+		if ((rdev->src_ops & RIO_SRC_OPS_PORT_WRITE) &&
+		    (rdev->em_efptr)) {
+			rio_write_config_32(rdev,
+					rdev->em_efptr + RIO_EM_PW_TGT_DEVID,
+					(port->host_deviceid << 16) |
+					(port->sys_size << 15));
+		}
+
+		rio_init_em(rdev);
+
 		/* Check for empty switch */
 		if (next_destid == sw_destid) {
 			next_destid++;
@@ -700,21 +939,16 @@
  * rio_enum_complete- Tests if enumeration of a network is complete
  * @port: Master port to send transaction
  *
- * Tests the Component Tag CSR for presence of the magic enumeration
- * complete flag. Return %1 if enumeration is complete or %0 if
+ * Tests the Component Tag CSR for non-zero value (enumeration
+ * complete flag). Return %1 if enumeration is complete or %0 if
  * enumeration is incomplete.
  */
 static int rio_enum_complete(struct rio_mport *port)
 {
 	u32 tag_csr;
-	int ret = 0;
 
 	rio_local_read_config_32(port, RIO_COMPONENT_TAG_CSR, &tag_csr);
-
-	if (tag_csr == RIO_ENUM_CMPL_MAGIC)
-		ret = 1;
-
-	return ret;
+	return (tag_csr & 0xffff) ? 1 : 0;
 }
 
 /**
@@ -763,17 +997,21 @@
 				pr_debug(
 				    "RIO: scanning device on port %d\n",
 				    port_num);
+
+				rio_lock_device(port, destid, hopcount, 1000);
+
 				for (ndestid = 0;
 				     ndestid < RIO_ANY_DESTID(port->sys_size);
 				     ndestid++) {
 					rio_route_get_entry(port, rdev->rswitch,
 							    RIO_GLOBAL_TABLE,
 							    ndestid,
-							    &route_port);
+							    &route_port, 0);
 					if (route_port == port_num)
 						break;
 				}
 
+				rio_unlock_device(port, destid, hopcount);
 				if (rio_disc_peer
 				    (net, port, ndestid, hopcount + 1) < 0)
 					return -1;
@@ -792,7 +1030,7 @@
  *
  * Reads the port error status CSR for the master port to
  * determine if the port has an active link.  Returns
- * %PORT_N_ERR_STS_PORT_OK if the  master port is active
+ * %RIO_PORT_N_ERR_STS_PORT_OK if the master port is active
  * or %0 if it is inactive.
  */
 static int rio_mport_is_active(struct rio_mport *port)
@@ -813,7 +1051,7 @@
 					 RIO_PORT_N_ERR_STS_CSR(port->index),
 					 &result);
 
-	return (result & PORT_N_ERR_STS_PORT_OK);
+	return result & RIO_PORT_N_ERR_STS_PORT_OK;
 }
 
 /**
@@ -866,12 +1104,17 @@
 				continue;
 
 			if (RIO_INVALID_ROUTE == rswitch->route_table[destid]) {
+				/* Skip if destid ends in an empty switch */
+				if (rswitch->destid == destid)
+					continue;
 
 				sport = rio_get_swpinfo_inport(port,
 						rswitch->destid, rswitch->hopcount);
 
 				if (rswitch->add_entry)	{
-					rio_route_add_entry(port, rswitch, RIO_GLOBAL_TABLE, destid, sport);
+					rio_route_add_entry(port, rswitch,
+						RIO_GLOBAL_TABLE, destid,
+						sport, 0);
 					rswitch->route_table[destid] = sport;
 				}
 			}
@@ -880,6 +1123,32 @@
 }
 
 /**
+ * rio_init_em - Initializes RIO Error Management (for switches)
+ * @rdev: RIO device to initialize
+ *
+ * If @rdev is a switch that supports Error Management, calls its
+ * device-specific EM initialization routine (if supplied by the
+ * switch driver).
+ */
+static void rio_init_em(struct rio_dev *rdev)
+{
+	if (rio_is_switch(rdev) && (rdev->em_efptr) &&
+	    (rdev->rswitch->em_init)) {
+		rdev->rswitch->em_init(rdev);
+	}
+}
+
+/**
+ * rio_pw_enable - Enables/disables port-write handling by a master port
+ * @port: Master port associated with port-write handling
+ * @enable:  1=enable,  0=disable
+ */
+static void rio_pw_enable(struct rio_mport *port, int enable)
+{
+	if (port->ops->pwenable)
+		port->ops->pwenable(port, enable);
+}
+
+/**
  * rio_enum_mport- Start enumeration through a master port
  * @mport: Master port to send transactions
  *
@@ -911,6 +1180,10 @@
 			rc = -ENOMEM;
 			goto out;
 		}
+
+		/* Enable Input Output Port (transmitter receiver) */
+		rio_enable_rx_tx_port(mport, 1, 0, 0, 0);
+
 		if (rio_enum_peer(net, mport, 0) < 0) {
 			/* A higher priority host won enumeration, bail. */
 			printk(KERN_INFO
@@ -922,6 +1195,7 @@
 		}
 		rio_update_route_tables(mport);
 		rio_clear_locks(mport);
+		rio_pw_enable(mport, 1);
 	} else {
 		printk(KERN_INFO "RIO: master port %d link inactive\n",
 		       mport->id);
@@ -945,15 +1219,22 @@
 	u8 sport;
 
 	list_for_each_entry(rdev, &rio_devices, global_list)
-	    if (rio_is_switch(rdev))
-		for (i = 0;
-		     i < RIO_MAX_ROUTE_ENTRIES(rdev->net->hport->sys_size);
-		     i++) {
-			if (rio_route_get_entry
-			    (rdev->net->hport, rdev->rswitch, RIO_GLOBAL_TABLE,
-			     i, &sport) < 0)
-				continue;
-			rdev->rswitch->route_table[i] = sport;
+		if (rio_is_switch(rdev)) {
+			rio_lock_device(rdev->net->hport, rdev->rswitch->destid,
+					rdev->rswitch->hopcount, 1000);
+			for (i = 0;
+			     i < RIO_MAX_ROUTE_ENTRIES(rdev->net->hport->sys_size);
+			     i++) {
+				if (rio_route_get_entry
+				    (rdev->net->hport, rdev->rswitch,
+				     RIO_GLOBAL_TABLE, i, &sport, 0) < 0)
+					continue;
+				rdev->rswitch->route_table[i] = sport;
+			}
+
+			rio_unlock_device(rdev->net->hport,
+					  rdev->rswitch->destid,
+					  rdev->rswitch->hopcount);
 		}
 }
 
@@ -1012,6 +1293,13 @@
 		del_timer_sync(&rio_enum_timer);
 
 		pr_debug("done\n");
+
+		/* Read DestID assigned by enumerator */
+		rio_local_read_config_32(mport, RIO_DID_CSR,
+					 &mport->host_deviceid);
+		mport->host_deviceid = RIO_GET_DID(mport->sys_size,
+						   mport->host_deviceid);
+
 		if (rio_disc_peer(net, mport, RIO_ANY_DESTID(mport->sys_size),
 					0) < 0) {
 			printk(KERN_INFO
diff --git a/drivers/rapidio/rio.c b/drivers/rapidio/rio.c
index 6395c78..777e099 100644
--- a/drivers/rapidio/rio.c
+++ b/drivers/rapidio/rio.c
@@ -5,6 +5,10 @@
  * Copyright 2005 MontaVista Software, Inc.
  * Matt Porter <mporter@kernel.crashing.org>
  *
+ * Copyright 2009 Integrated Device Technology, Inc.
+ * Alex Bounine <alexandre.bounine@idt.com>
+ * - Added Port-Write/Error Management initialization and handling
+ *
  * This program is free software; you can redistribute  it and/or modify it
  * under  the terms of  the GNU General  Public License as published by the
  * Free Software Foundation;  either version 2 of the  License, or (at your
@@ -333,6 +337,328 @@
 }
 
 /**
+ * rio_request_inb_pwrite - request inbound port-write message service
+ * @rdev: RIO device for which to register the inbound port-write callback
+ * @pwcback: Callback routine to execute when port-write is received
+ *
+ * Binds a port-write callback function to the RapidIO device.
+ * Returns 0 if the request has been satisfied.
+ */
+int rio_request_inb_pwrite(struct rio_dev *rdev,
+	int (*pwcback)(struct rio_dev *rdev, union rio_pw_msg *msg, int step))
+{
+	int rc = 0;
+
+	spin_lock(&rio_global_list_lock);
+	if (rdev->pwcback != NULL)
+		rc = -ENOMEM;
+	else
+		rdev->pwcback = pwcback;
+
+	spin_unlock(&rio_global_list_lock);
+	return rc;
+}
+EXPORT_SYMBOL_GPL(rio_request_inb_pwrite);
+
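For illustration, an endpoint driver might use this interface as sketched
below. The foo_* names are hypothetical placeholders; only the function,
the callback prototype, and the "return 0 when fully handled" contract
of rio_inb_pwrite_handler() below come from this patch:

	/* Hypothetical endpoint driver: consume device-specific
	 * port-write messages; returning 0 tells the core that the
	 * message was fully handled and needs no standard processing.
	 */
	static int foo_pwcback(struct rio_dev *rdev,
			       union rio_pw_msg *msg, int step)
	{
		pr_debug("foo: port-write from %s, comptag 0x%08x\n",
			 rio_name(rdev), msg->em.comptag);
		/* ... device-specific recovery actions ... */
		return 0;
	}

	static int foo_probe(struct rio_dev *rdev)
	{
		/* ... */
		return rio_request_inb_pwrite(rdev, foo_pwcback);
	}

The matching rio_release_inb_pwrite(rdev) would go in the driver's
remove path.
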
+/**
+ * rio_release_inb_pwrite - release inbound port-write message service
+ * @rdev: RIO device which registered for inbound port-write callback
+ *
+ * Removes callback from the rio_dev structure. Returns 0 if the request
+ * has been satisfied.
+ */
+int rio_release_inb_pwrite(struct rio_dev *rdev)
+{
+	int rc = -ENOMEM;
+
+	spin_lock(&rio_global_list_lock);
+	if (rdev->pwcback) {
+		rdev->pwcback = NULL;
+		rc = 0;
+	}
+
+	spin_unlock(&rio_global_list_lock);
+	return rc;
+}
+EXPORT_SYMBOL_GPL(rio_release_inb_pwrite);
+
+/**
+ * rio_mport_get_physefb - Helper function that returns register offset
+ *                      for Physical Layer Extended Features Block.
+ * @port: Master port to issue transaction
+ * @local: Indicate a local master port or remote device access
+ * @destid: Destination ID of the device
+ * @hopcount: Number of switch hops to the device
+ */
+u32
+rio_mport_get_physefb(struct rio_mport *port, int local,
+		      u16 destid, u8 hopcount)
+{
+	u32 ext_ftr_ptr;
+	u32 ftr_header;
+
+	ext_ftr_ptr = rio_mport_get_efb(port, local, destid, hopcount, 0);
+
+	while (ext_ftr_ptr)  {
+		if (local)
+			rio_local_read_config_32(port, ext_ftr_ptr,
+						 &ftr_header);
+		else
+			rio_mport_read_config_32(port, destid, hopcount,
+						 ext_ftr_ptr, &ftr_header);
+
+		ftr_header = RIO_GET_BLOCK_ID(ftr_header);
+		switch (ftr_header) {
+
+		case RIO_EFB_SER_EP_ID_V13P:
+		case RIO_EFB_SER_EP_REC_ID_V13P:
+		case RIO_EFB_SER_EP_FREE_ID_V13P:
+		case RIO_EFB_SER_EP_ID:
+		case RIO_EFB_SER_EP_REC_ID:
+		case RIO_EFB_SER_EP_FREE_ID:
+		case RIO_EFB_SER_EP_FREC_ID:
+
+			return ext_ftr_ptr;
+
+		default:
+			break;
+		}
+
+		ext_ftr_ptr = rio_mport_get_efb(port, local, destid,
+						hopcount, ext_ftr_ptr);
+	}
+
+	return ext_ftr_ptr;
+}
+
+/**
+ * rio_get_comptag - Begin or continue searching for a RIO device by component tag
+ * @comp_tag: RIO component tag to match
+ * @from: Previous RIO device found in search, or %NULL for new search
+ *
+ * Iterates through the list of known RIO devices. If a RIO device is
+ * found with a matching @comp_tag, a pointer to its device
+ * structure is returned. Otherwise, %NULL is returned. A new search
+ * is initiated by passing %NULL to the @from argument. Otherwise, if
+ * @from is not %NULL, the search continues from the next device on the global
+ * list.
+ */
+static struct rio_dev *rio_get_comptag(u32 comp_tag, struct rio_dev *from)
+{
+	struct list_head *n;
+	struct rio_dev *rdev;
+
+	spin_lock(&rio_global_list_lock);
+	n = from ? from->global_list.next : rio_devices.next;
+
+	while (n && (n != &rio_devices)) {
+		rdev = rio_dev_g(n);
+		if (rdev->comp_tag == comp_tag)
+			goto exit;
+		n = n->next;
+	}
+	rdev = NULL;
+exit:
+	spin_unlock(&rio_global_list_lock);
+	return rdev;
+}
+
+/**
+ * rio_set_port_lockout - Sets/clears LOCKOUT bit (RIO EM 1.3) for a switch port.
+ * @rdev: Pointer to RIO device control structure
+ * @pnum: Switch port number to set LOCKOUT bit
+ * @lock: Operation : set (=1) or clear (=0)
+ */
+int rio_set_port_lockout(struct rio_dev *rdev, u32 pnum, int lock)
+{
+	u8 hopcount = 0xff;
+	u16 destid = rdev->destid;
+	u32 regval;
+
+	if (rdev->rswitch) {
+		destid = rdev->rswitch->destid;
+		hopcount = rdev->rswitch->hopcount;
+	}
+
+	rio_mport_read_config_32(rdev->net->hport, destid, hopcount,
+				 rdev->phys_efptr + RIO_PORT_N_CTL_CSR(pnum),
+				 &regval);
+	if (lock)
+		regval |= RIO_PORT_N_CTL_LOCKOUT;
+	else
+		regval &= ~RIO_PORT_N_CTL_LOCKOUT;
+
+	rio_mport_write_config_32(rdev->net->hport, destid, hopcount,
+				  rdev->phys_efptr + RIO_PORT_N_CTL_CSR(pnum),
+				  regval);
+	return 0;
+}
+
+/**
+ * rio_inb_pwrite_handler - process inbound port-write message
+ * @pw_msg: pointer to inbound port-write message
+ *
+ * Processes an inbound port-write message. Returns 0 if the request
+ * has been satisfied.
+ */
+int rio_inb_pwrite_handler(union rio_pw_msg *pw_msg)
+{
+	struct rio_dev *rdev;
+	struct rio_mport *mport;
+	u8 hopcount;
+	u16 destid;
+	u32 err_status;
+	int rc, portnum;
+
+	rdev = rio_get_comptag(pw_msg->em.comptag, NULL);
+	if (rdev == NULL) {
+		/* Something bad here (probably enumeration error) */
+		pr_err("RIO: %s No matching device for CTag 0x%08x\n",
+			__func__, pw_msg->em.comptag);
+		return -EIO;
+	}
+
+	pr_debug("RIO: Port-Write message from %s\n", rio_name(rdev));
+
+#ifdef DEBUG_PW
+	{
+	u32 i;
+	for (i = 0; i < RIO_PW_MSG_SIZE/sizeof(u32); i += 4)
+		pr_debug("0x%02x: %08x %08x %08x %08x\n",
+			 i*4, pw_msg->raw[i], pw_msg->raw[i + 1],
+			 pw_msg->raw[i + 2], pw_msg->raw[i + 3]);
+	}
+#endif
+
+	/* Call an external service function (if one is registered
+	 * for this device). This may be the service for endpoints that send
+	 * device-specific port-write messages; end-point messages are
+	 * expected to be handled completely by the EP-specific device driver.
+	 * For switches, rc == 0 signals that no standard processing is
+	 * required.
+	 */
+	if (rdev->pwcback != NULL) {
+		rc = rdev->pwcback(rdev, pw_msg, 0);
+		if (rc == 0)
+			return 0;
+	}
+
+	/* For End-point devices processing stops here */
+	if (!(rdev->pef & RIO_PEF_SWITCH))
+		return 0;
+
+	if (rdev->phys_efptr == 0) {
+		pr_err("RIO_PW: Bad switch initialization for %s\n",
+			rio_name(rdev));
+		return 0;
+	}
+
+	mport = rdev->net->hport;
+	destid = rdev->rswitch->destid;
+	hopcount = rdev->rswitch->hopcount;
+
+	/*
+	 * Process the port-write notification from switch
+	 */
+
+	portnum = pw_msg->em.is_port & 0xFF;
+
+	if (rdev->rswitch->em_handle)
+		rdev->rswitch->em_handle(rdev, portnum);
+
+	rio_mport_read_config_32(mport, destid, hopcount,
+			rdev->phys_efptr + RIO_PORT_N_ERR_STS_CSR(portnum),
+			&err_status);
+	pr_debug("RIO_PW: SP%d_ERR_STS_CSR=0x%08x\n", portnum, err_status);
+
+	if (pw_msg->em.errdetect) {
+		pr_debug("RIO_PW: RIO_EM_P%d_ERR_DETECT=0x%08x\n",
+			 portnum, pw_msg->em.errdetect);
+		/* Clear EM Port N Error Detect CSR */
+		rio_mport_write_config_32(mport, destid, hopcount,
+			rdev->em_efptr + RIO_EM_PN_ERR_DETECT(portnum), 0);
+	}
+
+	if (pw_msg->em.ltlerrdet) {
+		pr_debug("RIO_PW: RIO_EM_LTL_ERR_DETECT=0x%08x\n",
+			 pw_msg->em.ltlerrdet);
+		/* Clear EM L/T Layer Error Detect CSR */
+		rio_mport_write_config_32(mport, destid, hopcount,
+			rdev->em_efptr + RIO_EM_LTL_ERR_DETECT, 0);
+	}
+
+	/* Clear Port Errors */
+	rio_mport_write_config_32(mport, destid, hopcount,
+			rdev->phys_efptr + RIO_PORT_N_ERR_STS_CSR(portnum),
+			err_status & RIO_PORT_N_ERR_STS_CLR_MASK);
+
+	if (rdev->rswitch->port_ok & (1 << portnum)) {
+		if (err_status & RIO_PORT_N_ERR_STS_PORT_UNINIT) {
+			rdev->rswitch->port_ok &= ~(1 << portnum);
+			rio_set_port_lockout(rdev, portnum, 1);
+
+			rio_mport_write_config_32(mport, destid, hopcount,
+				rdev->phys_efptr +
+					RIO_PORT_N_ACK_STS_CSR(portnum),
+				RIO_PORT_N_ACK_CLEAR);
+
+			/* Schedule Extraction Service */
+			pr_debug("RIO_PW: Device Extraction on [%s]-P%d\n",
+			       rio_name(rdev), portnum);
+		}
+	} else {
+		if (err_status & RIO_PORT_N_ERR_STS_PORT_OK) {
+			rdev->rswitch->port_ok |= (1 << portnum);
+			rio_set_port_lockout(rdev, portnum, 0);
+
+			/* Schedule Insertion Service */
+			pr_debug("RIO_PW: Device Insertion on [%s]-P%d\n",
+			       rio_name(rdev), portnum);
+		}
+	}
+
+	/* Clear Port-Write Pending bit */
+	rio_mport_write_config_32(mport, destid, hopcount,
+			rdev->phys_efptr + RIO_PORT_N_ERR_STS_CSR(portnum),
+			RIO_PORT_N_ERR_STS_PW_PEND);
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(rio_inb_pwrite_handler);
+
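On the receiving side, a master port driver that has port-writes
enabled (see rio_pw_enable() in rio-scan.c) is expected to hand each
captured message to rio_inb_pwrite_handler(). A minimal sketch,
assuming a hypothetical inbound FIFO layout and helper name:

	/* Hypothetical mport-driver glue: copy a captured port-write
	 * message out of the inbound FIFO and pass it to the common
	 * handler above.
	 */
	static void foo_mport_pw_dispatch(u32 *fifo_buf)
	{
		union rio_pw_msg msg;

		memcpy(&msg, fifo_buf, RIO_PW_MSG_SIZE);
		rio_inb_pwrite_handler(&msg);
	}
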
+/**
+ * rio_mport_get_efb - get pointer to next extended features block
+ * @port: Master port to issue transaction
+ * @local: Indicate a local master port or remote device access
+ * @destid: Destination ID of the device
+ * @hopcount: Number of switch hops to the device
+ * @from: Offset of current Extended Feature block header (if 0, starts
+ * from ExtFeaturePtr)
+ */
+u32
+rio_mport_get_efb(struct rio_mport *port, int local, u16 destid,
+		      u8 hopcount, u32 from)
+{
+	u32 reg_val;
+
+	if (from == 0) {
+		if (local)
+			rio_local_read_config_32(port, RIO_ASM_INFO_CAR,
+						 &reg_val);
+		else
+			rio_mport_read_config_32(port, destid, hopcount,
+						 RIO_ASM_INFO_CAR, &reg_val);
+		return reg_val & RIO_EXT_FTR_PTR_MASK;
+	} else {
+		if (local)
+			rio_local_read_config_32(port, from, &reg_val);
+		else
+			rio_mport_read_config_32(port, destid, hopcount,
+						 from, &reg_val);
+		return RIO_GET_BLOCK_ID(reg_val);
+	}
+}
+
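This helper can be used to walk the whole extended-features list; the
sketch below is hypothetical and simply mirrors the loop in
rio_mport_get_physefb() above, printing the ID of every EFB found on
the local port:

	static void foo_dump_local_efbs(struct rio_mport *port)
	{
		u32 ptr, header;

		/* destid/hopcount are ignored for local accesses */
		ptr = rio_mport_get_efb(port, 1, 0, 0, 0);
		while (ptr) {
			rio_local_read_config_32(port, ptr, &header);
			pr_debug("RIO: EFB @ 0x%x, ID 0x%x\n",
				 ptr, RIO_GET_BLOCK_ID(header));
			ptr = rio_mport_get_efb(port, 1, 0, 0, ptr);
		}
	}
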
+/**
  * rio_mport_get_feature - query for devices' extended features
  * @port: Master port to issue transaction
  * @local: Indicate a local master port or remote device access
@@ -451,6 +777,111 @@
 	return rio_get_asm(vid, did, RIO_ANY_ID, RIO_ANY_ID, from);
 }
 
+/**
+ * rio_std_route_add_entry - Add switch route table entry using standard
+ *   registers defined in RIO specification rev.1.3
+ * @mport: Master port to issue transaction
+ * @destid: Destination ID of the device
+ * @hopcount: Number of switch hops to the device
+ * @table: routing table ID (global or port-specific)
+ * @route_destid: destID entry in the RT
+ * @route_port: destination port for specified destID
+ */
+int rio_std_route_add_entry(struct rio_mport *mport, u16 destid, u8 hopcount,
+		       u16 table, u16 route_destid, u8 route_port)
+{
+	if (table == RIO_GLOBAL_TABLE) {
+		rio_mport_write_config_32(mport, destid, hopcount,
+				RIO_STD_RTE_CONF_DESTID_SEL_CSR,
+				(u32)route_destid);
+		rio_mport_write_config_32(mport, destid, hopcount,
+				RIO_STD_RTE_CONF_PORT_SEL_CSR,
+				(u32)route_port);
+	}
+
+	udelay(10);
+	return 0;
+}
+
+/**
+ * rio_std_route_get_entry - Read switch route table entry (port number)
+ *   associated with specified destID using standard registers defined in RIO
+ *   specification rev.1.3
+ * @mport: Master port to issue transaction
+ * @destid: Destination ID of the device
+ * @hopcount: Number of switch hops to the device
+ * @table: routing table ID (global or port-specific)
+ * @route_destid: destID entry in the RT
+ * @route_port: returned destination port for specified destID
+ */
+int rio_std_route_get_entry(struct rio_mport *mport, u16 destid, u8 hopcount,
+		       u16 table, u16 route_destid, u8 *route_port)
+{
+	u32 result;
+
+	if (table == RIO_GLOBAL_TABLE) {
+		rio_mport_write_config_32(mport, destid, hopcount,
+				RIO_STD_RTE_CONF_DESTID_SEL_CSR, route_destid);
+		rio_mport_read_config_32(mport, destid, hopcount,
+				RIO_STD_RTE_CONF_PORT_SEL_CSR, &result);
+
+		*route_port = (u8)result;
+	}
+
+	return 0;
+}
+
+/**
+ * rio_std_route_clr_table - Clear switch route table using standard registers
+ *   defined in RIO specification rev.1.3.
+ * @mport: Master port to issue transaction
+ * @destid: Destination ID of the device
+ * @hopcount: Number of switch hops to the device
+ * @table: routing table ID (global or port-specific)
+ */
+int rio_std_route_clr_table(struct rio_mport *mport, u16 destid, u8 hopcount,
+		       u16 table)
+{
+	u32 max_destid = 0xff;
+	u32 i, pef, id_inc = 1, ext_cfg = 0;
+	u32 port_sel = RIO_INVALID_ROUTE;
+
+	if (table == RIO_GLOBAL_TABLE) {
+		rio_mport_read_config_32(mport, destid, hopcount,
+					 RIO_PEF_CAR, &pef);
+
+		if (mport->sys_size) {
+			rio_mport_read_config_32(mport, destid, hopcount,
+						 RIO_SWITCH_RT_LIMIT,
+						 &max_destid);
+			max_destid &= RIO_RT_MAX_DESTID;
+		}
+
+		if (pef & RIO_PEF_EXT_RT) {
+			ext_cfg = 0x80000000;
+			id_inc = 4;
+			port_sel = (RIO_INVALID_ROUTE << 24) |
+				   (RIO_INVALID_ROUTE << 16) |
+				   (RIO_INVALID_ROUTE << 8) |
+				   RIO_INVALID_ROUTE;
+		}
+
+		for (i = 0; i <= max_destid;) {
+			rio_mport_write_config_32(mport, destid, hopcount,
+					RIO_STD_RTE_CONF_DESTID_SEL_CSR,
+					ext_cfg | i);
+			rio_mport_write_config_32(mport, destid, hopcount,
+					RIO_STD_RTE_CONF_PORT_SEL_CSR,
+					port_sel);
+			i += id_inc;
+		}
+	}
+
+	udelay(10);
+	return 0;
+}
+
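Since these three helpers share the prototypes of the rswitch
add_entry/get_entry/clr_table methods, a driver for a hypothetical
switch that fully implements the rev.1.3 standard registers could wire
them up directly, following the same pattern as the switch drivers
below (the vendor/device IDs here are placeholders, not real defines):

	static int std_switch_init(struct rio_dev *rdev, int do_enum)
	{
		pr_debug("RIO: %s for %s\n", __func__, rio_name(rdev));
		rdev->rswitch->add_entry = rio_std_route_add_entry;
		rdev->rswitch->get_entry = rio_std_route_get_entry;
		rdev->rswitch->clr_table = rio_std_route_clr_table;
		rdev->rswitch->set_domain = NULL;
		rdev->rswitch->get_domain = NULL;
		rdev->rswitch->em_init = NULL;
		rdev->rswitch->em_handle = NULL;

		return 0;
	}

	DECLARE_RIO_SWITCH_INIT(RIO_VID_EXAMPLE, RIO_DID_EXAMPLE,
				std_switch_init);
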
 static void rio_fixup_device(struct rio_dev *dev)
 {
 }
diff --git a/drivers/rapidio/rio.h b/drivers/rapidio/rio.h
index 7786d02..f27b7a9 100644
--- a/drivers/rapidio/rio.h
+++ b/drivers/rapidio/rio.h
@@ -18,38 +18,50 @@
 
 extern u32 rio_mport_get_feature(struct rio_mport *mport, int local, u16 destid,
 				 u8 hopcount, int ftr);
+extern u32 rio_mport_get_physefb(struct rio_mport *port, int local,
+				 u16 destid, u8 hopcount);
+extern u32 rio_mport_get_efb(struct rio_mport *port, int local, u16 destid,
+			     u8 hopcount, u32 from);
 extern int rio_create_sysfs_dev_files(struct rio_dev *rdev);
 extern int rio_enum_mport(struct rio_mport *mport);
 extern int rio_disc_mport(struct rio_mport *mport);
+extern int rio_std_route_add_entry(struct rio_mport *mport, u16 destid,
+				   u8 hopcount, u16 table, u16 route_destid,
+				   u8 route_port);
+extern int rio_std_route_get_entry(struct rio_mport *mport, u16 destid,
+				   u8 hopcount, u16 table, u16 route_destid,
+				   u8 *route_port);
+extern int rio_std_route_clr_table(struct rio_mport *mport, u16 destid,
+				   u8 hopcount, u16 table);
+extern int rio_set_port_lockout(struct rio_dev *rdev, u32 pnum, int lock);
 
 /* Structures internal to the RIO core code */
 extern struct device_attribute rio_dev_attrs[];
 extern spinlock_t rio_global_list_lock;
 
-extern struct rio_route_ops __start_rio_route_ops[];
-extern struct rio_route_ops __end_rio_route_ops[];
+extern struct rio_switch_ops __start_rio_switch_ops[];
+extern struct rio_switch_ops __end_rio_switch_ops[];
 
 /* Helpers internal to the RIO core code */
-#define DECLARE_RIO_ROUTE_SECTION(section, vid, did, add_hook, get_hook)  \
-	static struct rio_route_ops __rio_route_ops __used   \
-	__section(section)= { vid, did, add_hook, get_hook };
+#define DECLARE_RIO_SWITCH_SECTION(section, name, vid, did, init_hook) \
+	static const struct rio_switch_ops __rio_switch_##name __used \
+	__section(section) = { vid, did, init_hook };
 
 /**
- * DECLARE_RIO_ROUTE_OPS - Registers switch routing operations
+ * DECLARE_RIO_SWITCH_INIT - Registers switch initialization routine
  * @vid: RIO vendor ID
  * @did: RIO device ID
- * @add_hook: Callback that adds a route entry
- * @get_hook: Callback that gets a route entry
+ * @init_hook: Callback that performs switch-specific initialization
  *
- * Manipulating switch route tables in RIO is switch specific. This
- * registers a switch by vendor and device ID with two callbacks for
- * modifying and retrieving route entries in a switch. A &struct
- * rio_route_ops is initialized with the ops and placed into a
- * RIO-specific kernel section.
+ * Manipulating switch route tables and error management in RIO
+ * is switch specific. This registers a switch by vendor and device ID with
+ * an initialization callback for setting up switch operations and (if
+ * required) hardware initialization. A &struct rio_switch_ops is
+ * initialized with a pointer to the init routine and placed into a
+ * RIO-specific kernel section.
  */
-#define DECLARE_RIO_ROUTE_OPS(vid, did, add_hook, get_hook)		\
-	DECLARE_RIO_ROUTE_SECTION(.rio_route_ops,			\
-			vid, did, add_hook, get_hook)
+#define DECLARE_RIO_SWITCH_INIT(vid, did, init_hook)		\
+	DECLARE_RIO_SWITCH_SECTION(.rio_switch_ops, vid##did, \
+			vid, did, init_hook)
 
 #define RIO_GET_DID(size, x)	(size ? (x & 0xffff) : ((x & 0x00ff0000) >> 16))
 #define RIO_SET_DID(size, x)	(size ? (x & 0xffff) : ((x & 0x000000ff) << 16))
diff --git a/drivers/rapidio/switches/Kconfig b/drivers/rapidio/switches/Kconfig
new file mode 100644
index 0000000..2b4e9b2
--- /dev/null
+++ b/drivers/rapidio/switches/Kconfig
@@ -0,0 +1,28 @@
+#
+# RapidIO switches configuration
+#
+config RAPIDIO_TSI57X
+	bool "IDT Tsi57x SRIO switches support"
+	depends on RAPIDIO
+	---help---
+	  Includes support for IDT Tsi57x family of serial RapidIO switches.
+
+config RAPIDIO_CPS_XX
+	bool "IDT CPS-xx SRIO switches support"
+	depends on RAPIDIO
+	---help---
+	  Includes support for IDT CPS-16/12/10/8 serial RapidIO switches.
+
+config RAPIDIO_TSI568
+	bool "Tsi568 SRIO switch support"
+	depends on RAPIDIO
+	default n
+	---help---
+	  Includes support for IDT Tsi568 serial RapidIO switch.
+
+config RAPIDIO_TSI500
+	bool "Tsi500 Parallel RapidIO switch support"
+	depends on RAPIDIO
+	default n
+	---help---
+	  Includes support for IDT Tsi500 parallel RapidIO switch.
diff --git a/drivers/rapidio/switches/Makefile b/drivers/rapidio/switches/Makefile
index b924f83..fe4adc3 100644
--- a/drivers/rapidio/switches/Makefile
+++ b/drivers/rapidio/switches/Makefile
@@ -2,4 +2,11 @@
 # Makefile for RIO switches
 #
 
-obj-$(CONFIG_RAPIDIO)	+= tsi500.o
+obj-$(CONFIG_RAPIDIO_TSI57X)	+= tsi57x.o
+obj-$(CONFIG_RAPIDIO_CPS_XX)	+= idtcps.o
+obj-$(CONFIG_RAPIDIO_TSI568)	+= tsi568.o
+obj-$(CONFIG_RAPIDIO_TSI500)	+= tsi500.o
+
+ifeq ($(CONFIG_RAPIDIO_DEBUG),y)
+EXTRA_CFLAGS += -DDEBUG
+endif
diff --git a/drivers/rapidio/switches/idtcps.c b/drivers/rapidio/switches/idtcps.c
new file mode 100644
index 0000000..2c790c1
--- /dev/null
+++ b/drivers/rapidio/switches/idtcps.c
@@ -0,0 +1,137 @@
+/*
+ * IDT CPS RapidIO switches support
+ *
+ * Copyright 2009-2010 Integrated Device Technology, Inc.
+ * Alexandre Bounine <alexandre.bounine@idt.com>
+ *
+ * This program is free software; you can redistribute  it and/or modify it
+ * under  the terms of  the GNU General  Public License as published by the
+ * Free Software Foundation;  either version 2 of the  License, or (at your
+ * option) any later version.
+ */
+
+#include <linux/rio.h>
+#include <linux/rio_drv.h>
+#include <linux/rio_ids.h>
+#include "../rio.h"
+
+#define CPS_DEFAULT_ROUTE	0xde
+#define CPS_NO_ROUTE		0xdf
+
+#define IDTCPS_RIO_DOMAIN 0xf20020
+
+static int
+idtcps_route_add_entry(struct rio_mport *mport, u16 destid, u8 hopcount,
+		       u16 table, u16 route_destid, u8 route_port)
+{
+	u32 result;
+
+	if (table == RIO_GLOBAL_TABLE) {
+		rio_mport_write_config_32(mport, destid, hopcount,
+				RIO_STD_RTE_CONF_DESTID_SEL_CSR, route_destid);
+
+		rio_mport_read_config_32(mport, destid, hopcount,
+				RIO_STD_RTE_CONF_PORT_SEL_CSR, &result);
+
+		result = (0xffffff00 & result) | (u32)route_port;
+		rio_mport_write_config_32(mport, destid, hopcount,
+				RIO_STD_RTE_CONF_PORT_SEL_CSR, result);
+	}
+
+	return 0;
+}
+
+static int
+idtcps_route_get_entry(struct rio_mport *mport, u16 destid, u8 hopcount,
+		       u16 table, u16 route_destid, u8 *route_port)
+{
+	u32 result;
+
+	if (table == RIO_GLOBAL_TABLE) {
+		rio_mport_write_config_32(mport, destid, hopcount,
+				RIO_STD_RTE_CONF_DESTID_SEL_CSR, route_destid);
+
+		rio_mport_read_config_32(mport, destid, hopcount,
+				RIO_STD_RTE_CONF_PORT_SEL_CSR, &result);
+
+		if (CPS_DEFAULT_ROUTE == (u8)result ||
+		    CPS_NO_ROUTE == (u8)result)
+			*route_port = RIO_INVALID_ROUTE;
+		else
+			*route_port = (u8)result;
+	}
+
+	return 0;
+}
+
+static int
+idtcps_route_clr_table(struct rio_mport *mport, u16 destid, u8 hopcount,
+		       u16 table)
+{
+	u32 i;
+
+	if (table == RIO_GLOBAL_TABLE) {
+		for (i = 0x80000000; i <= 0x800000ff;) {
+			rio_mport_write_config_32(mport, destid, hopcount,
+				RIO_STD_RTE_CONF_DESTID_SEL_CSR, i);
+			rio_mport_write_config_32(mport, destid, hopcount,
+				RIO_STD_RTE_CONF_PORT_SEL_CSR,
+				(CPS_DEFAULT_ROUTE << 24) |
+				(CPS_DEFAULT_ROUTE << 16) |
+				(CPS_DEFAULT_ROUTE << 8) | CPS_DEFAULT_ROUTE);
+			i += 4;
+		}
+	}
+
+	return 0;
+}
+
+static int
+idtcps_set_domain(struct rio_mport *mport, u16 destid, u8 hopcount,
+		       u8 sw_domain)
+{
+	/*
+	 * Switch domain configuration operates only at global level
+	 */
+	rio_mport_write_config_32(mport, destid, hopcount,
+				  IDTCPS_RIO_DOMAIN, (u32)sw_domain);
+	return 0;
+}
+
+static int
+idtcps_get_domain(struct rio_mport *mport, u16 destid, u8 hopcount,
+		       u8 *sw_domain)
+{
+	u32 regval;
+
+	/*
+	 * Switch domain configuration operates only at global level
+	 */
+	rio_mport_read_config_32(mport, destid, hopcount,
+				IDTCPS_RIO_DOMAIN, &regval);
+
+	*sw_domain = (u8)(regval & 0xff);
+
+	return 0;
+}
+
+static int idtcps_switch_init(struct rio_dev *rdev, int do_enum)
+{
+	pr_debug("RIO: %s for %s\n", __func__, rio_name(rdev));
+	rdev->rswitch->add_entry = idtcps_route_add_entry;
+	rdev->rswitch->get_entry = idtcps_route_get_entry;
+	rdev->rswitch->clr_table = idtcps_route_clr_table;
+	rdev->rswitch->set_domain = idtcps_set_domain;
+	rdev->rswitch->get_domain = idtcps_get_domain;
+	rdev->rswitch->em_init = NULL;
+	rdev->rswitch->em_handle = NULL;
+
+	return 0;
+}
+
+DECLARE_RIO_SWITCH_INIT(RIO_VID_IDT, RIO_DID_IDTCPS6Q, idtcps_switch_init);
+DECLARE_RIO_SWITCH_INIT(RIO_VID_IDT, RIO_DID_IDTCPS8, idtcps_switch_init);
+DECLARE_RIO_SWITCH_INIT(RIO_VID_IDT, RIO_DID_IDTCPS10Q, idtcps_switch_init);
+DECLARE_RIO_SWITCH_INIT(RIO_VID_IDT, RIO_DID_IDTCPS12, idtcps_switch_init);
+DECLARE_RIO_SWITCH_INIT(RIO_VID_IDT, RIO_DID_IDTCPS16, idtcps_switch_init);
+DECLARE_RIO_SWITCH_INIT(RIO_VID_IDT, RIO_DID_IDT70K200, idtcps_switch_init);
diff --git a/drivers/rapidio/switches/tsi500.c b/drivers/rapidio/switches/tsi500.c
index c77c23b..914eddd 100644
--- a/drivers/rapidio/switches/tsi500.c
+++ b/drivers/rapidio/switches/tsi500.c
@@ -1,6 +1,10 @@
 /*
  * RapidIO Tsi500 switch support
  *
+ * Copyright 2009-2010 Integrated Device Technology, Inc.
+ * Alexandre Bounine <alexandre.bounine@idt.com>
+ *  - Modified switch operations initialization.
+ *
  * Copyright 2005 MontaVista Software, Inc.
  * Matt Porter <mporter@kernel.crashing.org>
  *
@@ -57,4 +61,18 @@
 	return ret;
 }
 
-DECLARE_RIO_ROUTE_OPS(RIO_VID_TUNDRA, RIO_DID_TSI500, tsi500_route_add_entry, tsi500_route_get_entry);
+static int tsi500_switch_init(struct rio_dev *rdev, int do_enum)
+{
+	pr_debug("RIO: %s for %s\n", __func__, rio_name(rdev));
+	rdev->rswitch->add_entry = tsi500_route_add_entry;
+	rdev->rswitch->get_entry = tsi500_route_get_entry;
+	rdev->rswitch->clr_table = NULL;
+	rdev->rswitch->set_domain = NULL;
+	rdev->rswitch->get_domain = NULL;
+	rdev->rswitch->em_init = NULL;
+	rdev->rswitch->em_handle = NULL;
+
+	return 0;
+}
+
+DECLARE_RIO_SWITCH_INIT(RIO_VID_TUNDRA, RIO_DID_TSI500, tsi500_switch_init);
diff --git a/drivers/rapidio/switches/tsi568.c b/drivers/rapidio/switches/tsi568.c
new file mode 100644
index 0000000..f7fd789
--- /dev/null
+++ b/drivers/rapidio/switches/tsi568.c
@@ -0,0 +1,146 @@
+/*
+ * RapidIO Tsi568 switch support
+ *
+ * Copyright 2009-2010 Integrated Device Technology, Inc.
+ * Alexandre Bounine <alexandre.bounine@idt.com>
+ *  - Added EM support
+ *  - Modified switch operations initialization.
+ *
+ * Copyright 2005 MontaVista Software, Inc.
+ * Matt Porter <mporter@kernel.crashing.org>
+ *
+ * This program is free software; you can redistribute  it and/or modify it
+ * under  the terms of  the GNU General  Public License as published by the
+ * Free Software Foundation;  either version 2 of the  License, or (at your
+ * option) any later version.
+ */
+
+#include <linux/rio.h>
+#include <linux/rio_drv.h>
+#include <linux/rio_ids.h>
+#include <linux/delay.h>
+#include "../rio.h"
+
+/* Global (broadcast) route registers */
+#define SPBC_ROUTE_CFG_DESTID	0x10070
+#define SPBC_ROUTE_CFG_PORT	0x10074
+
+/* Per port route registers */
+#define SPP_ROUTE_CFG_DESTID(n)	(0x11070 + 0x100*n)
+#define SPP_ROUTE_CFG_PORT(n)	(0x11074 + 0x100*n)
+
+#define TSI568_SP_MODE_BC	0x10004
+#define  TSI568_SP_MODE_PW_DIS	0x08000000
+
+static int
+tsi568_route_add_entry(struct rio_mport *mport, u16 destid, u8 hopcount,
+		       u16 table, u16 route_destid, u8 route_port)
+{
+	if (table == RIO_GLOBAL_TABLE) {
+		rio_mport_write_config_32(mport, destid, hopcount,
+					SPBC_ROUTE_CFG_DESTID, route_destid);
+		rio_mport_write_config_32(mport, destid, hopcount,
+					SPBC_ROUTE_CFG_PORT, route_port);
+	} else {
+		rio_mport_write_config_32(mport, destid, hopcount,
+					SPP_ROUTE_CFG_DESTID(table),
+					route_destid);
+		rio_mport_write_config_32(mport, destid, hopcount,
+					SPP_ROUTE_CFG_PORT(table), route_port);
+	}
+
+	udelay(10);
+
+	return 0;
+}
+
+static int
+tsi568_route_get_entry(struct rio_mport *mport, u16 destid, u8 hopcount,
+		       u16 table, u16 route_destid, u8 *route_port)
+{
+	int ret = 0;
+	u32 result;
+
+	if (table == RIO_GLOBAL_TABLE) {
+		rio_mport_write_config_32(mport, destid, hopcount,
+					SPBC_ROUTE_CFG_DESTID, route_destid);
+		rio_mport_read_config_32(mport, destid, hopcount,
+					SPBC_ROUTE_CFG_PORT, &result);
+	} else {
+		rio_mport_write_config_32(mport, destid, hopcount,
+					SPP_ROUTE_CFG_DESTID(table),
+					route_destid);
+		rio_mport_read_config_32(mport, destid, hopcount,
+					SPP_ROUTE_CFG_PORT(table), &result);
+	}
+
+	*route_port = result;
+	if (*route_port > 15)
+		ret = -1;
+
+	return ret;
+}
+
+static int
+tsi568_route_clr_table(struct rio_mport *mport, u16 destid, u8 hopcount,
+		       u16 table)
+{
+	u32 route_idx;
+	u32 lut_size;
+
+	lut_size = (mport->sys_size) ? 0x1ff : 0xff;
+
+	if (table == RIO_GLOBAL_TABLE) {
+		rio_mport_write_config_32(mport, destid, hopcount,
+					SPBC_ROUTE_CFG_DESTID, 0x80000000);
+		for (route_idx = 0; route_idx <= lut_size; route_idx++)
+			rio_mport_write_config_32(mport, destid, hopcount,
+						SPBC_ROUTE_CFG_PORT,
+						RIO_INVALID_ROUTE);
+	} else {
+		rio_mport_write_config_32(mport, destid, hopcount,
+					SPP_ROUTE_CFG_DESTID(table),
+					0x80000000);
+		for (route_idx = 0; route_idx <= lut_size; route_idx++)
+			rio_mport_write_config_32(mport, destid, hopcount,
+						SPP_ROUTE_CFG_PORT(table),
+						RIO_INVALID_ROUTE);
+	}
+
+	return 0;
+}
+
+static int
+tsi568_em_init(struct rio_dev *rdev)
+{
+	struct rio_mport *mport = rdev->net->hport;
+	u16 destid = rdev->rswitch->destid;
+	u8 hopcount = rdev->rswitch->hopcount;
+	u32 regval;
+
+	pr_debug("TSI568 %s [%d:%d]\n", __func__, destid, hopcount);
+
+	/* Make sure that Port-Writes are disabled (for all ports) */
+	rio_mport_read_config_32(mport, destid, hopcount,
+			TSI568_SP_MODE_BC, &regval);
+	rio_mport_write_config_32(mport, destid, hopcount,
+			TSI568_SP_MODE_BC, regval | TSI568_SP_MODE_PW_DIS);
+
+	return 0;
+}
+
+static int tsi568_switch_init(struct rio_dev *rdev, int do_enum)
+{
+	pr_debug("RIO: %s for %s\n", __func__, rio_name(rdev));
+	rdev->rswitch->add_entry = tsi568_route_add_entry;
+	rdev->rswitch->get_entry = tsi568_route_get_entry;
+	rdev->rswitch->clr_table = tsi568_route_clr_table;
+	rdev->rswitch->set_domain = NULL;
+	rdev->rswitch->get_domain = NULL;
+	rdev->rswitch->em_init = tsi568_em_init;
+	rdev->rswitch->em_handle = NULL;
+
+	return 0;
+}
+
+DECLARE_RIO_SWITCH_INIT(RIO_VID_TUNDRA, RIO_DID_TSI568, tsi568_switch_init);
diff --git a/drivers/rapidio/switches/tsi57x.c b/drivers/rapidio/switches/tsi57x.c
new file mode 100644
index 0000000..d34df72
--- /dev/null
+++ b/drivers/rapidio/switches/tsi57x.c
@@ -0,0 +1,315 @@
+/*
+ * RapidIO Tsi57x switch family support
+ *
+ * Copyright 2009-2010 Integrated Device Technology, Inc.
+ * Alexandre Bounine <alexandre.bounine@idt.com>
+ *  - Added EM support
+ *  - Modified switch operations initialization.
+ *
+ * Copyright 2005 MontaVista Software, Inc.
+ * Matt Porter <mporter@kernel.crashing.org>
+ *
+ * This program is free software; you can redistribute  it and/or modify it
+ * under  the terms of  the GNU General  Public License as published by the
+ * Free Software Foundation;  either version 2 of the  License, or (at your
+ * option) any later version.
+ */
+
+#include <linux/rio.h>
+#include <linux/rio_drv.h>
+#include <linux/rio_ids.h>
+#include <linux/delay.h>
+#include "../rio.h"
+
+/* Global (broadcast) route registers */
+#define SPBC_ROUTE_CFG_DESTID	0x10070
+#define SPBC_ROUTE_CFG_PORT	0x10074
+
+/* Per port route registers */
+#define SPP_ROUTE_CFG_DESTID(n)	(0x11070 + 0x100*n)
+#define SPP_ROUTE_CFG_PORT(n)	(0x11074 + 0x100*n)
+
+#define TSI578_SP_MODE(n)	(0x11004 + n*0x100)
+#define TSI578_SP_MODE_GLBL	0x10004
+#define  TSI578_SP_MODE_PW_DIS	0x08000000
+#define  TSI578_SP_MODE_LUT_512	0x01000000
+
+#define TSI578_SP_CTL_INDEP(n)	(0x13004 + n*0x100)
+#define TSI578_SP_LUT_PEINF(n)	(0x13010 + n*0x100)
+#define TSI578_SP_CS_TX(n)	(0x13014 + n*0x100)
+#define TSI578_SP_INT_STATUS(n) (0x13018 + n*0x100)
+
+#define TSI578_GLBL_ROUTE_BASE	0x10078
+
+static int
+tsi57x_route_add_entry(struct rio_mport *mport, u16 destid, u8 hopcount,
+		       u16 table, u16 route_destid, u8 route_port)
+{
+	if (table == RIO_GLOBAL_TABLE) {
+		rio_mport_write_config_32(mport, destid, hopcount,
+					  SPBC_ROUTE_CFG_DESTID, route_destid);
+		rio_mport_write_config_32(mport, destid, hopcount,
+					  SPBC_ROUTE_CFG_PORT, route_port);
+	} else {
+		rio_mport_write_config_32(mport, destid, hopcount,
+				SPP_ROUTE_CFG_DESTID(table), route_destid);
+		rio_mport_write_config_32(mport, destid, hopcount,
+				SPP_ROUTE_CFG_PORT(table), route_port);
+	}
+
+	udelay(10);
+
+	return 0;
+}
+
+static int
+tsi57x_route_get_entry(struct rio_mport *mport, u16 destid, u8 hopcount,
+		       u16 table, u16 route_destid, u8 *route_port)
+{
+	int ret = 0;
+	u32 result;
+
+	if (table == RIO_GLOBAL_TABLE) {
+		/* Use local RT of the ingress port to avoid possible
+		   race condition */
+		rio_mport_read_config_32(mport, destid, hopcount,
+			RIO_SWP_INFO_CAR, &result);
+		table = (result & RIO_SWP_INFO_PORT_NUM_MASK);
+	}
+
+	rio_mport_write_config_32(mport, destid, hopcount,
+				SPP_ROUTE_CFG_DESTID(table), route_destid);
+	rio_mport_read_config_32(mport, destid, hopcount,
+				SPP_ROUTE_CFG_PORT(table), &result);
+
+	*route_port = (u8)result;
+	if (*route_port > 15)
+		ret = -1;
+
+	return ret;
+}
+
+static int
+tsi57x_route_clr_table(struct rio_mport *mport, u16 destid, u8 hopcount,
+		       u16 table)
+{
+	u32 route_idx;
+	u32 lut_size;
+
+	lut_size = (mport->sys_size) ? 0x1ff : 0xff;
+
+	if (table == RIO_GLOBAL_TABLE) {
+		rio_mport_write_config_32(mport, destid, hopcount,
+					  SPBC_ROUTE_CFG_DESTID, 0x80000000);
+		for (route_idx = 0; route_idx <= lut_size; route_idx++)
+			rio_mport_write_config_32(mport, destid, hopcount,
+						  SPBC_ROUTE_CFG_PORT,
+						  RIO_INVALID_ROUTE);
+	} else {
+		rio_mport_write_config_32(mport, destid, hopcount,
+				SPP_ROUTE_CFG_DESTID(table), 0x80000000);
+		for (route_idx = 0; route_idx <= lut_size; route_idx++)
+			rio_mport_write_config_32(mport, destid, hopcount,
+				SPP_ROUTE_CFG_PORT(table), RIO_INVALID_ROUTE);
+	}
+
+	return 0;
+}
+
+static int
+tsi57x_set_domain(struct rio_mport *mport, u16 destid, u8 hopcount,
+		       u8 sw_domain)
+{
+	u32 regval;
+
+	/*
+	 * Switch domain configuration operates only at global level
+	 */
+
+	/* Turn off flat (LUT_512) mode */
+	rio_mport_read_config_32(mport, destid, hopcount,
+				 TSI578_SP_MODE_GLBL, &regval);
+	rio_mport_write_config_32(mport, destid, hopcount, TSI578_SP_MODE_GLBL,
+				  regval & ~TSI578_SP_MODE_LUT_512);
+	/* Set switch domain base */
+	rio_mport_write_config_32(mport, destid, hopcount,
+				  TSI578_GLBL_ROUTE_BASE,
+				  (u32)(sw_domain << 24));
+	return 0;
+}
+
+static int
+tsi57x_get_domain(struct rio_mport *mport, u16 destid, u8 hopcount,
+		       u8 *sw_domain)
+{
+	u32 regval;
+
+	/*
+	 * Switch domain configuration operates only at global level
+	 */
+	rio_mport_read_config_32(mport, destid, hopcount,
+				TSI578_GLBL_ROUTE_BASE, &regval);
+
+	*sw_domain = (u8)(regval >> 24);
+
+	return 0;
+}
+
+static int
+tsi57x_em_init(struct rio_dev *rdev)
+{
+	struct rio_mport *mport = rdev->net->hport;
+	u16 destid = rdev->rswitch->destid;
+	u8 hopcount = rdev->rswitch->hopcount;
+	u32 regval;
+	int portnum;
+
+	pr_debug("TSI578 %s [%d:%d]\n", __func__, destid, hopcount);
+
+	for (portnum = 0; portnum < 16; portnum++) {
+		/* Make sure that Port-Writes are enabled (for all ports) */
+		rio_mport_read_config_32(mport, destid, hopcount,
+				TSI578_SP_MODE(portnum), &regval);
+		rio_mport_write_config_32(mport, destid, hopcount,
+				TSI578_SP_MODE(portnum),
+				regval & ~TSI578_SP_MODE_PW_DIS);
+
+		/* Clear all pending interrupts */
+		rio_mport_read_config_32(mport, destid, hopcount,
+				rdev->phys_efptr +
+					RIO_PORT_N_ERR_STS_CSR(portnum),
+				&regval);
+		rio_mport_write_config_32(mport, destid, hopcount,
+				rdev->phys_efptr +
+					RIO_PORT_N_ERR_STS_CSR(portnum),
+				regval & 0x07120214);
+
+		rio_mport_read_config_32(mport, destid, hopcount,
+				TSI578_SP_INT_STATUS(portnum), &regval);
+		rio_mport_write_config_32(mport, destid, hopcount,
+				TSI578_SP_INT_STATUS(portnum),
+				regval & 0x000700bd);
+
+		/* Enable all interrupts to allow ports to send a port-write */
+		rio_mport_read_config_32(mport, destid, hopcount,
+				TSI578_SP_CTL_INDEP(portnum), &regval);
+		rio_mport_write_config_32(mport, destid, hopcount,
+				TSI578_SP_CTL_INDEP(portnum),
+				regval | 0x000b0000);
+
+		/* Skip next (odd) port if the current port is in x4 mode */
+		rio_mport_read_config_32(mport, destid, hopcount,
+				rdev->phys_efptr + RIO_PORT_N_CTL_CSR(portnum),
+				&regval);
+		if ((regval & RIO_PORT_N_CTL_PWIDTH) == RIO_PORT_N_CTL_PWIDTH_4)
+			portnum++;
+	}
+
+	return 0;
+}
+
+static int
+tsi57x_em_handler(struct rio_dev *rdev, u8 portnum)
+{
+	struct rio_mport *mport = rdev->net->hport;
+	u16 destid = rdev->rswitch->destid;
+	u8 hopcount = rdev->rswitch->hopcount;
+	u32 intstat, err_status;
+	int sendcount, checkcount;
+	u8 route_port;
+	u32 regval;
+
+	rio_mport_read_config_32(mport, destid, hopcount,
+			rdev->phys_efptr + RIO_PORT_N_ERR_STS_CSR(portnum),
+			&err_status);
+
+	if ((err_status & RIO_PORT_N_ERR_STS_PORT_OK) &&
+	    (err_status & (RIO_PORT_N_ERR_STS_PW_OUT_ES |
+			  RIO_PORT_N_ERR_STS_PW_INP_ES))) {
+		/* Remove any queued packets by locking/unlocking port */
+		rio_mport_read_config_32(mport, destid, hopcount,
+			rdev->phys_efptr + RIO_PORT_N_CTL_CSR(portnum),
+			&regval);
+		if (!(regval & RIO_PORT_N_CTL_LOCKOUT)) {
+			rio_mport_write_config_32(mport, destid, hopcount,
+				rdev->phys_efptr + RIO_PORT_N_CTL_CSR(portnum),
+				regval | RIO_PORT_N_CTL_LOCKOUT);
+			udelay(50);
+			rio_mport_write_config_32(mport, destid, hopcount,
+				rdev->phys_efptr + RIO_PORT_N_CTL_CSR(portnum),
+				regval);
+		}
+
+		/* Read from link maintenance response register to clear
+		 * valid bit
+		 */
+		rio_mport_read_config_32(mport, destid, hopcount,
+			rdev->phys_efptr + RIO_PORT_N_MNT_RSP_CSR(portnum),
+			&regval);
+
+		/* Send a Packet-Not-Accepted/Link-Request-Input-Status control
+		 * symbol to recover from IES/OES
+		 */
+		sendcount = 3;
+		while (sendcount) {
+			rio_mport_write_config_32(mport, destid, hopcount,
+					  TSI578_SP_CS_TX(portnum), 0x40fc8000);
+			checkcount = 3;
+			while (checkcount--) {
+				udelay(50);
+				rio_mport_read_config_32(
+					mport, destid, hopcount,
+					rdev->phys_efptr +
+						RIO_PORT_N_MNT_RSP_CSR(portnum),
+					&regval);
+				if (regval & RIO_PORT_N_MNT_RSP_RVAL)
+					goto exit_es;
+			}
+
+			sendcount--;
+		}
+	}
+
+exit_es:
+	/* Clear implementation specific error status bits */
+	rio_mport_read_config_32(mport, destid, hopcount,
+				 TSI578_SP_INT_STATUS(portnum), &intstat);
+	pr_debug("TSI578[%x:%x] SP%d_INT_STATUS=0x%08x\n",
+		 destid, hopcount, portnum, intstat);
+
+	if (intstat & 0x10000) {
+		rio_mport_read_config_32(mport, destid, hopcount,
+				TSI578_SP_LUT_PEINF(portnum), &regval);
+		regval = (mport->sys_size) ? (regval >> 16) : (regval >> 24);
+		route_port = rdev->rswitch->route_table[regval];
+		pr_debug("RIO: TSI578[%s] P%d LUT Parity Error (destID=%d)\n",
+			rio_name(rdev), portnum, regval);
+		tsi57x_route_add_entry(mport, destid, hopcount,
+				RIO_GLOBAL_TABLE, regval, route_port);
+	}
+
+	rio_mport_write_config_32(mport, destid, hopcount,
+				  TSI578_SP_INT_STATUS(portnum),
+				  intstat & 0x000700bd);
+
+	return 0;
+}
+
+static int tsi57x_switch_init(struct rio_dev *rdev, int do_enum)
+{
+	pr_debug("RIO: %s for %s\n", __func__, rio_name(rdev));
+	rdev->rswitch->add_entry = tsi57x_route_add_entry;
+	rdev->rswitch->get_entry = tsi57x_route_get_entry;
+	rdev->rswitch->clr_table = tsi57x_route_clr_table;
+	rdev->rswitch->set_domain = tsi57x_set_domain;
+	rdev->rswitch->get_domain = tsi57x_get_domain;
+	rdev->rswitch->em_init = tsi57x_em_init;
+	rdev->rswitch->em_handle = tsi57x_em_handler;
+
+	return 0;
+}
+
+DECLARE_RIO_SWITCH_INIT(RIO_VID_TUNDRA, RIO_DID_TSI572, tsi57x_switch_init);
+DECLARE_RIO_SWITCH_INIT(RIO_VID_TUNDRA, RIO_DID_TSI574, tsi57x_switch_init);
+DECLARE_RIO_SWITCH_INIT(RIO_VID_TUNDRA, RIO_DID_TSI577, tsi57x_switch_init);
+DECLARE_RIO_SWITCH_INIT(RIO_VID_TUNDRA, RIO_DID_TSI578, tsi57x_switch_init);
diff --git a/drivers/rtc/Kconfig b/drivers/rtc/Kconfig
index f159832..10ba12c 100644
--- a/drivers/rtc/Kconfig
+++ b/drivers/rtc/Kconfig
@@ -611,6 +611,13 @@
 	  Select this to enable the ST-Ericsson AB3100 Mixed Signal IC RTC
 	  support. This chip contains a battery- and capacitor-backed RTC.
 
+config RTC_DRV_AB8500
+	tristate "ST-Ericsson AB8500 RTC"
+	depends on AB8500_CORE
+	help
+	  Select this to enable the ST-Ericsson AB8500 power management IC RTC
+	  support. This chip contains a battery- and capacitor-backed RTC.
+
 config RTC_DRV_NUC900
 	tristate "NUC910/NUC920 RTC driver"
 	depends on RTC_CLASS && ARCH_W90X900
diff --git a/drivers/rtc/Makefile b/drivers/rtc/Makefile
index 245311a..5adbba7 100644
--- a/drivers/rtc/Makefile
+++ b/drivers/rtc/Makefile
@@ -18,6 +18,7 @@
 # Keep the list ordered.
 
 obj-$(CONFIG_RTC_DRV_AB3100)	+= rtc-ab3100.o
+obj-$(CONFIG_RTC_DRV_AB8500)	+= rtc-ab8500.o
 obj-$(CONFIG_RTC_DRV_AT32AP700X)+= rtc-at32ap700x.o
 obj-$(CONFIG_RTC_DRV_AT91RM9200)+= rtc-at91rm9200.o
 obj-$(CONFIG_RTC_DRV_AT91SAM9)	+= rtc-at91sam9.o
diff --git a/drivers/rtc/rtc-ab8500.c b/drivers/rtc/rtc-ab8500.c
new file mode 100644
index 0000000..2fda031
--- /dev/null
+++ b/drivers/rtc/rtc-ab8500.c
@@ -0,0 +1,363 @@
+/*
+ * Copyright (C) ST-Ericsson SA 2010
+ *
+ * License terms: GNU General Public License (GPL) version 2
+ * Author: Virupax Sadashivpetimath <virupax.sadashivpetimath@stericsson.com>
+ *
+ * RTC clock driver for the RTC part of the AB8500 Power management chip.
+ * Based on RTC clock driver for the AB3100 Analog Baseband Chip by
+ * Linus Walleij <linus.walleij@stericsson.com>
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/platform_device.h>
+#include <linux/rtc.h>
+#include <linux/mfd/ab8500.h>
+#include <linux/delay.h>
+
+#define AB8500_RTC_SOFF_STAT_REG	0x0F00
+#define AB8500_RTC_CC_CONF_REG		0x0F01
+#define AB8500_RTC_READ_REQ_REG		0x0F02
+#define AB8500_RTC_WATCH_TSECMID_REG	0x0F03
+#define AB8500_RTC_WATCH_TSECHI_REG	0x0F04
+#define AB8500_RTC_WATCH_TMIN_LOW_REG	0x0F05
+#define AB8500_RTC_WATCH_TMIN_MID_REG	0x0F06
+#define AB8500_RTC_WATCH_TMIN_HI_REG	0x0F07
+#define AB8500_RTC_ALRM_MIN_LOW_REG	0x0F08
+#define AB8500_RTC_ALRM_MIN_MID_REG	0x0F09
+#define AB8500_RTC_ALRM_MIN_HI_REG	0x0F0A
+#define AB8500_RTC_STAT_REG		0x0F0B
+#define AB8500_RTC_BKUP_CHG_REG		0x0F0C
+#define AB8500_RTC_FORCE_BKUP_REG	0x0F0D
+#define AB8500_RTC_CALIB_REG		0x0F0E
+#define AB8500_RTC_SWITCH_STAT_REG	0x0F0F
+#define AB8500_REV_REG			0x1080
+
+/* RtcReadRequest bits */
+#define RTC_READ_REQUEST		0x01
+#define RTC_WRITE_REQUEST		0x02
+
+/* RtcCtrl bits */
+#define RTC_ALARM_ENA			0x04
+#define RTC_STATUS_DATA			0x01
+
+#define COUNTS_PER_SEC			(0xF000 / 60)
+#define AB8500_RTC_EPOCH		2000
+
+static const unsigned long ab8500_rtc_time_regs[] = {
+	AB8500_RTC_WATCH_TMIN_HI_REG, AB8500_RTC_WATCH_TMIN_MID_REG,
+	AB8500_RTC_WATCH_TMIN_LOW_REG, AB8500_RTC_WATCH_TSECHI_REG,
+	AB8500_RTC_WATCH_TSECMID_REG
+};
+
+static const unsigned long ab8500_rtc_alarm_regs[] = {
+	AB8500_RTC_ALRM_MIN_HI_REG, AB8500_RTC_ALRM_MIN_MID_REG,
+	AB8500_RTC_ALRM_MIN_LOW_REG
+};
+
+/* Calculate the seconds from 1970 to 01-01-2000 00:00:00 */
+static unsigned long get_elapsed_seconds(int year)
+{
+	unsigned long secs;
+	struct rtc_time tm = {
+		.tm_year = year - 1900,
+		.tm_mday = 1,
+	};
+
+	/*
+	 * This function calculates secs from 1970 and not from
+	 * 1900, even if we supply the offset from year 1900.
+	 */
+	rtc_tm_to_time(&tm, &secs);
+	return secs;
+}
+
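As a worked example of the epoch handling (plain Unix time arithmetic,
not taken from the driver): get_elapsed_seconds(2000) evaluates to
946684800, the number of seconds from 1970-01-01 to 2000-01-01.
Setting the clock to 2010-06-01 00:00:00 UTC (1275350400 seconds since
1970) therefore stores 1275350400 - 946684800 = 328665600 seconds past
the AB8500 epoch, i.e. 5477760 minutes in the three TMIN registers and
0 leftover seconds (scaled by COUNTS_PER_SEC) in the TSEC registers.
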
+static int ab8500_rtc_read_time(struct device *dev, struct rtc_time *tm)
+{
+	struct ab8500 *ab8500 = dev_get_drvdata(dev->parent);
+	unsigned long timeout = jiffies + HZ;
+	int retval, i;
+	unsigned long mins, secs;
+	unsigned char buf[ARRAY_SIZE(ab8500_rtc_time_regs)];
+
+	/* Request a data read */
+	retval = ab8500_write(ab8500, AB8500_RTC_READ_REQ_REG,
+			      RTC_READ_REQUEST);
+	if (retval < 0)
+		return retval;
+
+	/* Early AB8500 chips will not clear the rtc read request bit */
+	if (ab8500->revision == 0) {
+		msleep(1);
+	} else {
+		/* Wait for some cycles after enabling the rtc read in ab8500 */
+		while (time_before(jiffies, timeout)) {
+			retval = ab8500_read(ab8500, AB8500_RTC_READ_REQ_REG);
+			if (retval < 0)
+				return retval;
+
+			if (!(retval & RTC_READ_REQUEST))
+				break;
+
+			msleep(1);
+		}
+	}
+
+	/* Read the Watchtime registers */
+	for (i = 0; i < ARRAY_SIZE(ab8500_rtc_time_regs); i++) {
+		retval = ab8500_read(ab8500, ab8500_rtc_time_regs[i]);
+		if (retval < 0)
+			return retval;
+		buf[i] = retval;
+	}
+
+	mins = (buf[0] << 16) | (buf[1] << 8) | buf[2];
+
+	secs = (buf[3] << 8) | buf[4];
+	secs = secs / COUNTS_PER_SEC;
+	secs = secs + (mins * 60);
+
+	/* Add back the initially subtracted number of seconds */
+	secs += get_elapsed_seconds(AB8500_RTC_EPOCH);
+
+	rtc_time_to_tm(secs, tm);
+	return rtc_valid_tm(tm);
+}
+
+static int ab8500_rtc_set_time(struct device *dev, struct rtc_time *tm)
+{
+	struct ab8500 *ab8500 = dev_get_drvdata(dev->parent);
+	int retval, i;
+	unsigned char buf[ARRAY_SIZE(ab8500_rtc_time_regs)];
+	unsigned long no_secs, no_mins, secs = 0;
+
+	if (tm->tm_year < (AB8500_RTC_EPOCH - 1900)) {
+		dev_dbg(dev, "year should be equal to or greater than %d\n",
+				AB8500_RTC_EPOCH);
+		return -EINVAL;
+	}
+
+	/* Get the number of seconds since 1970 */
+	rtc_tm_to_time(tm, &secs);
+
+	/*
+	 * Convert it to the number of seconds since 01-01-2000 00:00:00, since
+	 * we only have a small counter in the RTC.
+	 */
+	secs -= get_elapsed_seconds(AB8500_RTC_EPOCH);
+
+	no_mins = secs / 60;
+
+	no_secs = secs % 60;
+	/* Make the seconds count as per the RTC resolution */
+	no_secs = no_secs * COUNTS_PER_SEC;
+
+	buf[4] = no_secs & 0xFF;
+	buf[3] = (no_secs >> 8) & 0xFF;
+
+	buf[2] = no_mins & 0xFF;
+	buf[1] = (no_mins >> 8) & 0xFF;
+	buf[0] = (no_mins >> 16) & 0xFF;
+
+	for (i = 0; i < ARRAY_SIZE(ab8500_rtc_time_regs); i++) {
+		retval = ab8500_write(ab8500, ab8500_rtc_time_regs[i], buf[i]);
+		if (retval < 0)
+			return retval;
+	}
+
+	/* Request a data write */
+	return ab8500_write(ab8500, AB8500_RTC_READ_REQ_REG, RTC_WRITE_REQUEST);
+}
+
+static int ab8500_rtc_read_alarm(struct device *dev, struct rtc_wkalrm *alarm)
+{
+	struct ab8500 *ab8500 = dev_get_drvdata(dev->parent);
+	int retval, i;
+	int rtc_ctrl;
+	unsigned char buf[ARRAY_SIZE(ab8500_rtc_alarm_regs)];
+	unsigned long secs, mins;
+
+	/* Check if the alarm is enabled or not */
+	rtc_ctrl = ab8500_read(ab8500, AB8500_RTC_STAT_REG);
+	if (rtc_ctrl < 0)
+		return rtc_ctrl;
+
+	if (rtc_ctrl & RTC_ALARM_ENA)
+		alarm->enabled = 1;
+	else
+		alarm->enabled = 0;
+
+	alarm->pending = 0;
+
+	for (i = 0; i < ARRAY_SIZE(ab8500_rtc_alarm_regs); i++) {
+		retval = ab8500_read(ab8500, ab8500_rtc_alarm_regs[i]);
+		if (retval < 0)
+			return retval;
+		buf[i] = retval;
+	}
+
+	mins = (buf[0] << 16) | (buf[1] << 8) | (buf[2]);
+	secs = mins * 60;
+
+	/* Add back the initially subtracted number of seconds */
+	secs += get_elapsed_seconds(AB8500_RTC_EPOCH);
+
+	rtc_time_to_tm(secs, &alarm->time);
+
+	return rtc_valid_tm(&alarm->time);
+}
+
+static int ab8500_rtc_irq_enable(struct device *dev, unsigned int enabled)
+{
+	struct ab8500 *ab8500 = dev_get_drvdata(dev->parent);
+
+	return ab8500_set_bits(ab8500, AB8500_RTC_STAT_REG, RTC_ALARM_ENA,
+			       enabled ? RTC_ALARM_ENA : 0);
+}
+
+static int ab8500_rtc_set_alarm(struct device *dev, struct rtc_wkalrm *alarm)
+{
+	struct ab8500 *ab8500 = dev_get_drvdata(dev->parent);
+	int retval, i;
+	unsigned char buf[ARRAY_SIZE(ab8500_rtc_alarm_regs)];
+	unsigned long mins, secs = 0;
+
+	if (alarm->time.tm_year < (AB8500_RTC_EPOCH - 1900)) {
+		dev_dbg(dev, "year should be equal to or greater than %d\n",
+				AB8500_RTC_EPOCH);
+		return -EINVAL;
+	}
+
+	/* Get the number of seconds since 1970 */
+	rtc_tm_to_time(&alarm->time, &secs);
+
+	/*
+	 * Convert it to the number of seconds since 01-01-2000 00:00:00, since
+	 * we only have a small counter in the RTC.
+	 */
+	secs -= get_elapsed_seconds(AB8500_RTC_EPOCH);
+
+	mins = secs / 60;
+
+	buf[2] = mins & 0xFF;
+	buf[1] = (mins >> 8) & 0xFF;
+	buf[0] = (mins >> 16) & 0xFF;
+
+	/* Set the alarm time */
+	for (i = 0; i < ARRAY_SIZE(ab8500_rtc_alarm_regs); i++) {
+		retval = ab8500_write(ab8500, ab8500_rtc_alarm_regs[i], buf[i]);
+		if (retval < 0)
+			return retval;
+	}
+
+	return ab8500_rtc_irq_enable(dev, alarm->enabled);
+}
+
+static irqreturn_t rtc_alarm_handler(int irq, void *data)
+{
+	struct rtc_device *rtc = data;
+	unsigned long events = RTC_IRQF | RTC_AF;
+
+	dev_dbg(&rtc->dev, "%s\n", __func__);
+	rtc_update_irq(rtc, 1, events);
+
+	return IRQ_HANDLED;
+}
+
+static const struct rtc_class_ops ab8500_rtc_ops = {
+	.read_time		= ab8500_rtc_read_time,
+	.set_time		= ab8500_rtc_set_time,
+	.read_alarm		= ab8500_rtc_read_alarm,
+	.set_alarm		= ab8500_rtc_set_alarm,
+	.alarm_irq_enable	= ab8500_rtc_irq_enable,
+};
+
+static int __devinit ab8500_rtc_probe(struct platform_device *pdev)
+{
+	struct ab8500 *ab8500 = dev_get_drvdata(pdev->dev.parent);
+	int err;
+	struct rtc_device *rtc;
+	int rtc_ctrl;
+	int irq;
+
+	irq = platform_get_irq_byname(pdev, "ALARM");
+	if (irq < 0)
+		return irq;
+
+	/* For RTC supply test */
+	err = ab8500_set_bits(ab8500, AB8500_RTC_STAT_REG, RTC_STATUS_DATA,
+			RTC_STATUS_DATA);
+	if (err < 0)
+		return err;
+
+	/* Wait for reset by the PorRtc */
+	msleep(1);
+
+	rtc_ctrl = ab8500_read(ab8500, AB8500_RTC_STAT_REG);
+	if (rtc_ctrl < 0)
+		return rtc_ctrl;
+
+	/* Check if the RTC Supply fails */
+	if (!(rtc_ctrl & RTC_STATUS_DATA)) {
+		dev_err(&pdev->dev, "RTC supply failure\n");
+		return -ENODEV;
+	}
+
+	rtc = rtc_device_register("ab8500-rtc", &pdev->dev, &ab8500_rtc_ops,
+			THIS_MODULE);
+	if (IS_ERR(rtc)) {
+		dev_err(&pdev->dev, "Registration failed\n");
+		err = PTR_ERR(rtc);
+		return err;
+	}
+
+	err = request_threaded_irq(irq, NULL, rtc_alarm_handler, 0,
+				   "ab8500-rtc", rtc);
+	if (err < 0) {
+		rtc_device_unregister(rtc);
+		return err;
+	}
+
+	platform_set_drvdata(pdev, rtc);
+
+	return 0;
+}
+
+static int __devexit ab8500_rtc_remove(struct platform_device *pdev)
+{
+	struct rtc_device *rtc = platform_get_drvdata(pdev);
+	int irq = platform_get_irq_byname(pdev, "ALARM");
+
+	free_irq(irq, rtc);
+	rtc_device_unregister(rtc);
+	platform_set_drvdata(pdev, NULL);
+
+	return 0;
+}
+
+static struct platform_driver ab8500_rtc_driver = {
+	.driver = {
+		.name = "ab8500-rtc",
+		.owner = THIS_MODULE,
+	},
+	.probe	= ab8500_rtc_probe,
+	.remove = __devexit_p(ab8500_rtc_remove),
+};
+
+static int __init ab8500_rtc_init(void)
+{
+	return platform_driver_register(&ab8500_rtc_driver);
+}
+
+static void __exit ab8500_rtc_exit(void)
+{
+	platform_driver_unregister(&ab8500_rtc_driver);
+}
+
+module_init(ab8500_rtc_init);
+module_exit(ab8500_rtc_exit);
+MODULE_AUTHOR("Virupax Sadashivpetimath <virupax.sadashivpetimath@stericsson.com>");
+MODULE_DESCRIPTION("AB8500 RTC Driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/rtc/rtc-m41t80.c b/drivers/rtc/rtc-m41t80.c
index 038095d..6dc4e62 100644
--- a/drivers/rtc/rtc-m41t80.c
+++ b/drivers/rtc/rtc-m41t80.c
@@ -595,10 +595,6 @@
 static ssize_t wdt_write(struct file *file, const char __user *buf,
 			 size_t count, loff_t *ppos)
 {
-	/*  Can't seek (pwrite) on this device
-	if (ppos != &file->f_pos)
-	return -ESPIPE;
-	*/
 	if (count) {
 		wdt_ping();
 		return 1;
@@ -707,7 +703,7 @@
 		 */
 		wdt_is_open = 1;
 		unlock_kernel();
-		return 0;
+		return nonseekable_open(inode, file);
 	}
 	return -ENODEV;
 }
diff --git a/drivers/scsi/osst.c b/drivers/scsi/osst.c
index 8dbf1c3..d64b717 100644
--- a/drivers/scsi/osst.c
+++ b/drivers/scsi/osst.c
@@ -3587,7 +3587,7 @@
 		if (i == (-ENOSPC)) {
 			transfer = STp->buffer->writing;	/* FIXME -- check this logic */
 			if (transfer <= do_count) {
-				filp->f_pos += do_count - transfer;
+				*ppos += do_count - transfer;
 				count -= do_count - transfer;
 				if (STps->drv_block >= 0) {
 					STps->drv_block += (do_count - transfer) / STp->block_size;
@@ -3625,7 +3625,7 @@
 			goto out;
 		}
 
-		filp->f_pos += do_count;
+		*ppos += do_count;
 		b_point += do_count;
 		count -= do_count;
 		if (STps->drv_block >= 0) {
@@ -3647,7 +3647,7 @@
 		if (STps->drv_block >= 0) {
 			STps->drv_block += blks;
 		}
-		filp->f_pos += count;
+		*ppos += count;
 		count = 0;
 	}
 
@@ -3823,7 +3823,7 @@
 			}
 			STp->logical_blk_num += transfer / STp->block_size;
 			STps->drv_block      += transfer / STp->block_size;
-			filp->f_pos          += transfer;
+			*ppos                += transfer;
 			buf                  += transfer;
 			total                += transfer;
 		}
@@ -5626,6 +5626,7 @@
 	.open =         os_scsi_tape_open,
 	.flush =        os_scsi_tape_flush,
 	.release =      os_scsi_tape_close,
+	.llseek =	noop_llseek,
 };
 
 static int osst_supports(struct scsi_device * SDp)
diff --git a/drivers/scsi/st.c b/drivers/scsi/st.c
index 3ea1a71..24211d0 100644
--- a/drivers/scsi/st.c
+++ b/drivers/scsi/st.c
@@ -3962,6 +3962,7 @@
 	.open =		st_open,
 	.flush =	st_flush,
 	.release =	st_release,
+	.llseek =	noop_llseek,
 };
 
 static int st_probe(struct device *dev)
diff --git a/drivers/staging/go7007/saa7134-go7007.c b/drivers/staging/go7007/saa7134-go7007.c
index 49f0d31..cf7c34a 100644
--- a/drivers/staging/go7007/saa7134-go7007.c
+++ b/drivers/staging/go7007/saa7134-go7007.c
@@ -242,13 +242,13 @@
 		printk(KERN_DEBUG "saa7134-go7007: irq: lost %ld\n",
 				(status >> 16) & 0x0f);
 	if (status & 0x100000) {
-		dma_sync_single(&dev->pci->dev,
-				saa->bottom_dma, PAGE_SIZE, DMA_FROM_DEVICE);
+		dma_sync_single_for_cpu(&dev->pci->dev,
+					saa->bottom_dma, PAGE_SIZE, DMA_FROM_DEVICE);
 		go7007_parse_video_stream(go, saa->bottom, PAGE_SIZE);
 		saa_writel(SAA7134_RS_BA2(5), cpu_to_le32(saa->bottom_dma));
 	} else {
-		dma_sync_single(&dev->pci->dev,
-				saa->top_dma, PAGE_SIZE, DMA_FROM_DEVICE);
+		dma_sync_single_for_cpu(&dev->pci->dev,
+					saa->top_dma, PAGE_SIZE, DMA_FROM_DEVICE);
 		go7007_parse_video_stream(go, saa->top, PAGE_SIZE);
 		saa_writel(SAA7134_RS_BA1(5), cpu_to_le32(saa->top_dma));
 	}
diff --git a/drivers/telephony/ixj.c b/drivers/telephony/ixj.c
index e89304c..b53deee 100644
--- a/drivers/telephony/ixj.c
+++ b/drivers/telephony/ixj.c
@@ -5879,20 +5879,13 @@
 static int ixj_build_filter_cadence(IXJ *j, IXJ_FILTER_CADENCE __user * cp)
 {
 	IXJ_FILTER_CADENCE *lcp;
-	lcp = kmalloc(sizeof(IXJ_FILTER_CADENCE), GFP_KERNEL);
-	if (lcp == NULL) {
+	lcp = memdup_user(cp, sizeof(IXJ_FILTER_CADENCE));
+	if (IS_ERR(lcp)) {
 		if(ixjdebug & 0x0001) {
-			printk(KERN_INFO "Could not allocate memory for cadence\n");
+			printk(KERN_INFO "Could not allocate memory for cadence or could not copy cadence to kernel\n");
 		}
-		return -ENOMEM;
+		return PTR_ERR(lcp);
         }
-	if (copy_from_user(lcp, cp, sizeof(IXJ_FILTER_CADENCE))) {
-		if(ixjdebug & 0x0001) {
-			printk(KERN_INFO "Could not copy cadence to kernel\n");
-		}
-		kfree(lcp);
-		return -EFAULT;
-	}
 	if (lcp->filter > 5) {
 		if(ixjdebug & 0x0001) {
 			printk(KERN_INFO "Cadence out of range\n");
diff --git a/drivers/video/bf54x-lq043fb.c b/drivers/video/bf54x-lq043fb.c
index 23b2a8c..b020ba7 100644
--- a/drivers/video/bf54x-lq043fb.c
+++ b/drivers/video/bf54x-lq043fb.c
@@ -501,7 +501,9 @@
 
 static int __devinit bfin_bf54x_probe(struct platform_device *pdev)
 {
+#ifndef NO_BL_SUPPORT
 	struct backlight_properties props;
+#endif
 	struct bfin_bf54xfb_info *info;
 	struct fb_info *fbinfo;
 	int ret;
@@ -654,7 +656,8 @@
 		printk(KERN_ERR DRIVER_NAME
 			": unable to register backlight.\n");
 		ret = -EINVAL;
-		goto out9;
+		unregister_framebuffer(fbinfo);
+		goto out8;
 	}
 
 	lcd_dev = lcd_device_register(DRIVER_NAME, &pdev->dev, NULL, &bfin_lcd_ops);
@@ -663,8 +666,6 @@
 
 	return 0;
 
-out9:
-	unregister_framebuffer(fbinfo);
 out8:
 	free_irq(info->irq, info);
 out7:
diff --git a/drivers/video/bfin-t350mcqb-fb.c b/drivers/video/bfin-t350mcqb-fb.c
index c2ec3dcd..7a50272 100644
--- a/drivers/video/bfin-t350mcqb-fb.c
+++ b/drivers/video/bfin-t350mcqb-fb.c
@@ -420,7 +420,9 @@
 
 static int __devinit bfin_t350mcqb_probe(struct platform_device *pdev)
 {
+#ifndef NO_BL_SUPPORT
 	struct backlight_properties props;
+#endif
 	struct bfin_t350mcqbfb_info *info;
 	struct fb_info *fbinfo;
 	int ret;
@@ -550,7 +552,8 @@
 		printk(KERN_ERR DRIVER_NAME
 			": unable to register backlight.\n");
 		ret = -EINVAL;
-		goto out9;
+		unregister_framebuffer(fbinfo);
+		goto out8;
 	}
 
 	lcd_dev = lcd_device_register(DRIVER_NAME, NULL, &bfin_lcd_ops);
@@ -559,8 +562,6 @@
 
 	return 0;
 
-out9:
-	unregister_framebuffer(fbinfo);
 out8:
 	free_irq(info->irq, info);
 out7:
diff --git a/drivers/video/s3fb.c b/drivers/video/s3fb.c
index d4471b4..dce8c97 100644
--- a/drivers/video/s3fb.c
+++ b/drivers/video/s3fb.c
@@ -71,7 +71,8 @@
 			"S3 Trio64UV+", "S3 Trio64V2/DX", "S3 Trio64V2/GX",
 			"S3 Plato/PX", "S3 Aurora64VP", "S3 Virge",
 			"S3 Virge/VX", "S3 Virge/DX", "S3 Virge/GX",
-			"S3 Virge/GX2", "S3 Virge/GX2P", "S3 Virge/GX2P"};
+			"S3 Virge/GX2", "S3 Virge/GX2P", "S3 Virge/GX2P",
+			"S3 Trio3D/1X", "S3 Trio3D/2X", "S3 Trio3D/2X"};
 
 #define CHIP_UNKNOWN		0x00
 #define CHIP_732_TRIO32		0x01
@@ -89,10 +90,14 @@
 #define CHIP_356_VIRGE_GX2	0x0D
 #define CHIP_357_VIRGE_GX2P	0x0E
 #define CHIP_359_VIRGE_GX2P	0x0F
+#define CHIP_360_TRIO3D_1X	0x10
+#define CHIP_362_TRIO3D_2X	0x11
+#define CHIP_368_TRIO3D_2X	0x12
 
 #define CHIP_XXX_TRIO		0x80
 #define CHIP_XXX_TRIO64V2_DXGX	0x81
 #define CHIP_XXX_VIRGE_DXGX	0x82
+#define CHIP_36X_TRIO3D_1X_2X	0x83
 
 #define CHIP_UNDECIDED_FLAG	0x80
 #define CHIP_MASK		0xFF
@@ -324,6 +329,7 @@
 
 static void s3_set_pixclock(struct fb_info *info, u32 pixclock)
 {
+	struct s3fb_info *par = info->par;
 	u16 m, n, r;
 	u8 regval;
 	int rv;
@@ -339,7 +345,13 @@
 	vga_w(NULL, VGA_MIS_W, regval | VGA_MIS_ENB_PLL_LOAD);
 
 	/* Set S3 clock registers */
-	vga_wseq(NULL, 0x12, ((n - 2) | (r << 5)));
+	if (par->chip == CHIP_360_TRIO3D_1X ||
+	    par->chip == CHIP_362_TRIO3D_2X ||
+	    par->chip == CHIP_368_TRIO3D_2X) {
+		vga_wseq(NULL, 0x12, (n - 2) | ((r & 3) << 6));	/* n and low two bits of r */
+		vga_wseq(NULL, 0x29, r >> 2); /* remaining high bit of r */
+	} else
+		vga_wseq(NULL, 0x12, (n - 2) | (r << 5));
 	vga_wseq(NULL, 0x13, m - 2);
 
 	udelay(1000);
@@ -456,7 +468,7 @@
 static int s3fb_set_par(struct fb_info *info)
 {
 	struct s3fb_info *par = info->par;
-	u32 value, mode, hmul, offset_value, screen_size, multiplex;
+	u32 value, mode, hmul, offset_value, screen_size, multiplex, dbytes;
 	u32 bpp = info->var.bits_per_pixel;
 
 	if (bpp != 0) {
@@ -518,7 +530,7 @@
 	svga_wcrt_mask(0x33, 0x00, 0x08); /* no DDR ?	*/
 	svga_wcrt_mask(0x43, 0x00, 0x01); /* no DDR ?	*/
 
-	svga_wcrt_mask(0x5D, 0x00, 0x28); // Clear strange HSlen bits
+	svga_wcrt_mask(0x5D, 0x00, 0x28); /* Clear strange HSlen bits */
 
 /*	svga_wcrt_mask(0x58, 0x03, 0x03); */
 
@@ -530,10 +542,14 @@
 	pr_debug("fb%d: offset register       : %d\n", info->node, offset_value);
 	svga_wcrt_multi(s3_offset_regs, offset_value);
 
-	vga_wcrt(NULL, 0x54, 0x18); /* M parameter */
-	vga_wcrt(NULL, 0x60, 0xff); /* N parameter */
-	vga_wcrt(NULL, 0x61, 0xff); /* L parameter */
-	vga_wcrt(NULL, 0x62, 0xff); /* L parameter */
+	if (par->chip != CHIP_360_TRIO3D_1X &&
+	    par->chip != CHIP_362_TRIO3D_2X &&
+	    par->chip != CHIP_368_TRIO3D_2X) {
+		vga_wcrt(NULL, 0x54, 0x18); /* M parameter */
+		vga_wcrt(NULL, 0x60, 0xff); /* N parameter */
+		vga_wcrt(NULL, 0x61, 0xff); /* L parameter */
+		vga_wcrt(NULL, 0x62, 0xff); /* L parameter */
+	}
 
 	vga_wcrt(NULL, 0x3A, 0x35);
 	svga_wattr(0x33, 0x00);
@@ -570,6 +586,16 @@
 		vga_wcrt(NULL, 0x66, 0x90);
 	}
 
+	if (par->chip == CHIP_360_TRIO3D_1X ||
+	    par->chip == CHIP_362_TRIO3D_2X ||
+	    par->chip == CHIP_368_TRIO3D_2X) {
+		dbytes = info->var.xres * ((bpp+7)/8);
+		vga_wcrt(NULL, 0x91, (dbytes + 7) / 8);
+		vga_wcrt(NULL, 0x90, (((dbytes + 7) / 8) >> 8) | 0x80);
+
+		vga_wcrt(NULL, 0x66, 0x81);
+	}
+
 	svga_wcrt_mask(0x31, 0x00, 0x40);
 	multiplex = 0;
 	hmul = 1;
@@ -615,11 +641,13 @@
 		break;
 	case 3:
 		pr_debug("fb%d: 8 bit pseudocolor\n", info->node);
-		if (info->var.pixclock > 20000) {
-			svga_wcrt_mask(0x50, 0x00, 0x30);
+		svga_wcrt_mask(0x50, 0x00, 0x30);
+		if (info->var.pixclock > 20000 ||
+		    par->chip == CHIP_360_TRIO3D_1X ||
+		    par->chip == CHIP_362_TRIO3D_2X ||
+		    par->chip == CHIP_368_TRIO3D_2X)
 			svga_wcrt_mask(0x67, 0x00, 0xF0);
-		} else {
-			svga_wcrt_mask(0x50, 0x00, 0x30);
+		else {
 			svga_wcrt_mask(0x67, 0x10, 0xF0);
 			multiplex = 1;
 		}
@@ -634,7 +662,10 @@
 		} else {
 			svga_wcrt_mask(0x50, 0x10, 0x30);
 			svga_wcrt_mask(0x67, 0x30, 0xF0);
-			hmul = 2;
+			if (par->chip != CHIP_360_TRIO3D_1X &&
+			    par->chip != CHIP_362_TRIO3D_2X &&
+			    par->chip != CHIP_368_TRIO3D_2X)
+				hmul = 2;
 		}
 		break;
 	case 5:
@@ -647,7 +678,10 @@
 		} else {
 			svga_wcrt_mask(0x50, 0x10, 0x30);
 			svga_wcrt_mask(0x67, 0x50, 0xF0);
-			hmul = 2;
+			if (par->chip != CHIP_360_TRIO3D_1X &&
+			    par->chip != CHIP_362_TRIO3D_2X &&
+			    par->chip != CHIP_368_TRIO3D_2X)
+				hmul = 2;
 		}
 		break;
 	case 6:
@@ -866,6 +900,17 @@
 			return CHIP_385_VIRGE_GX;
 	}
 
+	if (chip == CHIP_36X_TRIO3D_1X_2X) {
+		switch (vga_rcrt(NULL, 0x2f)) {
+		case 0x00:
+			return CHIP_360_TRIO3D_1X;
+		case 0x01:
+			return CHIP_362_TRIO3D_2X;
+		case 0x02:
+			return CHIP_368_TRIO3D_2X;
+		}
+	}
+
 	return CHIP_UNKNOWN;
 }
 
@@ -930,17 +975,32 @@
 	vga_wcrt(NULL, 0x38, 0x48);
 	vga_wcrt(NULL, 0x39, 0xA5);
 
-	/* Find how many physical memory there is on card */
-	/* 0x36 register is accessible even if other registers are locked */
-	regval = vga_rcrt(NULL, 0x36);
-	info->screen_size = s3_memsizes[regval >> 5] << 10;
-	info->fix.smem_len = info->screen_size;
-
+	/* Identify chip type */
 	par->chip = id->driver_data & CHIP_MASK;
 	par->rev = vga_rcrt(NULL, 0x2f);
 	if (par->chip & CHIP_UNDECIDED_FLAG)
 		par->chip = s3_identification(par->chip);
 
+	/* Find out how much physical memory there is on the card */
+	/* 0x36 register is accessible even if other registers are locked */
+	regval = vga_rcrt(NULL, 0x36);
+	if (par->chip == CHIP_360_TRIO3D_1X ||
+	    par->chip == CHIP_362_TRIO3D_2X ||
+	    par->chip == CHIP_368_TRIO3D_2X) {
+		switch ((regval & 0xE0) >> 5) {
+		case 0: /* 8MB -- only 4MB usable for display */
+		case 1: /* 4MB with 32-bit bus */
+		case 2:	/* 4MB */
+			info->screen_size = 4 << 20;
+			break;
+		case 6: /* 2MB */
+			info->screen_size = 2 << 20;
+			break;
+		}
+	} else
+		info->screen_size = s3_memsizes[regval >> 5] << 10;
+	info->fix.smem_len = info->screen_size;
+
 	/* Find MCLK frequency */
 	regval = vga_rseq(NULL, 0x10);
 	par->mclk_freq = ((vga_rseq(NULL, 0x11) + 2) * 14318) / ((regval & 0x1F)  + 2);
@@ -1131,6 +1191,7 @@
 	{PCI_DEVICE(PCI_VENDOR_ID_S3, 0x8A10), .driver_data = CHIP_356_VIRGE_GX2},
 	{PCI_DEVICE(PCI_VENDOR_ID_S3, 0x8A11), .driver_data = CHIP_357_VIRGE_GX2P},
 	{PCI_DEVICE(PCI_VENDOR_ID_S3, 0x8A12), .driver_data = CHIP_359_VIRGE_GX2P},
+	{PCI_DEVICE(PCI_VENDOR_ID_S3, 0x8A13), .driver_data = CHIP_36X_TRIO3D_1X_2X},
 
 	{0, 0, 0, 0, 0, 0, 0}
 };
diff --git a/drivers/video/via/viafbdev.c b/drivers/video/via/viafbdev.c
index 2bc40e6..1082541 100644
--- a/drivers/video/via/viafbdev.c
+++ b/drivers/video/via/viafbdev.c
@@ -578,14 +578,9 @@
 		break;
 
 	case VIAFB_SET_GAMMA_LUT:
-		viafb_gamma_table = kmalloc(256 * sizeof(u32), GFP_KERNEL);
-		if (!viafb_gamma_table)
-			return -ENOMEM;
-		if (copy_from_user(viafb_gamma_table, argp,
-				256 * sizeof(u32))) {
-			kfree(viafb_gamma_table);
-			return -EFAULT;
-		}
+		viafb_gamma_table = memdup_user(argp, 256 * sizeof(u32));
+		if (IS_ERR(viafb_gamma_table))
+			return PTR_ERR(viafb_gamma_table);
 		viafb_set_gamma_table(viafb_bpp, viafb_gamma_table);
 		kfree(viafb_gamma_table);
 		break;
diff --git a/fs/affs/namei.c b/fs/affs/namei.c
index d70bbba..914d1c0 100644
--- a/fs/affs/namei.c
+++ b/fs/affs/namei.c
@@ -224,7 +224,7 @@
 		affs_brelse(bh);
 		inode = affs_iget(sb, ino);
 		if (IS_ERR(inode))
-			return ERR_PTR(PTR_ERR(inode));
+			return ERR_CAST(inode);
 	}
 	dentry->d_op = AFFS_SB(sb)->s_flags & SF_INTL ? &affs_intl_dentry_operations : &affs_dentry_operations;
 	d_add(dentry, inode);
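
ERR_CAST() re-types an ERR_PTR-carrying pointer directly;
ERR_PTR(PTR_ERR(inode)) and ERR_CAST(inode) are equivalent, but the latter
says what is meant.  Sketch (example_iget() is hypothetical):

	#include <linux/dcache.h>
	#include <linux/err.h>
	#include <linux/fs.h>

	extern struct inode *example_iget(struct super_block *sb,
					  unsigned long ino);

	static struct dentry *example_get_dentry(struct super_block *sb,
						 unsigned long ino)
	{
		struct inode *inode = example_iget(sb, ino);

		if (IS_ERR(inode))
			return ERR_CAST(inode);	/* same errno, new type */
		return d_obtain_alias(inode);
	}
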
diff --git a/fs/aio.c b/fs/aio.c
index 1cf12b3..48fdeeb 100644
--- a/fs/aio.c
+++ b/fs/aio.c
@@ -36,6 +36,7 @@
 #include <linux/blkdev.h>
 #include <linux/mempool.h>
 #include <linux/hash.h>
+#include <linux/compat.h>
 
 #include <asm/kmap_types.h>
 #include <asm/uaccess.h>
@@ -1384,13 +1385,22 @@
 	return ret;
 }
 
-static ssize_t aio_setup_vectored_rw(int type, struct kiocb *kiocb)
+static ssize_t aio_setup_vectored_rw(int type, struct kiocb *kiocb, bool compat)
 {
 	ssize_t ret;
 
-	ret = rw_copy_check_uvector(type, (struct iovec __user *)kiocb->ki_buf,
-				    kiocb->ki_nbytes, 1,
-				    &kiocb->ki_inline_vec, &kiocb->ki_iovec);
+#ifdef CONFIG_COMPAT
+	if (compat)
+		ret = compat_rw_copy_check_uvector(type,
+				(struct compat_iovec __user *)kiocb->ki_buf,
+				kiocb->ki_nbytes, 1, &kiocb->ki_inline_vec,
+				&kiocb->ki_iovec);
+	else
+#endif
+		ret = rw_copy_check_uvector(type,
+				(struct iovec __user *)kiocb->ki_buf,
+				kiocb->ki_nbytes, 1, &kiocb->ki_inline_vec,
+				&kiocb->ki_iovec);
 	if (ret < 0)
 		goto out;
 
@@ -1420,7 +1430,7 @@
  *	Performs the initial checks and aio retry method
  *	setup for the kiocb at the time of io submission.
  */
-static ssize_t aio_setup_iocb(struct kiocb *kiocb)
+static ssize_t aio_setup_iocb(struct kiocb *kiocb, bool compat)
 {
 	struct file *file = kiocb->ki_filp;
 	ssize_t ret = 0;
@@ -1469,7 +1479,7 @@
 		ret = security_file_permission(file, MAY_READ);
 		if (unlikely(ret))
 			break;
-		ret = aio_setup_vectored_rw(READ, kiocb);
+		ret = aio_setup_vectored_rw(READ, kiocb, compat);
 		if (ret)
 			break;
 		ret = -EINVAL;
@@ -1483,7 +1493,7 @@
 		ret = security_file_permission(file, MAY_WRITE);
 		if (unlikely(ret))
 			break;
-		ret = aio_setup_vectored_rw(WRITE, kiocb);
+		ret = aio_setup_vectored_rw(WRITE, kiocb, compat);
 		if (ret)
 			break;
 		ret = -EINVAL;
@@ -1548,7 +1558,8 @@
 }
 
 static int io_submit_one(struct kioctx *ctx, struct iocb __user *user_iocb,
-			 struct iocb *iocb, struct hlist_head *batch_hash)
+			 struct iocb *iocb, struct hlist_head *batch_hash,
+			 bool compat)
 {
 	struct kiocb *req;
 	struct file *file;
@@ -1609,7 +1620,7 @@
 	req->ki_left = req->ki_nbytes = iocb->aio_nbytes;
 	req->ki_opcode = iocb->aio_lio_opcode;
 
-	ret = aio_setup_iocb(req);
+	ret = aio_setup_iocb(req, compat);
 
 	if (ret)
 		goto out_put_req;
@@ -1637,20 +1648,8 @@
 	return ret;
 }
 
-/* sys_io_submit:
- *	Queue the nr iocbs pointed to by iocbpp for processing.  Returns
- *	the number of iocbs queued.  May return -EINVAL if the aio_context
- *	specified by ctx_id is invalid, if nr is < 0, if the iocb at
- *	*iocbpp[0] is not properly initialized, if the operation specified
- *	is invalid for the file descriptor in the iocb.  May fail with
- *	-EFAULT if any of the data structures point to invalid data.  May
- *	fail with -EBADF if the file descriptor specified in the first
- *	iocb is invalid.  May fail with -EAGAIN if insufficient resources
- *	are available to queue any iocbs.  Will return 0 if nr is 0.  Will
- *	fail with -ENOSYS if not implemented.
- */
-SYSCALL_DEFINE3(io_submit, aio_context_t, ctx_id, long, nr,
-		struct iocb __user * __user *, iocbpp)
+long do_io_submit(aio_context_t ctx_id, long nr,
+		  struct iocb __user *__user *iocbpp, bool compat)
 {
 	struct kioctx *ctx;
 	long ret = 0;
@@ -1687,7 +1686,7 @@
 			break;
 		}
 
-		ret = io_submit_one(ctx, user_iocb, &tmp, batch_hash);
+		ret = io_submit_one(ctx, user_iocb, &tmp, batch_hash, compat);
 		if (ret)
 			break;
 	}
@@ -1697,6 +1696,24 @@
 	return i ? i : ret;
 }
 
+/* sys_io_submit:
+ *	Queue the nr iocbs pointed to by iocbpp for processing.  Returns
+ *	the number of iocbs queued.  May return -EINVAL if the aio_context
+ *	specified by ctx_id is invalid, if nr is < 0, if the iocb at
+ *	*iocbpp[0] is not properly initialized, if the operation specified
+ *	is invalid for the file descriptor in the iocb.  May fail with
+ *	-EFAULT if any of the data structures point to invalid data.  May
+ *	fail with -EBADF if the file descriptor specified in the first
+ *	iocb is invalid.  May fail with -EAGAIN if insufficient resources
+ *	are available to queue any iocbs.  Will return 0 if nr is 0.  Will
+ *	fail with -ENOSYS if not implemented.
+ */
+SYSCALL_DEFINE3(io_submit, aio_context_t, ctx_id, long, nr,
+		struct iocb __user * __user *, iocbpp)
+{
+	return do_io_submit(ctx_id, nr, iocbpp, 0);
+}
+
 /* lookup_kiocb
  *	Finds a given iocb for cancellation.
  */
diff --git a/fs/autofs/root.c b/fs/autofs/root.c
index 8713c7c..9a0520b 100644
--- a/fs/autofs/root.c
+++ b/fs/autofs/root.c
@@ -28,6 +28,7 @@
 static int autofs_root_ioctl(struct inode *, struct file *,unsigned int,unsigned long);
 
 const struct file_operations autofs_root_operations = {
+	.llseek		= generic_file_llseek,
 	.read		= generic_read_dir,
 	.readdir	= autofs_root_readdir,
 	.ioctl		= autofs_root_ioctl,
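
The same one-line change recurs in freevxfs, isofs, ncpfs, qnx4, reiserfs,
smbfs and udf below: directory file_operations gain an explicit .llseek so
they stop falling back to default_llseek(), which takes the BKL.  The
recurring shape, with a hypothetical readdir:

	#include <linux/fs.h>

	static int example_readdir(struct file *file, void *dirent,
				   filldir_t filldir);	/* hypothetical */

	const struct file_operations example_dir_operations = {
		.llseek		= generic_file_llseek,
		.read		= generic_read_dir,	/* read() gives -EISDIR */
		.readdir	= example_readdir,
	};
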
diff --git a/fs/autofs4/dev-ioctl.c b/fs/autofs4/dev-ioctl.c
index d832062..ba4a38b 100644
--- a/fs/autofs4/dev-ioctl.c
+++ b/fs/autofs4/dev-ioctl.c
@@ -95,7 +95,7 @@
  */
 static struct autofs_dev_ioctl *copy_dev_ioctl(struct autofs_dev_ioctl __user *in)
 {
-	struct autofs_dev_ioctl tmp, *ads;
+	struct autofs_dev_ioctl tmp;
 
 	if (copy_from_user(&tmp, in, sizeof(tmp)))
 		return ERR_PTR(-EFAULT);
@@ -103,16 +103,7 @@
 	if (tmp.size < sizeof(tmp))
 		return ERR_PTR(-EINVAL);
 
-	ads = kmalloc(tmp.size, GFP_KERNEL);
-	if (!ads)
-		return ERR_PTR(-ENOMEM);
-
-	if (copy_from_user(ads, in, tmp.size)) {
-		kfree(ads);
-		return ERR_PTR(-EFAULT);
-	}
-
-	return ads;
+	return memdup_user(in, tmp.size);
 }
 
 static inline void free_dev_ioctl(struct autofs_dev_ioctl *param)
diff --git a/fs/compat.c b/fs/compat.c
index 0544873..f0b391c 100644
--- a/fs/compat.c
+++ b/fs/compat.c
@@ -568,6 +568,79 @@
 	return ret;
 }
 
+/* A write operation does a read from user space and vice versa */
+#define vrfy_dir(type) ((type) == READ ? VERIFY_WRITE : VERIFY_READ)
+
+ssize_t compat_rw_copy_check_uvector(int type,
+		const struct compat_iovec __user *uvector, unsigned long nr_segs,
+		unsigned long fast_segs, struct iovec *fast_pointer,
+		struct iovec **ret_pointer)
+{
+	compat_ssize_t tot_len;
+	struct iovec *iov = *ret_pointer = fast_pointer;
+	ssize_t ret = 0;
+	int seg;
+
+	/*
+	 * SuS says "The readv() function *may* fail if the iovcnt argument
+	 * was less than or equal to 0, or greater than {IOV_MAX}.  Linux has
+	 * traditionally returned zero for zero segments, so...
+	 */
+	if (nr_segs == 0)
+		goto out;
+
+	ret = -EINVAL;
+	if (nr_segs > UIO_MAXIOV || nr_segs < 0)
+		goto out;
+	if (nr_segs > fast_segs) {
+		ret = -ENOMEM;
+		iov = kmalloc(nr_segs*sizeof(struct iovec), GFP_KERNEL);
+		if (iov == NULL) {
+			*ret_pointer = fast_pointer;
+			goto out;
+		}
+	}
+	*ret_pointer = iov;
+
+	/*
+	 * Single unix specification:
+	 * We should return -EINVAL if an element length is negative or does
+	 * not fit in an ssize_t; the total length must also fit in an ssize_t.
+	 *
+	 * Be careful here because iov_len is a size_t not an ssize_t
+	 */
+	tot_len = 0;
+	ret = -EINVAL;
+	for (seg = 0; seg < nr_segs; seg++) {
+		compat_ssize_t tmp = tot_len;
+		compat_uptr_t buf;
+		compat_ssize_t len;
+
+		if (__get_user(len, &uvector->iov_len) ||
+		   __get_user(buf, &uvector->iov_base)) {
+			ret = -EFAULT;
+			goto out;
+		}
+		if (len < 0)	/* size_t not fitting in compat_ssize_t .. */
+			goto out;
+		tot_len += len;
+		if (tot_len < tmp) /* maths overflow on the compat_ssize_t */
+			goto out;
+		if (!access_ok(vrfy_dir(type), buf, len)) {
+			ret = -EFAULT;
+			goto out;
+		}
+		iov->iov_base = compat_ptr(buf);
+		iov->iov_len = (compat_size_t) len;
+		uvector++;
+		iov++;
+	}
+	ret = tot_len;
+
+out:
+	return ret;
+}
+
 static inline long
 copy_iocb(long nr, u32 __user *ptr32, struct iocb __user * __user *ptr64)
 {
@@ -600,7 +673,7 @@
 	iocb64 = compat_alloc_user_space(nr * sizeof(*iocb64));
 	ret = copy_iocb(nr, iocb, iocb64);
 	if (!ret)
-		ret = sys_io_submit(ctx_id, nr, iocb64);
+		ret = do_io_submit(ctx_id, nr, iocb64, 1);
 	return ret;
 }
 
@@ -1077,70 +1150,21 @@
 {
 	compat_ssize_t tot_len;
 	struct iovec iovstack[UIO_FASTIOV];
-	struct iovec *iov=iovstack, *vector;
+	struct iovec *iov;
 	ssize_t ret;
-	int seg;
 	io_fn_t fn;
 	iov_fn_t fnv;
 
-	/*
-	 * SuS says "The readv() function *may* fail if the iovcnt argument
-	 * was less than or equal to 0, or greater than {IOV_MAX}.  Linux has
-	 * traditionally returned zero for zero segments, so...
-	 */
-	ret = 0;
-	if (nr_segs == 0)
-		goto out;
-
-	/*
-	 * First get the "struct iovec" from user memory and
-	 * verify all the pointers
-	 */
 	ret = -EINVAL;
-	if ((nr_segs > UIO_MAXIOV) || (nr_segs <= 0))
-		goto out;
 	if (!file->f_op)
 		goto out;
-	if (nr_segs > UIO_FASTIOV) {
-		ret = -ENOMEM;
-		iov = kmalloc(nr_segs*sizeof(struct iovec), GFP_KERNEL);
-		if (!iov)
-			goto out;
-	}
+
 	ret = -EFAULT;
 	if (!access_ok(VERIFY_READ, uvector, nr_segs*sizeof(*uvector)))
 		goto out;
 
-	/*
-	 * Single unix specification:
-	 * We should -EINVAL if an element length is not >= 0 and fitting an
-	 * ssize_t.  The total length is fitting an ssize_t
-	 *
-	 * Be careful here because iov_len is a size_t not an ssize_t
-	 */
-	tot_len = 0;
-	vector = iov;
-	ret = -EINVAL;
-	for (seg = 0 ; seg < nr_segs; seg++) {
-		compat_ssize_t tmp = tot_len;
-		compat_ssize_t len;
-		compat_uptr_t buf;
-
-		if (__get_user(len, &uvector->iov_len) ||
-		    __get_user(buf, &uvector->iov_base)) {
-			ret = -EFAULT;
-			goto out;
-		}
-		if (len < 0)	/* size_t not fitting an compat_ssize_t .. */
-			goto out;
-		tot_len += len;
-		if (tot_len < tmp) /* maths overflow on the compat_ssize_t */
-			goto out;
-		vector->iov_base = compat_ptr(buf);
-		vector->iov_len = (compat_size_t) len;
-		uvector++;
-		vector++;
-	}
+	tot_len = compat_rw_copy_check_uvector(type, uvector, nr_segs,
+					       UIO_FASTIOV, iovstack, &iov);
 	if (tot_len == 0) {
 		ret = 0;
 		goto out;
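
compat_rw_copy_check_uvector() walks a 32-bit iovec array, validating each
segment and widening it into a native struct iovec via compat_ptr().  A
hedged sketch of the per-segment step it performs (names hypothetical):

	#include <linux/compat.h>
	#include <linux/errno.h>
	#include <linux/uaccess.h>
	#include <linux/uio.h>

	static int example_widen_segment(const struct compat_iovec __user *u,
					 struct iovec *iov)
	{
		compat_uptr_t buf;
		compat_ssize_t len;

		if (get_user(len, &u->iov_len) || get_user(buf, &u->iov_base))
			return -EFAULT;
		if (len < 0)	/* size_t value too large for compat_ssize_t */
			return -EINVAL;

		iov->iov_base = compat_ptr(buf);	/* widen the pointer */
		iov->iov_len = (compat_size_t)len;
		return 0;
	}
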
diff --git a/fs/exec.c b/fs/exec.c
index 9badbc0..e19de6a 100644
--- a/fs/exec.c
+++ b/fs/exec.c
@@ -768,7 +768,6 @@
 	struct signal_struct *sig = tsk->signal;
 	struct sighand_struct *oldsighand = tsk->sighand;
 	spinlock_t *lock = &oldsighand->siglock;
-	int count;
 
 	if (thread_group_empty(tsk))
 		goto no_thread_group;
@@ -785,13 +784,13 @@
 		spin_unlock_irq(lock);
 		return -EAGAIN;
 	}
-	sig->group_exit_task = tsk;
-	zap_other_threads(tsk);
 
-	/* Account for the thread group leader hanging around: */
-	count = thread_group_leader(tsk) ? 1 : 2;
-	sig->notify_count = count;
-	while (atomic_read(&sig->count) > count) {
+	sig->group_exit_task = tsk;
+	sig->notify_count = zap_other_threads(tsk);
+	if (!thread_group_leader(tsk))
+		sig->notify_count--;
+
+	while (sig->notify_count) {
 		__set_current_state(TASK_UNINTERRUPTIBLE);
 		spin_unlock_irq(lock);
 		schedule();
@@ -1662,12 +1661,15 @@
 	struct task_struct *tsk = current;
 	struct mm_struct *mm = tsk->mm;
 	struct completion *vfork_done;
-	int core_waiters;
+	int core_waiters = -EBUSY;
 
 	init_completion(&core_state->startup);
 	core_state->dumper.task = tsk;
 	core_state->dumper.next = NULL;
-	core_waiters = zap_threads(tsk, mm, core_state, exit_code);
+
+	down_write(&mm->mmap_sem);
+	if (!mm->core_state)
+		core_waiters = zap_threads(tsk, mm, core_state, exit_code);
 	up_write(&mm->mmap_sem);
 
 	if (unlikely(core_waiters < 0))
@@ -1787,21 +1789,61 @@
 }
 
 
+/*
+ * umh_pipe_setup
+ * helper function to customize the process used
+ * to collect the core in userspace.  Specifically
+ * it sets up a pipe and installs it as fd 0 (stdin)
+ * for the process.  Returns 0 on success, or
+ * PTR_ERR on failure.
+ * Note that it also sets the core limit to 1.  This
+ * is a special value that we use to trap recursive
+ * core dumps
+ */
+static int umh_pipe_setup(struct subprocess_info *info)
+{
+	struct file *rp, *wp;
+	struct fdtable *fdt;
+	struct coredump_params *cp = (struct coredump_params *)info->data;
+	struct files_struct *cf = current->files;
+
+	wp = create_write_pipe(0);
+	if (IS_ERR(wp))
+		return PTR_ERR(wp);
+
+	rp = create_read_pipe(wp, 0);
+	if (IS_ERR(rp)) {
+		free_write_pipe(wp);
+		return PTR_ERR(rp);
+	}
+
+	cp->file = wp;
+
+	sys_close(0);
+	fd_install(0, rp);
+	spin_lock(&cf->file_lock);
+	fdt = files_fdtable(cf);
+	FD_SET(0, fdt->open_fds);
+	FD_CLR(0, fdt->close_on_exec);
+	spin_unlock(&cf->file_lock);
+
+	/* and disallow core files too */
+	current->signal->rlim[RLIMIT_CORE] = (struct rlimit){1, 1};
+
+	return 0;
+}
+
 void do_coredump(long signr, int exit_code, struct pt_regs *regs)
 {
 	struct core_state core_state;
 	char corename[CORENAME_MAX_SIZE + 1];
 	struct mm_struct *mm = current->mm;
 	struct linux_binfmt * binfmt;
-	struct inode * inode;
 	const struct cred *old_cred;
 	struct cred *cred;
 	int retval = 0;
 	int flag = 0;
-	int ispipe = 0;
-	char **helper_argv = NULL;
-	int helper_argc = 0;
-	int dump_count = 0;
+	int ispipe;
 	static atomic_t core_dump_count = ATOMIC_INIT(0);
 	struct coredump_params cprm = {
 		.signr = signr,
@@ -1820,23 +1862,12 @@
 	binfmt = mm->binfmt;
 	if (!binfmt || !binfmt->core_dump)
 		goto fail;
+	if (!__get_dumpable(cprm.mm_flags))
+		goto fail;
 
 	cred = prepare_creds();
-	if (!cred) {
-		retval = -ENOMEM;
+	if (!cred)
 		goto fail;
-	}
-
-	down_write(&mm->mmap_sem);
-	/*
-	 * If another thread got here first, or we are not dumpable, bail out.
-	 */
-	if (mm->core_state || !__get_dumpable(cprm.mm_flags)) {
-		up_write(&mm->mmap_sem);
-		put_cred(cred);
-		goto fail;
-	}
-
 	/*
 	 *	We cannot trust fsuid as being the "true" uid of the
 	 *	process nor do we know its entire history. We only know it
@@ -1849,10 +1880,8 @@
 	}
 
 	retval = coredump_wait(exit_code, &core_state);
-	if (retval < 0) {
-		put_cred(cred);
-		goto fail;
-	}
+	if (retval < 0)
+		goto fail_creds;
 
 	old_cred = override_creds(cred);
 
@@ -1870,19 +1899,19 @@
 	ispipe = format_corename(corename, signr);
 	unlock_kernel();
 
-	if ((!ispipe) && (cprm.limit < binfmt->min_coredump))
-		goto fail_unlock;
-
  	if (ispipe) {
-		if (cprm.limit == 0) {
+		int dump_count;
+		char **helper_argv;
+
+		if (cprm.limit == 1) {
 			/*
 			 * Normally core limits are irrelevant to pipes, since
 			 * we're not writing to the file system, but we use
-			 * cprm.limit of 0 here as a speacial value. Any
-			 * non-zero limit gets set to RLIM_INFINITY below, but
+			 * cprm.limit of 1 here as a special value. Any
+			 * non-1 limit gets set to RLIM_INFINITY below, but
 			 * a limit of 0 skips the dump.  This is a consistent
 			 * way to catch recursive crashes.  We can still crash
-			 * if the core_pattern binary sets RLIM_CORE =  !0
+			 * if the core_pattern binary sets RLIM_CORE = !1
 			 * but it runs as root, and can do lots of stupid things
 			 * Note that we use task_tgid_vnr here to grab the pid
 			 * of the process group leader.  That way we get the
@@ -1890,11 +1919,12 @@
 			 * core_pattern process dies.
 			 */
 			printk(KERN_WARNING
-				"Process %d(%s) has RLIMIT_CORE set to 0\n",
+				"Process %d(%s) has RLIMIT_CORE set to 1\n",
 				task_tgid_vnr(current), current->comm);
 			printk(KERN_WARNING "Aborting core\n");
 			goto fail_unlock;
 		}
+		cprm.limit = RLIM_INFINITY;
 
 		dump_count = atomic_inc_return(&core_dump_count);
 		if (core_pipe_limit && (core_pipe_limit < dump_count)) {
@@ -1904,71 +1934,74 @@
 			goto fail_dropcount;
 		}
 
-		helper_argv = argv_split(GFP_KERNEL, corename+1, &helper_argc);
+		helper_argv = argv_split(GFP_KERNEL, corename+1, NULL);
 		if (!helper_argv) {
 			printk(KERN_WARNING "%s failed to allocate memory\n",
 			       __func__);
 			goto fail_dropcount;
 		}
 
-		cprm.limit = RLIM_INFINITY;
-
-		/* SIGPIPE can happen, but it's just never processed */
-		if (call_usermodehelper_pipe(helper_argv[0], helper_argv, NULL,
-				&cprm.file)) {
+		retval = call_usermodehelper_fns(helper_argv[0], helper_argv,
+					NULL, UMH_WAIT_EXEC, umh_pipe_setup,
+					NULL, &cprm);
+		argv_free(helper_argv);
+		if (retval) {
  			printk(KERN_INFO "Core dump to %s pipe failed\n",
 			       corename);
-			goto fail_dropcount;
+			goto close_fail;
  		}
- 	} else
+	} else {
+		struct inode *inode;
+
+		if (cprm.limit < binfmt->min_coredump)
+			goto fail_unlock;
+
 		cprm.file = filp_open(corename,
 				 O_CREAT | 2 | O_NOFOLLOW | O_LARGEFILE | flag,
 				 0600);
-	if (IS_ERR(cprm.file))
-		goto fail_dropcount;
-	inode = cprm.file->f_path.dentry->d_inode;
-	if (inode->i_nlink > 1)
-		goto close_fail;	/* multiple links - don't dump */
-	if (!ispipe && d_unhashed(cprm.file->f_path.dentry))
-		goto close_fail;
+		if (IS_ERR(cprm.file))
+			goto fail_unlock;
 
-	/* AK: actually i see no reason to not allow this for named pipes etc.,
-	   but keep the previous behaviour for now. */
-	if (!ispipe && !S_ISREG(inode->i_mode))
-		goto close_fail;
-	/*
-	 * Dont allow local users get cute and trick others to coredump
-	 * into their pre-created files:
-	 * Note, this is not relevant for pipes
-	 */
-	if (!ispipe && (inode->i_uid != current_fsuid()))
-		goto close_fail;
-	if (!cprm.file->f_op)
-		goto close_fail;
-	if (!cprm.file->f_op->write)
-		goto close_fail;
-	if (!ispipe &&
-	    do_truncate(cprm.file->f_path.dentry, 0, 0, cprm.file) != 0)
-		goto close_fail;
+		inode = cprm.file->f_path.dentry->d_inode;
+		if (inode->i_nlink > 1)
+			goto close_fail;
+		if (d_unhashed(cprm.file->f_path.dentry))
+			goto close_fail;
+		/*
+		 * AK: actually I see no reason not to allow this for named
+		 * pipes etc., but keep the previous behaviour for now.
+		 */
+		if (!S_ISREG(inode->i_mode))
+			goto close_fail;
+		/*
+		 * Don't allow local users to get cute and trick others into
+		 * dumping core into their pre-created files.
+		 */
+		if (inode->i_uid != current_fsuid())
+			goto close_fail;
+		if (!cprm.file->f_op || !cprm.file->f_op->write)
+			goto close_fail;
+		if (do_truncate(cprm.file->f_path.dentry, 0, 0, cprm.file))
+			goto close_fail;
+	}
 
 	retval = binfmt->core_dump(&cprm);
-
 	if (retval)
 		current->signal->group_exit_code |= 0x80;
-close_fail:
+
 	if (ispipe && core_pipe_limit)
 		wait_for_dump_helpers(cprm.file);
-	filp_close(cprm.file, NULL);
+close_fail:
+	if (cprm.file)
+		filp_close(cprm.file, NULL);
 fail_dropcount:
-	if (dump_count)
+	if (ispipe)
 		atomic_dec(&core_dump_count);
 fail_unlock:
-	if (helper_argv)
-		argv_free(helper_argv);
-
-	revert_creds(old_cred);
-	put_cred(cred);
 	coredump_finish(mm);
+	revert_creds(old_cred);
+fail_creds:
+	put_cred(cred);
 fail:
 	return;
 }
diff --git a/fs/freevxfs/vxfs_lookup.c b/fs/freevxfs/vxfs_lookup.c
index aee049c..0ec7bb2 100644
--- a/fs/freevxfs/vxfs_lookup.c
+++ b/fs/freevxfs/vxfs_lookup.c
@@ -57,6 +57,8 @@
 };
 
 const struct file_operations vxfs_dir_operations = {
+	.llseek =		generic_file_llseek,
+	.read =			generic_read_dir,
 	.readdir =		vxfs_readdir,
 };
 
diff --git a/fs/isofs/dir.c b/fs/isofs/dir.c
index b9ab69b..e0aca9a 100644
--- a/fs/isofs/dir.c
+++ b/fs/isofs/dir.c
@@ -272,6 +272,7 @@
 
 const struct file_operations isofs_dir_operations =
 {
+	.llseek = generic_file_llseek,
 	.read = generic_read_dir,
 	.readdir = isofs_readdir,
 };
diff --git a/fs/ncpfs/dir.c b/fs/ncpfs/dir.c
index 92dde6f..9578cbe 100644
--- a/fs/ncpfs/dir.c
+++ b/fs/ncpfs/dir.c
@@ -49,6 +49,7 @@
 		      
 const struct file_operations ncp_dir_operations =
 {
+	.llseek		= generic_file_llseek,
 	.read		= generic_read_dir,
 	.readdir	= ncp_readdir,
 	.unlocked_ioctl	= ncp_ioctl,
diff --git a/fs/nfs/dir.c b/fs/nfs/dir.c
index ee9a179..db64854 100644
--- a/fs/nfs/dir.c
+++ b/fs/nfs/dir.c
@@ -1741,6 +1741,7 @@
 			clear_bit(NFS_INO_ACL_LRU_SET, &nfsi->flags);
 			smp_mb__after_clear_bit();
 		}
+		spin_unlock(&inode->i_lock);
 	}
 	spin_unlock(&nfs_access_lru_lock);
 	nfs_access_free_list(&head);
diff --git a/fs/nfs/write.c b/fs/nfs/write.c
index 3aea3ca..91679e2 100644
--- a/fs/nfs/write.c
+++ b/fs/nfs/write.c
@@ -1386,7 +1386,7 @@
 	int res = 0;
 
 	if (!nfs_commit_set_lock(NFS_I(inode), may_wait))
-		goto out;
+		goto out_mark_dirty;
 	spin_lock(&inode->i_lock);
 	res = nfs_scan_commit(inode, &head, 0, 0);
 	spin_unlock(&inode->i_lock);
@@ -1398,9 +1398,18 @@
 			wait_on_bit(&NFS_I(inode)->flags, NFS_INO_COMMIT,
 					nfs_wait_bit_killable,
 					TASK_KILLABLE);
+		else
+			goto out_mark_dirty;
 	} else
 		nfs_commit_clear_lock(NFS_I(inode));
-out:
+	return res;
+	/* Note: If we exit without ensuring that the commit is complete,
+	 * we must mark the inode as dirty. Otherwise, future calls to
+	 * sync_inode() with the WB_SYNC_ALL flag set will fail to ensure
+	 * that the data is on the disk.
+	 */
+out_mark_dirty:
+	__mark_inode_dirty(inode, I_DIRTY_DATASYNC);
 	return res;
 }
 
@@ -1509,14 +1518,17 @@
 	};
 	int ret;
 
-	while(PagePrivate(page)) {
+	for (;;) {
 		wait_on_page_writeback(page);
 		if (clear_page_dirty_for_io(page)) {
 			ret = nfs_writepage_locked(page, &wbc);
 			if (ret < 0)
 				goto out_error;
+			continue;
 		}
-		ret = sync_inode(inode, &wbc);
+		if (!PagePrivate(page))
+			break;
+		ret = nfs_commit_inode(inode, FLUSH_SYNC);
 		if (ret < 0)
 			goto out_error;
 	}
diff --git a/fs/proc/array.c b/fs/proc/array.c
index 885ab55..9b58d38 100644
--- a/fs/proc/array.c
+++ b/fs/proc/array.c
@@ -267,7 +267,7 @@
 		shpending = p->signal->shared_pending.signal;
 		blocked = p->blocked;
 		collect_sigign_sigcatch(p, &ignored, &caught);
-		num_threads = atomic_read(&p->signal->count);
+		num_threads = get_nr_threads(p);
 		rcu_read_lock();  /* FIXME: is this correct? */
 		qsize = atomic_read(&__task_cred(p)->user->sigpending);
 		rcu_read_unlock();
@@ -410,7 +410,7 @@
 			tty_nr = new_encode_dev(tty_devnum(sig->tty));
 		}
 
-		num_threads = atomic_read(&sig->count);
+		num_threads = get_nr_threads(task);
 		collect_sigign_sigcatch(task, &sigign, &sigcatch);
 
 		cmin_flt = sig->cmin_flt;
diff --git a/fs/proc/base.c b/fs/proc/base.c
index c7f9f23..acb7ef8 100644
--- a/fs/proc/base.c
+++ b/fs/proc/base.c
@@ -166,18 +166,6 @@
 	return result;
 }
 
-static int get_nr_threads(struct task_struct *tsk)
-{
-	unsigned long flags;
-	int count = 0;
-
-	if (lock_task_sighand(tsk, &flags)) {
-		count = atomic_read(&tsk->signal->count);
-		unlock_task_sighand(tsk, &flags);
-	}
-	return count;
-}
-
 static int proc_cwd_link(struct inode *inode, struct path *path)
 {
 	struct task_struct *task = get_proc_task(inode);
@@ -2444,7 +2432,7 @@
 	const struct pid_entry *p = ptr;
 	struct inode *inode;
 	struct proc_inode *ei;
-	struct dentry *error = ERR_PTR(-EINVAL);
+	struct dentry *error;
 
 	/* Allocate the inode */
 	error = ERR_PTR(-ENOMEM);
@@ -2794,7 +2782,7 @@
 
 struct dentry *proc_pid_lookup(struct inode *dir, struct dentry * dentry, struct nameidata *nd)
 {
-	struct dentry *result = ERR_PTR(-ENOENT);
+	struct dentry *result;
 	struct task_struct *task;
 	unsigned tgid;
 	struct pid_namespace *ns;
diff --git a/fs/proc/generic.c b/fs/proc/generic.c
index 43c1274..2791907 100644
--- a/fs/proc/generic.c
+++ b/fs/proc/generic.c
@@ -343,21 +343,6 @@
 /*
  * Return an inode number between PROC_DYNAMIC_FIRST and
  * 0xffffffff, or zero on failure.
- *
- * Current inode allocations in the proc-fs (hex-numbers):
- *
- * 00000000		reserved
- * 00000001-00000fff	static entries	(goners)
- *      001		root-ino
- *
- * 00001000-00001fff	unused
- * 0001xxxx-7fffxxxx	pid-dir entries for pid 1-7fff
- * 80000000-efffffff	unused
- * f0000000-ffffffff	dynamic entries
- *
- * Goal:
- *	Once we split the thing into several virtual filesystems,
- *	we will get rid of magical ranges (and this comment, BTW).
  */
 static unsigned int get_inode_number(void)
 {
diff --git a/fs/proc/kcore.c b/fs/proc/kcore.c
index c837a77..6f37c39 100644
--- a/fs/proc/kcore.c
+++ b/fs/proc/kcore.c
@@ -588,7 +588,7 @@
  */
 static void __init proc_kcore_text_init(void)
 {
-	kclist_add(&kcore_text, _stext, _end - _stext, KCORE_TEXT);
+	kclist_add(&kcore_text, _text, _end - _text, KCORE_TEXT);
 }
 #else
 static void __init proc_kcore_text_init(void)
diff --git a/fs/proc/root.c b/fs/proc/root.c
index 757c069..4258384 100644
--- a/fs/proc/root.c
+++ b/fs/proc/root.c
@@ -110,7 +110,6 @@
 	if (err)
 		return;
 	proc_mnt = kern_mount_data(&proc_fs_type, &init_pid_ns);
-	err = PTR_ERR(proc_mnt);
 	if (IS_ERR(proc_mnt)) {
 		unregister_filesystem(&proc_fs_type);
 		return;
diff --git a/fs/qnx4/dir.c b/fs/qnx4/dir.c
index 6f30c3d..3d3fd46 100644
--- a/fs/qnx4/dir.c
+++ b/fs/qnx4/dir.c
@@ -77,6 +77,7 @@
 
 const struct file_operations qnx4_dir_operations =
 {
+	.llseek		= generic_file_llseek,
 	.read		= generic_read_dir,
 	.readdir	= qnx4_readdir,
 	.fsync		= simple_fsync,
diff --git a/fs/read_write.c b/fs/read_write.c
index 113386d..9c04852 100644
--- a/fs/read_write.c
+++ b/fs/read_write.c
@@ -97,6 +97,23 @@
 }
 EXPORT_SYMBOL(generic_file_llseek);
 
+/**
+ * noop_llseek - No Operation Performed llseek implementation
+ * @file:	file structure to seek on
+ * @offset:	file offset to seek to
+ * @origin:	type of seek
+ *
+ * This is an implementation of ->llseek usable for the rare special case when
+ * userspace expects the seek to succeed but the (device) file is actually not
+ * able to perform the seek. In this case you use noop_llseek() instead of
+ * falling back to the default implementation of ->llseek.
+ */
+loff_t noop_llseek(struct file *file, loff_t offset, int origin)
+{
+	return file->f_pos;
+}
+EXPORT_SYMBOL(noop_llseek);
+
 loff_t no_llseek(struct file *file, loff_t offset, int origin)
 {
 	return -ESPIPE;
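
osst and st above are the first users: their read/write paths maintain
*ppos themselves, but the resulting position is meaningless to seek on, so
lseek() should "succeed" without moving anything.  Wiring it up is one
line in the fops (handlers hypothetical):

	#include <linux/fs.h>
	#include <linux/module.h>

	static ssize_t example_read(struct file *, char __user *,
				    size_t, loff_t *);
	static ssize_t example_write(struct file *, const char __user *,
				     size_t, loff_t *);

	static const struct file_operations example_tape_fops = {
		.owner	= THIS_MODULE,
		.read	= example_read,
		.write	= example_write,
		.llseek	= noop_llseek,	/* returns f_pos, unchanged */
	};
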
diff --git a/fs/reiserfs/dir.c b/fs/reiserfs/dir.c
index 0793044..4455fbe 100644
--- a/fs/reiserfs/dir.c
+++ b/fs/reiserfs/dir.c
@@ -18,6 +18,7 @@
 			      int datasync);
 
 const struct file_operations reiserfs_dir_operations = {
+	.llseek = generic_file_llseek,
 	.read = generic_read_dir,
 	.readdir = reiserfs_readdir,
 	.fsync = reiserfs_dir_fsync,
diff --git a/fs/smbfs/dir.c b/fs/smbfs/dir.c
index 6c97842..00a70ca 100644
--- a/fs/smbfs/dir.c
+++ b/fs/smbfs/dir.c
@@ -37,6 +37,7 @@
 
 const struct file_operations smb_dir_operations =
 {
+	.llseek		= generic_file_llseek,
 	.read		= generic_read_dir,
 	.readdir	= smb_readdir,
 	.unlocked_ioctl	= smb_ioctl,
diff --git a/fs/udf/dir.c b/fs/udf/dir.c
index 3a84455..1660c81 100644
--- a/fs/udf/dir.c
+++ b/fs/udf/dir.c
@@ -207,6 +207,7 @@
 
 /* readdir and lookup functions */
 const struct file_operations udf_dir_operations = {
+	.llseek			= generic_file_llseek,
 	.read			= generic_read_dir,
 	.readdir		= udf_readdir,
 	.unlocked_ioctl		= udf_ioctl,
diff --git a/fs/ufs/super.c b/fs/ufs/super.c
index 14743d9..ad9bc1e 100644
--- a/fs/ufs/super.c
+++ b/fs/ufs/super.c
@@ -918,6 +918,7 @@
 	sbi->s_bytesex = BYTESEX_LE;
 	switch ((uspi->fs_magic = fs32_to_cpu(sb, usb3->fs_magic))) {
 		case UFS_MAGIC:
+		case UFS_MAGIC_BW:
 		case UFS2_MAGIC:
 		case UFS_MAGIC_LFN:
 	        case UFS_MAGIC_FEA:
@@ -927,6 +928,7 @@
 	sbi->s_bytesex = BYTESEX_BE;
 	switch ((uspi->fs_magic = fs32_to_cpu(sb, usb3->fs_magic))) {
 		case UFS_MAGIC:
+		case UFS_MAGIC_BW:
 		case UFS2_MAGIC:
 		case UFS_MAGIC_LFN:
 	        case UFS_MAGIC_FEA:
diff --git a/fs/ufs/ufs_fs.h b/fs/ufs/ufs_fs.h
index 6943ec6..8aba544 100644
--- a/fs/ufs/ufs_fs.h
+++ b/fs/ufs/ufs_fs.h
@@ -48,6 +48,7 @@
 #define UFS_SECTOR_SIZE 512
 #define UFS_SECTOR_BITS 9
 #define UFS_MAGIC  0x00011954
+#define UFS_MAGIC_BW 0x0f242697
 #define UFS2_MAGIC 0x19540119
 #define UFS_CIGAM  0x54190100 /* byteswapped MAGIC */
 
diff --git a/include/asm-generic/dma-mapping-common.h b/include/asm-generic/dma-mapping-common.h
index 6920695..0c80bb3 100644
--- a/include/asm-generic/dma-mapping-common.h
+++ b/include/asm-generic/dma-mapping-common.h
@@ -123,15 +123,7 @@
 						 size_t size,
 						 enum dma_data_direction dir)
 {
-	struct dma_map_ops *ops = get_dma_ops(dev);
-
-	BUG_ON(!valid_dma_direction(dir));
-	if (ops->sync_single_range_for_cpu) {
-		ops->sync_single_range_for_cpu(dev, addr, offset, size, dir);
-		debug_dma_sync_single_range_for_cpu(dev, addr, offset, size, dir);
-
-	} else
-		dma_sync_single_for_cpu(dev, addr + offset, size, dir);
+	dma_sync_single_for_cpu(dev, addr + offset, size, dir);
 }
 
 static inline void dma_sync_single_range_for_device(struct device *dev,
@@ -140,15 +132,7 @@
 						    size_t size,
 						    enum dma_data_direction dir)
 {
-	struct dma_map_ops *ops = get_dma_ops(dev);
-
-	BUG_ON(!valid_dma_direction(dir));
-	if (ops->sync_single_range_for_device) {
-		ops->sync_single_range_for_device(dev, addr, offset, size, dir);
-		debug_dma_sync_single_range_for_device(dev, addr, offset, size, dir);
-
-	} else
-		dma_sync_single_for_device(dev, addr + offset, size, dir);
+	dma_sync_single_for_device(dev, addr + offset, size, dir);
 }
 
 static inline void
diff --git a/include/asm-generic/gpio.h b/include/asm-generic/gpio.h
index 979c6a5..4f3d75e 100644
--- a/include/asm-generic/gpio.h
+++ b/include/asm-generic/gpio.h
@@ -60,7 +60,9 @@
  * @names: if set, must be an array of strings to use as alternative
  *      names for the GPIOs in this chip. Any entry in the array
  *      may be NULL if there is no alias for the GPIO, however the
- *      array must be @ngpio entries long.
+ *      array must be @ngpio entries long.  A name can include a single printk
+ *      format specifier for an unsigned int.  It is replaced with the
+ *      actual number of the GPIO.
  *
  * A gpio_chip can help platforms abstract various sources of GPIOs so
  * they can all be accessed through a common programing interface.
@@ -88,6 +90,9 @@
 						unsigned offset);
 	int			(*direction_output)(struct gpio_chip *chip,
 						unsigned offset, int value);
+	int			(*set_debounce)(struct gpio_chip *chip,
+						unsigned offset, unsigned debounce);
+
 	void			(*set)(struct gpio_chip *chip,
 						unsigned offset, int value);
 
@@ -98,7 +103,7 @@
 						struct gpio_chip *chip);
 	int			base;
 	u16			ngpio;
-	char			**names;
+	const char		*const *names;
 	unsigned		can_sleep:1;
 	unsigned		exported:1;
 };
@@ -121,6 +126,8 @@
 extern int gpio_direction_input(unsigned gpio);
 extern int gpio_direction_output(unsigned gpio, int value);
 
+extern int gpio_set_debounce(unsigned gpio, unsigned debounce);
+
 extern int gpio_get_value_cansleep(unsigned gpio);
 extern void gpio_set_value_cansleep(unsigned gpio, int value);
 
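gpio_set_debounce() gives consumers a portable way to request hardware
debouncing of an input line through the new gpio_chip->set_debounce()
hook; controllers without support return -ENOSYS so callers can fall back
to a software timer.  A hedged sketch (pin number illustrative; the period
is commonly given in microseconds):

	#include <linux/gpio.h>

	static int example_setup_button(unsigned gpio)
	{
		int err;

		err = gpio_request(gpio, "example-button");
		if (err)
			return err;

		err = gpio_direction_input(gpio);
		if (err)
			goto out_free;

		err = gpio_set_debounce(gpio, 5000);
		if (err && err != -ENOSYS)	/* -ENOSYS: no hw debounce */
			goto out_free;

		return 0;

	out_free:
		gpio_free(gpio);
		return err;
	}
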
diff --git a/include/asm-generic/scatterlist.h b/include/asm-generic/scatterlist.h
index 8b94544..5de0735 100644
--- a/include/asm-generic/scatterlist.h
+++ b/include/asm-generic/scatterlist.h
@@ -11,7 +11,9 @@
 	unsigned int	offset;
 	unsigned int	length;
 	dma_addr_t	dma_address;
+#ifdef CONFIG_NEED_SG_DMA_LENGTH
 	unsigned int	dma_length;
+#endif
 };
 
 /*
@@ -22,22 +24,11 @@
  * is 0.
  */
 #define sg_dma_address(sg)	((sg)->dma_address)
-#ifndef sg_dma_len
-/*
- * Normally, you have an iommu on 64 bit machines, but not on 32 bit
- * machines. Architectures that are differnt should override this.
- */
-#if __BITS_PER_LONG == 64
+
+#ifdef CONFIG_NEED_SG_DMA_LENGTH
 #define sg_dma_len(sg)		((sg)->dma_length)
 #else
 #define sg_dma_len(sg)		((sg)->length)
-#endif /* 64 bit */
-#endif /* sg_dma_len */
-
-#ifndef ISA_DMA_THRESHOLD
-#define ISA_DMA_THRESHOLD	(~0UL)
 #endif
 
-#define ARCH_HAS_SG_CHAIN
-
 #endif /* __ASM_GENERIC_SCATTERLIST_H */
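
Drivers stay correct under either definition of sg_dma_len() by iterating
over the count returned from dma_map_sg() -- an IOMMU may coalesce
entries -- and touching only the sg_dma_*() accessors.  A hedged sketch
(example_hw_queue() is a hypothetical device hook):

	#include <linux/dma-mapping.h>
	#include <linux/errno.h>
	#include <linux/scatterlist.h>

	extern void example_hw_queue(dma_addr_t addr, unsigned int len);

	static int example_map_and_program(struct device *dev,
					   struct scatterlist *sgl, int nents)
	{
		struct scatterlist *sg;
		int i, mapped;

		mapped = dma_map_sg(dev, sgl, nents, DMA_TO_DEVICE);
		if (mapped == 0)	/* mapping failure */
			return -ENOMEM;

		/* iterate over 'mapped', not 'nents' */
		for_each_sg(sgl, sg, mapped, i)
			example_hw_queue(sg_dma_address(sg), sg_dma_len(sg));

		return 0;
	}
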
diff --git a/include/asm-generic/topology.h b/include/asm-generic/topology.h
index 510df36..fd60700 100644
--- a/include/asm-generic/topology.h
+++ b/include/asm-generic/topology.h
@@ -34,6 +34,9 @@
 #ifndef cpu_to_node
 #define cpu_to_node(cpu)	((void)(cpu),0)
 #endif
+#ifndef cpu_to_mem
+#define cpu_to_mem(cpu)		((void)(cpu),0)
+#endif
 #ifndef parent_node
 #define parent_node(node)	((void)(node),0)
 #endif
diff --git a/include/asm-generic/vmlinux.lds.h b/include/asm-generic/vmlinux.lds.h
index 67e6520..ef779c6 100644
--- a/include/asm-generic/vmlinux.lds.h
+++ b/include/asm-generic/vmlinux.lds.h
@@ -247,10 +247,10 @@
 	}								\
 									\
 	/* RapidIO route ops */						\
-	.rio_route        : AT(ADDR(.rio_route) - LOAD_OFFSET) {	\
-		VMLINUX_SYMBOL(__start_rio_route_ops) = .;		\
-		*(.rio_route_ops)					\
-		VMLINUX_SYMBOL(__end_rio_route_ops) = .;		\
+	.rio_ops        : AT(ADDR(.rio_ops) - LOAD_OFFSET) {		\
+		VMLINUX_SYMBOL(__start_rio_switch_ops) = .;		\
+		*(.rio_switch_ops)					\
+		VMLINUX_SYMBOL(__end_rio_switch_ops) = .;		\
 	}								\
 									\
 	TRACEDATA							\
diff --git a/include/linux/aio.h b/include/linux/aio.h
index 811dbb3..7a8db41 100644
--- a/include/linux/aio.h
+++ b/include/linux/aio.h
@@ -212,6 +212,8 @@
 extern int aio_complete(struct kiocb *iocb, long res, long res2);
 struct mm_struct;
 extern void exit_aio(struct mm_struct *mm);
+extern long do_io_submit(aio_context_t ctx_id, long nr,
+			 struct iocb __user *__user *iocbpp, bool compat);
 #else
 static inline ssize_t wait_on_sync_kiocb(struct kiocb *iocb) { return 0; }
 static inline int aio_put_req(struct kiocb *iocb) { return 0; }
@@ -219,6 +221,9 @@
 static inline int aio_complete(struct kiocb *iocb, long res, long res2) { return 0; }
 struct mm_struct;
 static inline void exit_aio(struct mm_struct *mm) { }
+static inline long do_io_submit(aio_context_t ctx_id, long nr,
+				struct iocb __user * __user *iocbpp,
+				bool compat) { return 0; }
 #endif /* CONFIG_AIO */
 
 static inline struct kiocb *list_kiocb(struct list_head *h)
diff --git a/include/linux/bitmap.h b/include/linux/bitmap.h
index daf8c48..6fb2720 100644
--- a/include/linux/bitmap.h
+++ b/include/linux/bitmap.h
@@ -141,6 +141,7 @@
 extern void bitmap_release_region(unsigned long *bitmap, int pos, int order);
 extern int bitmap_allocate_region(unsigned long *bitmap, int pos, int order);
 extern void bitmap_copy_le(void *dst, const unsigned long *src, int nbits);
+extern int bitmap_ord_to_pos(const unsigned long *bitmap, int n, int bits);
 
 #define BITMAP_LAST_WORD_MASK(nbits)					\
 (									\
diff --git a/include/linux/cgroup.h b/include/linux/cgroup.h
index 8f78073..0c62160 100644
--- a/include/linux/cgroup.h
+++ b/include/linux/cgroup.h
@@ -397,7 +397,7 @@
 	 * This callback must be implemented, if you want provide
 	 * notification functionality.
 	 */
-	int (*unregister_event)(struct cgroup *cgrp, struct cftype *cft,
+	void (*unregister_event)(struct cgroup *cgrp, struct cftype *cft,
 			struct eventfd_ctx *eventfd);
 };
 
diff --git a/include/linux/compat.h b/include/linux/compat.h
index 717c691..168f7da 100644
--- a/include/linux/compat.h
+++ b/include/linux/compat.h
@@ -356,5 +356,9 @@
 asmlinkage long compat_sys_openat(unsigned int dfd, const char __user *filename,
 				  int flags, int mode);
 
+extern ssize_t compat_rw_copy_check_uvector(int type,
+		const struct compat_iovec __user *uvector, unsigned long nr_segs,
+		unsigned long fast_segs, struct iovec *fast_pointer,
+		struct iovec **ret_pointer);
 #endif /* CONFIG_COMPAT */
 #endif /* _LINUX_COMPAT_H */
diff --git a/include/linux/cpuset.h b/include/linux/cpuset.h
index 20b51ca..457ed76 100644
--- a/include/linux/cpuset.h
+++ b/include/linux/cpuset.h
@@ -69,6 +69,7 @@
 					struct task_struct *task);
 
 extern int cpuset_mem_spread_node(void);
+extern int cpuset_slab_spread_node(void);
 
 static inline int cpuset_do_page_mem_spread(void)
 {
@@ -194,6 +195,11 @@
 	return 0;
 }
 
+static inline int cpuset_slab_spread_node(void)
+{
+	return 0;
+}
+
 static inline int cpuset_do_page_mem_spread(void)
 {
 	return 0;
diff --git a/include/linux/cred.h b/include/linux/cred.h
index 52507c3..75c0fa8 100644
--- a/include/linux/cred.h
+++ b/include/linux/cred.h
@@ -156,7 +156,6 @@
 extern struct cred *cred_alloc_blank(void);
 extern struct cred *prepare_creds(void);
 extern struct cred *prepare_exec_creds(void);
-extern struct cred *prepare_usermodehelper_creds(void);
 extern int commit_creds(struct cred *);
 extern void abort_creds(struct cred *);
 extern const struct cred *override_creds(const struct cred *);
diff --git a/include/linux/dma-mapping.h b/include/linux/dma-mapping.h
index ca32ed7..89b7e1a 100644
--- a/include/linux/dma-mapping.h
+++ b/include/linux/dma-mapping.h
@@ -40,16 +40,6 @@
 	void (*sync_single_for_device)(struct device *dev,
 				       dma_addr_t dma_handle, size_t size,
 				       enum dma_data_direction dir);
-	void (*sync_single_range_for_cpu)(struct device *dev,
-					  dma_addr_t dma_handle,
-					  unsigned long offset,
-					  size_t size,
-					  enum dma_data_direction dir);
-	void (*sync_single_range_for_device)(struct device *dev,
-					     dma_addr_t dma_handle,
-					     unsigned long offset,
-					     size_t size,
-					     enum dma_data_direction dir);
 	void (*sync_sg_for_cpu)(struct device *dev,
 				struct scatterlist *sg, int nents,
 				enum dma_data_direction dir);
@@ -105,21 +95,6 @@
 #include <asm-generic/dma-mapping-broken.h>
 #endif
 
-/* for backwards compatibility, removed soon */
-static inline void __deprecated dma_sync_single(struct device *dev,
-						dma_addr_t addr, size_t size,
-						enum dma_data_direction dir)
-{
-	dma_sync_single_for_cpu(dev, addr, size, dir);
-}
-
-static inline void __deprecated dma_sync_sg(struct device *dev,
-					    struct scatterlist *sg, int nelems,
-					    enum dma_data_direction dir)
-{
-	dma_sync_sg_for_cpu(dev, sg, nelems, dir);
-}
-
 static inline u64 dma_get_mask(struct device *dev)
 {
 	if (dev && dev->dma_mask && *dev->dma_mask)
diff --git a/include/linux/fs.h b/include/linux/fs.h
index b336cb9..9682d52 100644
--- a/include/linux/fs.h
+++ b/include/linux/fs.h
@@ -2228,6 +2228,7 @@
 
 extern void
 file_ra_state_init(struct file_ra_state *ra, struct address_space *mapping);
+extern loff_t noop_llseek(struct file *file, loff_t offset, int origin);
 extern loff_t no_llseek(struct file *file, loff_t offset, int origin);
 extern loff_t generic_file_llseek(struct file *file, loff_t offset, int origin);
 extern loff_t generic_file_llseek_unlocked(struct file *file, loff_t offset,
diff --git a/include/linux/gpio.h b/include/linux/gpio.h
index 4e949a5..03f616b 100644
--- a/include/linux/gpio.h
+++ b/include/linux/gpio.h
@@ -51,6 +51,11 @@
 	return -ENOSYS;
 }
 
+static inline int gpio_set_debounce(unsigned gpio, unsigned debounce)
+{
+	return -ENOSYS;
+}
+
 static inline int gpio_get_value(unsigned gpio)
 {
 	/* GPIO can never have been requested or set as {in,out}put */
diff --git a/include/linux/i2c/max732x.h b/include/linux/i2c/max732x.h
index e103366..c04bac8 100644
--- a/include/linux/i2c/max732x.h
+++ b/include/linux/i2c/max732x.h
@@ -7,6 +7,9 @@
 	/* number of the first GPIO */
 	unsigned	gpio_base;
 
+	/* interrupt base */
+	int		irq_base;
+
 	void		*context;	/* param to setup/teardown */
 
 	int		(*setup)(struct i2c_client *client,
diff --git a/include/linux/i2c/pca953x.h b/include/linux/i2c/pca953x.h
index d5c5a60..139ba52 100644
--- a/include/linux/i2c/pca953x.h
+++ b/include/linux/i2c/pca953x.h
@@ -24,7 +24,7 @@
 	int		(*teardown)(struct i2c_client *client,
 				unsigned gpio, unsigned ngpio,
 				void *context);
-	char		**names;
+	const char	*const *names;
 };
 
 #endif /* _LINUX_PCA953X_H */
diff --git a/include/linux/init_task.h b/include/linux/init_task.h
index 7996fc2..2beaa134 100644
--- a/include/linux/init_task.h
+++ b/include/linux/init_task.h
@@ -16,7 +16,7 @@
 extern struct fs_struct init_fs;
 
 #define INIT_SIGNALS(sig) {						\
-	.count		= ATOMIC_INIT(1), 				\
+	.nr_threads	= 1,						\
 	.wait_chldexit	= __WAIT_QUEUE_HEAD_INITIALIZER(sig.wait_chldexit),\
 	.shared_pending	= { 						\
 		.list = LIST_HEAD_INIT(sig.shared_pending.list),	\
@@ -35,7 +35,7 @@
 
 #define INIT_SIGHAND(sighand) {						\
 	.count		= ATOMIC_INIT(1), 				\
-	.action		= { { { .sa_handler = NULL, } }, },		\
+	.action		= { { { .sa_handler = SIG_DFL, } }, },		\
 	.siglock	= __SPIN_LOCK_UNLOCKED(sighand.siglock),	\
 	.signalfd_wqh	= __WAIT_QUEUE_HEAD_INITIALIZER(sighand.signalfd_wqh),	\
 }
@@ -45,9 +45,9 @@
 #define INIT_STRUCT_PID {						\
 	.count 		= ATOMIC_INIT(1),				\
 	.tasks		= {						\
-		{ .first = &init_task.pids[PIDTYPE_PID].node },		\
-		{ .first = &init_task.pids[PIDTYPE_PGID].node },	\
-		{ .first = &init_task.pids[PIDTYPE_SID].node },		\
+		{ .first = NULL },					\
+		{ .first = NULL },					\
+		{ .first = NULL },					\
 	},								\
 	.level		= 0,						\
 	.numbers	= { {						\
@@ -61,7 +61,7 @@
 {								\
 	.node = {						\
 		.next = NULL,					\
-		.pprev = &init_struct_pid.tasks[type].first,	\
+		.pprev = NULL,					\
 	},							\
 	.pid = &init_struct_pid,				\
 }
@@ -163,6 +163,7 @@
 		[PIDTYPE_PGID] = INIT_PID_LINK(PIDTYPE_PGID),		\
 		[PIDTYPE_SID]  = INIT_PID_LINK(PIDTYPE_SID),		\
 	},								\
+	.thread_group	= LIST_HEAD_INIT(tsk.thread_group),		\
 	.dirties = INIT_PROP_LOCAL_SINGLE(dirties),			\
 	INIT_IDS							\
 	INIT_PERF_EVENTS(tsk)						\
diff --git a/include/linux/input.h b/include/linux/input.h
index 83524e4..6fcc9101 100644
--- a/include/linux/input.h
+++ b/include/linux/input.h
@@ -1155,7 +1155,7 @@
 
 	int sync;
 
-	int abs[ABS_MAX + 1];
+	int abs[ABS_CNT];
 	int rep[REP_MAX + 1];
 
 	unsigned long key[BITS_TO_LONGS(KEY_CNT)];
@@ -1163,11 +1163,11 @@
 	unsigned long snd[BITS_TO_LONGS(SND_CNT)];
 	unsigned long sw[BITS_TO_LONGS(SW_CNT)];
 
-	int absmax[ABS_MAX + 1];
-	int absmin[ABS_MAX + 1];
-	int absfuzz[ABS_MAX + 1];
-	int absflat[ABS_MAX + 1];
-	int absres[ABS_MAX + 1];
+	int absmax[ABS_CNT];
+	int absmin[ABS_CNT];
+	int absfuzz[ABS_CNT];
+	int absflat[ABS_CNT];
+	int absres[ABS_CNT];
 
 	int (*open)(struct input_dev *dev);
 	void (*close)(struct input_dev *dev);
diff --git a/include/linux/joystick.h b/include/linux/joystick.h
index 9e20c29..47199b1 100644
--- a/include/linux/joystick.h
+++ b/include/linux/joystick.h
@@ -64,8 +64,8 @@
 #define JSIOCSCORR		_IOW('j', 0x21, struct js_corr)			/* set correction values */
 #define JSIOCGCORR		_IOR('j', 0x22, struct js_corr)			/* get correction values */
 
-#define JSIOCSAXMAP		_IOW('j', 0x31, __u8[ABS_MAX + 1])		/* set axis mapping */
-#define JSIOCGAXMAP		_IOR('j', 0x32, __u8[ABS_MAX + 1])		/* get axis mapping */
+#define JSIOCSAXMAP		_IOW('j', 0x31, __u8[ABS_CNT])			/* set axis mapping */
+#define JSIOCGAXMAP		_IOR('j', 0x32, __u8[ABS_CNT])			/* get axis mapping */
 #define JSIOCSBTNMAP		_IOW('j', 0x33, __u16[KEY_MAX - BTN_MISC + 1])	/* set button mapping */
 #define JSIOCGBTNMAP		_IOR('j', 0x34, __u16[KEY_MAX - BTN_MISC + 1])	/* get button mapping */
 
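ABS_CNT is defined as ABS_MAX + 1, so the JSIOC* ioctl numbers (which
encode the argument size) are unchanged; the rewrite just spells the array
size with the canonical *_CNT constant.  A minimal userspace sketch
reading the axis map:

	#include <fcntl.h>
	#include <stdio.h>
	#include <sys/ioctl.h>
	#include <linux/joystick.h>

	int main(void)
	{
		__u8 axmap[ABS_CNT];	/* sized by the new constant */
		int fd = open("/dev/input/js0", O_RDONLY);

		if (fd < 0 || ioctl(fd, JSIOCGAXMAP, axmap) < 0) {
			perror("js0");
			return 1;
		}
		printf("first axis maps to event code %u\n", axmap[0]);
		return 0;
	}
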
diff --git a/include/linux/kmod.h b/include/linux/kmod.h
index facb27f..6efd7a7 100644
--- a/include/linux/kmod.h
+++ b/include/linux/kmod.h
@@ -23,6 +23,7 @@
 #include <linux/stddef.h>
 #include <linux/errno.h>
 #include <linux/compiler.h>
+#include <linux/workqueue.h>
 
 #define KMOD_PATH_LEN 256
 
@@ -45,19 +46,6 @@
 
 struct key;
 struct file;
-struct subprocess_info;
-
-/* Allocate a subprocess_info structure */
-struct subprocess_info *call_usermodehelper_setup(char *path, char **argv,
-						  char **envp, gfp_t gfp_mask);
-
-/* Set various pieces of state into the subprocess_info structure */
-void call_usermodehelper_setkeys(struct subprocess_info *info,
-				 struct key *session_keyring);
-int call_usermodehelper_stdinpipe(struct subprocess_info *sub_info,
-				  struct file **filp);
-void call_usermodehelper_setcleanup(struct subprocess_info *info,
-				    void (*cleanup)(char **argv, char **envp));
 
 enum umh_wait {
 	UMH_NO_WAIT = -1,	/* don't wait at all */
@@ -65,6 +53,29 @@
 	UMH_WAIT_PROC = 1,	/* wait for the process to complete */
 };
 
+struct subprocess_info {
+	struct work_struct work;
+	struct completion *complete;
+	char *path;
+	char **argv;
+	char **envp;
+	enum umh_wait wait;
+	int retval;
+	int (*init)(struct subprocess_info *info);
+	void (*cleanup)(struct subprocess_info *info);
+	void *data;
+};
+
+/* Allocate a subprocess_info structure */
+struct subprocess_info *call_usermodehelper_setup(char *path, char **argv,
+						  char **envp, gfp_t gfp_mask);
+
+/* Set various pieces of state into the subprocess_info structure */
+void call_usermodehelper_setfns(struct subprocess_info *info,
+		    int (*init)(struct subprocess_info *info),
+		    void (*cleanup)(struct subprocess_info *info),
+		    void *data);
+
 /* Actually execute the sub-process */
 int call_usermodehelper_exec(struct subprocess_info *info, enum umh_wait wait);
 
@@ -73,38 +84,33 @@
 void call_usermodehelper_freeinfo(struct subprocess_info *info);
 
 static inline int
-call_usermodehelper(char *path, char **argv, char **envp, enum umh_wait wait)
+call_usermodehelper_fns(char *path, char **argv, char **envp,
+			enum umh_wait wait,
+			int (*init)(struct subprocess_info *info),
+			void (*cleanup)(struct subprocess_info *), void *data)
 {
 	struct subprocess_info *info;
 	gfp_t gfp_mask = (wait == UMH_NO_WAIT) ? GFP_ATOMIC : GFP_KERNEL;
 
 	info = call_usermodehelper_setup(path, argv, envp, gfp_mask);
+
 	if (info == NULL)
 		return -ENOMEM;
+
+	call_usermodehelper_setfns(info, init, cleanup, data);
+
 	return call_usermodehelper_exec(info, wait);
 }
 
 static inline int
-call_usermodehelper_keys(char *path, char **argv, char **envp,
-			 struct key *session_keyring, enum umh_wait wait)
+call_usermodehelper(char *path, char **argv, char **envp, enum umh_wait wait)
 {
-	struct subprocess_info *info;
-	gfp_t gfp_mask = (wait == UMH_NO_WAIT) ? GFP_ATOMIC : GFP_KERNEL;
-
-	info = call_usermodehelper_setup(path, argv, envp, gfp_mask);
-	if (info == NULL)
-		return -ENOMEM;
-
-	call_usermodehelper_setkeys(info, session_keyring);
-	return call_usermodehelper_exec(info, wait);
+	return call_usermodehelper_fns(path, argv, envp, wait,
+				       NULL, NULL, NULL);
 }
 
 extern void usermodehelper_init(void);
 
-struct file;
-extern int call_usermodehelper_pipe(char *path, char *argv[], char *envp[],
-				    struct file **filp);
-
 extern int usermodehelper_disable(void);
 extern void usermodehelper_enable(void);
 
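call_usermodehelper_fns() replaces the _keys/_pipe variants with generic
init/cleanup hooks plus a data cookie; the coredump rework in fs/exec.c
above uses exactly this to install a pipe as the helper's stdin.  A
minimal sketch of a caller (path and names illustrative):

	#include <linux/kmod.h>

	static int example_init(struct subprocess_info *info)
	{
		/* runs in the helper's context before exec();
		 * info->data carries the caller's cookie */
		return 0;
	}

	static int example_run_helper(void *cookie)
	{
		char *argv[] = { "/sbin/example-helper", NULL };
		char *envp[] = { "HOME=/", "PATH=/sbin:/bin", NULL };

		return call_usermodehelper_fns("/sbin/example-helper", argv,
					       envp, UMH_WAIT_EXEC,
					       example_init, NULL, cookie);
	}
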
diff --git a/include/linux/memcontrol.h b/include/linux/memcontrol.h
index 0589479..9411d32 100644
--- a/include/linux/memcontrol.h
+++ b/include/linux/memcontrol.h
@@ -90,7 +90,8 @@
 extern struct cgroup_subsys_state *mem_cgroup_css(struct mem_cgroup *mem);
 
 extern int
-mem_cgroup_prepare_migration(struct page *page, struct mem_cgroup **ptr);
+mem_cgroup_prepare_migration(struct page *page,
+	struct page *newpage, struct mem_cgroup **ptr);
 extern void mem_cgroup_end_migration(struct mem_cgroup *mem,
 	struct page *oldpage, struct page *newpage);
 
@@ -227,7 +228,8 @@
 }
 
 static inline int
-mem_cgroup_prepare_migration(struct page *page, struct mem_cgroup **ptr)
+mem_cgroup_prepare_migration(struct page *page, struct page *newpage,
+	struct mem_cgroup **ptr)
 {
 	return 0;
 }
diff --git a/include/linux/mmc/host.h b/include/linux/mmc/host.h
index 3196c84..f65913c 100644
--- a/include/linux/mmc/host.h
+++ b/include/linux/mmc/host.h
@@ -230,7 +230,7 @@
 #define mmc_classdev(x)	(&(x)->class_dev)
 #define mmc_hostname(x)	(dev_name(&(x)->class_dev))
 
-extern int mmc_suspend_host(struct mmc_host *, pm_message_t);
+extern int mmc_suspend_host(struct mmc_host *);
 extern int mmc_resume_host(struct mmc_host *);
 
 extern void mmc_power_save_host(struct mmc_host *host);
diff --git a/include/linux/mmc/sdhci-spear.h b/include/linux/mmc/sdhci-spear.h
new file mode 100644
index 0000000..9188c97
--- /dev/null
+++ b/include/linux/mmc/sdhci-spear.h
@@ -0,0 +1,42 @@
+/*
+ * include/linux/mmc/sdhci-spear.h
+ *
+ * SDHCI declarations specific to ST SPEAr platform
+ *
+ * Copyright (C) 2010 ST Microelectronics
+ * Viresh Kumar <viresh.kumar@st.com>
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2. This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ */
+
+#ifndef MMC_SDHCI_SPEAR_H
+#define MMC_SDHCI_SPEAR_H
+
+#include <linux/platform_device.h>
+/*
+ * struct sdhci_plat_data: spear sdhci platform data structure
+ *
+ * @card_power_gpio: gpio pin for enabling/disabling power to sdhci socket
+ * @power_active_high: if set, enable power to sdhci socket by setting
+ *			card_power_gpio
+ * @power_always_enb: If set, then enable power on probe, otherwise enable only
+ *			on card insertion and disable on card removal.
+ * @card_int_gpio: gpio pin used for card detection
+ */
+struct sdhci_plat_data {
+	int card_power_gpio;
+	int power_active_high;
+	int power_always_enb;
+	int card_int_gpio;
+};
+
+/* This function is used to set platform_data field of pdev->dev */
+static inline void
+sdhci_set_plat_data(struct platform_device *pdev, struct sdhci_plat_data *data)
+{
+	pdev->dev.platform_data = data;
+}
+
+#endif /* MMC_SDHCI_SPEAR_H */
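
For illustration, board setup code might attach the platform data like this
(pdev, the device name and the GPIO number are made up):

	static struct sdhci_plat_data sdhci_data = {
		.card_power_gpio = -1,	/* no power GPIO on this board */
		.card_int_gpio	 = 34,	/* hypothetical card-detect pin */
	};

	/* pdev is the board's SDHCI platform_device */
	sdhci_set_plat_data(pdev, &sdhci_data);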
diff --git a/include/linux/mmc/sdio_func.h b/include/linux/mmc/sdio_func.h
index c6c0cce..31baaf8 100644
--- a/include/linux/mmc/sdio_func.h
+++ b/include/linux/mmc/sdio_func.h
@@ -145,6 +145,9 @@
 extern void sdio_writel(struct sdio_func *func, u32 b,
 	unsigned int addr, int *err_ret);
 
+extern u8 sdio_writeb_readb(struct sdio_func *func, u8 write_byte,
+	unsigned int addr, int *err_ret);
+
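
For illustration, the new helper writes a byte and hands back the value read
in the same transaction, which is useful for read-after-write register
accesses (sketch; the register offset is arbitrary):

	int err;
	u8 val;

	val = sdio_writeb_readb(func, 0x01, 0x10 /* some register */, &err);
	if (err)
		return err;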
 extern int sdio_memcpy_toio(struct sdio_func *func, unsigned int addr,
 	void *src, int count);
 extern int sdio_writesb(struct sdio_func *func, unsigned int addr,
diff --git a/include/linux/mmc/sh_mmcif.h b/include/linux/mmc/sh_mmcif.h
new file mode 100644
index 0000000..aafe832
--- /dev/null
+++ b/include/linux/mmc/sh_mmcif.h
@@ -0,0 +1,39 @@
+/*
+ * include/linux/mmc/sh_mmcif.h
+ *
+ * platform data for eMMC driver
+ *
+ * Copyright (C) 2010 Renesas Solutions Corp.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License.
+ *
+ */
+
+#ifndef __SH_MMCIF_H__
+#define __SH_MMCIF_H__
+
+/*
+ * MMCIF : CE_CLK_CTRL [19:16]
+ * 1000 : Peripheral clock / 512
+ * 0111 : Peripheral clock / 256
+ * 0110 : Peripheral clock / 128
+ * 0101 : Peripheral clock / 64
+ * 0100 : Peripheral clock / 32
+ * 0011 : Peripheral clock / 16
+ * 0010 : Peripheral clock / 8
+ * 0001 : Peripheral clock / 4
+ * 0000 : Peripheral clock / 2
+ * 1111 : Peripheral clock (sup_pclk set '1')
+ */
+
+struct sh_mmcif_plat_data {
+	void (*set_pwr)(struct platform_device *pdev, int state);
+	void (*down_pwr)(struct platform_device *pdev);
+	u8	sup_pclk;	/* 1: SH7757, 0: SH7724/SH7372 */
+	unsigned long caps;
+	u32	ocr;
+};
+
+#endif /* __SH_MMCIF_H__ */
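
For illustration, a board file might supply the data like this (the values
are illustrative only):

	static struct sh_mmcif_plat_data sh_mmcif_plat = {
		.sup_pclk	= 0,	/* SH7724/SH7372 */
		.caps		= MMC_CAP_4_BIT_DATA,
		.ocr		= MMC_VDD_32_33 | MMC_VDD_33_34,
	};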
diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
index 0fa4913..b4d109e 100644
--- a/include/linux/mmzone.h
+++ b/include/linux/mmzone.h
@@ -671,6 +671,12 @@
 static inline void memory_present(int nid, unsigned long start, unsigned long end) {}
 #endif
 
+#ifdef CONFIG_HAVE_MEMORYLESS_NODES
+int local_memory_node(int node_id);
+#else
+static inline int local_memory_node(int node_id) { return node_id; }
+#endif
+
 #ifdef CONFIG_NEED_NODE_MEMMAP_SIZE
 unsigned long __init node_memmap_size_bytes(int, unsigned long, unsigned long);
 #endif
diff --git a/include/linux/nodemask.h b/include/linux/nodemask.h
index dba35e4..8a8f1d0 100644
--- a/include/linux/nodemask.h
+++ b/include/linux/nodemask.h
@@ -66,6 +66,8 @@
  * int num_online_nodes()		Number of online Nodes
  * int num_possible_nodes()		Number of all possible Nodes
  *
+ * int node_random(mask)		Random node with set bit in mask
+ *
  * int node_online(node)		Is some node online?
  * int node_possible(node)		Is some node possible?
  *
@@ -430,6 +432,10 @@
 	node_clear_state(nid, N_ONLINE);
 	nr_online_nodes = num_node_state(N_ONLINE);
 }
+
+#define node_random(mask) __node_random(&(mask))
+extern int __node_random(const nodemask_t *maskp);
+
 #else
 
 static inline int node_state(int node, enum node_states state)
@@ -460,6 +466,8 @@
 
 #define node_set_online(node)	   node_set_state((node), N_ONLINE)
 #define node_set_offline(node)	   node_clear_state((node), N_ONLINE)
+
+static inline int node_random(const nodemask_t mask) { return 0; }
 #endif
 
 #define node_online_map 	node_states[N_ONLINE]
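
For illustration, picking a random allowed node (this is how the fork()
change later in this series seeds the cpuset spread rotors):

	int nid = node_random(current->mems_allowed);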
diff --git a/include/linux/notifier.h b/include/linux/notifier.h
index 7c36096..540703b 100644
--- a/include/linux/notifier.h
+++ b/include/linux/notifier.h
@@ -164,7 +164,10 @@
 /* Encapsulate (negative) errno value (in particular, NOTIFY_BAD <=> EPERM). */
 static inline int notifier_from_errno(int err)
 {
-	return NOTIFY_STOP_MASK | (NOTIFY_OK - err);
+	if (err)
+		return NOTIFY_STOP_MASK | (NOTIFY_OK - err);
+
+	return NOTIFY_OK;
 }
 
 /* Restore (negative) errno value from notify return value. */
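
Worked example: before this change notifier_from_errno(0) evaluated to
NOTIFY_STOP_MASK | NOTIFY_OK (i.e. NOTIFY_STOP), silently stopping the
chain even on success; with the fix a callback can simply end with

	return notifier_from_errno(err);	/* err == 0 -> NOTIFY_OK */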
diff --git a/include/linux/page_cgroup.h b/include/linux/page_cgroup.h
index aef22ae..5bb13b3 100644
--- a/include/linux/page_cgroup.h
+++ b/include/linux/page_cgroup.h
@@ -40,6 +40,7 @@
 	PCG_USED, /* this object is in use. */
 	PCG_ACCT_LRU, /* page has been accounted for */
 	PCG_FILE_MAPPED, /* page is accounted as "mapped" */
+	PCG_MIGRATION, /* under page migration */
 };
 
 #define TESTPCGFLAG(uname, lname)			\
@@ -79,6 +80,10 @@
 CLEARPCGFLAG(FileMapped, FILE_MAPPED)
 TESTPCGFLAG(FileMapped, FILE_MAPPED)
 
+SETPCGFLAG(Migration, MIGRATION)
+CLEARPCGFLAG(Migration, MIGRATION)
+TESTPCGFLAG(Migration, MIGRATION)
+
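
For illustration, the generated accessors follow the existing naming
pattern, so the migration path can do (sketch):

	SetPageCgroupMigration(pc);	/* page is entering migration */
	/* ... move the page ... */
	ClearPageCgroupMigration(pc);	/* migration finished */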
 static inline int page_cgroup_nid(struct page_cgroup *pc)
 {
 	return page_to_nid(pc->page);
diff --git a/include/linux/random.h b/include/linux/random.h
index 25d02fe..fb7ab9d 100644
--- a/include/linux/random.h
+++ b/include/linux/random.h
@@ -40,6 +40,10 @@
 	__u32	buf[0];
 };
 
+struct rnd_state {
+	__u32 s1, s2, s3;
+};
+
 /* Exported functions */
 
 #ifdef __KERNEL__
@@ -74,6 +78,30 @@
 u32 random32(void);
 void srandom32(u32 seed);
 
+u32 prandom32(struct rnd_state *);
+
+/*
+ * Handle minimum values for seeds
+ */
+static inline u32 __seed(u32 x, u32 m)
+{
+	return (x < m) ? x + m : x;
+}
+
+/**
+ * prandom32_seed - set seed for prandom32().
+ * @state: pointer to state structure to receive the seed.
+ * @seed: arbitrary 64-bit value to use as a seed.
+ */
+static inline void prandom32_seed(struct rnd_state *state, u64 seed)
+{
+	u32 i = (seed >> 32) ^ (seed << 10) ^ seed;
+
+	state->s1 = __seed(i, 1);
+	state->s2 = __seed(i, 7);
+	state->s3 = __seed(i, 15);
+}
+
 #endif /* __KERNEL__ */
 
 #endif /* _LINUX_RANDOM_H */
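
For illustration, seeding a private state and drawing values from it
(sketch):

	struct rnd_state state;
	u32 r;

	prandom32_seed(&state, 0x123456789abcdefULL);
	r = prandom32(&state);	/* reproducible for a given seed */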
diff --git a/include/linux/rio.h b/include/linux/rio.h
index dc0c755..19b5f22 100644
--- a/include/linux/rio.h
+++ b/include/linux/rio.h
@@ -64,10 +64,13 @@
 #define RIO_INB_MBOX_RESOURCE	1
 #define RIO_OUTB_MBOX_RESOURCE	2
 
+#define RIO_PW_MSG_SIZE		64
+
 extern struct bus_type rio_bus_type;
 extern struct list_head rio_devices;	/* list of all devices */
 
 struct rio_mport;
+union rio_pw_msg;
 
 /**
  * struct rio_dev - RIO device info
@@ -107,11 +110,15 @@
 	u32 swpinfo;		/* Only used for switches */
 	u32 src_ops;
 	u32 dst_ops;
+	u32 comp_tag;
+	u32 phys_efptr;
+	u32 em_efptr;
 	u64 dma_mask;
 	struct rio_switch *rswitch;	/* RIO switch info */
 	struct rio_driver *driver;	/* RIO driver claiming this device */
 	struct device dev;	/* LDM device structure */
 	struct resource riores[RIO_MAX_DEV_RESOURCES];
+	int (*pwcback) (struct rio_dev *rdev, union rio_pw_msg *msg, int step);
 	u16 destid;
 };
 
@@ -211,8 +218,12 @@
  * @hopcount: Hopcount to this switch
  * @destid: Associated destid in the path
  * @route_table: Copy of switch routing table
+ * @port_ok: Status of each port (one bit per port) - OK=1 or UNINIT=0
  * @add_entry: Callback for switch-specific route add function
  * @get_entry: Callback for switch-specific route get function
+ * @clr_table: Callback for switch-specific clear route table function
+ * @em_init: Callback for switch-specific error management initialization function
+ * @em_handle: Callback for switch-specific error management handler function
  */
 struct rio_switch {
 	struct list_head node;
@@ -220,10 +231,19 @@
 	u16 hopcount;
 	u16 destid;
 	u8 *route_table;
+	u32 port_ok;
 	int (*add_entry) (struct rio_mport * mport, u16 destid, u8 hopcount,
 			  u16 table, u16 route_destid, u8 route_port);
 	int (*get_entry) (struct rio_mport * mport, u16 destid, u8 hopcount,
 			  u16 table, u16 route_destid, u8 * route_port);
+	int (*clr_table) (struct rio_mport *mport, u16 destid, u8 hopcount,
+			  u16 table);
+	int (*set_domain) (struct rio_mport *mport, u16 destid, u8 hopcount,
+			   u8 sw_domain);
+	int (*get_domain) (struct rio_mport *mport, u16 destid, u8 hopcount,
+			   u8 *sw_domain);
+	int (*em_init) (struct rio_dev *dev);
+	int (*em_handle) (struct rio_dev *dev, u8 swport);
 };
 
 /* Low-level architecture-dependent routines */
@@ -235,6 +255,7 @@
  * @cread: Callback to perform network read of config space.
  * @cwrite: Callback to perform network write of config space.
  * @dsend: Callback to send a doorbell message.
+ * @pwenable: Callback to enable/disable port-write message handling.
  */
 struct rio_ops {
 	int (*lcread) (struct rio_mport *mport, int index, u32 offset, int len,
@@ -246,6 +267,7 @@
 	int (*cwrite) (struct rio_mport *mport, int index, u16 destid,
 			u8 hopcount, u32 offset, int len, u32 data);
 	int (*dsend) (struct rio_mport *mport, int index, u16 destid, u16 data);
+	int (*pwenable) (struct rio_mport *mport, int enable);
 };
 
 #define RIO_RESOURCE_MEM	0x00000100
@@ -302,21 +324,28 @@
 };
 
 /**
- * struct rio_route_ops - Per-switch route operations
+ * struct rio_switch_ops - Per-switch operations
  * @vid: RIO vendor ID
  * @did: RIO device ID
- * @add_hook: Callback that adds a route entry
- * @get_hook: Callback that gets a route entry
+ * @init_hook: Callback that performs switch device initialization
  *
- * Defines the operations that are necessary to manipulate the route
- * tables for a particular RIO switch device.
+ * Defines the operations that are necessary to initialize/control
+ * a particular RIO switch device.
  */
-struct rio_route_ops {
+struct rio_switch_ops {
 	u16 vid, did;
-	int (*add_hook) (struct rio_mport * mport, u16 destid, u8 hopcount,
-			 u16 table, u16 route_destid, u8 route_port);
-	int (*get_hook) (struct rio_mport * mport, u16 destid, u8 hopcount,
-			 u16 table, u16 route_destid, u8 * route_port);
+	int (*init_hook) (struct rio_dev *rdev, int do_enum);
+};
+
+union rio_pw_msg {
+	struct {
+		u32 comptag;	/* Component Tag CSR */
+		u32 errdetect;	/* Port N Error Detect CSR */
+		u32 is_port;	/* Implementation specific + PortID */
+		u32 ltlerrdet;	/* LTL Error Detect CSR */
+		u32 padding[12];
+	} em;
+	u32 raw[RIO_PW_MSG_SIZE/sizeof(u32)];
 };
 
 /* Architecture and hardware-specific functions */
diff --git a/include/linux/rio_drv.h b/include/linux/rio_drv.h
index c93a58a..edc55da 100644
--- a/include/linux/rio_drv.h
+++ b/include/linux/rio_drv.h
@@ -413,6 +413,12 @@
 int rio_request_region(struct rio_dev *, int, char *);
 void rio_release_region(struct rio_dev *, int);
 
+/* Port-Write management */
+extern int rio_request_inb_pwrite(struct rio_dev *,
+			int (*)(struct rio_dev *, union rio_pw_msg*, int));
+extern int rio_release_inb_pwrite(struct rio_dev *);
+extern int rio_inb_pwrite_handler(union rio_pw_msg *pw_msg);
+
 /* LDM support */
 int rio_register_driver(struct rio_driver *);
 void rio_unregister_driver(struct rio_driver *);
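
For illustration, a driver interested in inbound port-writes might register
a callback like this (the handler name is hypothetical):

	static int my_pw_handler(struct rio_dev *rdev, union rio_pw_msg *msg,
				 int step)
	{
		/* inspect msg->em.comptag, msg->em.errdetect, ... */
		return 0;
	}

	rio_request_inb_pwrite(rdev, my_pw_handler);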
diff --git a/include/linux/rio_ids.h b/include/linux/rio_ids.h
index 919d4e0..db50e1c 100644
--- a/include/linux/rio_ids.h
+++ b/include/linux/rio_ids.h
@@ -20,5 +20,19 @@
 
 #define RIO_VID_TUNDRA			0x000d
 #define RIO_DID_TSI500			0x0500
+#define RIO_DID_TSI568			0x0568
+#define RIO_DID_TSI572			0x0572
+#define RIO_DID_TSI574			0x0574
+#define RIO_DID_TSI576			0x0578 /* Same ID as Tsi578 */
+#define RIO_DID_TSI577			0x0577
+#define RIO_DID_TSI578			0x0578
+
+#define RIO_VID_IDT			0x0038
+#define RIO_DID_IDT70K200		0x0310
+#define RIO_DID_IDTCPS8			0x035c
+#define RIO_DID_IDTCPS12		0x035d
+#define RIO_DID_IDTCPS16		0x035b
+#define RIO_DID_IDTCPS6Q		0x035f
+#define RIO_DID_IDTCPS10Q		0x035e
 
 #endif				/* LINUX_RIO_IDS_H */
diff --git a/include/linux/rio_regs.h b/include/linux/rio_regs.h
index 326540f..aedee04 100644
--- a/include/linux/rio_regs.h
+++ b/include/linux/rio_regs.h
@@ -39,6 +39,8 @@
 #define  RIO_PEF_INB_MBOX2		0x00200000	/* [II] Mailbox 2 */
 #define  RIO_PEF_INB_MBOX3		0x00100000	/* [II] Mailbox 3 */
 #define  RIO_PEF_INB_DOORBELL		0x00080000	/* [II] Doorbells */
+#define  RIO_PEF_EXT_RT			0x00000200	/* [III, 1.3] Extended route table support */
+#define  RIO_PEF_STD_RT			0x00000100	/* [III, 1.3] Standard route table support */
 #define  RIO_PEF_CTLS			0x00000010	/* [III] CTLS */
 #define  RIO_PEF_EXT_FEATURES		0x00000008	/* [I] EFT_PTR valid */
 #define  RIO_PEF_ADDR_66		0x00000004	/* [I] 66 bits */
@@ -91,7 +93,10 @@
 #define  RIO_OPS_ATOMIC_CLR		0x00000010	/* [I] Atomic clr op */
 #define  RIO_OPS_PORT_WRITE		0x00000004	/* [I] Port-write op */
 
-					/* 0x20-0x3c *//* Reserved */
+					/* 0x20-0x30 *//* Reserved */
+
+#define	RIO_SWITCH_RT_LIMIT	0x34	/* [III, 1.3] Switch Route Table Destination ID Limit CAR */
+#define	 RIO_RT_MAX_DESTID		0x0000ffff
 
 #define RIO_MBOX_CSR		0x40	/* [II] Mailbox CSR */
 #define  RIO_MBOX0_AVAIL		0x80000000	/* [II] Mbox 0 avail */
@@ -153,7 +158,11 @@
 #define RIO_HOST_DID_LOCK_CSR	0x68	/* [III] Host Base Device ID Lock CSR */
 #define RIO_COMPONENT_TAG_CSR	0x6c	/* [III] Component Tag CSR */
 
-					/* 0x70-0xf8 *//* Reserved */
+#define RIO_STD_RTE_CONF_DESTID_SEL_CSR	0x70
+#define RIO_STD_RTE_CONF_PORT_SEL_CSR	0x74
+#define RIO_STD_RTE_DEFAULT_PORT	0x78
+
+					/* 0x7c-0xf8 *//* Reserved */
 					/* 0x100-0xfff8 *//* [I] Extended Features Space */
 					/* 0x10000-0xfffff8 *//* [I] Implementation-defined Space */
 
@@ -183,9 +192,14 @@
 #define RIO_EFB_PAR_EP_ID	0x0001	/* [IV] LP/LVDS EP Devices */
 #define RIO_EFB_PAR_EP_REC_ID	0x0002	/* [IV] LP/LVDS EP Recovery Devices */
 #define RIO_EFB_PAR_EP_FREE_ID	0x0003	/* [IV] LP/LVDS EP Free Devices */
+#define RIO_EFB_SER_EP_ID_V13P	0x0001	/* [VI] LP/Serial EP Devices, RapidIO Spec ver 1.3 and above */
+#define RIO_EFB_SER_EP_REC_ID_V13P	0x0002	/* [VI] LP/Serial EP Recovery Devices, RapidIO Spec ver 1.3 and above */
+#define RIO_EFB_SER_EP_FREE_ID_V13P	0x0003	/* [VI] LP/Serial EP Free Devices, RapidIO Spec ver 1.3 and above */
 #define RIO_EFB_SER_EP_ID	0x0004	/* [VI] LP/Serial EP Devices */
 #define RIO_EFB_SER_EP_REC_ID	0x0005	/* [VI] LP/Serial EP Recovery Devices */
 #define RIO_EFB_SER_EP_FREE_ID	0x0006	/* [VI] LP/Serial EP Free Devices */
+#define RIO_EFB_SER_EP_FREC_ID	0x0009  /* [VI] LP/Serial EP Free Recovery Devices */
+#define RIO_EFB_ERR_MGMNT	0x0007  /* [VIII] Error Management Extensions */
 
 /*
  * Physical 8/16 LP-LVDS
@@ -201,15 +215,71 @@
 #define RIO_PORT_MNT_HEADER		0x0000
 #define RIO_PORT_REQ_CTL_CSR		0x0020
 #define RIO_PORT_RSP_CTL_CSR		0x0024	/* 0x0001/0x0002 */
+#define RIO_PORT_LINKTO_CTL_CSR		0x0020	/* Serial */
+#define RIO_PORT_RSPTO_CTL_CSR		0x0024	/* Serial */
 #define RIO_PORT_GEN_CTL_CSR		0x003c
 #define  RIO_PORT_GEN_HOST		0x80000000
 #define  RIO_PORT_GEN_MASTER		0x40000000
 #define  RIO_PORT_GEN_DISCOVERED	0x20000000
 #define RIO_PORT_N_MNT_REQ_CSR(x)	(0x0040 + x*0x20)	/* 0x0002 */
 #define RIO_PORT_N_MNT_RSP_CSR(x)	(0x0044 + x*0x20)	/* 0x0002 */
+#define  RIO_PORT_N_MNT_RSP_RVAL	0x80000000 /* Response Valid */
+#define  RIO_PORT_N_MNT_RSP_ASTAT	0x000003e0 /* ackID Status */
+#define  RIO_PORT_N_MNT_RSP_LSTAT	0x0000001f /* Link Status */
 #define RIO_PORT_N_ACK_STS_CSR(x)	(0x0048 + x*0x20)	/* 0x0002 */
-#define RIO_PORT_N_ERR_STS_CSR(x)	(0x58 + x*0x20)
-#define PORT_N_ERR_STS_PORT_OK	0x00000002
-#define RIO_PORT_N_CTL_CSR(x)		(0x5c + x*0x20)
+#define  RIO_PORT_N_ACK_CLEAR		0x80000000
+#define  RIO_PORT_N_ACK_INBOUND		0x1f000000
+#define  RIO_PORT_N_ACK_OUTSTAND	0x00001f00
+#define  RIO_PORT_N_ACK_OUTBOUND	0x0000001f
+#define RIO_PORT_N_ERR_STS_CSR(x)	(0x0058 + x*0x20)
+#define  RIO_PORT_N_ERR_STS_PW_OUT_ES	0x00010000 /* Output Error-stopped */
+#define  RIO_PORT_N_ERR_STS_PW_INP_ES	0x00000100 /* Input Error-stopped */
+#define  RIO_PORT_N_ERR_STS_PW_PEND	0x00000010 /* Port-Write Pending */
+#define  RIO_PORT_N_ERR_STS_PORT_ERR	0x00000004
+#define  RIO_PORT_N_ERR_STS_PORT_OK	0x00000002
+#define  RIO_PORT_N_ERR_STS_PORT_UNINIT	0x00000001
+#define  RIO_PORT_N_ERR_STS_CLR_MASK	0x07120204
+#define RIO_PORT_N_CTL_CSR(x)		(0x005c + x*0x20)
+#define  RIO_PORT_N_CTL_PWIDTH		0xc0000000
+#define  RIO_PORT_N_CTL_PWIDTH_1	0x00000000
+#define  RIO_PORT_N_CTL_PWIDTH_4	0x40000000
+#define  RIO_PORT_N_CTL_P_TYP_SER	0x00000001
+#define  RIO_PORT_N_CTL_LOCKOUT		0x00000002
+#define  RIO_PORT_N_CTL_EN_RX_SER	0x00200000
+#define  RIO_PORT_N_CTL_EN_TX_SER	0x00400000
+#define  RIO_PORT_N_CTL_EN_RX_PAR	0x08000000
+#define  RIO_PORT_N_CTL_EN_TX_PAR	0x40000000
+
+/*
+ * Error Management Extensions (RapidIO 1.3+, Part 8)
+ *
+ * Extended Features Block ID=0x0007
+ */
+
+/* General EM Registers (Common for all Ports) */
+
+#define RIO_EM_EFB_HEADER	0x000	/* Error Management Extensions Block Header */
+#define RIO_EM_LTL_ERR_DETECT	0x008	/* Logical/Transport Layer Error Detect CSR */
+#define RIO_EM_LTL_ERR_EN	0x00c	/* Logical/Transport Layer Error Enable CSR */
+#define RIO_EM_LTL_HIADDR_CAP	0x010	/* Logical/Transport Layer High Address Capture CSR */
+#define RIO_EM_LTL_ADDR_CAP	0x014	/* Logical/Transport Layer Address Capture CSR */
+#define RIO_EM_LTL_DEVID_CAP	0x018	/* Logical/Transport Layer Device ID Capture CSR */
+#define RIO_EM_LTL_CTRL_CAP	0x01c	/* Logical/Transport Layer Control Capture CSR */
+#define RIO_EM_PW_TGT_DEVID	0x028	/* Port-write Target deviceID CSR */
+#define RIO_EM_PKT_TTL		0x02c	/* Packet Time-to-live CSR */
+
+/* Per-Port EM Registers */
+
+#define RIO_EM_PN_ERR_DETECT(x)	(0x040 + x*0x40) /* Port N Error Detect CSR */
+#define  REM_PED_IMPL_SPEC		0x80000000
+#define  REM_PED_LINK_TO		0x00000001
+#define RIO_EM_PN_ERRRATE_EN(x) (0x044 + x*0x40) /* Port N Error Rate Enable CSR */
+#define RIO_EM_PN_ATTRIB_CAP(x)	(0x048 + x*0x40) /* Port N Attributes Capture CSR */
+#define RIO_EM_PN_PKT_CAP_0(x)	(0x04c + x*0x40) /* Port N Packet/Control Symbol Capture 0 CSR */
+#define RIO_EM_PN_PKT_CAP_1(x)	(0x050 + x*0x40) /* Port N Packet Capture 1 CSR */
+#define RIO_EM_PN_PKT_CAP_2(x)	(0x054 + x*0x40) /* Port N Packet Capture 2 CSR */
+#define RIO_EM_PN_PKT_CAP_3(x)	(0x058 + x*0x40) /* Port N Packet Capture 3 CSR */
+#define RIO_EM_PN_ERRRATE(x)	(0x068 + x*0x40) /* Port N Error Rate CSR */
+#define RIO_EM_PN_ERRRATE_TR(x) (0x06c + x*0x40) /* Port N Error Rate Threshold CSR */
 
 #endif				/* LINUX_RIO_REGS_H */
diff --git a/include/linux/sched.h b/include/linux/sched.h
index c0151ff..f118809 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -268,7 +268,6 @@
 extern void init_idle_bootup_task(struct task_struct *idle);
 
 extern int runqueue_is_locked(int cpu);
-extern void task_rq_unlock_wait(struct task_struct *p);
 
 extern cpumask_var_t nohz_cpu_mask;
 #if defined(CONFIG_SMP) && defined(CONFIG_NO_HZ)
@@ -527,8 +526,9 @@
  * the locking of signal_struct.
  */
 struct signal_struct {
-	atomic_t		count;
+	atomic_t		sigcnt;
 	atomic_t		live;
+	int			nr_threads;
 
 	wait_queue_head_t	wait_chldexit;	/* for wait4() */
 
@@ -1423,6 +1423,7 @@
 	nodemask_t mems_allowed;	/* Protected by alloc_lock */
 	int mems_allowed_change_disable;
 	int cpuset_mem_spread_rotor;
+	int cpuset_slab_spread_rotor;
 #endif
 #ifdef CONFIG_CGROUPS
 	/* Control Group info protected by css_set_lock */
@@ -2035,7 +2036,7 @@
 extern void __wake_up_parent(struct task_struct *p, struct task_struct *parent);
 extern void force_sig(int, struct task_struct *);
 extern int send_sig(int, struct task_struct *, int);
-extern void zap_other_threads(struct task_struct *p);
+extern int zap_other_threads(struct task_struct *p);
 extern struct sigqueue *sigqueue_alloc(void);
 extern void sigqueue_free(struct sigqueue *);
 extern int send_sigqueue(struct sigqueue *,  struct task_struct *, int group);
@@ -2100,7 +2101,6 @@
 extern void exit_thread(void);
 
 extern void exit_files(struct task_struct *);
-extern void __cleanup_signal(struct signal_struct *);
 extern void __cleanup_sighand(struct sighand_struct *);
 
 extern void exit_itimers(struct signal_struct *);
@@ -2147,6 +2147,11 @@
 #define while_each_thread(g, t) \
 	while ((t = next_thread(t)) != g)
 
+static inline int get_nr_threads(struct task_struct *tsk)
+{
+	return tsk->signal->nr_threads;
+}
+
 /* de_thread depends on thread_group_leader not being a pid based check */
 #define thread_group_leader(p)	(p == p->group_leader)
 
@@ -2393,10 +2398,6 @@
 	spin_lock_init(&sig->cputimer.lock);
 }
 
-static inline void thread_group_cputime_free(struct signal_struct *sig)
-{
-}
-
 /*
  * Reevaluate whether the task has signals pending delivery.
  * Wake the task if so.
diff --git a/include/linux/sdhci-pltfm.h b/include/linux/sdhci-pltfm.h
new file mode 100644
index 0000000..0239bd7
--- /dev/null
+++ b/include/linux/sdhci-pltfm.h
@@ -0,0 +1,35 @@
+/*
+ * Platform data declarations for the sdhci-pltfm driver.
+ *
+ * Copyright (c) 2010 MontaVista Software, LLC.
+ *
+ * Author: Anton Vorontsov <avorontsov@ru.mvista.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or (at
+ * your option) any later version.
+ */
+
+#ifndef _SDHCI_PLTFM_H
+#define _SDHCI_PLTFM_H
+
+struct sdhci_ops;
+struct sdhci_host;
+
+/**
+ * struct sdhci_pltfm_data - SDHCI platform-specific information & hooks
+ * @ops: optional pointer to the platform-provided SDHCI ops
+ * @quirks: optional SDHCI quirks
+ * @init: optional hook that is called during device probe, before the
+ *        driver tries to access any SDHCI registers
+ * @exit: optional hook that is called during device removal
+ */
+struct sdhci_pltfm_data {
+	struct sdhci_ops *ops;
+	unsigned int quirks;
+	int (*init)(struct sdhci_host *host);
+	void (*exit)(struct sdhci_host *host);
+};
+
+#endif /* _SDHCI_PLTFM_H */
diff --git a/include/linux/sem.h b/include/linux/sem.h
index 8a4adbef..f2961af 100644
--- a/include/linux/sem.h
+++ b/include/linux/sem.h
@@ -79,6 +79,7 @@
 #ifdef __KERNEL__
 #include <asm/atomic.h>
 #include <linux/rcupdate.h>
+#include <linux/cache.h>
 
 struct task_struct;
 
@@ -91,7 +92,8 @@
 
 /* One sem_array data structure for each set of semaphores in the system. */
 struct sem_array {
-	struct kern_ipc_perm	sem_perm;	/* permissions .. see ipc.h */
+	struct kern_ipc_perm	____cacheline_aligned_in_smp
+				sem_perm;	/* permissions .. see ipc.h */
 	time_t			sem_otime;	/* last semop time */
 	time_t			sem_ctime;	/* last change time */
 	struct sem		*sem_base;	/* ptr to first semaphore in array */
diff --git a/include/linux/swap.h b/include/linux/swap.h
index b6b6143..ff4acea 100644
--- a/include/linux/swap.h
+++ b/include/linux/swap.h
@@ -282,6 +282,11 @@
 extern int shmem_unuse(swp_entry_t entry, struct page *page);
 #endif /* CONFIG_MMU */
 
+#ifdef CONFIG_CGROUP_MEM_RES_CTLR
+extern void mem_cgroup_get_shmem_target(struct inode *inode, pgoff_t pgoff,
+					struct page **pagep, swp_entry_t *ent);
+#endif
+
 extern void swap_unplug_io_fn(struct backing_dev_info *, struct page *);
 
 #ifdef CONFIG_SWAP
diff --git a/include/linux/swiotlb.h b/include/linux/swiotlb.h
index febedcf..81a4e21 100644
--- a/include/linux/swiotlb.h
+++ b/include/linux/swiotlb.h
@@ -73,16 +73,6 @@
 swiotlb_sync_sg_for_device(struct device *hwdev, struct scatterlist *sg,
 			   int nelems, enum dma_data_direction dir);
 
-extern void
-swiotlb_sync_single_range_for_cpu(struct device *hwdev, dma_addr_t dev_addr,
-				  unsigned long offset, size_t size,
-				  enum dma_data_direction dir);
-
-extern void
-swiotlb_sync_single_range_for_device(struct device *hwdev, dma_addr_t dev_addr,
-				     unsigned long offset, size_t size,
-				     enum dma_data_direction dir);
-
 extern int
 swiotlb_dma_mapping_error(struct device *hwdev, dma_addr_t dma_addr);
 
diff --git a/include/linux/threads.h b/include/linux/threads.h
index 052b12b..383ab95 100644
--- a/include/linux/threads.h
+++ b/include/linux/threads.h
@@ -33,4 +33,13 @@
 #define PID_MAX_LIMIT (CONFIG_BASE_SMALL ? PAGE_SIZE * 8 : \
 	(sizeof(long) > 4 ? 4 * 1024 * 1024 : PID_MAX_DEFAULT))
 
+/*
+ * Define a minimum number of pids per cpu.  Heuristically based
+ * on original pid max of 32k for 32 cpus.  Also, increase the
+ * minimum settable value for pid_max on the running system based
+ * on similar defaults.  See kernel/pid.c:pidmap_init() for details.
+ */
+#define PIDS_PER_CPU_DEFAULT	1024
+#define PIDS_PER_CPU_MIN	8
+
 #endif
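
Worked example (based on the pidmap_init() logic referenced above): on a
64-cpu machine the boot-time pid_max is raised to at least
64 * PIDS_PER_CPU_DEFAULT = 65536, and the smallest value that can later be
written to /proc/sys/kernel/pid_max becomes 64 * PIDS_PER_CPU_MIN = 512.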
diff --git a/include/linux/topology.h b/include/linux/topology.h
index 5b81156..c44df50 100644
--- a/include/linux/topology.h
+++ b/include/linux/topology.h
@@ -31,6 +31,7 @@
 #include <linux/bitops.h>
 #include <linux/mmzone.h>
 #include <linux/smp.h>
+#include <linux/percpu.h>
 #include <asm/topology.h>
 
 #ifndef node_has_online_mem
@@ -203,8 +204,114 @@
 #ifndef SD_NODE_INIT
 #error Please define an appropriate SD_NODE_INIT in include/asm/topology.h!!!
 #endif
+
 #endif /* CONFIG_NUMA */
 
+#ifdef CONFIG_USE_PERCPU_NUMA_NODE_ID
+DECLARE_PER_CPU(int, numa_node);
+
+#ifndef numa_node_id
+/* Returns the number of the current Node. */
+static inline int numa_node_id(void)
+{
+	return __this_cpu_read(numa_node);
+}
+#endif
+
+#ifndef cpu_to_node
+static inline int cpu_to_node(int cpu)
+{
+	return per_cpu(numa_node, cpu);
+}
+#endif
+
+#ifndef set_numa_node
+static inline void set_numa_node(int node)
+{
+	percpu_write(numa_node, node);
+}
+#endif
+
+#ifndef set_cpu_numa_node
+static inline void set_cpu_numa_node(int cpu, int node)
+{
+	per_cpu(numa_node, cpu) = node;
+}
+#endif
+
+#else	/* !CONFIG_USE_PERCPU_NUMA_NODE_ID */
+
+/* Returns the number of the current Node. */
+#ifndef numa_node_id
+static inline int numa_node_id(void)
+{
+	return cpu_to_node(raw_smp_processor_id());
+}
+#endif
+
+#endif	/* [!]CONFIG_USE_PERCPU_NUMA_NODE_ID */
+
+#ifdef CONFIG_HAVE_MEMORYLESS_NODES
+
+/*
+ * N.B., Do NOT reference the '_numa_mem_' per cpu variable directly.
+ * It will not be defined when CONFIG_HAVE_MEMORYLESS_NODES is not defined.
+ * Use the accessor functions set_numa_mem(), numa_mem_id() and cpu_to_mem().
+ */
+DECLARE_PER_CPU(int, _numa_mem_);
+
+#ifndef set_numa_mem
+static inline void set_numa_mem(int node)
+{
+	percpu_write(_numa_mem_, node);
+}
+#endif
+
+#ifndef numa_mem_id
+/* Returns the number of the nearest Node with memory */
+static inline int numa_mem_id(void)
+{
+	return __this_cpu_read(_numa_mem_);
+}
+#endif
+
+#ifndef cpu_to_mem
+static inline int cpu_to_mem(int cpu)
+{
+	return per_cpu(_numa_mem_, cpu);
+}
+#endif
+
+#ifndef set_cpu_numa_mem
+static inline void set_cpu_numa_mem(int cpu, int node)
+{
+	per_cpu(_numa_mem_, cpu) = node;
+}
+#endif
+
+#else	/* !CONFIG_HAVE_MEMORYLESS_NODES */
+
+static inline void set_numa_mem(int node) {}
+
+static inline void set_cpu_numa_mem(int cpu, int node) {}
+
+#ifndef numa_mem_id
+/* Returns the number of the nearest Node with memory */
+static inline int numa_mem_id(void)
+{
+	return numa_node_id();
+}
+#endif
+
+#ifndef cpu_to_mem
+static inline int cpu_to_mem(int cpu)
+{
+	return cpu_to_node(cpu);
+}
+#endif
+
+#endif	/* [!]CONFIG_HAVE_MEMORYLESS_NODES */
+
 #ifndef topology_physical_package_id
 #define topology_physical_package_id(cpu)	((void)(cpu), -1)
 #endif
@@ -218,9 +325,4 @@
 #define topology_core_cpumask(cpu)		cpumask_of(cpu)
 #endif
 
-/* Returns the number of the current Node. */
-#ifndef numa_node_id
-#define numa_node_id()		(cpu_to_node(raw_smp_processor_id()))
-#endif
-
 #endif /* _LINUX_TOPOLOGY_H */
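
For illustration, an allocation on a memoryless node can fall back to the
nearest node that does have memory (sketch):

	struct page *page;

	page = alloc_pages_node(numa_mem_id(), GFP_KERNEL, 0);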
diff --git a/include/linux/uinput.h b/include/linux/uinput.h
index 15ddd44..60c81da 100644
--- a/include/linux/uinput.h
+++ b/include/linux/uinput.h
@@ -166,11 +166,11 @@
 struct uinput_user_dev {
 	char name[UINPUT_MAX_NAME_SIZE];
 	struct input_id id;
-        int ff_effects_max;
-        int absmax[ABS_MAX + 1];
-        int absmin[ABS_MAX + 1];
-        int absfuzz[ABS_MAX + 1];
-        int absflat[ABS_MAX + 1];
+	int ff_effects_max;
+	int absmax[ABS_CNT];
+	int absmin[ABS_CNT];
+	int absfuzz[ABS_CNT];
+	int absflat[ABS_CNT];
 };
 #endif	/* __UINPUT_H_ */
 
diff --git a/ipc/sem.c b/ipc/sem.c
index dbef95b..506c849 100644
--- a/ipc/sem.c
+++ b/ipc/sem.c
@@ -3,56 +3,6 @@
  * Copyright (C) 1992 Krishna Balasubramanian
  * Copyright (C) 1995 Eric Schenk, Bruno Haible
  *
- * IMPLEMENTATION NOTES ON CODE REWRITE (Eric Schenk, January 1995):
- * This code underwent a massive rewrite in order to solve some problems
- * with the original code. In particular the original code failed to
- * wake up processes that were waiting for semval to go to 0 if the
- * value went to 0 and was then incremented rapidly enough. In solving
- * this problem I have also modified the implementation so that it
- * processes pending operations in a FIFO manner, thus give a guarantee
- * that processes waiting for a lock on the semaphore won't starve
- * unless another locking process fails to unlock.
- * In addition the following two changes in behavior have been introduced:
- * - The original implementation of semop returned the value
- *   last semaphore element examined on success. This does not
- *   match the manual page specifications, and effectively
- *   allows the user to read the semaphore even if they do not
- *   have read permissions. The implementation now returns 0
- *   on success as stated in the manual page.
- * - There is some confusion over whether the set of undo adjustments
- *   to be performed at exit should be done in an atomic manner.
- *   That is, if we are attempting to decrement the semval should we queue
- *   up and wait until we can do so legally?
- *   The original implementation attempted to do this.
- *   The current implementation does not do so. This is because I don't
- *   think it is the right thing (TM) to do, and because I couldn't
- *   see a clean way to get the old behavior with the new design.
- *   The POSIX standard and SVID should be consulted to determine
- *   what behavior is mandated.
- *
- * Further notes on refinement (Christoph Rohland, December 1998):
- * - The POSIX standard says, that the undo adjustments simply should
- *   redo. So the current implementation is o.K.
- * - The previous code had two flaws:
- *   1) It actively gave the semaphore to the next waiting process
- *      sleeping on the semaphore. Since this process did not have the
- *      cpu this led to many unnecessary context switches and bad
- *      performance. Now we only check which process should be able to
- *      get the semaphore and if this process wants to reduce some
- *      semaphore value we simply wake it up without doing the
- *      operation. So it has to try to get it later. Thus e.g. the
- *      running process may reacquire the semaphore during the current
- *      time slice. If it only waits for zero or increases the semaphore,
- *      we do the operation in advance and wake it up.
- *   2) It did not wake up all zero waiting processes. We try to do
- *      better but only get the semops right which only wait for zero or
- *      increase. If there are decrement operations in the operations
- *      array we do the same as before.
- *
- * With the incarnation of O(1) scheduler, it becomes unnecessary to perform
- * check/retry algorithm for waking up blocked processes as the new scheduler
- * is better at handling thread switch than the old one.
- *
  * /proc/sysvipc/sem support (c) 1999 Dragos Acostachioaie <dragos@iname.com>
  *
  * SMP-threaded, sysctl's added
@@ -61,6 +11,8 @@
  * (c) 2001 Red Hat Inc
  * Lockless wakeup
  * (c) 2003 Manfred Spraul <manfred@colorfullife.com>
+ * Further wakeup optimizations, documentation
+ * (c) 2010 Manfred Spraul <manfred@colorfullife.com>
  *
  * support for audit of ipc object properties and permission changes
  * Dustin Kirkland <dustin.kirkland@us.ibm.com>
@@ -68,6 +20,57 @@
  * namespaces support
  * OpenVZ, SWsoft Inc.
  * Pavel Emelianov <xemul@openvz.org>
+ *
+ * Implementation notes: (May 2010)
+ * This file implements System V semaphores.
+ *
+ * User space visible behavior:
+ * - FIFO ordering for semop() operations (just FIFO, not starvation
+ *   protection)
+ * - multiple semaphore operations that alter the same semaphore in
+ *   one semop() are handled.
+ * - sem_ctime (time of last semctl()) is updated in the IPC_SET, SETVAL and
+ *   SETALL calls.
+ * - two Linux specific semctl() commands: SEM_STAT, SEM_INFO.
+ * - undo adjustments at process exit are limited to 0..SEMVMX.
+ * - namespaces are supported.
+ * - SEMMSL, SEMMNS, SEMOPM and SEMMNI can be configured at runtime by writing
+ *   to /proc/sys/kernel/sem.
+ * - statistics about the usage are reported in /proc/sysvipc/sem.
+ *
+ * Internals:
+ * - scalability:
+ *   - all global variables are read-mostly.
+ *   - semop() calls and semctl(RMID) are synchronized by RCU.
+ *   - most operations do write operations (actually: spin_lock calls) to
+ *     the per-semaphore array structure.
+ *   Thus: Perfect SMP scaling between independent semaphore arrays.
+ *         If multiple semaphores in one array are used, then cache line
+ *     thrashing on the semaphore array spinlock will limit the scaling.
+ * - semncnt and semzcnt are calculated on demand in count_semncnt() and
+ *   count_semzcnt()
+ * - the task that performs a successful semop() scans the list of all
+ *   sleeping tasks and completes any pending operations that can be fulfilled.
+ *   Semaphores are actively given to waiting tasks (necessary for FIFO).
+ *   (see update_queue())
+ * - To improve the scalability, the actual wake-up calls are performed after
+ *   dropping all locks. (see wake_up_sem_queue_prepare(),
+ *   wake_up_sem_queue_do())
+ * - All work is done by the waker, the woken up task does not have to do
+ *   anything - not even acquiring a lock or dropping a refcount.
+ * - A woken up task may not even touch the semaphore array anymore; it may
+ *   have been destroyed already by a semctl(RMID).
+ * - The synchronization between wake-ups due to a timeout/signal and a
+ *   wake-up due to a completed semaphore operation is achieved by using an
+ *   intermediate state (IN_WAKEUP).
+ * - UNDO values are stored in an array (one per process and per
+ *   semaphore array, lazily allocated). For backwards compatibility, multiple
+ *   modes for the UNDO variables are supported (per process, per thread)
+ *   (see copy_semundo, CLONE_SYSVSEM)
+ * - There are two lists of pending operations: a per-array list
+ *   and a per-semaphore list (stored in the array). This allows FIFO
+ *   ordering to be achieved without always scanning all pending operations.
+ *   The worst-case behavior is nevertheless O(N^2) for N wakeups.
  */
 
 #include <linux/slab.h>
@@ -381,7 +384,6 @@
 		sop--;
 	}
 	
-	sma->sem_otime = get_seconds();
 	return 0;
 
 out_of_range:
@@ -404,25 +406,51 @@
 	return result;
 }
 
-/*
- * Wake up a process waiting on the sem queue with a given error.
- * The queue is invalid (may not be accessed) after the function returns.
+/**
+ * wake_up_sem_queue_prepare(pt, q, error): Prepare wake-up
+ * @pt: list head that collects the tasks to be woken up
+ * @q: queue entry that must be signaled
+ * @error: Error value for the signal
+ *
+ * Prepare the wake-up of the queue entry q.
  */
-static void wake_up_sem_queue(struct sem_queue *q, int error)
+static void wake_up_sem_queue_prepare(struct list_head *pt,
+				struct sem_queue *q, int error)
 {
-	/*
-	 * Hold preempt off so that we don't get preempted and have the
-	 * wakee busy-wait until we're scheduled back on. We're holding
-	 * locks here so it may not strictly be needed, however if the
-	 * locks become preemptible then this prevents such a problem.
-	 */
-	preempt_disable();
+	if (list_empty(pt)) {
+		/*
+		 * Hold preempt off so that we don't get preempted and have the
+		 * wakee busy-wait until we're scheduled back on.
+		 */
+		preempt_disable();
+	}
 	q->status = IN_WAKEUP;
-	wake_up_process(q->sleeper);
-	/* hands-off: q can disappear immediately after writing q->status. */
-	smp_wmb();
-	q->status = error;
-	preempt_enable();
+	q->pid = error;
+
+	list_add_tail(&q->simple_list, pt);
+}
+
+/**
+ * wake_up_sem_queue_do(pt) - do the actual wake-up
+ * @pt: list of tasks to be woken up
+ *
+ * Do the actual wake-up.
+ * The function is called without any locks held, thus the semaphore array
+ * could be destroyed already and the tasks can disappear as soon as the
+ * status is set to the actual return code.
+ */
+static void wake_up_sem_queue_do(struct list_head *pt)
+{
+	struct sem_queue *q, *t;
+	int did_something;
+
+	did_something = !list_empty(pt);
+	list_for_each_entry_safe(q, t, pt, simple_list) {
+		wake_up_process(q->sleeper);
+		/* q can disappear immediately after writing q->status. */
+		smp_wmb();
+		q->status = q->pid;
+	}
+	if (did_something)
+		preempt_enable();
 }
 
 static void unlink_queue(struct sem_array *sma, struct sem_queue *q)
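
For illustration, the intended calling pattern (freeary() and semtimedop()
below follow it) is, in sketch form:

	struct list_head tasks;

	INIT_LIST_HEAD(&tasks);
	/* under sma->sem_perm.lock: */
	wake_up_sem_queue_prepare(&tasks, q, error);
	sem_unlock(sma);
	wake_up_sem_queue_do(&tasks);	/* no locks held here */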
@@ -434,22 +462,90 @@
 		sma->complex_count--;
 }
 
+/**
+ * check_restart(sma, q) - check whether update_queue() must restart its scan
+ * @sma: semaphore array
+ * @q: the operation that just completed
+ *
+ * update_queue is O(N^2) when it restarts scanning the whole queue of
+ * waiting operations. Therefore this function checks if the restart is
+ * really necessary. It is called after a previously waiting operation
+ * was completed.
+ */
+static int check_restart(struct sem_array *sma, struct sem_queue *q)
+{
+	struct sem *curr;
+	struct sem_queue *h;
+
+	/* if the operation didn't modify the array, then no restart */
+	if (q->alter == 0)
+		return 0;
+
+	/* pending complex operations are too difficult to analyse */
+	if (sma->complex_count)
+		return 1;
+
+	/* we were a sleeping complex operation. Too difficult */
+	if (q->nsops > 1)
+		return 1;
+
+	curr = sma->sem_base + q->sops[0].sem_num;
+
+	/* No-one waits on this queue */
+	if (list_empty(&curr->sem_pending))
+		return 0;
+
+	/* the new semaphore value */
+	if (curr->semval) {
+		/* It is impossible that someone waits for the new value:
+		 * - q is a previously sleeping simple operation that
+		 *   altered the array. It must be a decrement, because
+		 *   simple increments never sleep.
+		 * - The value is not 0, thus wait-for-zero won't proceed.
+		 * - If there are older (higher priority) decrements
+		 *   in the queue, then they have observed the original
+		 *   semval value and couldn't proceed. The operation
+		 *   decremented the value - thus they won't proceed either.
+		 */
+		BUG_ON(q->sops[0].sem_op >= 0);
+		return 0;
+	}
+	/*
+	 * semval is 0. Check if there are wait-for-zero semops.
+	 * They must be the first entries in the per-semaphore simple queue
+	 */
+	h = list_first_entry(&curr->sem_pending, struct sem_queue, simple_list);
+	BUG_ON(h->nsops != 1);
+	BUG_ON(h->sops[0].sem_num != q->sops[0].sem_num);
+
+	/* Yes, there is a wait-for-zero semop. Restart */
+	if (h->sops[0].sem_op == 0)
+		return 1;
+
+	/* Again - no-one is waiting for the new value. */
+	return 0;
+}
+
 
 /**
  * update_queue(sma, semnum): Look for tasks that can be completed.
  * @sma: semaphore array.
  * @semnum: semaphore that was modified.
+ * @pt: list head for the tasks that must be woken up.
  *
  * update_queue must be called after a semaphore in a semaphore array
 * was modified. If multiple semaphores were modified, then @semnum
  * must be set to -1.
+ * The tasks that must be woken up are added to @pt. The return code
+ * is stored in q->pid.
+ * The function returns 1 if at least one semop was completed successfully.
  */
-static void update_queue(struct sem_array *sma, int semnum)
+static int update_queue(struct sem_array *sma, int semnum, struct list_head *pt)
 {
 	struct sem_queue *q;
 	struct list_head *walk;
 	struct list_head *pending_list;
 	int offset;
+	int semop_completed = 0;
 
 	/* if there are complex operations around, then knowing the semaphore
 	 * that was modified doesn't help us. Assume that multiple semaphores
@@ -469,7 +565,7 @@
 again:
 	walk = pending_list->next;
 	while (walk != pending_list) {
-		int error, alter;
+		int error, restart;
 
 		q = (struct sem_queue *)((char *)walk - offset);
 		walk = walk->next;
@@ -494,22 +590,58 @@
 
 		unlink_queue(sma, q);
 
-		/*
-		 * The next operation that must be checked depends on the type
-		 * of the completed operation:
-		 * - if the operation modified the array, then restart from the
-		 *   head of the queue and check for threads that might be
-		 *   waiting for the new semaphore values.
-		 * - if the operation didn't modify the array, then just
-		 *   continue.
-		 */
-		alter = q->alter;
-		wake_up_sem_queue(q, error);
-		if (alter && !error)
+		if (error) {
+			restart = 0;
+		} else {
+			semop_completed = 1;
+			restart = check_restart(sma, q);
+		}
+
+		wake_up_sem_queue_prepare(pt, q, error);
+		if (restart)
 			goto again;
 	}
+	return semop_completed;
 }
 
+/**
+ * do_smart_update(sma, sops, nsops, otime, pt) - optimized update_queue
+ * @sma: semaphore array
+ * @sops: operations that were performed
+ * @nsops: number of operations
+ * @otime: force setting otime
+ * @pt: list head of the tasks that must be woken up.
+ *
+ * do_smart_update() does the required calls to update_queue(), based on the
+ * actual changes that were performed on the semaphore array.
+ * Note that the function does not do the actual wake-up: the caller is
+ * responsible for calling wake_up_sem_queue_do(@pt).
+ * It is safe to perform this call after dropping all locks.
+ */
+static void do_smart_update(struct sem_array *sma, struct sembuf *sops, int nsops,
+			int otime, struct list_head *pt)
+{
+	int i;
+
+	if (sma->complex_count || sops == NULL) {
+		if (update_queue(sma, -1, pt))
+			otime = 1;
+		goto done;
+	}
+
+	for (i = 0; i < nsops; i++) {
+		if (sops[i].sem_op > 0 ||
+			(sops[i].sem_op < 0 &&
+				sma->sem_base[sops[i].sem_num].semval == 0))
+			if (update_queue(sma, sops[i].sem_num, pt))
+				otime = 1;
+	}
+done:
+	if (otime)
+		sma->sem_otime = get_seconds();
+}
+
+
 /* The following counts are associated to each semaphore:
  *   semncnt        number of tasks waiting on semval being nonzero
  *   semzcnt        number of tasks waiting on semval being zero
@@ -572,6 +704,7 @@
 	struct sem_undo *un, *tu;
 	struct sem_queue *q, *tq;
 	struct sem_array *sma = container_of(ipcp, struct sem_array, sem_perm);
+	struct list_head tasks;
 
 	/* Free the existing undo structures for this semaphore set.  */
 	assert_spin_locked(&sma->sem_perm.lock);
@@ -585,15 +718,17 @@
 	}
 
 	/* Wake up all pending processes and let them fail with EIDRM. */
+	INIT_LIST_HEAD(&tasks);
 	list_for_each_entry_safe(q, tq, &sma->sem_pending, list) {
 		unlink_queue(sma, q);
-		wake_up_sem_queue(q, -EIDRM);
+		wake_up_sem_queue_prepare(&tasks, q, -EIDRM);
 	}
 
 	/* Remove the semaphore set from the IDR */
 	sem_rmid(ns, sma);
 	sem_unlock(sma);
 
+	wake_up_sem_queue_do(&tasks);
 	ns->used_sems -= sma->sem_nsems;
 	security_sem_free(sma);
 	ipc_rcu_putref(sma);
@@ -715,11 +850,13 @@
 	ushort fast_sem_io[SEMMSL_FAST];
 	ushort* sem_io = fast_sem_io;
 	int nsems;
+	struct list_head tasks;
 
 	sma = sem_lock_check(ns, semid);
 	if (IS_ERR(sma))
 		return PTR_ERR(sma);
 
+	INIT_LIST_HEAD(&tasks);
 	nsems = sma->sem_nsems;
 
 	err = -EACCES;
@@ -807,7 +944,7 @@
 		}
 		sma->sem_ctime = get_seconds();
 		/* maybe some queued-up processes were waiting for this */
-		update_queue(sma, -1);
+		do_smart_update(sma, NULL, 0, 0, &tasks);
 		err = 0;
 		goto out_unlock;
 	}
@@ -849,13 +986,15 @@
 		curr->sempid = task_tgid_vnr(current);
 		sma->sem_ctime = get_seconds();
 		/* maybe some queued-up processes were waiting for this */
-		update_queue(sma, semnum);
+		do_smart_update(sma, NULL, 0, 0, &tasks);
 		err = 0;
 		goto out_unlock;
 	}
 	}
 out_unlock:
 	sem_unlock(sma);
+	wake_up_sem_queue_do(&tasks);
+
 out_free:
 	if(sem_io != fast_sem_io)
 		ipc_free(sem_io, sizeof(ushort)*nsems);
@@ -1069,7 +1208,7 @@
 	/* step 1: figure out the size of the semaphore array */
 	sma = sem_lock_check(ns, semid);
 	if (IS_ERR(sma))
-		return ERR_PTR(PTR_ERR(sma));
+		return ERR_CAST(sma);
 
 	nsems = sma->sem_nsems;
 	sem_getref_and_unlock(sma);
@@ -1129,6 +1268,7 @@
 	struct sem_queue queue;
 	unsigned long jiffies_left = 0;
 	struct ipc_namespace *ns;
+	struct list_head tasks;
 
 	ns = current->nsproxy->ipc_ns;
 
@@ -1177,6 +1317,8 @@
 	} else
 		un = NULL;
 
+	INIT_LIST_HEAD(&tasks);
+
 	sma = sem_lock_check(ns, semid);
 	if (IS_ERR(sma)) {
 		if (un)
@@ -1225,7 +1367,7 @@
 	error = try_atomic_semop (sma, sops, nsops, un, task_tgid_vnr(current));
 	if (error <= 0) {
 		if (alter && error == 0)
-			update_queue(sma, (nsops == 1) ? sops[0].sem_num : -1);
+			do_smart_update(sma, sops, nsops, 1, &tasks);
 
 		goto out_unlock_free;
 	}
@@ -1302,6 +1444,8 @@
 
 out_unlock_free:
 	sem_unlock(sma);
+
+	wake_up_sem_queue_do(&tasks);
 out_free:
 	if(sops != fast_sops)
 		kfree(sops);
@@ -1362,6 +1506,7 @@
 	for (;;) {
 		struct sem_array *sma;
 		struct sem_undo *un;
+		struct list_head tasks;
 		int semid;
 		int i;
 
@@ -1425,10 +1570,11 @@
 				semaphore->sempid = task_tgid_vnr(current);
 			}
 		}
-		sma->sem_otime = get_seconds();
 		/* maybe some queued-up processes were waiting for this */
-		update_queue(sma, -1);
+		INIT_LIST_HEAD(&tasks);
+		do_smart_update(sma, NULL, 0, 1, &tasks);
 		sem_unlock(sma);
+		wake_up_sem_queue_do(&tasks);
 
 		call_rcu(&un->rcu, free_un);
 	}
diff --git a/kernel/cgroup.c b/kernel/cgroup.c
index 2917750..422cb19 100644
--- a/kernel/cgroup.c
+++ b/kernel/cgroup.c
@@ -2994,7 +2994,6 @@
 			remove);
 	struct cgroup *cgrp = event->cgrp;
 
-	/* TODO: check return code */
 	event->cft->unregister_event(cgrp, event->cft, event->eventfd);
 
 	eventfd_ctx_put(event->eventfd);
diff --git a/kernel/cpu.c b/kernel/cpu.c
index 124ad9d..63e8de1 100644
--- a/kernel/cpu.c
+++ b/kernel/cpu.c
@@ -20,6 +20,20 @@
 /* Serializes the updates to cpu_online_mask, cpu_present_mask */
 static DEFINE_MUTEX(cpu_add_remove_lock);
 
+/*
+ * The following two APIs must be used when attempting
+ * to serialize the updates to cpu_online_mask, cpu_present_mask.
+ */
+void cpu_maps_update_begin(void)
+{
+	mutex_lock(&cpu_add_remove_lock);
+}
+
+void cpu_maps_update_done(void)
+{
+	mutex_unlock(&cpu_add_remove_lock);
+}
+
 static __cpuinitdata RAW_NOTIFIER_HEAD(cpu_chain);
 
 /* If set, cpu_up and cpu_down will return -EBUSY and do nothing.
@@ -27,6 +41,8 @@
  */
 static int cpu_hotplug_disabled;
 
+#ifdef CONFIG_HOTPLUG_CPU
+
 static struct {
 	struct task_struct *active_writer;
 	struct mutex lock; /* Synchronizes accesses to refcount, */
@@ -41,8 +57,6 @@
 	.refcount = 0,
 };
 
-#ifdef CONFIG_HOTPLUG_CPU
-
 void get_online_cpus(void)
 {
 	might_sleep();
@@ -67,22 +81,6 @@
 }
 EXPORT_SYMBOL_GPL(put_online_cpus);
 
-#endif	/* CONFIG_HOTPLUG_CPU */
-
-/*
- * The following two API's must be used when attempting
- * to serialize the updates to cpu_online_mask, cpu_present_mask.
- */
-void cpu_maps_update_begin(void)
-{
-	mutex_lock(&cpu_add_remove_lock);
-}
-
-void cpu_maps_update_done(void)
-{
-	mutex_unlock(&cpu_add_remove_lock);
-}
-
 /*
  * This ensures that the hotplug operation can begin only when the
  * refcount goes to zero.
@@ -124,6 +122,12 @@
 	cpu_hotplug.active_writer = NULL;
 	mutex_unlock(&cpu_hotplug.lock);
 }
+
+#else /* #if CONFIG_HOTPLUG_CPU */
+static void cpu_hotplug_begin(void) {}
+static void cpu_hotplug_done(void) {}
+#endif	/* #else #if CONFIG_HOTPLUG_CPU */
+
 /* Need to know about CPUs going up/down? */
 int __ref register_cpu_notifier(struct notifier_block *nb)
 {
@@ -134,6 +138,30 @@
 	return ret;
 }
 
+static int __cpu_notify(unsigned long val, void *v, int nr_to_call,
+			int *nr_calls)
+{
+	int ret;
+
+	ret = __raw_notifier_call_chain(&cpu_chain, val, v, nr_to_call,
+					nr_calls);
+
+	return notifier_to_errno(ret);
+}
+
+static int cpu_notify(unsigned long val, void *v)
+{
+	return __cpu_notify(val, v, -1, NULL);
+}
+
+static void cpu_notify_nofail(unsigned long val, void *v)
+{
+	int err;
+
+	err = cpu_notify(val, v);
+	BUG_ON(err);
+}
+
 #ifdef CONFIG_HOTPLUG_CPU
 
 EXPORT_SYMBOL(register_cpu_notifier);
@@ -181,8 +209,7 @@
 	if (err < 0)
 		return err;
 
-	raw_notifier_call_chain(&cpu_chain, CPU_DYING | param->mod,
-				param->hcpu);
+	cpu_notify(CPU_DYING | param->mod, param->hcpu);
 
 	if (task_cpu(param->caller) == cpu)
 		move_task_off_dead_cpu(cpu, param->caller);
@@ -212,17 +239,14 @@
 
 	cpu_hotplug_begin();
 	set_cpu_active(cpu, false);
-	err = __raw_notifier_call_chain(&cpu_chain, CPU_DOWN_PREPARE | mod,
-					hcpu, -1, &nr_calls);
-	if (err == NOTIFY_BAD) {
+	err = __cpu_notify(CPU_DOWN_PREPARE | mod, hcpu, -1, &nr_calls);
+	if (err) {
 		set_cpu_active(cpu, true);
 
 		nr_calls--;
-		__raw_notifier_call_chain(&cpu_chain, CPU_DOWN_FAILED | mod,
-					  hcpu, nr_calls, NULL);
+		__cpu_notify(CPU_DOWN_FAILED | mod, hcpu, nr_calls, NULL);
 		printk("%s: attempt to take down CPU %u failed\n",
 				__func__, cpu);
-		err = -EINVAL;
 		goto out_release;
 	}
 
@@ -230,9 +254,7 @@
 	if (err) {
 		set_cpu_active(cpu, true);
 		/* CPU didn't die: tell everyone.  Can't complain. */
-		if (raw_notifier_call_chain(&cpu_chain, CPU_DOWN_FAILED | mod,
-					    hcpu) == NOTIFY_BAD)
-			BUG();
+		cpu_notify_nofail(CPU_DOWN_FAILED | mod, hcpu);
 
 		goto out_release;
 	}
@@ -246,19 +268,14 @@
 	__cpu_die(cpu);
 
 	/* CPU is completely dead: tell everyone.  Too late to complain. */
-	if (raw_notifier_call_chain(&cpu_chain, CPU_DEAD | mod,
-				    hcpu) == NOTIFY_BAD)
-		BUG();
+	cpu_notify_nofail(CPU_DEAD | mod, hcpu);
 
 	check_for_tasks(cpu);
 
 out_release:
 	cpu_hotplug_done();
-	if (!err) {
-		if (raw_notifier_call_chain(&cpu_chain, CPU_POST_DEAD | mod,
-					    hcpu) == NOTIFY_BAD)
-			BUG();
-	}
+	if (!err)
+		cpu_notify_nofail(CPU_POST_DEAD | mod, hcpu);
 	return err;
 }
 
@@ -293,13 +310,11 @@
 		return -EINVAL;
 
 	cpu_hotplug_begin();
-	ret = __raw_notifier_call_chain(&cpu_chain, CPU_UP_PREPARE | mod, hcpu,
-							-1, &nr_calls);
-	if (ret == NOTIFY_BAD) {
+	ret = __cpu_notify(CPU_UP_PREPARE | mod, hcpu, -1, &nr_calls);
+	if (ret) {
 		nr_calls--;
 		printk("%s: attempt to bring up CPU %u failed\n",
 				__func__, cpu);
-		ret = -EINVAL;
 		goto out_notify;
 	}
 
@@ -312,12 +327,11 @@
 	set_cpu_active(cpu, true);
 
 	/* Now call notifier in preparation. */
-	raw_notifier_call_chain(&cpu_chain, CPU_ONLINE | mod, hcpu);
+	cpu_notify(CPU_ONLINE | mod, hcpu);
 
 out_notify:
 	if (ret != 0)
-		__raw_notifier_call_chain(&cpu_chain,
-				CPU_UP_CANCELED | mod, hcpu, nr_calls, NULL);
+		__cpu_notify(CPU_UP_CANCELED | mod, hcpu, nr_calls, NULL);
 	cpu_hotplug_done();
 
 	return ret;
@@ -481,7 +495,7 @@
 	if (frozen_cpus != NULL && cpumask_test_cpu(cpu, frozen_cpus))
 		val = CPU_STARTING_FROZEN;
 #endif /* CONFIG_PM_SLEEP_SMP */
-	raw_notifier_call_chain(&cpu_chain, val, (void *)(long)cpu);
+	cpu_notify(val, (void *)(long)cpu);
 }
 
 #endif /* CONFIG_SMP */
diff --git a/kernel/cpuset.c b/kernel/cpuset.c
index 61d6af7..02b9611 100644
--- a/kernel/cpuset.c
+++ b/kernel/cpuset.c
@@ -2469,7 +2469,8 @@
 }
 
 /**
- * cpuset_mem_spread_node() - On which node to begin search for a page
+ * cpuset_mem_spread_node() - On which node to begin search for a file page
+ * cpuset_slab_spread_node() - On which node to begin search for a slab page
  *
  * If a task is marked PF_SPREAD_PAGE or PF_SPREAD_SLAB (as for
  * tasks in a cpuset with is_spread_page or is_spread_slab set),
@@ -2494,16 +2495,27 @@
  * See kmem_cache_alloc_node().
  */
 
-int cpuset_mem_spread_node(void)
+static int cpuset_spread_node(int *rotor)
 {
 	int node;
 
-	node = next_node(current->cpuset_mem_spread_rotor, current->mems_allowed);
+	node = next_node(*rotor, current->mems_allowed);
 	if (node == MAX_NUMNODES)
 		node = first_node(current->mems_allowed);
-	current->cpuset_mem_spread_rotor = node;
+	*rotor = node;
 	return node;
 }
+
+int cpuset_mem_spread_node(void)
+{
+	return cpuset_spread_node(&current->cpuset_mem_spread_rotor);
+}
+
+int cpuset_slab_spread_node(void)
+{
+	return cpuset_spread_node(&current->cpuset_slab_spread_rotor);
+}
+
 EXPORT_SYMBOL_GPL(cpuset_mem_spread_node);
 
 /**
diff --git a/kernel/cred.c b/kernel/cred.c
index 2c24870..a2d5504 100644
--- a/kernel/cred.c
+++ b/kernel/cred.c
@@ -347,66 +347,6 @@
 }
 
 /*
- * prepare new credentials for the usermode helper dispatcher
- */
-struct cred *prepare_usermodehelper_creds(void)
-{
-#ifdef CONFIG_KEYS
-	struct thread_group_cred *tgcred = NULL;
-#endif
-	struct cred *new;
-
-#ifdef CONFIG_KEYS
-	tgcred = kzalloc(sizeof(*new->tgcred), GFP_ATOMIC);
-	if (!tgcred)
-		return NULL;
-#endif
-
-	new = kmem_cache_alloc(cred_jar, GFP_ATOMIC);
-	if (!new)
-		goto free_tgcred;
-
-	kdebug("prepare_usermodehelper_creds() alloc %p", new);
-
-	memcpy(new, &init_cred, sizeof(struct cred));
-
-	atomic_set(&new->usage, 1);
-	set_cred_subscribers(new, 0);
-	get_group_info(new->group_info);
-	get_uid(new->user);
-
-#ifdef CONFIG_KEYS
-	new->thread_keyring = NULL;
-	new->request_key_auth = NULL;
-	new->jit_keyring = KEY_REQKEY_DEFL_DEFAULT;
-
-	atomic_set(&tgcred->usage, 1);
-	spin_lock_init(&tgcred->lock);
-	new->tgcred = tgcred;
-#endif
-
-#ifdef CONFIG_SECURITY
-	new->security = NULL;
-#endif
-	if (security_prepare_creds(new, &init_cred, GFP_ATOMIC) < 0)
-		goto error;
-	validate_creds(new);
-
-	BUG_ON(atomic_read(&new->usage) != 1);
-	return new;
-
-error:
-	put_cred(new);
-	return NULL;
-
-free_tgcred:
-#ifdef CONFIG_KEYS
-	kfree(tgcred);
-#endif
-	return NULL;
-}
-
-/*
  * Copy credentials for the new process created by fork()
  *
  * We share if we can, but under some circumstances we have to generate a new
diff --git a/kernel/exit.c b/kernel/exit.c
index 019a284..ceffc67 100644
--- a/kernel/exit.c
+++ b/kernel/exit.c
@@ -58,11 +58,11 @@
 
 static void exit_mm(struct task_struct * tsk);
 
-static void __unhash_process(struct task_struct *p)
+static void __unhash_process(struct task_struct *p, bool group_dead)
 {
 	nr_threads--;
 	detach_pid(p, PIDTYPE_PID);
-	if (thread_group_leader(p)) {
+	if (group_dead) {
 		detach_pid(p, PIDTYPE_PGID);
 		detach_pid(p, PIDTYPE_SID);
 
@@ -79,10 +79,9 @@
 static void __exit_signal(struct task_struct *tsk)
 {
 	struct signal_struct *sig = tsk->signal;
+	bool group_dead = thread_group_leader(tsk);
 	struct sighand_struct *sighand;
-
-	BUG_ON(!sig);
-	BUG_ON(!atomic_read(&sig->count));
+	struct tty_struct *uninitialized_var(tty);
 
 	sighand = rcu_dereference_check(tsk->sighand,
 					rcu_read_lock_held() ||
@@ -90,14 +89,16 @@
 	spin_lock(&sighand->siglock);
 
 	posix_cpu_timers_exit(tsk);
-	if (atomic_dec_and_test(&sig->count))
+	if (group_dead) {
 		posix_cpu_timers_exit_group(tsk);
-	else {
+		tty = sig->tty;
+		sig->tty = NULL;
+	} else {
 		/*
 		 * If there is any task waiting for the group exit
 		 * then notify it:
 		 */
-		if (sig->group_exit_task && atomic_read(&sig->count) == sig->notify_count)
+		if (sig->notify_count > 0 && !--sig->notify_count)
 			wake_up_process(sig->group_exit_task);
 
 		if (tsk == sig->curr_target)
@@ -123,32 +124,24 @@
 		sig->oublock += task_io_get_oublock(tsk);
 		task_io_accounting_add(&sig->ioac, &tsk->ioac);
 		sig->sum_sched_runtime += tsk->se.sum_exec_runtime;
-		sig = NULL; /* Marker for below. */
 	}
 
-	__unhash_process(tsk);
+	sig->nr_threads--;
+	__unhash_process(tsk, group_dead);
 
 	/*
 	 * Do this under ->siglock, we can race with another thread
 	 * doing sigqueue_free() if we have SIGQUEUE_PREALLOC signals.
 	 */
 	flush_sigqueue(&tsk->pending);
-
-	tsk->signal = NULL;
 	tsk->sighand = NULL;
 	spin_unlock(&sighand->siglock);
 
 	__cleanup_sighand(sighand);
 	clear_tsk_thread_flag(tsk,TIF_SIGPENDING);
-	if (sig) {
+	if (group_dead) {
 		flush_sigqueue(&sig->shared_pending);
-		taskstats_tgid_free(sig);
-		/*
-		 * Make sure ->signal can't go away under rq->lock,
-		 * see account_group_exec_runtime().
-		 */
-		task_rq_unlock_wait(tsk);
-		__cleanup_signal(sig);
+		tty_kref_put(tty);
 	}
 }
 
@@ -856,12 +849,9 @@
 
 	tsk->exit_state = signal == DEATH_REAP ? EXIT_DEAD : EXIT_ZOMBIE;
 
-	/* mt-exec, de_thread() is waiting for us */
-	if (thread_group_leader(tsk) &&
-	    tsk->signal->group_exit_task &&
-	    tsk->signal->notify_count < 0)
+	/* mt-exec, de_thread() is waiting for group leader */
+	if (unlikely(tsk->signal->notify_count < 0))
 		wake_up_process(tsk->signal->group_exit_task);
-
 	write_unlock_irq(&tasklist_lock);
 
 	tracehook_report_death(tsk, signal, cookie, group_dead);
diff --git a/kernel/fork.c b/kernel/fork.c
index 4d57d9e..bf9fef6 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -165,6 +165,18 @@
 }
 EXPORT_SYMBOL(free_task);
 
+static inline void free_signal_struct(struct signal_struct *sig)
+{
+	taskstats_tgid_free(sig);
+	kmem_cache_free(signal_cachep, sig);
+}
+
+static inline void put_signal_struct(struct signal_struct *sig)
+{
+	if (atomic_dec_and_test(&sig->sigcnt))
+		free_signal_struct(sig);
+}
+
 void __put_task_struct(struct task_struct *tsk)
 {
 	WARN_ON(!tsk->exit_state);
@@ -173,6 +185,7 @@
 
 	exit_creds(tsk);
 	delayacct_tsk_free(tsk);
+	put_signal_struct(tsk->signal);
 
 	if (!profile_handoff_task(tsk))
 		free_task(tsk);
@@ -864,8 +877,9 @@
 	if (!sig)
 		return -ENOMEM;
 
-	atomic_set(&sig->count, 1);
+	sig->nr_threads = 1;
 	atomic_set(&sig->live, 1);
+	atomic_set(&sig->sigcnt, 1);
 	init_waitqueue_head(&sig->wait_chldexit);
 	if (clone_flags & CLONE_NEWPID)
 		sig->flags |= SIGNAL_UNKILLABLE;
@@ -889,13 +903,6 @@
 	return 0;
 }
 
-void __cleanup_signal(struct signal_struct *sig)
-{
-	thread_group_cputime_free(sig);
-	tty_kref_put(sig->tty);
-	kmem_cache_free(signal_cachep, sig);
-}
-
 static void copy_flags(unsigned long clone_flags, struct task_struct *p)
 {
 	unsigned long new_flags = p->flags;
@@ -1079,6 +1086,10 @@
  	}
 	mpol_fix_fork_child_flag(p);
 #endif
+#ifdef CONFIG_CPUSETS
+	p->cpuset_mem_spread_rotor = node_random(p->mems_allowed);
+	p->cpuset_slab_spread_rotor = node_random(p->mems_allowed);
+#endif
 #ifdef CONFIG_TRACE_IRQFLAGS
 	p->irq_events = 0;
 #ifdef __ARCH_WANT_INTERRUPTS_ON_CTXSW
@@ -1245,8 +1256,9 @@
 	}
 
 	if (clone_flags & CLONE_THREAD) {
-		atomic_inc(&current->signal->count);
+		current->signal->nr_threads++;
 		atomic_inc(&current->signal->live);
+		atomic_inc(&current->signal->sigcnt);
 		p->group_leader = current->group_leader;
 		list_add_tail_rcu(&p->thread_group, &p->group_leader->thread_group);
 	}
@@ -1259,7 +1271,6 @@
 				p->nsproxy->pid_ns->child_reaper = p;
 
 			p->signal->leader_pid = pid;
-			tty_kref_put(p->signal->tty);
 			p->signal->tty = tty_kref_get(current->signal->tty);
 			attach_pid(p, PIDTYPE_PGID, task_pgrp(current));
 			attach_pid(p, PIDTYPE_SID, task_session(current));
@@ -1292,7 +1303,7 @@
 		mmput(p->mm);
 bad_fork_cleanup_signal:
 	if (!(clone_flags & CLONE_THREAD))
-		__cleanup_signal(p->signal);
+		free_signal_struct(p->signal);
 bad_fork_cleanup_sighand:
 	__cleanup_sighand(p->sighand);
 bad_fork_cleanup_fs:
@@ -1327,6 +1338,16 @@
 	return regs;
 }
 
+static inline void init_idle_pids(struct pid_link *links)
+{
+	enum pid_type type;
+
+	for (type = PIDTYPE_PID; type < PIDTYPE_MAX; ++type) {
+		INIT_HLIST_NODE(&links[type].node); /* not really needed */
+		links[type].pid = &init_struct_pid;
+	}
+}
+
 struct task_struct * __cpuinit fork_idle(int cpu)
 {
 	struct task_struct *task;
@@ -1334,8 +1355,10 @@
 
 	task = copy_process(CLONE_VM, 0, idle_regs(&regs), 0, NULL,
 			    &init_struct_pid, 0);
-	if (!IS_ERR(task))
+	if (!IS_ERR(task)) {
+		init_idle_pids(task->pids);
 		init_idle(task, cpu);
+	}
 
 	return task;
 }
@@ -1507,14 +1530,6 @@
 		*flags_ptr |= CLONE_SIGHAND;
 
 	/*
-	 * If unsharing signal handlers and the task was created
-	 * using CLONE_THREAD, then must unshare the thread
-	 */
-	if ((*flags_ptr & CLONE_SIGHAND) &&
-	    (atomic_read(&current->signal->count) > 1))
-		*flags_ptr |= CLONE_THREAD;
-
-	/*
 	 * If unsharing namespace, must also unshare filesystem information.
 	 */
 	if (*flags_ptr & CLONE_NEWNS)
diff --git a/kernel/kmod.c b/kernel/kmod.c
index bf0e231..6e9b196 100644
--- a/kernel/kmod.c
+++ b/kernel/kmod.c
@@ -116,27 +116,16 @@
 
 	trace_module_request(module_name, wait, _RET_IP_);
 
-	ret = call_usermodehelper(modprobe_path, argv, envp,
-			wait ? UMH_WAIT_PROC : UMH_WAIT_EXEC);
+	ret = call_usermodehelper_fns(modprobe_path, argv, envp,
+			wait ? UMH_WAIT_PROC : UMH_WAIT_EXEC,
+			NULL, NULL, NULL);
+
 	atomic_dec(&kmod_concurrent);
 	return ret;
 }
 EXPORT_SYMBOL(__request_module);
 #endif /* CONFIG_MODULES */
 
-struct subprocess_info {
-	struct work_struct work;
-	struct completion *complete;
-	struct cred *cred;
-	char *path;
-	char **argv;
-	char **envp;
-	enum umh_wait wait;
-	int retval;
-	struct file *stdin;
-	void (*cleanup)(char **argv, char **envp);
-};
-
 /*
  * This is the task which runs the usermode application
  */
@@ -145,36 +134,10 @@
 	struct subprocess_info *sub_info = data;
 	int retval;
 
-	BUG_ON(atomic_read(&sub_info->cred->usage) != 1);
-
-	/* Unblock all signals */
 	spin_lock_irq(&current->sighand->siglock);
 	flush_signal_handlers(current, 1);
-	sigemptyset(&current->blocked);
-	recalc_sigpending();
 	spin_unlock_irq(&current->sighand->siglock);
 
-	/* Install the credentials */
-	commit_creds(sub_info->cred);
-	sub_info->cred = NULL;
-
-	/* Install input pipe when needed */
-	if (sub_info->stdin) {
-		struct files_struct *f = current->files;
-		struct fdtable *fdt;
-		/* no races because files should be private here */
-		sys_close(0);
-		fd_install(0, sub_info->stdin);
-		spin_lock(&f->file_lock);
-		fdt = files_fdtable(f);
-		FD_SET(0, fdt->open_fds);
-		FD_CLR(0, fdt->close_on_exec);
-		spin_unlock(&f->file_lock);
-
-		/* and disallow core files too */
-		current->signal->rlim[RLIMIT_CORE] = (struct rlimit){0, 0};
-	}
-
 	/* We can run anywhere, unlike our parent keventd(). */
 	set_cpus_allowed_ptr(current, cpu_all_mask);
 
@@ -184,9 +147,16 @@
 	 */
 	set_user_nice(current, 0);
 
+	if (sub_info->init) {
+		retval = sub_info->init(sub_info);
+		if (retval)
+			goto fail;
+	}
+
 	retval = kernel_execve(sub_info->path, sub_info->argv, sub_info->envp);
 
 	/* Exec failed? */
+fail:
 	sub_info->retval = retval;
 	do_exit(0);
 }
@@ -194,9 +164,7 @@
 void call_usermodehelper_freeinfo(struct subprocess_info *info)
 {
 	if (info->cleanup)
-		(*info->cleanup)(info->argv, info->envp);
-	if (info->cred)
-		put_cred(info->cred);
+		(*info->cleanup)(info);
 	kfree(info);
 }
 EXPORT_SYMBOL(call_usermodehelper_freeinfo);
@@ -207,16 +175,16 @@
 	struct subprocess_info *sub_info = data;
 	pid_t pid;
 
-	/* Install a handler: if SIGCLD isn't handled sys_wait4 won't
-	 * populate the status, but will return -ECHILD. */
-	allow_signal(SIGCHLD);
+	/* If SIGCLD is ignored, sys_wait4() won't populate the status. */
+	spin_lock_irq(&current->sighand->siglock);
+	current->sighand->action[SIGCHLD-1].sa.sa_handler = SIG_DFL;
+	spin_unlock_irq(&current->sighand->siglock);
 
 	pid = kernel_thread(____call_usermodehelper, sub_info, SIGCHLD);
 	if (pid < 0) {
 		sub_info->retval = pid;
 	} else {
-		int ret;
-
+		int ret = -ECHILD;
 		/*
 		 * Normally it is bogus to call wait4() from in-kernel because
 		 * wait4() wants to write the exit code to a userspace address.
@@ -237,10 +205,7 @@
 			sub_info->retval = ret;
 	}
 
-	if (sub_info->wait == UMH_NO_WAIT)
-		call_usermodehelper_freeinfo(sub_info);
-	else
-		complete(sub_info->complete);
+	complete(sub_info->complete);
 	return 0;
 }
 
@@ -249,15 +214,13 @@
 {
 	struct subprocess_info *sub_info =
 		container_of(work, struct subprocess_info, work);
-	pid_t pid;
 	enum umh_wait wait = sub_info->wait;
-
-	BUG_ON(atomic_read(&sub_info->cred->usage) != 1);
+	pid_t pid;
 
 	/* CLONE_VFORK: wait until the usermode helper has execve'd
 	 * successfully We need the data structures to stay around
 	 * until that is done.  */
-	if (wait == UMH_WAIT_PROC || wait == UMH_NO_WAIT)
+	if (wait == UMH_WAIT_PROC)
 		pid = kernel_thread(wait_for_helper, sub_info,
 				    CLONE_FS | CLONE_FILES | SIGCHLD);
 	else
@@ -266,15 +229,16 @@
 
 	switch (wait) {
 	case UMH_NO_WAIT:
+		call_usermodehelper_freeinfo(sub_info);
 		break;
 
 	case UMH_WAIT_PROC:
 		if (pid > 0)
 			break;
-		sub_info->retval = pid;
 		/* FALLTHROUGH */
-
 	case UMH_WAIT_EXEC:
+		if (pid < 0)
+			sub_info->retval = pid;
 		complete(sub_info->complete);
 	}
 }
@@ -376,80 +340,37 @@
 	sub_info->path = path;
 	sub_info->argv = argv;
 	sub_info->envp = envp;
-	sub_info->cred = prepare_usermodehelper_creds();
-	if (!sub_info->cred) {
-		kfree(sub_info);
-		return NULL;
-	}
-
   out:
 	return sub_info;
 }
 EXPORT_SYMBOL(call_usermodehelper_setup);
 
 /**
- * call_usermodehelper_setkeys - set the session keys for usermode helper
- * @info: a subprocess_info returned by call_usermodehelper_setup
- * @session_keyring: the session keyring for the process
- */
-void call_usermodehelper_setkeys(struct subprocess_info *info,
-				 struct key *session_keyring)
-{
-#ifdef CONFIG_KEYS
-	struct thread_group_cred *tgcred = info->cred->tgcred;
-	key_put(tgcred->session_keyring);
-	tgcred->session_keyring = key_get(session_keyring);
-#else
-	BUG();
-#endif
-}
-EXPORT_SYMBOL(call_usermodehelper_setkeys);
-
-/**
- * call_usermodehelper_setcleanup - set a cleanup function
+ * call_usermodehelper_setfns - set a cleanup/init function
  * @info: a subprocess_info returned by call_usermodehelper_setup
  * @cleanup: a cleanup function
+ * @init: an init function
+ * @data: arbitrary context sensitive data
  *
- * The cleanup function is just befor ethe subprocess_info is about to
+ * The init function is used to customize the helper process prior to
+ * exec.  A non-zero return code causes the process to error out, exit,
+ * and return the failure to the calling process
+ *
+ * The cleanup function is run just before the subprocess_info is about to
  * be freed.  This can be used for freeing the argv and envp.  The
  * Function must be runnable in either a process context or the
  * context in which call_usermodehelper_exec is called.
  */
-void call_usermodehelper_setcleanup(struct subprocess_info *info,
-				    void (*cleanup)(char **argv, char **envp))
+void call_usermodehelper_setfns(struct subprocess_info *info,
+		    int (*init)(struct subprocess_info *info),
+		    void (*cleanup)(struct subprocess_info *info),
+		    void *data)
 {
 	info->cleanup = cleanup;
+	info->init = init;
+	info->data = data;
 }
-EXPORT_SYMBOL(call_usermodehelper_setcleanup);
-
-/**
- * call_usermodehelper_stdinpipe - set up a pipe to be used for stdin
- * @sub_info: a subprocess_info returned by call_usermodehelper_setup
- * @filp: set to the write-end of a pipe
- *
- * This constructs a pipe, and sets the read end to be the stdin of the
- * subprocess, and returns the write-end in *@filp.
- */
-int call_usermodehelper_stdinpipe(struct subprocess_info *sub_info,
-				  struct file **filp)
-{
-	struct file *f;
-
-	f = create_write_pipe(0);
-	if (IS_ERR(f))
-		return PTR_ERR(f);
-	*filp = f;
-
-	f = create_read_pipe(f, 0);
-	if (IS_ERR(f)) {
-		free_write_pipe(*filp);
-		return PTR_ERR(f);
-	}
-	sub_info->stdin = f;
-
-	return 0;
-}
-EXPORT_SYMBOL(call_usermodehelper_stdinpipe);
+EXPORT_SYMBOL(call_usermodehelper_setfns);
 
 /**
  * call_usermodehelper_exec - start a usermode application
@@ -469,9 +390,6 @@
 	DECLARE_COMPLETION_ONSTACK(done);
 	int retval = 0;
 
-	BUG_ON(atomic_read(&sub_info->cred->usage) != 1);
-	validate_creds(sub_info->cred);
-
 	helper_lock();
 	if (sub_info->path[0] == '\0')
 		goto out;
@@ -498,41 +416,6 @@
 }
 EXPORT_SYMBOL(call_usermodehelper_exec);
 
-/**
- * call_usermodehelper_pipe - call a usermode helper process with a pipe stdin
- * @path: path to usermode executable
- * @argv: arg vector for process
- * @envp: environment for process
- * @filp: set to the write-end of a pipe
- *
- * This is a simple wrapper which executes a usermode-helper function
- * with a pipe as stdin.  It is implemented entirely in terms of
- * lower-level call_usermodehelper_* functions.
- */
-int call_usermodehelper_pipe(char *path, char **argv, char **envp,
-			     struct file **filp)
-{
-	struct subprocess_info *sub_info;
-	int ret;
-
-	sub_info = call_usermodehelper_setup(path, argv, envp, GFP_KERNEL);
-	if (sub_info == NULL)
-		return -ENOMEM;
-
-	ret = call_usermodehelper_stdinpipe(sub_info, filp);
-	if (ret < 0) {
-		call_usermodehelper_freeinfo(sub_info);
-		return ret;
-	}
-
-	ret = call_usermodehelper_exec(sub_info, UMH_WAIT_EXEC);
-	if (ret < 0)	/* Failed to execute helper, close pipe */
-		filp_close(*filp, NULL);
-
-	return ret;
-}
-EXPORT_SYMBOL(call_usermodehelper_pipe);
-
 void __init usermodehelper_init(void)
 {
 	khelper_wq = create_singlethread_workqueue("khelper");
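
With the stdin-pipe and session-keyring helpers gone, callers attach
their own per-invocation hooks with call_usermodehelper_setfns().  A
minimal sketch using only the APIs above (the helper path, argv/envp
strings and hook bodies are illustrative):

	static int my_umh_init(struct subprocess_info *info)
	{
		/* runs in the helper task just before kernel_execve();
		 * a non-zero return aborts the exec */
		return 0;
	}

	static void my_umh_cleanup(struct subprocess_info *info)
	{
		/* runs when the subprocess_info is freed */
	}

	static int my_run_helper(void)
	{
		char *argv[] = { "/sbin/my-helper", NULL };
		char *envp[] = { "HOME=/", "PATH=/sbin:/bin", NULL };
		struct subprocess_info *info;

		info = call_usermodehelper_setup(argv[0], argv, envp,
						 GFP_KERNEL);
		if (!info)
			return -ENOMEM;
		call_usermodehelper_setfns(info, my_umh_init,
					   my_umh_cleanup, NULL);
		return call_usermodehelper_exec(info, UMH_WAIT_EXEC);
	}

The call_usermodehelper_fns() wrapper used by __request_module() above
collapses this setup/setfns/exec sequence into a single call.
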
diff --git a/kernel/padata.c b/kernel/padata.c
index b1c9857..fdd8ae6 100644
--- a/kernel/padata.c
+++ b/kernel/padata.c
@@ -659,7 +659,7 @@
 		err = __padata_add_cpu(pinst, cpu);
 		mutex_unlock(&pinst->lock);
 		if (err)
-			return NOTIFY_BAD;
+			return notifier_from_errno(err);
 		break;
 
 	case CPU_DOWN_PREPARE:
@@ -670,7 +670,7 @@
 		err = __padata_remove_cpu(pinst, cpu);
 		mutex_unlock(&pinst->lock);
 		if (err)
-			return NOTIFY_BAD;
+			return notifier_from_errno(err);
 		break;
 
 	case CPU_UP_CANCELED:
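
The NOTIFY_BAD conversions in this series (padata, profile, relay, smp,
softirq, timer, workqueue below) preserve the failing errno for the
CPU-hotplug core instead of a generic failure.  Paraphrasing the
<linux/notifier.h> helpers (the header is authoritative):

	static inline int notifier_from_errno(int err)
	{
		if (err)
			return NOTIFY_STOP_MASK | (NOTIFY_BAD - err);
		return NOTIFY_OK;
	}

	static inline int notifier_to_errno(int ret)
	{
		ret &= ~NOTIFY_STOP_MASK;
		return ret > NOTIFY_OK ? NOTIFY_OK - ret : 0;
	}

Returning notifier_from_errno(-ENOMEM) still stops the notifier chain,
but the caller can now recover -ENOMEM via notifier_to_errno().
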
diff --git a/kernel/panic.c b/kernel/panic.c
index dbe13db..3b16cd9 100644
--- a/kernel/panic.c
+++ b/kernel/panic.c
@@ -87,6 +87,7 @@
 	 */
 	preempt_disable();
 
+	console_verbose();
 	bust_spinlocks(1);
 	va_start(args, fmt);
 	vsnprintf(buf, sizeof(buf), fmt, args);
diff --git a/kernel/pid.c b/kernel/pid.c
index aebb30d..e9fd8c1 100644
--- a/kernel/pid.c
+++ b/kernel/pid.c
@@ -513,6 +513,13 @@
 
 void __init pidmap_init(void)
 {
+	/* bump default and minimum pid_max based on number of cpus */
+	pid_max = min(pid_max_max, max_t(int, pid_max,
+				PIDS_PER_CPU_DEFAULT * num_possible_cpus()));
+	pid_max_min = max_t(int, pid_max_min,
+				PIDS_PER_CPU_MIN * num_possible_cpus());
+	pr_info("pid_max: default: %u minimum: %u\n", pid_max, pid_max_min);
+
 	init_pid_ns.pidmap[0].page = kzalloc(PAGE_SIZE, GFP_KERNEL);
 	/* Reserve PID 0. We never call free_pidmap(0) */
 	set_bit(0, init_pid_ns.pidmap[0].page);
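
Assuming the per-cpu constants introduced with this change
(PIDS_PER_CPU_DEFAULT = 1024 and PIDS_PER_CPU_MIN = 8; kernel/pid.c is
authoritative), a 64-CPU machine now boots with:

	pid_max     = min(pid_max_max, max(32768, 64 * 1024)) = 65536
	pid_max_min = max(301, 64 * 8)                        = 512

so large machines no longer start from the bare 32768 default, while
the values on small machines are unchanged.
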
diff --git a/kernel/posix-cpu-timers.c b/kernel/posix-cpu-timers.c
index 00bb252..9829646 100644
--- a/kernel/posix-cpu-timers.c
+++ b/kernel/posix-cpu-timers.c
@@ -363,7 +363,7 @@
 				}
 			} else {
 				read_lock(&tasklist_lock);
-				if (thread_group_leader(p) && p->signal) {
+				if (thread_group_leader(p) && p->sighand) {
 					error =
 					    cpu_clock_sample_group(which_clock,
 							           p, &rtn);
@@ -439,7 +439,7 @@
 
 	if (likely(p != NULL)) {
 		read_lock(&tasklist_lock);
-		if (unlikely(p->signal == NULL)) {
+		if (unlikely(p->sighand == NULL)) {
 			/*
 			 * We raced with the reaping of the task.
 			 * The deletion should have cleared us off the list.
@@ -691,10 +691,10 @@
 	read_lock(&tasklist_lock);
 	/*
 	 * We need the tasklist_lock to protect against reaping that
-	 * clears p->signal.  If p has just been reaped, we can no
+	 * clears p->sighand.  If p has just been reaped, we can no
 	 * longer get any information about it at all.
 	 */
-	if (unlikely(p->signal == NULL)) {
+	if (unlikely(p->sighand == NULL)) {
 		read_unlock(&tasklist_lock);
 		put_task_struct(p);
 		timer->it.cpu.task = NULL;
@@ -863,7 +863,7 @@
 		clear_dead = p->exit_state;
 	} else {
 		read_lock(&tasklist_lock);
-		if (unlikely(p->signal == NULL)) {
+		if (unlikely(p->sighand == NULL)) {
 			/*
 			 * The process has been reaped.
 			 * We can't even collect a sample any more.
@@ -1199,7 +1199,7 @@
 		spin_lock(&p->sighand->siglock);
 	} else {
 		read_lock(&tasklist_lock);
-		if (unlikely(p->signal == NULL)) {
+		if (unlikely(p->sighand == NULL)) {
 			/*
 			 * The process has been reaped.
 			 * We can't even collect a sample any more.
diff --git a/kernel/profile.c b/kernel/profile.c
index dfadc5b..b22a899 100644
--- a/kernel/profile.c
+++ b/kernel/profile.c
@@ -365,14 +365,14 @@
 	switch (action) {
 	case CPU_UP_PREPARE:
 	case CPU_UP_PREPARE_FROZEN:
-		node = cpu_to_node(cpu);
+		node = cpu_to_mem(cpu);
 		per_cpu(cpu_profile_flip, cpu) = 0;
 		if (!per_cpu(cpu_profile_hits, cpu)[1]) {
 			page = alloc_pages_exact_node(node,
 					GFP_KERNEL | __GFP_ZERO,
 					0);
 			if (!page)
-				return NOTIFY_BAD;
+				return notifier_from_errno(-ENOMEM);
 			per_cpu(cpu_profile_hits, cpu)[1] = page_address(page);
 		}
 		if (!per_cpu(cpu_profile_hits, cpu)[0]) {
@@ -388,7 +388,7 @@
 		page = virt_to_page(per_cpu(cpu_profile_hits, cpu)[1]);
 		per_cpu(cpu_profile_hits, cpu)[1] = NULL;
 		__free_page(page);
-		return NOTIFY_BAD;
+		return notifier_from_errno(-ENOMEM);
 	case CPU_ONLINE:
 	case CPU_ONLINE_FROZEN:
 		if (prof_cpu_mask != NULL)
@@ -567,7 +567,7 @@
 	int cpu;
 
 	for_each_online_cpu(cpu) {
-		int node = cpu_to_node(cpu);
+		int node = cpu_to_mem(cpu);
 		struct page *page;
 
 		page = alloc_pages_exact_node(node,
diff --git a/kernel/ptrace.c b/kernel/ptrace.c
index 6af9cdd..74a3d69 100644
--- a/kernel/ptrace.c
+++ b/kernel/ptrace.c
@@ -594,6 +594,32 @@
 		ret = ptrace_detach(child, data);
 		break;
 
+#ifdef CONFIG_BINFMT_ELF_FDPIC
+	case PTRACE_GETFDPIC: {
+		struct mm_struct *mm = get_task_mm(child);
+		unsigned long tmp = 0;
+
+		ret = -ESRCH;
+		if (!mm)
+			break;
+
+		switch (addr) {
+		case PTRACE_GETFDPIC_EXEC:
+			tmp = mm->context.exec_fdpic_loadmap;
+			break;
+		case PTRACE_GETFDPIC_INTERP:
+			tmp = mm->context.interp_fdpic_loadmap;
+			break;
+		default:
+			break;
+		}
+		mmput(mm);
+
+		ret = put_user(tmp, (unsigned long __user *) data);
+		break;
+	}
+#endif
+
 #ifdef PTRACE_SINGLESTEP
 	case PTRACE_SINGLESTEP:
 #endif
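
A userspace sketch of the new request (the PTRACE_GETFDPIC_* selectors
come from the architecture's <asm/ptrace.h> on FDPIC platforms; error
handling elided):

	unsigned long exec_loadmap;

	/* addr selects the loadmap, data receives the value */
	ptrace(PTRACE_GETFDPIC, pid,
	       (void *)PTRACE_GETFDPIC_EXEC, &exec_loadmap);
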
diff --git a/kernel/relay.c b/kernel/relay.c
index 4268287..c7cf397 100644
--- a/kernel/relay.c
+++ b/kernel/relay.c
@@ -539,7 +539,7 @@
 					"relay_hotcpu_callback: cpu %d buffer "
 					"creation failed\n", hotcpu);
 				mutex_unlock(&relay_channels_mutex);
-				return NOTIFY_BAD;
+				return notifier_from_errno(-ENOMEM);
 			}
 		}
 		mutex_unlock(&relay_channels_mutex);
diff --git a/kernel/sched.c b/kernel/sched.c
index 054a601..15b93f6 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -969,14 +969,6 @@
 	}
 }
 
-void task_rq_unlock_wait(struct task_struct *p)
-{
-	struct rq *rq = task_rq(p);
-
-	smp_mb(); /* spin-unlock-wait is not a full memory barrier */
-	raw_spin_unlock_wait(&rq->lock);
-}
-
 static void __task_rq_unlock(struct rq *rq)
 	__releases(rq->lock)
 {
diff --git a/kernel/sched_debug.c b/kernel/sched_debug.c
index 87a330a..3556539 100644
--- a/kernel/sched_debug.c
+++ b/kernel/sched_debug.c
@@ -381,15 +381,9 @@
 void proc_sched_show_task(struct task_struct *p, struct seq_file *m)
 {
 	unsigned long nr_switches;
-	unsigned long flags;
-	int num_threads = 1;
 
-	if (lock_task_sighand(p, &flags)) {
-		num_threads = atomic_read(&p->signal->count);
-		unlock_task_sighand(p, &flags);
-	}
-
-	SEQ_printf(m, "%s (%d, #threads: %d)\n", p->comm, p->pid, num_threads);
+	SEQ_printf(m, "%s (%d, #threads: %d)\n", p->comm, p->pid,
+						get_nr_threads(p));
 	SEQ_printf(m,
 		"---------------------------------------------------------\n");
 #define __P(F) \
diff --git a/kernel/signal.c b/kernel/signal.c
index 825a3f2..906ae5a1 100644
--- a/kernel/signal.c
+++ b/kernel/signal.c
@@ -642,7 +642,7 @@
 static int check_kill_permission(int sig, struct siginfo *info,
 				 struct task_struct *t)
 {
-	const struct cred *cred = current_cred(), *tcred;
+	const struct cred *cred, *tcred;
 	struct pid *sid;
 	int error;
 
@@ -656,8 +656,10 @@
 	if (error)
 		return error;
 
+	cred = current_cred();
 	tcred = __task_cred(t);
-	if ((cred->euid ^ tcred->suid) &&
+	if (!same_thread_group(current, t) &&
+	    (cred->euid ^ tcred->suid) &&
 	    (cred->euid ^ tcred->uid) &&
 	    (cred->uid  ^ tcred->suid) &&
 	    (cred->uid  ^ tcred->uid) &&
@@ -1083,23 +1085,24 @@
 /*
  * Nuke all other threads in the group.
  */
-void zap_other_threads(struct task_struct *p)
+int zap_other_threads(struct task_struct *p)
 {
-	struct task_struct *t;
+	struct task_struct *t = p;
+	int count = 0;
 
 	p->signal->group_stop_count = 0;
 
-	for (t = next_thread(p); t != p; t = next_thread(t)) {
-		/*
-		 * Don't bother with already dead threads
-		 */
+	while_each_thread(p, t) {
+		count++;
+
+		/* Don't bother with already dead threads */
 		if (t->exit_state)
 			continue;
-
-		/* SIGKILL will be handled before any pending SIGSTOP */
 		sigaddset(&t->pending.signal, SIGKILL);
 		signal_wake_up(t, 1);
 	}
+
+	return count;
 }
 
 struct sighand_struct *lock_task_sighand(struct task_struct *tsk, unsigned long *flags)
diff --git a/kernel/smp.c b/kernel/smp.c
index 3fc6973..75c970c 100644
--- a/kernel/smp.c
+++ b/kernel/smp.c
@@ -52,7 +52,7 @@
 	case CPU_UP_PREPARE_FROZEN:
 		if (!zalloc_cpumask_var_node(&cfd->cpumask, GFP_KERNEL,
 				cpu_to_node(cpu)))
-			return NOTIFY_BAD;
+			return notifier_from_errno(-ENOMEM);
 		break;
 
 #ifdef CONFIG_HOTPLUG_CPU
diff --git a/kernel/softirq.c b/kernel/softirq.c
index 0db913a..825e112 100644
--- a/kernel/softirq.c
+++ b/kernel/softirq.c
@@ -808,7 +808,7 @@
 		p = kthread_create(run_ksoftirqd, hcpu, "ksoftirqd/%d", hotcpu);
 		if (IS_ERR(p)) {
 			printk("ksoftirqd for %i failed\n", hotcpu);
-			return NOTIFY_BAD;
+			return notifier_from_errno(PTR_ERR(p));
 		}
 		kthread_bind(p, hotcpu);
   		per_cpu(ksoftirqd, hotcpu) = p;
diff --git a/kernel/sys.c b/kernel/sys.c
index 0d36d88..e83ddbb 100644
--- a/kernel/sys.c
+++ b/kernel/sys.c
@@ -1632,9 +1632,9 @@
 
 char poweroff_cmd[POWEROFF_CMD_PATH_LEN] = "/sbin/poweroff";
 
-static void argv_cleanup(char **argv, char **envp)
+static void argv_cleanup(struct subprocess_info *info)
 {
-	argv_free(argv);
+	argv_free(info->argv);
 }
 
 /**
@@ -1668,7 +1668,7 @@
 		goto out;
 	}
 
-	call_usermodehelper_setcleanup(info, argv_cleanup);
+	call_usermodehelper_setfns(info, NULL, argv_cleanup, NULL);
 
 	ret = call_usermodehelper_exec(info, UMH_NO_WAIT);
 
diff --git a/kernel/timer.c b/kernel/timer.c
index be394af..e3b8c69 100644
--- a/kernel/timer.c
+++ b/kernel/timer.c
@@ -1680,11 +1680,14 @@
 				unsigned long action, void *hcpu)
 {
 	long cpu = (long)hcpu;
+	int err;
+
 	switch(action) {
 	case CPU_UP_PREPARE:
 	case CPU_UP_PREPARE_FROZEN:
-		if (init_timers_cpu(cpu) < 0)
-			return NOTIFY_BAD;
+		err = init_timers_cpu(cpu);
+		if (err < 0)
+			return notifier_from_errno(err);
 		break;
 #ifdef CONFIG_HOTPLUG_CPU
 	case CPU_DEAD:
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index 77dabbf..327d2de 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -1110,7 +1110,7 @@
 	unsigned int cpu = (unsigned long)hcpu;
 	struct cpu_workqueue_struct *cwq;
 	struct workqueue_struct *wq;
-	int ret = NOTIFY_OK;
+	int err = 0;
 
 	action &= ~CPU_TASKS_FROZEN;
 
@@ -1124,12 +1124,13 @@
 
 		switch (action) {
 		case CPU_UP_PREPARE:
-			if (!create_workqueue_thread(cwq, cpu))
+			err = create_workqueue_thread(cwq, cpu);
+			if (!err)
 				break;
 			printk(KERN_ERR "workqueue [%s] for %i failed\n",
 				wq->name, cpu);
 			action = CPU_UP_CANCELED;
-			ret = NOTIFY_BAD;
+			err = -ENOMEM;
 			goto undo;
 
 		case CPU_ONLINE:
@@ -1150,7 +1151,7 @@
 		cpumask_clear_cpu(cpu, cpu_populated_map);
 	}
 
-	return ret;
+	return notifier_from_errno(err);
 }
 
 #ifdef CONFIG_SMP
diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
index 2312089..e722e9d 100644
--- a/lib/Kconfig.debug
+++ b/lib/Kconfig.debug
@@ -898,6 +898,18 @@
 	Documentation on how to use the module can be found in
 	Documentation/fault-injection/provoke-crashes.txt
 
+config CPU_NOTIFIER_ERROR_INJECT
+	tristate "CPU notifier error injection module"
+	depends on HOTPLUG_CPU && DEBUG_KERNEL
+	help
+	  This option provides a kernel module that can be used to test
+	  the error handling of the cpu notifiers.
+
+	  To compile this code as a module, choose M here: the module will
+	  be called cpu-notifier-error-inject.
+
+	  If unsure, say N.
+
 config FAULT_INJECTION
 	bool "Fault-injection framework"
 	depends on DEBUG_KERNEL
diff --git a/lib/Makefile b/lib/Makefile
index 9e6d3c2..c8567a5 100644
--- a/lib/Makefile
+++ b/lib/Makefile
@@ -85,6 +85,7 @@
 obj-$(CONFIG_SWIOTLB) += swiotlb.o
 obj-$(CONFIG_IOMMU_HELPER) += iommu-helper.o
 obj-$(CONFIG_FAULT_INJECTION) += fault-inject.o
+obj-$(CONFIG_CPU_NOTIFIER_ERROR_INJECT) += cpu-notifier-error-inject.o
 
 lib-$(CONFIG_GENERIC_BUG) += bug.o
 
diff --git a/lib/bitmap.c b/lib/bitmap.c
index ffb78c9..d7137e7 100644
--- a/lib/bitmap.c
+++ b/lib/bitmap.c
@@ -672,7 +672,7 @@
  *
  * The bit positions 0 through @bits are valid positions in @buf.
  */
-static int bitmap_ord_to_pos(const unsigned long *buf, int ord, int bits)
+int bitmap_ord_to_pos(const unsigned long *buf, int ord, int bits)
 {
 	int pos = 0;
 
diff --git a/lib/cpu-notifier-error-inject.c b/lib/cpu-notifier-error-inject.c
new file mode 100644
index 0000000..4dc2032
--- /dev/null
+++ b/lib/cpu-notifier-error-inject.c
@@ -0,0 +1,63 @@
+#include <linux/kernel.h>
+#include <linux/cpu.h>
+#include <linux/module.h>
+#include <linux/notifier.h>
+
+static int priority;
+static int cpu_up_prepare_error;
+static int cpu_down_prepare_error;
+
+module_param(priority, int, 0);
+MODULE_PARM_DESC(priority, "specify cpu notifier priority");
+
+module_param(cpu_up_prepare_error, int, 0644);
+MODULE_PARM_DESC(cpu_up_prepare_error,
+		"specify error code to inject on CPU_UP_PREPARE action");
+
+module_param(cpu_down_prepare_error, int, 0644);
+MODULE_PARM_DESC(cpu_down_prepare_error,
+		"specify error code to inject on CPU_DOWN_PREPARE action");
+
+static int err_inject_cpu_callback(struct notifier_block *nfb,
+				unsigned long action, void *hcpu)
+{
+	int err = 0;
+
+	switch (action) {
+	case CPU_UP_PREPARE:
+	case CPU_UP_PREPARE_FROZEN:
+		err = cpu_up_prepare_error;
+		break;
+	case CPU_DOWN_PREPARE:
+	case CPU_DOWN_PREPARE_FROZEN:
+		err = cpu_down_prepare_error;
+		break;
+	}
+	if (err)
+		printk(KERN_INFO "Injecting error (%d) at cpu notifier\n", err);
+
+	return notifier_from_errno(err);
+}
+
+static struct notifier_block err_inject_cpu_notifier = {
+	.notifier_call = err_inject_cpu_callback,
+};
+
+static int err_inject_init(void)
+{
+	err_inject_cpu_notifier.priority = priority;
+
+	return register_hotcpu_notifier(&err_inject_cpu_notifier);
+}
+
+static void err_inject_exit(void)
+{
+	unregister_hotcpu_notifier(&err_inject_cpu_notifier);
+}
+
+module_init(err_inject_init);
+module_exit(err_inject_exit);
+
+MODULE_DESCRIPTION("CPU notifier error injection module");
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Akinobu Mita <akinobu.mita@gmail.com>");
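
A hypothetical test session with the module (the errno, cpu number and
shell output are illustrative):

	# modprobe cpu-notifier-error-inject cpu_up_prepare_error=-12
	# echo 0 > /sys/devices/system/cpu/cpu1/online
	# echo 1 > /sys/devices/system/cpu/cpu1/online
	bash: echo: write error: Cannot allocate memory

Since the two error parameters are mode 0644, they can also be changed
at runtime under /sys/module/cpu_notifier_error_inject/parameters/.
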
diff --git a/lib/idr.c b/lib/idr.c
index 422a9d5..c1a2069 100644
--- a/lib/idr.c
+++ b/lib/idr.c
@@ -445,6 +445,7 @@
 void idr_remove_all(struct idr *idp)
 {
 	int n, id, max;
+	int bt_mask;
 	struct idr_layer *p;
 	struct idr_layer *pa[MAX_LEVEL];
 	struct idr_layer **paa = &pa[0];
@@ -462,8 +463,10 @@
 			p = p->ary[(id >> n) & IDR_MASK];
 		}
 
+		bt_mask = id;
 		id += 1 << n;
-		while (n < fls(id)) {
+		/* Get the highest bit that the above add changed from 0->1. */
+		while (n < fls(id ^ bt_mask)) {
 			if (p)
 				free_layer(p);
 			n += IDR_BITS;
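
The XOR against the saved bt_mask isolates exactly the bits that the
"id += 1 << n" carry flipped, i.e. the levels whose subtrees have been
fully visited.  Taking IDR_BITS == 1 for illustration: after visiting
the leaf for ids {4, 5}, id goes 0b100 -> 0b110, so fls(id ^ bt_mask) =
fls(0b010) = 2 and only the leaf at n = 1 is freed.  The old test
fls(id) = fls(0b110) = 3 also freed the parent still covering the
unvisited ids {6, 7}, a use-after-free on trees more than two layers
deep.
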
diff --git a/lib/radix-tree.c b/lib/radix-tree.c
index 2a087e0..05da38b 100644
--- a/lib/radix-tree.c
+++ b/lib/radix-tree.c
@@ -656,7 +656,7 @@
  *
  *	Returns: the index of the hole if found, otherwise returns an index
  *	outside of the set specified (in which case 'index - return >= max_scan'
- *	will be true). In rare cases of wrap-around, LONG_MAX will be returned.
+ *	will be true). In rare cases of wrap-around, ULONG_MAX will be returned.
  *
  *	radix_tree_next_hole may be called under rcu_read_lock. However, like
  *	radix_tree_gang_lookup, this will not atomically search a snapshot of
@@ -674,7 +674,7 @@
 		if (!radix_tree_lookup(root, index))
 			break;
 		index--;
-		if (index == LONG_MAX)
+		if (index == ULONG_MAX)
 			break;
 	}
 
diff --git a/lib/random32.c b/lib/random32.c
index 217d5c4..870dc3f 100644
--- a/lib/random32.c
+++ b/lib/random32.c
@@ -39,13 +39,16 @@
 #include <linux/jiffies.h>
 #include <linux/random.h>
 
-struct rnd_state {
-	u32 s1, s2, s3;
-};
-
 static DEFINE_PER_CPU(struct rnd_state, net_rand_state);
 
-static u32 __random32(struct rnd_state *state)
+/**
+ *	prandom32 - seeded pseudo-random number generator.
+ *	@state: pointer to state structure holding seeded state.
+ *
+ *	This is used for pseudo-randomness with no outside seeding.
+ *	For more random results, use random32().
+ */
+u32 prandom32(struct rnd_state *state)
 {
 #define TAUSWORTHE(s,a,b,c,d) ((s&c)<<d) ^ (((s <<a) ^ s)>>b)
 
@@ -55,14 +58,7 @@
 
 	return (state->s1 ^ state->s2 ^ state->s3);
 }
-
-/*
- * Handle minimum values for seeds
- */
-static inline u32 __seed(u32 x, u32 m)
-{
-	return (x < m) ? x + m : x;
-}
+EXPORT_SYMBOL(prandom32);
 
 /**
  *	random32 - pseudo random number generator
@@ -75,7 +71,7 @@
 {
 	unsigned long r;
 	struct rnd_state *state = &get_cpu_var(net_rand_state);
-	r = __random32(state);
+	r = prandom32(state);
 	put_cpu_var(state);
 	return r;
 }
@@ -118,12 +114,12 @@
 		state->s3 = __seed(LCG(state->s2), 15);
 
 		/* "warm it up" */
-		__random32(state);
-		__random32(state);
-		__random32(state);
-		__random32(state);
-		__random32(state);
-		__random32(state);
+		prandom32(state);
+		prandom32(state);
+		prandom32(state);
+		prandom32(state);
+		prandom32(state);
+		prandom32(state);
 	}
 	return 0;
 }
@@ -147,7 +143,7 @@
 		state->s3 = __seed(seeds[2], 15);
 
 		/* mix it in */
-		__random32(state);
+		prandom32(state);
 	}
 	return 0;
 }
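
With the export, a module can run a private Tausworthe stream instead
of sharing the per-cpu net_rand_state.  A minimal sketch, assuming the
prandom32_seed() helper that lands in <linux/random.h> alongside this
export (verify against the header):

	#include <linux/jiffies.h>
	#include <linux/random.h>

	static struct rnd_state my_rnd;

	static void my_rng_init(void)
	{
		prandom32_seed(&my_rnd, get_jiffies_64());
	}

	static u32 my_next_random(void)
	{
		return prandom32(&my_rnd);
	}
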
diff --git a/lib/swiotlb.c b/lib/swiotlb.c
index 5fddf72..a009055 100644
--- a/lib/swiotlb.c
+++ b/lib/swiotlb.c
@@ -757,37 +757,6 @@
 EXPORT_SYMBOL(swiotlb_sync_single_for_device);
 
 /*
- * Same as above, but for a sub-range of the mapping.
- */
-static void
-swiotlb_sync_single_range(struct device *hwdev, dma_addr_t dev_addr,
-			  unsigned long offset, size_t size,
-			  int dir, int target)
-{
-	swiotlb_sync_single(hwdev, dev_addr + offset, size, dir, target);
-}
-
-void
-swiotlb_sync_single_range_for_cpu(struct device *hwdev, dma_addr_t dev_addr,
-				  unsigned long offset, size_t size,
-				  enum dma_data_direction dir)
-{
-	swiotlb_sync_single_range(hwdev, dev_addr, offset, size, dir,
-				  SYNC_FOR_CPU);
-}
-EXPORT_SYMBOL_GPL(swiotlb_sync_single_range_for_cpu);
-
-void
-swiotlb_sync_single_range_for_device(struct device *hwdev, dma_addr_t dev_addr,
-				     unsigned long offset, size_t size,
-				     enum dma_data_direction dir)
-{
-	swiotlb_sync_single_range(hwdev, dev_addr, offset, size, dir,
-				  SYNC_FOR_DEVICE);
-}
-EXPORT_SYMBOL_GPL(swiotlb_sync_single_range_for_device);
-
-/*
  * Map a set of buffers described by scatterlist in streaming mode for DMA.
  * This is the scatter-gather version of the above swiotlb_map_page
  * interface.  Here the scatter gather list elements are each tagged with the
diff --git a/mm/filemap.c b/mm/filemap.c
index 88d7196..35e12d1 100644
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -1105,6 +1105,12 @@
 		}
 
 readpage:
+		/*
+		 * A previous I/O error may have been due to temporary
+		 * failures, e.g. multipath errors.
+		 * PG_error will be set again if readpage fails.
+		 */
+		ClearPageError(page);
 		/* Start the actual read. The read will unlock the page. */
 		error = mapping->a_ops->readpage(filp, page);
 
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index c8569bc..c6ece0a 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -149,16 +149,35 @@
 	u64 threshold;
 };
 
+/* For threshold */
 struct mem_cgroup_threshold_ary {
 	/* An array index points to threshold just below usage. */
-	atomic_t current_threshold;
+	int current_threshold;
 	/* Size of entries[] */
 	unsigned int size;
 	/* Array of thresholds */
 	struct mem_cgroup_threshold entries[0];
 };
 
+struct mem_cgroup_thresholds {
+	/* Primary thresholds array */
+	struct mem_cgroup_threshold_ary *primary;
+	/*
+	 * Spare threshold array.
+	 * This is needed to make mem_cgroup_unregister_event() "never fail".
+	 * It must be able to store at least primary->size - 1 entries.
+	 */
+	struct mem_cgroup_threshold_ary *spare;
+};
+
+/* for OOM */
+struct mem_cgroup_eventfd_list {
+	struct list_head list;
+	struct eventfd_ctx *eventfd;
+};
+
 static void mem_cgroup_threshold(struct mem_cgroup *mem);
+static void mem_cgroup_oom_notify(struct mem_cgroup *mem);
 
 /*
  * The memory controller data structure. The memory controller controls both
@@ -207,6 +226,8 @@
 	atomic_t	refcnt;
 
 	unsigned int	swappiness;
+	/* OOM-Killer disable */
+	int		oom_kill_disable;
 
 	/* set when res.limit == memsw.limit */
 	bool		memsw_is_minimum;
@@ -215,17 +236,19 @@
 	struct mutex thresholds_lock;
 
 	/* thresholds for memory usage. RCU-protected */
-	struct mem_cgroup_threshold_ary *thresholds;
+	struct mem_cgroup_thresholds thresholds;
 
 	/* thresholds for mem+swap usage. RCU-protected */
-	struct mem_cgroup_threshold_ary *memsw_thresholds;
+	struct mem_cgroup_thresholds memsw_thresholds;
+
+	/* For oom notifier event fd */
+	struct list_head oom_notify;
 
 	/*
 	 * Should we move charges of a task when a task is moved into this
 	 * mem_cgroup ? And what type of charges should we move ?
 	 */
 	unsigned long 	move_charge_at_immigrate;
-
 	/*
 	 * percpu counter.
 	 */
@@ -239,6 +262,7 @@
  */
 enum move_type {
 	MOVE_CHARGE_TYPE_ANON,	/* private anonymous page and swap of it */
+	MOVE_CHARGE_TYPE_FILE,	/* file page(including tmpfs) and swap of it */
 	NR_MOVE_TYPE,
 };
 
@@ -255,6 +279,18 @@
 	.waitq = __WAIT_QUEUE_HEAD_INITIALIZER(mc.waitq),
 };
 
+static bool move_anon(void)
+{
+	return test_bit(MOVE_CHARGE_TYPE_ANON,
+					&mc.to->move_charge_at_immigrate);
+}
+
+static bool move_file(void)
+{
+	return test_bit(MOVE_CHARGE_TYPE_FILE,
+					&mc.to->move_charge_at_immigrate);
+}
+
 /*
  * Maximum loops in mem_cgroup_hierarchical_reclaim(), used for soft
  * limit reclaim to prevent infinite loops, if they ever occur.
@@ -282,9 +318,12 @@
 /* for encoding cft->private value on file */
 #define _MEM			(0)
 #define _MEMSWAP		(1)
+#define _OOM_TYPE		(2)
 #define MEMFILE_PRIVATE(x, val)	(((x) << 16) | (val))
 #define MEMFILE_TYPE(val)	(((val) >> 16) & 0xffff)
 #define MEMFILE_ATTR(val)	((val) & 0xffff)
+/* Used for OOM notifier */
+#define OOM_CONTROL		(0)
 
 /*
  * Reclaim flags for mem_cgroup_hierarchical_reclaim
@@ -1293,14 +1332,62 @@
 static DEFINE_MUTEX(memcg_oom_mutex);
 static DECLARE_WAIT_QUEUE_HEAD(memcg_oom_waitq);
 
+struct oom_wait_info {
+	struct mem_cgroup *mem;
+	wait_queue_t	wait;
+};
+
+static int memcg_oom_wake_function(wait_queue_t *wait,
+	unsigned mode, int sync, void *arg)
+{
+	struct mem_cgroup *wake_mem = (struct mem_cgroup *)arg;
+	struct oom_wait_info *oom_wait_info;
+
+	oom_wait_info = container_of(wait, struct oom_wait_info, wait);
+
+	if (oom_wait_info->mem == wake_mem)
+		goto wakeup;
+	/* if no hierarchy, no match */
+	if (!oom_wait_info->mem->use_hierarchy || !wake_mem->use_hierarchy)
+		return 0;
+	/*
+	 * Both oom_wait_info->mem and wake_mem are stable under us,
+	 * so we can use css_is_ancestor() without worrying about RCU.
+	 */
+	if (!css_is_ancestor(&oom_wait_info->mem->css, &wake_mem->css) &&
+	    !css_is_ancestor(&wake_mem->css, &oom_wait_info->mem->css))
+		return 0;
+
+wakeup:
+	return autoremove_wake_function(wait, mode, sync, arg);
+}
+
+static void memcg_wakeup_oom(struct mem_cgroup *mem)
+{
+	/* for filtering, pass "mem" as argument. */
+	__wake_up(&memcg_oom_waitq, TASK_NORMAL, 0, mem);
+}
+
+static void memcg_oom_recover(struct mem_cgroup *mem)
+{
+	if (mem->oom_kill_disable && atomic_read(&mem->oom_lock))
+		memcg_wakeup_oom(mem);
+}
+
 /*
  * try to call OOM killer. returns false if we should exit memory-reclaim loop.
  */
 bool mem_cgroup_handle_oom(struct mem_cgroup *mem, gfp_t mask)
 {
-	DEFINE_WAIT(wait);
-	bool locked;
+	struct oom_wait_info owait;
+	bool locked, need_to_kill;
 
+	owait.mem = mem;
+	owait.wait.flags = 0;
+	owait.wait.func = memcg_oom_wake_function;
+	owait.wait.private = current;
+	INIT_LIST_HEAD(&owait.wait.task_list);
+	need_to_kill = true;
 	/* At first, try to OOM lock hierarchy under mem.*/
 	mutex_lock(&memcg_oom_mutex);
 	locked = mem_cgroup_oom_lock(mem);
@@ -1309,32 +1396,23 @@
 	 * accounting. So, UNINTERRUPTIBLE is appropriate. But SIGKILL
 	 * under OOM is always welcomed, use TASK_KILLABLE here.
 	 */
-	if (!locked)
-		prepare_to_wait(&memcg_oom_waitq, &wait, TASK_KILLABLE);
+	prepare_to_wait(&memcg_oom_waitq, &owait.wait, TASK_KILLABLE);
+	if (!locked || mem->oom_kill_disable)
+		need_to_kill = false;
+	if (locked)
+		mem_cgroup_oom_notify(mem);
 	mutex_unlock(&memcg_oom_mutex);
 
-	if (locked)
+	if (need_to_kill) {
+		finish_wait(&memcg_oom_waitq, &owait.wait);
 		mem_cgroup_out_of_memory(mem, mask);
-	else {
+	} else {
 		schedule();
-		finish_wait(&memcg_oom_waitq, &wait);
+		finish_wait(&memcg_oom_waitq, &owait.wait);
 	}
 	mutex_lock(&memcg_oom_mutex);
 	mem_cgroup_oom_unlock(mem);
-	/*
-	 * Here, we use global waitq .....more fine grained waitq ?
-	 * Assume following hierarchy.
-	 * A/
-	 *   01
-	 *   02
-	 * assume OOM happens both in A and 01 at the same time. Tthey are
-	 * mutually exclusive by lock. (kill in 01 helps A.)
-	 * When we use per memcg waitq, we have to wake up waiters on A and 02
-	 * in addtion to waiters on 01. We use global waitq for avoiding mess.
-	 * It will not be a big problem.
-	 * (And a task may be moved to other groups while it's waiting for OOM.)
-	 */
-	wake_up_all(&memcg_oom_waitq);
+	memcg_wakeup_oom(mem);
 	mutex_unlock(&memcg_oom_mutex);
 
 	if (test_thread_flag(TIF_MEMDIE) || fatal_signal_pending(current))
@@ -2118,15 +2196,6 @@
 	/* If swapout, usage of swap doesn't decrease */
 	if (!do_swap_account || ctype == MEM_CGROUP_CHARGE_TYPE_SWAPOUT)
 		uncharge_memsw = false;
-	/*
-	 * do_batch > 0 when unmapping pages or inode invalidate/truncate.
-	 * In those cases, all pages freed continously can be expected to be in
-	 * the same cgroup and we have chance to coalesce uncharges.
-	 * But we do uncharge one by one if this is killed by OOM(TIF_MEMDIE)
-	 * because we want to do uncharge as soon as possible.
-	 */
-	if (!current->memcg_batch.do_batch || test_thread_flag(TIF_MEMDIE))
-		goto direct_uncharge;
 
 	batch = &current->memcg_batch;
 	/*
@@ -2137,6 +2206,17 @@
 	if (!batch->memcg)
 		batch->memcg = mem;
 	/*
+	 * do_batch > 0 when unmapping pages or inode invalidate/truncate.
+	 * In those cases, all pages freed continuously can be expected to be
+	 * in the same cgroup, and we have a chance to coalesce uncharges.
+	 * But we uncharge one by one if this task is killed by OOM (TIF_MEMDIE)
+	 * because we want to uncharge as soon as possible.
+	 */
+
+	if (!batch->do_batch || test_thread_flag(TIF_MEMDIE))
+		goto direct_uncharge;
+
+	/*
 	 * In typical case, batch->memcg == mem. This means we can
 	 * merge a series of uncharges to an uncharge of res_counter.
 	 * If not, we uncharge res_counter ony by one.
@@ -2152,6 +2232,8 @@
 	res_counter_uncharge(&mem->res, PAGE_SIZE);
 	if (uncharge_memsw)
 		res_counter_uncharge(&mem->memsw, PAGE_SIZE);
+	if (unlikely(batch->memcg != mem))
+		memcg_oom_recover(mem);
 	return;
 }
 
@@ -2188,7 +2270,8 @@
 	switch (ctype) {
 	case MEM_CGROUP_CHARGE_TYPE_MAPPED:
 	case MEM_CGROUP_CHARGE_TYPE_DROP:
-		if (page_mapped(page))
+		/* See mem_cgroup_prepare_migration() */
+		if (page_mapped(page) || PageCgroupMigration(pc))
 			goto unlock_out;
 		break;
 	case MEM_CGROUP_CHARGE_TYPE_SWAPOUT:
@@ -2288,6 +2371,7 @@
 		res_counter_uncharge(&batch->memcg->res, batch->bytes);
 	if (batch->memsw_bytes)
 		res_counter_uncharge(&batch->memcg->memsw, batch->memsw_bytes);
+	memcg_oom_recover(batch->memcg);
 	/* forget this pointer (for sanity check) */
 	batch->memcg = NULL;
 }
@@ -2410,10 +2494,12 @@
  * Before starting migration, account PAGE_SIZE to mem_cgroup that the old
  * page belongs to.
  */
-int mem_cgroup_prepare_migration(struct page *page, struct mem_cgroup **ptr)
+int mem_cgroup_prepare_migration(struct page *page,
+	struct page *newpage, struct mem_cgroup **ptr)
 {
 	struct page_cgroup *pc;
 	struct mem_cgroup *mem = NULL;
+	enum charge_type ctype;
 	int ret = 0;
 
 	if (mem_cgroup_disabled())
@@ -2424,69 +2510,125 @@
 	if (PageCgroupUsed(pc)) {
 		mem = pc->mem_cgroup;
 		css_get(&mem->css);
+		/*
+		 * When migrating an anonymous page, its mapcount goes down
+		 * to 0 and uncharge() will be called. But even if it's fully
+		 * unmapped, migration may fail and the page would have to be
+		 * charged again. We set the MIGRATION flag here and delay
+		 * uncharge until end_migration() is called.
+		 *
+		 * Corner Case Thinking
+		 * A)
+		 * When the old page was mapped as Anon and is unmapped and
+		 * freed while migration is ongoing.
+		 * If unmap finds the old page, its uncharge() will be delayed
+		 * until end_migration(). If unmap finds the new page, it is
+		 * uncharged when its mapcount goes from 1 to 0. If unmap code
+		 * finds a swap_migration_entry, the new page will not be mapped
+		 * and end_migration() will find it (mapcount == 0).
+		 *
+		 * B)
+		 * When the old page was mapped but migration fails, the kernel
+		 * remaps it. A charge for it is kept by MIGRATION flag even
+		 * if mapcount goes down to 0. We can do remap successfully
+		 * without charging it again.
+		 *
+		 * C)
+		 * The "old" page is under lock_page() until the end of
+		 * migration, so the old page itself will not be swapped out.
+		 * If the new page is swapped out before end_migration(), our
+		 * hook into the usual swap-out path will catch the event.
+		 */
+		if (PageAnon(page))
+			SetPageCgroupMigration(pc);
 	}
 	unlock_page_cgroup(pc);
+	/*
+	 * If the page is not charged at this point,
+	 * we return here.
+	 */
+	if (!mem)
+		return 0;
 
 	*ptr = mem;
-	if (mem) {
-		ret = __mem_cgroup_try_charge(NULL, GFP_KERNEL, ptr, false);
-		css_put(&mem->css);
+	ret = __mem_cgroup_try_charge(NULL, GFP_KERNEL, ptr, false);
+	css_put(&mem->css);/* drop extra refcnt */
+	if (ret || *ptr == NULL) {
+		if (PageAnon(page)) {
+			lock_page_cgroup(pc);
+			ClearPageCgroupMigration(pc);
+			unlock_page_cgroup(pc);
+			/*
+			 * The old page may be fully unmapped while we kept it.
+			 */
+			mem_cgroup_uncharge_page(page);
+		}
+		return -ENOMEM;
 	}
+	/*
+	 * We charge the new page before it's used/mapped. So, even if
+	 * unlock_page() is called before end_migration(), we can catch all
+	 * events on this new page. If the new page is migrated but not
+	 * remapped, its mapcount will finally be 0 and we call uncharge in
+	 * end_migration().
+	 */
+	pc = lookup_page_cgroup(newpage);
+	if (PageAnon(page))
+		ctype = MEM_CGROUP_CHARGE_TYPE_MAPPED;
+	else if (page_is_file_cache(page))
+		ctype = MEM_CGROUP_CHARGE_TYPE_CACHE;
+	else
+		ctype = MEM_CGROUP_CHARGE_TYPE_SHMEM;
+	__mem_cgroup_commit_charge(mem, pc, ctype);
 	return ret;
 }
 
 /* remove redundant charge if migration failed*/
 void mem_cgroup_end_migration(struct mem_cgroup *mem,
-		struct page *oldpage, struct page *newpage)
+	struct page *oldpage, struct page *newpage)
 {
-	struct page *target, *unused;
+	struct page *used, *unused;
 	struct page_cgroup *pc;
-	enum charge_type ctype;
 
 	if (!mem)
 		return;
+	/* blocks rmdir() */
 	cgroup_exclude_rmdir(&mem->css);
 	/* at migration success, oldpage->mapping is NULL. */
 	if (oldpage->mapping) {
-		target = oldpage;
-		unused = NULL;
+		used = oldpage;
+		unused = newpage;
 	} else {
-		target = newpage;
+		used = newpage;
 		unused = oldpage;
 	}
-
-	if (PageAnon(target))
-		ctype = MEM_CGROUP_CHARGE_TYPE_MAPPED;
-	else if (page_is_file_cache(target))
-		ctype = MEM_CGROUP_CHARGE_TYPE_CACHE;
-	else
-		ctype = MEM_CGROUP_CHARGE_TYPE_SHMEM;
-
-	/* unused page is not on radix-tree now. */
-	if (unused)
-		__mem_cgroup_uncharge_common(unused, ctype);
-
-	pc = lookup_page_cgroup(target);
 	/*
-	 * __mem_cgroup_commit_charge() check PCG_USED bit of page_cgroup.
-	 * So, double-counting is effectively avoided.
+	 * We disallowed uncharge of pages under migration because mapcount
+	 * of the page goes down to zero, temporarily.
+	 * Clear the flag and check whether the page should be charged.
 	 */
-	__mem_cgroup_commit_charge(mem, pc, ctype);
+	pc = lookup_page_cgroup(oldpage);
+	lock_page_cgroup(pc);
+	ClearPageCgroupMigration(pc);
+	unlock_page_cgroup(pc);
 
+	if (unused != oldpage)
+		pc = lookup_page_cgroup(unused);
+	__mem_cgroup_uncharge_common(unused, MEM_CGROUP_CHARGE_TYPE_FORCE);
+
+	pc = lookup_page_cgroup(used);
 	/*
-	 * Both of oldpage and newpage are still under lock_page().
-	 * Then, we don't have to care about race in radix-tree.
-	 * But we have to be careful that this page is unmapped or not.
-	 *
-	 * There is a case for !page_mapped(). At the start of
-	 * migration, oldpage was mapped. But now, it's zapped.
-	 * But we know *target* page is not freed/reused under us.
-	 * mem_cgroup_uncharge_page() does all necessary checks.
+	 * If a page is a file cache, the radix-tree replacement is atomic
+	 * and we can skip this check. When it was an Anon page, its mapcount
+	 * goes down to 0. But because we added the MIGRATION flag, it's not
+	 * uncharged yet. There are several cases, but the page->mapcount check
+	 * and the USED bit check in mem_cgroup_uncharge_page() do enough
+	 * checking. (see prepare_charge() also)
 	 */
-	if (ctype == MEM_CGROUP_CHARGE_TYPE_MAPPED)
-		mem_cgroup_uncharge_page(target);
+	if (PageAnon(used))
+		mem_cgroup_uncharge_page(used);
 	/*
-	 * At migration, we may charge account against cgroup which has no tasks
+	 * At migration, we may charge against a cgroup which has no
+	 * tasks.
 	 * So, rmdir()->pre_destroy() can be called while we do this charge.
 	 * In that case, we need to call pre_destroy() again. check it here.
 	 */
@@ -2524,10 +2666,11 @@
 				unsigned long long val)
 {
 	int retry_count;
-	u64 memswlimit;
+	u64 memswlimit, memlimit;
 	int ret = 0;
 	int children = mem_cgroup_count_children(memcg);
 	u64 curusage, oldusage;
+	int enlarge;
 
 	/*
 	 * For keeping hierarchical_reclaim simple, how long we should retry
@@ -2538,6 +2681,7 @@
 
 	oldusage = res_counter_read_u64(&memcg->res, RES_USAGE);
 
+	enlarge = 0;
 	while (retry_count) {
 		if (signal_pending(current)) {
 			ret = -EINTR;
@@ -2555,6 +2699,11 @@
 			mutex_unlock(&set_limit_mutex);
 			break;
 		}
+
+		memlimit = res_counter_read_u64(&memcg->res, RES_LIMIT);
+		if (memlimit < val)
+			enlarge = 1;
+
 		ret = res_counter_set_limit(&memcg->res, val);
 		if (!ret) {
 			if (memswlimit == val)
@@ -2576,6 +2725,8 @@
 		else
 			oldusage = curusage;
 	}
+	if (!ret && enlarge)
+		memcg_oom_recover(memcg);
 
 	return ret;
 }
@@ -2584,9 +2735,10 @@
 					unsigned long long val)
 {
 	int retry_count;
-	u64 memlimit, oldusage, curusage;
+	u64 memlimit, memswlimit, oldusage, curusage;
 	int children = mem_cgroup_count_children(memcg);
 	int ret = -EBUSY;
+	int enlarge = 0;
 
 	/* see mem_cgroup_resize_res_limit */
  	retry_count = children * MEM_CGROUP_RECLAIM_RETRIES;
@@ -2608,6 +2760,9 @@
 			mutex_unlock(&set_limit_mutex);
 			break;
 		}
+		memswlimit = res_counter_read_u64(&memcg->memsw, RES_LIMIT);
+		if (memswlimit < val)
+			enlarge = 1;
 		ret = res_counter_set_limit(&memcg->memsw, val);
 		if (!ret) {
 			if (memlimit == val)
@@ -2630,6 +2785,8 @@
 		else
 			oldusage = curusage;
 	}
+	if (!ret && enlarge)
+		memcg_oom_recover(memcg);
 	return ret;
 }
 
@@ -2821,6 +2978,7 @@
 			if (ret)
 				break;
 		}
+		memcg_oom_recover(mem);
 		/* it seems parent cgroup doesn't have enough mem */
 		if (ret == -ENOMEM)
 			goto try_to_free;
@@ -3311,9 +3469,9 @@
 
 	rcu_read_lock();
 	if (!swap)
-		t = rcu_dereference(memcg->thresholds);
+		t = rcu_dereference(memcg->thresholds.primary);
 	else
-		t = rcu_dereference(memcg->memsw_thresholds);
+		t = rcu_dereference(memcg->memsw_thresholds.primary);
 
 	if (!t)
 		goto unlock;
@@ -3325,7 +3483,7 @@
 	 * If it's not true, a threshold was crossed after last
 	 * call of __mem_cgroup_threshold().
 	 */
-	i = atomic_read(&t->current_threshold);
+	i = t->current_threshold;
 
 	/*
 	 * Iterate backward over array of thresholds starting from
@@ -3349,7 +3507,7 @@
 		eventfd_signal(t->entries[i].eventfd, 1);
 
 	/* Update current_threshold */
-	atomic_set(&t->current_threshold, i - 1);
+	t->current_threshold = i - 1;
 unlock:
 	rcu_read_unlock();
 }
@@ -3369,106 +3527,117 @@
 	return _a->threshold - _b->threshold;
 }
 
-static int mem_cgroup_register_event(struct cgroup *cgrp, struct cftype *cft,
-		struct eventfd_ctx *eventfd, const char *args)
+static int mem_cgroup_oom_notify_cb(struct mem_cgroup *mem, void *data)
+{
+	struct mem_cgroup_eventfd_list *ev;
+
+	list_for_each_entry(ev, &mem->oom_notify, list)
+		eventfd_signal(ev->eventfd, 1);
+	return 0;
+}
+
+static void mem_cgroup_oom_notify(struct mem_cgroup *mem)
+{
+	mem_cgroup_walk_tree(mem, NULL, mem_cgroup_oom_notify_cb);
+}
+
+static int mem_cgroup_usage_register_event(struct cgroup *cgrp,
+	struct cftype *cft, struct eventfd_ctx *eventfd, const char *args)
 {
 	struct mem_cgroup *memcg = mem_cgroup_from_cont(cgrp);
-	struct mem_cgroup_threshold_ary *thresholds, *thresholds_new;
+	struct mem_cgroup_thresholds *thresholds;
+	struct mem_cgroup_threshold_ary *new;
 	int type = MEMFILE_TYPE(cft->private);
 	u64 threshold, usage;
-	int size;
-	int i, ret;
+	int i, size, ret;
 
 	ret = res_counter_memparse_write_strategy(args, &threshold);
 	if (ret)
 		return ret;
 
 	mutex_lock(&memcg->thresholds_lock);
+
 	if (type == _MEM)
-		thresholds = memcg->thresholds;
+		thresholds = &memcg->thresholds;
 	else if (type == _MEMSWAP)
-		thresholds = memcg->memsw_thresholds;
+		thresholds = &memcg->memsw_thresholds;
 	else
 		BUG();
 
 	usage = mem_cgroup_usage(memcg, type == _MEMSWAP);
 
 	/* Check if a threshold crossed before adding a new one */
-	if (thresholds)
+	if (thresholds->primary)
 		__mem_cgroup_threshold(memcg, type == _MEMSWAP);
 
-	if (thresholds)
-		size = thresholds->size + 1;
-	else
-		size = 1;
+	size = thresholds->primary ? thresholds->primary->size + 1 : 1;
 
 	/* Allocate memory for new array of thresholds */
-	thresholds_new = kmalloc(sizeof(*thresholds_new) +
-			size * sizeof(struct mem_cgroup_threshold),
+	new = kmalloc(sizeof(*new) + size * sizeof(struct mem_cgroup_threshold),
 			GFP_KERNEL);
-	if (!thresholds_new) {
+	if (!new) {
 		ret = -ENOMEM;
 		goto unlock;
 	}
-	thresholds_new->size = size;
+	new->size = size;
 
 	/* Copy thresholds (if any) to new array */
-	if (thresholds)
-		memcpy(thresholds_new->entries, thresholds->entries,
-				thresholds->size *
+	if (thresholds->primary) {
+		memcpy(new->entries, thresholds->primary->entries, (size - 1) *
 				sizeof(struct mem_cgroup_threshold));
+	}
+
 	/* Add new threshold */
-	thresholds_new->entries[size - 1].eventfd = eventfd;
-	thresholds_new->entries[size - 1].threshold = threshold;
+	new->entries[size - 1].eventfd = eventfd;
+	new->entries[size - 1].threshold = threshold;
 
 	/* Sort thresholds. Registering of new threshold isn't time-critical */
-	sort(thresholds_new->entries, size,
-			sizeof(struct mem_cgroup_threshold),
+	sort(new->entries, size, sizeof(struct mem_cgroup_threshold),
 			compare_thresholds, NULL);
 
 	/* Find current threshold */
-	atomic_set(&thresholds_new->current_threshold, -1);
+	new->current_threshold = -1;
 	for (i = 0; i < size; i++) {
-		if (thresholds_new->entries[i].threshold < usage) {
+		if (new->entries[i].threshold < usage) {
 			/*
-			 * thresholds_new->current_threshold will not be used
-			 * until rcu_assign_pointer(), so it's safe to increment
+			 * new->current_threshold will not be used until
+			 * rcu_assign_pointer(), so it's safe to increment
 			 * it here.
 			 */
-			atomic_inc(&thresholds_new->current_threshold);
+			++new->current_threshold;
 		}
 	}
 
-	if (type == _MEM)
-		rcu_assign_pointer(memcg->thresholds, thresholds_new);
-	else
-		rcu_assign_pointer(memcg->memsw_thresholds, thresholds_new);
+	/* Free old spare buffer and save old primary buffer as spare */
+	kfree(thresholds->spare);
+	thresholds->spare = thresholds->primary;
 
-	/* To be sure that nobody uses thresholds before freeing it */
+	rcu_assign_pointer(thresholds->primary, new);
+
+	/* To be sure that nobody uses thresholds */
 	synchronize_rcu();
 
-	kfree(thresholds);
 unlock:
 	mutex_unlock(&memcg->thresholds_lock);
 
 	return ret;
 }
 
-static int mem_cgroup_unregister_event(struct cgroup *cgrp, struct cftype *cft,
-		struct eventfd_ctx *eventfd)
+static void mem_cgroup_usage_unregister_event(struct cgroup *cgrp,
+	struct cftype *cft, struct eventfd_ctx *eventfd)
 {
 	struct mem_cgroup *memcg = mem_cgroup_from_cont(cgrp);
-	struct mem_cgroup_threshold_ary *thresholds, *thresholds_new;
+	struct mem_cgroup_thresholds *thresholds;
+	struct mem_cgroup_threshold_ary *new;
 	int type = MEMFILE_TYPE(cft->private);
 	u64 usage;
-	int size = 0;
-	int i, j, ret;
+	int i, j, size;
 
 	mutex_lock(&memcg->thresholds_lock);
 	if (type == _MEM)
-		thresholds = memcg->thresholds;
+		thresholds = &memcg->thresholds;
 	else if (type == _MEMSWAP)
-		thresholds = memcg->memsw_thresholds;
+		thresholds = &memcg->memsw_thresholds;
 	else
 		BUG();
 
@@ -3484,59 +3653,136 @@
 	__mem_cgroup_threshold(memcg, type == _MEMSWAP);
 
 	/* Calculate new number of threshold */
-	for (i = 0; i < thresholds->size; i++) {
-		if (thresholds->entries[i].eventfd != eventfd)
+	size = 0;
+	for (i = 0; i < thresholds->primary->size; i++) {
+		if (thresholds->primary->entries[i].eventfd != eventfd)
 			size++;
 	}
 
+	new = thresholds->spare;
+
 	/* Set thresholds array to NULL if we don't have thresholds */
 	if (!size) {
-		thresholds_new = NULL;
-		goto assign;
+		kfree(new);
+		new = NULL;
+		goto swap_buffers;
 	}
 
-	/* Allocate memory for new array of thresholds */
-	thresholds_new = kmalloc(sizeof(*thresholds_new) +
-			size * sizeof(struct mem_cgroup_threshold),
-			GFP_KERNEL);
-	if (!thresholds_new) {
-		ret = -ENOMEM;
-		goto unlock;
-	}
-	thresholds_new->size = size;
+	new->size = size;
 
 	/* Copy thresholds and find current threshold */
-	atomic_set(&thresholds_new->current_threshold, -1);
-	for (i = 0, j = 0; i < thresholds->size; i++) {
-		if (thresholds->entries[i].eventfd == eventfd)
+	new->current_threshold = -1;
+	for (i = 0, j = 0; i < thresholds->primary->size; i++) {
+		if (thresholds->primary->entries[i].eventfd == eventfd)
 			continue;
 
-		thresholds_new->entries[j] = thresholds->entries[i];
-		if (thresholds_new->entries[j].threshold < usage) {
+		new->entries[j] = thresholds->primary->entries[i];
+		if (new->entries[j].threshold < usage) {
 			/*
-			 * thresholds_new->current_threshold will not be used
+			 * new->current_threshold will not be used
 			 * until rcu_assign_pointer(), so it's safe to increment
 			 * it here.
 			 */
-			atomic_inc(&thresholds_new->current_threshold);
+			++new->current_threshold;
 		}
 		j++;
 	}
 
-assign:
-	if (type == _MEM)
-		rcu_assign_pointer(memcg->thresholds, thresholds_new);
-	else
-		rcu_assign_pointer(memcg->memsw_thresholds, thresholds_new);
+swap_buffers:
+	/* Swap primary and spare array */
+	thresholds->spare = thresholds->primary;
+	rcu_assign_pointer(thresholds->primary, new);
 
-	/* To be sure that nobody uses thresholds before freeing it */
+	/* To be sure that nobody uses thresholds */
 	synchronize_rcu();
 
-	kfree(thresholds);
-unlock:
 	mutex_unlock(&memcg->thresholds_lock);
+}
 
-	return ret;
+static int mem_cgroup_oom_register_event(struct cgroup *cgrp,
+	struct cftype *cft, struct eventfd_ctx *eventfd, const char *args)
+{
+	struct mem_cgroup *memcg = mem_cgroup_from_cont(cgrp);
+	struct mem_cgroup_eventfd_list *event;
+	int type = MEMFILE_TYPE(cft->private);
+
+	BUG_ON(type != _OOM_TYPE);
+	event = kmalloc(sizeof(*event),	GFP_KERNEL);
+	if (!event)
+		return -ENOMEM;
+
+	mutex_lock(&memcg_oom_mutex);
+
+	event->eventfd = eventfd;
+	list_add(&event->list, &memcg->oom_notify);
+
+	/* already in OOM ? */
+	if (atomic_read(&memcg->oom_lock))
+		eventfd_signal(eventfd, 1);
+	mutex_unlock(&memcg_oom_mutex);
+
+	return 0;
+}
+
+static void mem_cgroup_oom_unregister_event(struct cgroup *cgrp,
+	struct cftype *cft, struct eventfd_ctx *eventfd)
+{
+	struct mem_cgroup *mem = mem_cgroup_from_cont(cgrp);
+	struct mem_cgroup_eventfd_list *ev, *tmp;
+	int type = MEMFILE_TYPE(cft->private);
+
+	BUG_ON(type != _OOM_TYPE);
+
+	mutex_lock(&memcg_oom_mutex);
+
+	list_for_each_entry_safe(ev, tmp, &mem->oom_notify, list) {
+		if (ev->eventfd == eventfd) {
+			list_del(&ev->list);
+			kfree(ev);
+		}
+	}
+
+	mutex_unlock(&memcg_oom_mutex);
+}
+
+static int mem_cgroup_oom_control_read(struct cgroup *cgrp,
+	struct cftype *cft,  struct cgroup_map_cb *cb)
+{
+	struct mem_cgroup *mem = mem_cgroup_from_cont(cgrp);
+
+	cb->fill(cb, "oom_kill_disable", mem->oom_kill_disable);
+
+	if (atomic_read(&mem->oom_lock))
+		cb->fill(cb, "under_oom", 1);
+	else
+		cb->fill(cb, "under_oom", 0);
+	return 0;
+}
+
+/*
+ * oom_control write: 0 re-enables the OOM killer for this memcg, 1 disables it.
+ */
+static int mem_cgroup_oom_control_write(struct cgroup *cgrp,
+	struct cftype *cft, u64 val)
+{
+	struct mem_cgroup *mem = mem_cgroup_from_cont(cgrp);
+	struct mem_cgroup *parent;
+
+	/* the root cgroup cannot be changed; only values 0 and 1 are allowed */
+	if (!cgrp->parent || !((val == 0) || (val == 1)))
+		return -EINVAL;
+
+	parent = mem_cgroup_from_cont(cgrp->parent);
+
+	cgroup_lock();
+	/* oom_kill_disable is inherited, so refuse to change it inside a hierarchy */
+	if ((parent->use_hierarchy) ||
+	    (mem->use_hierarchy && !list_empty(&cgrp->children))) {
+		cgroup_unlock();
+		return -EINVAL;
+	}
+	mem->oom_kill_disable = val;
+	cgroup_unlock();
+	return 0;
 }
 
 static struct cftype mem_cgroup_files[] = {
@@ -3544,8 +3790,8 @@
 		.name = "usage_in_bytes",
 		.private = MEMFILE_PRIVATE(_MEM, RES_USAGE),
 		.read_u64 = mem_cgroup_read,
-		.register_event = mem_cgroup_register_event,
-		.unregister_event = mem_cgroup_unregister_event,
+		.register_event = mem_cgroup_usage_register_event,
+		.unregister_event = mem_cgroup_usage_unregister_event,
 	},
 	{
 		.name = "max_usage_in_bytes",
@@ -3594,6 +3840,14 @@
 		.read_u64 = mem_cgroup_move_charge_read,
 		.write_u64 = mem_cgroup_move_charge_write,
 	},
+	{
+		.name = "oom_control",
+		.read_map = mem_cgroup_oom_control_read,
+		.write_u64 = mem_cgroup_oom_control_write,
+		.register_event = mem_cgroup_oom_register_event,
+		.unregister_event = mem_cgroup_oom_unregister_event,
+		.private = MEMFILE_PRIVATE(_OOM_TYPE, OOM_CONTROL),
+	},
 };
 
 #ifdef CONFIG_CGROUP_MEM_RES_CTLR_SWAP
@@ -3602,8 +3856,8 @@
 		.name = "memsw.usage_in_bytes",
 		.private = MEMFILE_PRIVATE(_MEMSWAP, RES_USAGE),
 		.read_u64 = mem_cgroup_read,
-		.register_event = mem_cgroup_register_event,
-		.unregister_event = mem_cgroup_unregister_event,
+		.register_event = mem_cgroup_usage_register_event,
+		.unregister_event = mem_cgroup_usage_unregister_event,
 	},
 	{
 		.name = "memsw.max_usage_in_bytes",
@@ -3831,6 +4085,7 @@
 	} else {
 		parent = mem_cgroup_from_cont(cont->parent);
 		mem->use_hierarchy = parent->use_hierarchy;
+		mem->oom_kill_disable = parent->oom_kill_disable;
 	}
 
 	if (parent && parent->use_hierarchy) {
@@ -3849,6 +4104,7 @@
 	}
 	mem->last_scanned_child = 0;
 	spin_lock_init(&mem->reclaim_param_lock);
+	INIT_LIST_HEAD(&mem->oom_notify);
 
 	if (parent)
 		mem->swappiness = get_swappiness(parent);
@@ -3976,6 +4232,80 @@
 	MC_TARGET_SWAP,
 };
 
+static struct page *mc_handle_present_pte(struct vm_area_struct *vma,
+						unsigned long addr, pte_t ptent)
+{
+	struct page *page = vm_normal_page(vma, addr, ptent);
+
+	if (!page || !page_mapped(page))
+		return NULL;
+	if (PageAnon(page)) {
+		/* we don't move shared anon */
+		if (!move_anon() || page_mapcount(page) > 1)
+			return NULL;
+	} else if (!move_file())
+		/* we ignore mapcount for file pages */
+		return NULL;
+	if (!get_page_unless_zero(page))
+		return NULL;
+
+	return page;
+}
+
+static struct page *mc_handle_swap_pte(struct vm_area_struct *vma,
+			unsigned long addr, pte_t ptent, swp_entry_t *entry)
+{
+	int usage_count;
+	struct page *page = NULL;
+	swp_entry_t ent = pte_to_swp_entry(ptent);
+
+	if (!move_anon() || non_swap_entry(ent))
+		return NULL;
+	usage_count = mem_cgroup_count_swap_user(ent, &page);
+	if (usage_count > 1) { /* we don't move shared anon */
+		if (page)
+			put_page(page);
+		return NULL;
+	}
+	if (do_swap_account)
+		entry->val = ent.val;
+
+	return page;
+}
+
+static struct page *mc_handle_file_pte(struct vm_area_struct *vma,
+			unsigned long addr, pte_t ptent, swp_entry_t *entry)
+{
+	struct page *page = NULL;
+	struct inode *inode;
+	struct address_space *mapping;
+	pgoff_t pgoff;
+
+	if (!vma->vm_file) /* anonymous vma */
+		return NULL;
+	if (!move_file())
+		return NULL;
+
+	inode = vma->vm_file->f_path.dentry->d_inode;
+	mapping = vma->vm_file->f_mapping;
+	if (pte_none(ptent))
+		pgoff = linear_page_index(vma, addr);
+	else /* pte_file(ptent) is true */
+		pgoff = pte_to_pgoff(ptent);
+
+	/* the page is moved even if it's not in this task's RSS (not faulted in yet) */
+	if (!mapping_cap_swap_backed(mapping)) { /* normal file */
+		page = find_get_page(mapping, pgoff);
+	} else { /* shmem/tmpfs file. we should take account of swap too. */
+		swp_entry_t ent;
+		mem_cgroup_get_shmem_target(inode, pgoff, &page, &ent);
+		if (do_swap_account)
+			entry->val = ent.val;
+	}
+
+	return page;
+}
+
 static int is_target_pte_for_mc(struct vm_area_struct *vma,
 		unsigned long addr, pte_t ptent, union mc_target *target)
 {
@@ -3983,43 +4313,16 @@
 	struct page_cgroup *pc;
 	int ret = 0;
 	swp_entry_t ent = { .val = 0 };
-	int usage_count = 0;
-	bool move_anon = test_bit(MOVE_CHARGE_TYPE_ANON,
-					&mc.to->move_charge_at_immigrate);
 
-	if (!pte_present(ptent)) {
-		/* TODO: handle swap of shmes/tmpfs */
-		if (pte_none(ptent) || pte_file(ptent))
-			return 0;
-		else if (is_swap_pte(ptent)) {
-			ent = pte_to_swp_entry(ptent);
-			if (!move_anon || non_swap_entry(ent))
-				return 0;
-			usage_count = mem_cgroup_count_swap_user(ent, &page);
-		}
-	} else {
-		page = vm_normal_page(vma, addr, ptent);
-		if (!page || !page_mapped(page))
-			return 0;
-		/*
-		 * TODO: We don't move charges of file(including shmem/tmpfs)
-		 * pages for now.
-		 */
-		if (!move_anon || !PageAnon(page))
-			return 0;
-		if (!get_page_unless_zero(page))
-			return 0;
-		usage_count = page_mapcount(page);
-	}
-	if (usage_count > 1) {
-		/*
-		 * TODO: We don't move charges of shared(used by multiple
-		 * processes) pages for now.
-		 */
-		if (page)
-			put_page(page);
+	if (pte_present(ptent))
+		page = mc_handle_present_pte(vma, addr, ptent);
+	else if (is_swap_pte(ptent))
+		page = mc_handle_swap_pte(vma, addr, ptent, &ent);
+	else if (pte_none(ptent) || pte_file(ptent))
+		page = mc_handle_file_pte(vma, addr, ptent, &ent);
+
+	if (!page && !ent.val)
 		return 0;
-	}
 	if (page) {
 		pc = lookup_page_cgroup(page);
 		/*
@@ -4035,8 +4338,8 @@
 		if (!ret || !target)
 			put_page(page);
 	}
-	/* throught */
-	if (ent.val && do_swap_account && !ret &&
+	/* There is a swap entry, but either no page or the page isn't charged */
+	if (ent.val && !ret &&
 			css_id(&mc.from->css) == lookup_swap_cgroup(ent)) {
 		ret = MC_TARGET_SWAP;
 		if (target)
@@ -4077,9 +4380,6 @@
 		};
 		if (is_vm_hugetlb_page(vma))
 			continue;
-		/* TODO: We don't move charges of shmem/tmpfs pages for now. */
-		if (vma->vm_flags & VM_SHARED)
-			continue;
 		walk_page_range(vma->vm_start, vma->vm_end,
 					&mem_cgroup_count_precharge_walk);
 	}
@@ -4102,6 +4402,7 @@
 	if (mc.precharge) {
 		__mem_cgroup_cancel_charge(mc.to, mc.precharge);
 		mc.precharge = 0;
+		memcg_oom_recover(mc.to);
 	}
 	/*
 	 * we didn't uncharge from mc.from at mem_cgroup_move_account(), so
@@ -4110,6 +4411,7 @@
 	if (mc.moved_charge) {
 		__mem_cgroup_cancel_charge(mc.from, mc.moved_charge);
 		mc.moved_charge = 0;
+		memcg_oom_recover(mc.from);
 	}
 	/* we must fixup refcnts and charges */
 	if (mc.moved_swap) {
@@ -4274,9 +4576,6 @@
 		};
 		if (is_vm_hugetlb_page(vma))
 			continue;
-		/* TODO: We don't move charges of shmem/tmpfs pages for now. */
-		if (vma->vm_flags & VM_SHARED)
-			continue;
 		ret = walk_page_range(vma->vm_start, vma->vm_end,
 						&mem_cgroup_move_charge_walk);
 		if (ret)
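
The threshold rework above keeps two arrays, primary and spare, so that unregistering an eventfd never has to allocate memory and therefore can never fail. A minimal user-space sketch of the same double-buffer-and-publish idea, using a C11 release store as a stand-in for rcu_assign_pointer() (the struct and function names here are illustrative, not the kernel's):

	#include <stdatomic.h>
	#include <stdlib.h>

	struct array {
		size_t size;
		int entries[];		/* flexible array of thresholds */
	};

	struct thresholds {
		_Atomic(struct array *) primary;	/* readers dereference this */
		struct array *spare;			/* writers rebuild into this */
	};

	/*
	 * Remove all entries equal to 'victim'.  The spare buffer was sized
	 * by the last (allocating) register operation, and removal only
	 * shrinks the array, so this path never allocates and never fails.
	 */
	static void remove_entries(struct thresholds *t, int victim)
	{
		struct array *old = atomic_load(&t->primary);
		struct array *new = t->spare;
		size_t i, j = 0;

		for (i = 0; i < old->size; i++)
			if (old->entries[i] != victim)
				new->entries[j++] = old->entries[i];
		new->size = j;

		/* Publish; the release store mimics rcu_assign_pointer(). */
		t->spare = old;
		atomic_store_explicit(&t->primary, new, memory_order_release);
		/* The kernel then synchronize_rcu()s before reusing t->spare. */
	}
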
diff --git a/mm/migrate.c b/mm/migrate.c
index 09e2471..4205b1d 100644
--- a/mm/migrate.c
+++ b/mm/migrate.c
@@ -590,7 +590,7 @@
 	}
 
 	/* charge against new page */
-	charge = mem_cgroup_prepare_migration(page, &mem);
+	charge = mem_cgroup_prepare_migration(page, newpage, &mem);
 	if (charge == -ENOMEM) {
 		rc = -ENOMEM;
 		goto unlock;
diff --git a/mm/oom_kill.c b/mm/oom_kill.c
index b68e802..709aedf 100644
--- a/mm/oom_kill.c
+++ b/mm/oom_kill.c
@@ -479,12 +479,9 @@
 	read_lock(&tasklist_lock);
 retry:
 	p = select_bad_process(&points, mem);
-	if (PTR_ERR(p) == -1UL)
+	if (!p || PTR_ERR(p) == -1UL)
 		goto out;
 
-	if (!p)
-		p = current;
-
 	if (oom_kill_process(p, gfp_mask, 0, points, mem,
 				"Memory cgroup out of memory"))
 		goto retry;
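
select_bad_process() reports "no eligible task" as NULL and "a victim is already exiting" as ERR_PTR(-1UL); the change above treats both as reasons to bail out rather than falling back to OOM-killing current. For reference, the pointer-encoded error convention used here boils down to the following (a simplified restatement of the <linux/err.h> helpers):

	#include <stdbool.h>

	#define MAX_ERRNO	4095

	/*
	 * Errno values occupy the last page of the address space, which can
	 * never be a valid pointer, so a single pointer can carry either a
	 * real address or a small negative error code.
	 */
	static inline void *ERR_PTR(long error)
	{
		return (void *)error;
	}

	static inline long PTR_ERR(const void *ptr)
	{
		return (long)ptr;
	}

	static inline bool IS_ERR(const void *ptr)
	{
		return (unsigned long)ptr >= (unsigned long)-MAX_ERRNO;
	}
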
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 08b3499..431214b 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -57,6 +57,22 @@
 #include <asm/div64.h>
 #include "internal.h"
 
+#ifdef CONFIG_USE_PERCPU_NUMA_NODE_ID
+DEFINE_PER_CPU(int, numa_node);
+EXPORT_PER_CPU_SYMBOL(numa_node);
+#endif
+
+#ifdef CONFIG_HAVE_MEMORYLESS_NODES
+/*
+ * N.B., Do NOT reference the '_numa_mem_' per cpu variable directly.
+ * It will not be defined when CONFIG_HAVE_MEMORYLESS_NODES is not defined.
+ * Use the accessor functions set_numa_mem(), numa_mem_id() and cpu_to_mem()
+ * defined in <linux/topology.h>.
+ */
+DEFINE_PER_CPU(int, _numa_mem_);		/* Kernel "local memory" node */
+EXPORT_PER_CPU_SYMBOL(_numa_mem_);
+#endif
+
 /*
  * Array of node states.
  */
@@ -2856,6 +2872,24 @@
 		zlc->z_to_n[z - zonelist->_zonerefs] = zonelist_node_idx(z);
 }
 
+#ifdef CONFIG_HAVE_MEMORYLESS_NODES
+/*
+ * Return the node id of the node used for "local" allocations,
+ * i.e. the node of the first zone in the argument node's generic zonelist.
+ * Used for initializing percpu 'numa_mem', which is used primarily
+ * for kernel allocations, so use GFP_KERNEL flags to locate zonelist.
+ */
+int local_memory_node(int node)
+{
+	struct zone *zone;
+
+	(void)first_zones_zonelist(node_zonelist(node, GFP_KERNEL),
+				   gfp_zone(GFP_KERNEL),
+				   NULL,
+				   &zone);
+	return zone->node;
+}
+#endif
 
 #else	/* CONFIG_NUMA */
 
@@ -2970,9 +3004,23 @@
 	 * needs the percpu allocator in order to allocate its pagesets
 	 * (a chicken-egg dilemma).
 	 */
-	for_each_possible_cpu(cpu)
+	for_each_possible_cpu(cpu) {
 		setup_pageset(&per_cpu(boot_pageset, cpu), 0);
 
+#ifdef CONFIG_HAVE_MEMORYLESS_NODES
+		/*
+		 * We now know the "local memory node" for each node--
+		 * i.e., the node of the first zone in the generic zonelist.
+		 * Set up numa_mem percpu variable for on-line cpus.  During
+		 * boot, only the boot cpu should be on-line;  we'll init the
+		 * secondary cpus' numa_mem as they come on-line.  During
+		 * node/memory hotplug, we'll fixup all on-line cpus.
+		 */
+		if (cpu_online(cpu))
+			set_cpu_numa_mem(cpu, local_memory_node(cpu_to_node(cpu)));
+#endif
+	}
+
 	return 0;
 }
 
diff --git a/mm/shmem.c b/mm/shmem.c
index 4ef9797..855eaf5 100644
--- a/mm/shmem.c
+++ b/mm/shmem.c
@@ -2559,6 +2559,45 @@
 	return error;
 }
 
+#ifdef CONFIG_CGROUP_MEM_RES_CTLR
+/**
+ * mem_cgroup_get_shmem_target - find a page or entry assigned to the shmem file
+ * @inode: the inode to be searched
+ * @pgoff: the offset to be searched
+ * @pagep: the pointer for the found page to be stored
+ * @ent: the pointer for the found swap entry to be stored
+ *
+ * If a page is found, its refcount is incremented. Callers are responsible
+ * for dropping that reference.
+ */
+void mem_cgroup_get_shmem_target(struct inode *inode, pgoff_t pgoff,
+					struct page **pagep, swp_entry_t *ent)
+{
+	swp_entry_t entry = { .val = 0 }, *ptr;
+	struct page *page = NULL;
+	struct shmem_inode_info *info = SHMEM_I(inode);
+
+	if ((pgoff << PAGE_CACHE_SHIFT) >= i_size_read(inode))
+		goto out;
+
+	spin_lock(&info->lock);
+	ptr = shmem_swp_entry(info, pgoff, NULL);
+#ifdef CONFIG_SWAP
+	if (ptr && ptr->val) {
+		entry.val = ptr->val;
+		page = find_get_page(&swapper_space, entry.val);
+	} else
+#endif
+		page = find_get_page(inode->i_mapping, pgoff);
+	if (ptr)
+		shmem_swp_unmap(ptr);
+	spin_unlock(&info->lock);
+out:
+	*pagep = page;
+	*ent = entry;
+}
+#endif
+
 #else /* !CONFIG_SHMEM */
 
 /*
@@ -2598,6 +2637,31 @@
 	return 0;
 }
 
+#ifdef CONFIG_CGROUP_MEM_RES_CTLR
+/**
+ * mem_cgroup_get_shmem_target - find a page or entry assigned to the shmem file
+ * @inode: the inode to be searched
+ * @pgoff: the offset to be searched
+ * @pagep: the pointer for the found page to be stored
+ * @ent: the pointer for the found swap entry to be stored
+ *
+ * If a page is found, its refcount is incremented. Callers are responsible
+ * for dropping that reference.
+ */
+void mem_cgroup_get_shmem_target(struct inode *inode, pgoff_t pgoff,
+					struct page **pagep, swp_entry_t *ent)
+{
+	struct page *page = NULL;
+
+	if ((pgoff << PAGE_CACHE_SHIFT) >= i_size_read(inode))
+		goto out;
+	page = find_get_page(inode->i_mapping, pgoff);
+out:
+	*pagep = page;
+	*ent = (swp_entry_t){ .val = 0 };
+}
+#endif
+
 #define shmem_vm_ops				generic_file_vm_ops
 #define shmem_file_operations			ramfs_file_operations
 #define shmem_get_inode(sb, dir, mode, dev, flags)	ramfs_get_inode(sb, dir, mode, dev)
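
The contract of mem_cgroup_get_shmem_target() is worth restating: the caller passes two out parameters and may get back a referenced page, a swap entry, or both (when the page also sits in the swap cache), and it owns any page reference returned. A toy restatement of that contract with made-up backing arrays, not the shmem internals:

	struct page { int refcount; };
	typedef struct { unsigned long val; } swp_entry_t;

	#define NR_PAGES 4

	/* toy backing store: an offset is swapped out if swap_slot[] != 0 */
	static struct page page_cache[NR_PAGES];
	static unsigned long swap_slot[NR_PAGES] = { 0, 7, 0, 9 };

	static void get_target(unsigned long pgoff,
			       struct page **pagep, swp_entry_t *ent)
	{
		struct page *page = NULL;
		swp_entry_t entry = { .val = 0 };

		if (pgoff < NR_PAGES) {
			entry.val = swap_slot[pgoff];
			if (!entry.val) {
				page = &page_cache[pgoff];
				page->refcount++;	/* caller drops this ref */
			}
		}
		*pagep = page;	/* at most one of page / entry.val is set here; */
		*ent = entry;	/* shmem can legitimately return both (swap cache) */
	}
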
diff --git a/mm/slab.c b/mm/slab.c
index 02786e1..e49f8f4 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -821,7 +821,7 @@
 {
 	int node;
 
-	node = next_node(cpu_to_node(cpu), node_online_map);
+	node = next_node(cpu_to_mem(cpu), node_online_map);
 	if (node == MAX_NUMNODES)
 		node = first_node(node_online_map);
 
@@ -1050,7 +1050,7 @@
 	struct array_cache *alien = NULL;
 	int node;
 
-	node = numa_node_id();
+	node = numa_mem_id();
 
 	/*
 	 * Make sure we are not freeing an object from another node to the array
@@ -1129,7 +1129,7 @@
 {
 	struct kmem_cache *cachep;
 	struct kmem_list3 *l3 = NULL;
-	int node = cpu_to_node(cpu);
+	int node = cpu_to_mem(cpu);
 	const struct cpumask *mask = cpumask_of_node(node);
 
 	list_for_each_entry(cachep, &cache_chain, next) {
@@ -1194,7 +1194,7 @@
 {
 	struct kmem_cache *cachep;
 	struct kmem_list3 *l3 = NULL;
-	int node = cpu_to_node(cpu);
+	int node = cpu_to_mem(cpu);
 	int err;
 
 	/*
@@ -1321,7 +1321,7 @@
 		mutex_unlock(&cache_chain_mutex);
 		break;
 	}
-	return err ? NOTIFY_BAD : NOTIFY_OK;
+	return notifier_from_errno(err);
 }
 
 static struct notifier_block __cpuinitdata cpucache_notifier = {
@@ -1479,7 +1479,7 @@
 	 * 6) Resize the head arrays of the kmalloc caches to their final sizes.
 	 */
 
-	node = numa_node_id();
+	node = numa_mem_id();
 
 	/* 1) create the cache_cache */
 	INIT_LIST_HEAD(&cache_chain);
@@ -2121,7 +2121,7 @@
 			}
 		}
 	}
-	cachep->nodelists[numa_node_id()]->next_reap =
+	cachep->nodelists[numa_mem_id()]->next_reap =
 			jiffies + REAPTIMEOUT_LIST3 +
 			((unsigned long)cachep) % REAPTIMEOUT_LIST3;
 
@@ -2452,7 +2452,7 @@
 {
 #ifdef CONFIG_SMP
 	check_irq_off();
-	assert_spin_locked(&cachep->nodelists[numa_node_id()]->list_lock);
+	assert_spin_locked(&cachep->nodelists[numa_mem_id()]->list_lock);
 #endif
 }
 
@@ -2479,7 +2479,7 @@
 {
 	struct kmem_cache *cachep = arg;
 	struct array_cache *ac;
-	int node = numa_node_id();
+	int node = numa_mem_id();
 
 	check_irq_off();
 	ac = cpu_cache_get(cachep);
@@ -3012,7 +3012,7 @@
 
 retry:
 	check_irq_off();
-	node = numa_node_id();
+	node = numa_mem_id();
 	ac = cpu_cache_get(cachep);
 	batchcount = ac->batchcount;
 	if (!ac->touched && batchcount > BATCHREFILL_LIMIT) {
@@ -3216,10 +3216,10 @@
 
 	if (in_interrupt() || (flags & __GFP_THISNODE))
 		return NULL;
-	nid_alloc = nid_here = numa_node_id();
+	nid_alloc = nid_here = numa_mem_id();
 	get_mems_allowed();
 	if (cpuset_do_slab_mem_spread() && (cachep->flags & SLAB_MEM_SPREAD))
-		nid_alloc = cpuset_mem_spread_node();
+		nid_alloc = cpuset_slab_spread_node();
 	else if (current->mempolicy)
 		nid_alloc = slab_node(current->mempolicy);
 	put_mems_allowed();
@@ -3281,7 +3281,7 @@
 		if (local_flags & __GFP_WAIT)
 			local_irq_enable();
 		kmem_flagcheck(cache, flags);
-		obj = kmem_getpages(cache, local_flags, numa_node_id());
+		obj = kmem_getpages(cache, local_flags, numa_mem_id());
 		if (local_flags & __GFP_WAIT)
 			local_irq_disable();
 		if (obj) {
@@ -3389,6 +3389,7 @@
 {
 	unsigned long save_flags;
 	void *ptr;
+	int slab_node = numa_mem_id();
 
 	flags &= gfp_allowed_mask;
 
@@ -3401,7 +3402,7 @@
 	local_irq_save(save_flags);
 
 	if (nodeid == -1)
-		nodeid = numa_node_id();
+		nodeid = slab_node;
 
 	if (unlikely(!cachep->nodelists[nodeid])) {
 		/* Node not bootstrapped yet */
@@ -3409,7 +3410,7 @@
 		goto out;
 	}
 
-	if (nodeid == numa_node_id()) {
+	if (nodeid == slab_node) {
 		/*
 		 * Use the locally cached objects if possible.
 		 * However ____cache_alloc does not allow fallback
@@ -3453,8 +3454,8 @@
 	 * We may just have run out of memory on the local node.
 	 * ____cache_alloc_node() knows how to locate memory on other nodes
 	 */
- 	if (!objp)
- 		objp = ____cache_alloc_node(cache, flags, numa_node_id());
+	if (!objp)
+		objp = ____cache_alloc_node(cache, flags, numa_mem_id());
 
   out:
 	return objp;
@@ -3551,7 +3552,7 @@
 {
 	int batchcount;
 	struct kmem_list3 *l3;
-	int node = numa_node_id();
+	int node = numa_mem_id();
 
 	batchcount = ac->batchcount;
 #if DEBUG
@@ -3985,7 +3986,7 @@
 		return -ENOMEM;
 
 	for_each_online_cpu(i) {
-		new->new[i] = alloc_arraycache(cpu_to_node(i), limit,
+		new->new[i] = alloc_arraycache(cpu_to_mem(i), limit,
 						batchcount, gfp);
 		if (!new->new[i]) {
 			for (i--; i >= 0; i--)
@@ -4007,9 +4008,9 @@
 		struct array_cache *ccold = new->new[i];
 		if (!ccold)
 			continue;
-		spin_lock_irq(&cachep->nodelists[cpu_to_node(i)]->list_lock);
-		free_block(cachep, ccold->entry, ccold->avail, cpu_to_node(i));
-		spin_unlock_irq(&cachep->nodelists[cpu_to_node(i)]->list_lock);
+		spin_lock_irq(&cachep->nodelists[cpu_to_mem(i)]->list_lock);
+		free_block(cachep, ccold->entry, ccold->avail, cpu_to_mem(i));
+		spin_unlock_irq(&cachep->nodelists[cpu_to_mem(i)]->list_lock);
 		kfree(ccold);
 	}
 	kfree(new);
@@ -4115,7 +4116,7 @@
 {
 	struct kmem_cache *searchp;
 	struct kmem_list3 *l3;
-	int node = numa_node_id();
+	int node = numa_mem_id();
 	struct delayed_work *work = to_delayed_work(w);
 
 	if (!mutex_trylock(&cache_chain_mutex))
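
The `return err ? NOTIFY_BAD : NOTIFY_OK;` conversion above (and the identical ones in net/iucv below) switches to notifier_from_errno(), which preserves the errno inside the notifier return value instead of collapsing every failure to NOTIFY_BAD. The encoding, restated from <linux/notifier.h> (values copied from that era's header; treat as illustrative):

	#define NOTIFY_DONE		0x0000
	#define NOTIFY_OK		0x0001
	#define NOTIFY_STOP_MASK	0x8000
	#define NOTIFY_BAD		(NOTIFY_STOP_MASK | 0x0002)

	static inline int notifier_from_errno(int err)
	{
		if (err)
			return NOTIFY_STOP_MASK | (NOTIFY_OK - err);
		return NOTIFY_OK;
	}

	/* recover the (negative) errno on the caller's side */
	static inline int notifier_to_errno(int ret)
	{
		ret &= ~NOTIFY_STOP_MASK;
		return ret > NOTIFY_OK ? NOTIFY_OK - ret : 0;
	}

With this, notifier_from_errno(-ENOMEM) round-trips back to -ENOMEM via notifier_to_errno() while still carrying NOTIFY_STOP_MASK, so the notifier chain stops exactly as it did with NOTIFY_BAD.
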
diff --git a/net/iucv/iucv.c b/net/iucv/iucv.c
index fd8b283..f28ad2cc 100644
--- a/net/iucv/iucv.c
+++ b/net/iucv/iucv.c
@@ -632,13 +632,14 @@
 		iucv_irq_data[cpu] = kmalloc_node(sizeof(struct iucv_irq_data),
 					GFP_KERNEL|GFP_DMA, cpu_to_node(cpu));
 		if (!iucv_irq_data[cpu])
-			return NOTIFY_BAD;
+			return notifier_from_errno(-ENOMEM);
+
 		iucv_param[cpu] = kmalloc_node(sizeof(union iucv_param),
 				     GFP_KERNEL|GFP_DMA, cpu_to_node(cpu));
 		if (!iucv_param[cpu]) {
 			kfree(iucv_irq_data[cpu]);
 			iucv_irq_data[cpu] = NULL;
-			return NOTIFY_BAD;
+			return notifier_from_errno(-ENOMEM);
 		}
 		iucv_param_irq[cpu] = kmalloc_node(sizeof(union iucv_param),
 					GFP_KERNEL|GFP_DMA, cpu_to_node(cpu));
@@ -647,7 +648,7 @@
 			iucv_param[cpu] = NULL;
 			kfree(iucv_irq_data[cpu]);
 			iucv_irq_data[cpu] = NULL;
-			return NOTIFY_BAD;
+			return notifier_from_errno(-ENOMEM);
 		}
 		break;
 	case CPU_UP_CANCELED:
@@ -677,7 +678,7 @@
 		cpu_clear(cpu, cpumask);
 		if (cpus_empty(cpumask))
 			/* Can't offline last IUCV enabled cpu. */
-			return NOTIFY_BAD;
+			return notifier_from_errno(-EINVAL);
 		smp_call_function_single(cpu, iucv_retrieve_cpu, NULL, 1);
 		if (cpus_empty(iucv_irq_cpumask))
 			smp_call_function_single(first_cpu(iucv_buffer_cpumask),
diff --git a/net/sunrpc/xprtsock.c b/net/sunrpc/xprtsock.c
index b7cd8cc..2a96751 100644
--- a/net/sunrpc/xprtsock.c
+++ b/net/sunrpc/xprtsock.c
@@ -2293,6 +2293,7 @@
 	struct sockaddr *addr = args->dstaddr;
 	struct rpc_xprt *xprt;
 	struct sock_xprt *transport;
+	struct rpc_xprt *ret;
 
 	xprt = xs_setup_xprt(args, xprt_udp_slot_table_entries);
 	if (IS_ERR(xprt))
@@ -2330,8 +2331,8 @@
 		xs_format_peer_addresses(xprt, "udp", RPCBIND_NETID_UDP6);
 		break;
 	default:
-		kfree(xprt);
-		return ERR_PTR(-EAFNOSUPPORT);
+		ret = ERR_PTR(-EAFNOSUPPORT);
+		goto out_err;
 	}
 
 	if (xprt_bound(xprt))
@@ -2346,10 +2347,11 @@
 
 	if (try_module_get(THIS_MODULE))
 		return xprt;
-
+	ret = ERR_PTR(-EINVAL);
+out_err:
 	kfree(xprt->slot);
 	kfree(xprt);
-	return ERR_PTR(-EINVAL);
+	return ret;
 }
 
 static const struct rpc_timeout xs_tcp_default_timeout = {
@@ -2368,6 +2370,7 @@
 	struct sockaddr *addr = args->dstaddr;
 	struct rpc_xprt *xprt;
 	struct sock_xprt *transport;
+	struct rpc_xprt *ret;
 
 	xprt = xs_setup_xprt(args, xprt_tcp_slot_table_entries);
 	if (IS_ERR(xprt))
@@ -2403,8 +2406,8 @@
 		xs_format_peer_addresses(xprt, "tcp", RPCBIND_NETID_TCP6);
 		break;
 	default:
-		kfree(xprt);
-		return ERR_PTR(-EAFNOSUPPORT);
+		ret = ERR_PTR(-EAFNOSUPPORT);
+		goto out_err;
 	}
 
 	if (xprt_bound(xprt))
@@ -2420,10 +2423,11 @@
 
 	if (try_module_get(THIS_MODULE))
 		return xprt;
-
+	ret = ERR_PTR(-EINVAL);
+out_err:
 	kfree(xprt->slot);
 	kfree(xprt);
-	return ERR_PTR(-EINVAL);
+	return ret;
 }
 
 /**
@@ -2437,6 +2441,7 @@
 	struct rpc_xprt *xprt;
 	struct sock_xprt *transport;
 	struct svc_sock *bc_sock;
+	struct rpc_xprt *ret;
 
 	xprt = xs_setup_xprt(args, xprt_tcp_slot_table_entries);
 	if (IS_ERR(xprt))
@@ -2476,8 +2481,8 @@
 				   RPCBIND_NETID_TCP6);
 		break;
 	default:
-		kfree(xprt);
-		return ERR_PTR(-EAFNOSUPPORT);
+		ret = ERR_PTR(-EAFNOSUPPORT);
+		goto out_err;
 	}
 
 	if (xprt_bound(xprt))
@@ -2499,9 +2504,11 @@
 
 	if (try_module_get(THIS_MODULE))
 		return xprt;
+	ret = ERR_PTR(-EINVAL);
+out_err:
 	kfree(xprt->slot);
 	kfree(xprt);
-	return ERR_PTR(-EINVAL);
+	return ret;
 }
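
All three xs_setup_*() variants above gain the same shape: every failure sets `ret` and jumps to a single `out_err` label that frees both xprt->slot and xprt, fixing paths that previously leaked xprt->slot by calling kfree(xprt) alone. Sketched in miniature (reusing the ERR_PTR() idea shown after the oom_kill.c hunk; names and sizes here are illustrative):

	#include <errno.h>
	#include <stdlib.h>

	#define AF_INET		2
	#define AF_INET6	10

	static inline void *ERR_PTR(long error) { return (void *)error; }

	struct xprt { int *slot; };

	/*
	 * Single-exit error handling: each failure path sets 'ret' and jumps
	 * to one cleanup label, so a future error branch cannot forget to
	 * free xprt->slot - the bug class the patch above removes.
	 */
	static struct xprt *xs_setup_toy(int family)
	{
		struct xprt *xprt;
		struct xprt *ret;

		xprt = calloc(1, sizeof(*xprt));
		if (!xprt)
			return ERR_PTR(-ENOMEM);

		xprt->slot = calloc(16, sizeof(*xprt->slot));
		if (!xprt->slot) {
			ret = ERR_PTR(-ENOMEM);
			goto out_err;
		}

		switch (family) {
		case AF_INET:
		case AF_INET6:
			return xprt;		/* success: caller owns xprt */
		default:
			ret = ERR_PTR(-EAFNOSUPPORT);
			goto out_err;
		}

	out_err:
		free(xprt->slot);
		free(xprt);
		return ret;
	}
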
 
 static struct xprt_class	xs_udp_transport = {
diff --git a/scripts/gen_initramfs_list.sh b/scripts/gen_initramfs_list.sh
index 76af5f9..a932ae5 100644
--- a/scripts/gen_initramfs_list.sh
+++ b/scripts/gen_initramfs_list.sh
@@ -242,6 +242,7 @@
 		echo "$output_file" | grep -q "\.gz$" && compr="gzip -9 -f"
 		echo "$output_file" | grep -q "\.bz2$" && compr="bzip2 -9 -f"
 		echo "$output_file" | grep -q "\.lzma$" && compr="lzma -9 -f"
+		echo "$output_file" | grep -q "\.lzo$" && compr="lzop -9 -f"
 		echo "$output_file" | grep -q "\.cpio$" && compr="cat"
 		shift
 		;;
diff --git a/security/keys/internal.h b/security/keys/internal.h
index 5d4402a..38783dc 100644
--- a/security/keys/internal.h
+++ b/security/keys/internal.h
@@ -124,6 +124,7 @@
 extern int install_user_keyrings(void);
 extern int install_thread_keyring_to_cred(struct cred *);
 extern int install_process_keyring_to_cred(struct cred *);
+extern int install_session_keyring_to_cred(struct cred *, struct key *);
 
 extern struct key *request_key_and_link(struct key_type *type,
 					const char *description,
diff --git a/security/keys/keyctl.c b/security/keys/keyctl.c
index 8f4dce1..13074b4 100644
--- a/security/keys/keyctl.c
+++ b/security/keys/keyctl.c
@@ -1269,7 +1269,7 @@
 		goto not_permitted;
 
 	/* the parent must be single threaded */
-	if (atomic_read(&parent->signal->count) != 1)
+	if (!thread_group_empty(parent))
 		goto not_permitted;
 
 	/* the parent and the child must have different session keyrings or
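
The keyctl change above replaces a racy read of signal->count - a reference count that also covers exiting threads - with thread_group_empty(), which tests what "single threaded" actually means. At this point in kernel history the helper is just a list test, restated here with a minimal list_head so it stands alone:

	struct list_head { struct list_head *next, *prev; };

	static inline int list_empty(const struct list_head *head)
	{
		return head->next == head;
	}

	struct task_struct {
		struct list_head thread_group;	/* links the group's threads */
	};

	/* a task is alone in its thread group iff its group link is empty */
	static inline int thread_group_empty(struct task_struct *p)
	{
		return list_empty(&p->thread_group);
	}
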
diff --git a/security/keys/process_keys.c b/security/keys/process_keys.c
index 20a38fe..6b8e4ff 100644
--- a/security/keys/process_keys.c
+++ b/security/keys/process_keys.c
@@ -216,8 +216,7 @@
 /*
  * install a session keyring directly to a credentials struct
  */
-static int install_session_keyring_to_cred(struct cred *cred,
-					   struct key *keyring)
+int install_session_keyring_to_cred(struct cred *cred, struct key *keyring)
 {
 	unsigned long flags;
 	struct key *old;
diff --git a/security/keys/request_key.c b/security/keys/request_key.c
index f656e9c..f5ec9ac 100644
--- a/security/keys/request_key.c
+++ b/security/keys/request_key.c
@@ -58,6 +58,38 @@
 }
 EXPORT_SYMBOL(complete_request_key);
 
+static int umh_keys_init(struct subprocess_info *info)
+{
+	struct cred *cred = (struct cred *)current_cred();
+	struct key *keyring = info->data;
+	/*
+	 * This is called in the context of a freshly forked kthread,
+	 * before kernel_execve(), so we can just change our ->session_keyring.
+	 */
+	return install_session_keyring_to_cred(cred, keyring);
+}
+
+static void umh_keys_cleanup(struct subprocess_info *info)
+{
+	struct key *keyring = info->data;
+	key_put(keyring);
+}
+
+static int call_usermodehelper_keys(char *path, char **argv, char **envp,
+			 struct key *session_keyring, enum umh_wait wait)
+{
+	gfp_t gfp_mask = (wait == UMH_NO_WAIT) ? GFP_ATOMIC : GFP_KERNEL;
+	struct subprocess_info *info =
+		call_usermodehelper_setup(path, argv, envp, gfp_mask);
+
+	if (!info)
+		return -ENOMEM;
+
+	call_usermodehelper_setfns(info, umh_keys_init, umh_keys_cleanup,
+					key_get(session_keyring));
+	return call_usermodehelper_exec(info, wait);
+}
+
 /*
  * request userspace finish the construction of a key
  * - execute "/sbin/request-key <op> <key> <uid> <gid> <keyring> <keyring> <keyring>"
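
call_usermodehelper_keys() above is rebuilt from the generic setup/setfns/exec primitives: an init callback that runs in the about-to-exec child (installing the session keyring) and a cleanup callback that runs when the request is released (dropping the keyring reference). The shape of that split in a toy user-space analog - the names mirror the kernel's, the behavior does not:

	#include <stdio.h>
	#include <stdlib.h>

	struct subprocess_info {
		const char *path;
		int  (*init)(struct subprocess_info *info);
		void (*cleanup)(struct subprocess_info *info);
		void *data;
	};

	static struct subprocess_info *umh_setup(const char *path)
	{
		struct subprocess_info *info = calloc(1, sizeof(*info));

		if (info)
			info->path = path;
		return info;
	}

	static void umh_setfns(struct subprocess_info *info,
			       int (*init)(struct subprocess_info *),
			       void (*cleanup)(struct subprocess_info *),
			       void *data)
	{
		info->init = init;
		info->cleanup = cleanup;
		info->data = data;
	}

	static int umh_exec(struct subprocess_info *info)
	{
		int ret = 0;

		if (info->init)
			ret = info->init(info);	/* e.g. install session keyring */
		if (!ret)
			printf("would exec %s\n", info->path);
		if (info->cleanup)
			info->cleanup(info);	/* e.g. key_put() the keyring */
		free(info);
		return ret;
	}
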
diff --git a/usr/Makefile b/usr/Makefile
index 1e6a9e4..6b4b6da 100644
--- a/usr/Makefile
+++ b/usr/Makefile
@@ -15,6 +15,9 @@
 # Lzma
 suffix_$(CONFIG_INITRAMFS_COMPRESSION_LZMA)   = .lzma
 
+# Lzo
+suffix_$(CONFIG_INITRAMFS_COMPRESSION_LZO)   = .lzo
+
 # Generate builtin.o based on initramfs_data.o
 obj-$(CONFIG_BLK_DEV_INITRD) := initramfs_data$(suffix_y).o
 
@@ -45,7 +48,7 @@
 quiet_cmd_initfs = GEN     $@
       cmd_initfs = $(initramfs) -o $@ $(ramfs-args) $(ramfs-input)
 
-targets := initramfs_data.cpio.gz initramfs_data.cpio.bz2 initramfs_data.cpio.lzma initramfs_data.cpio
+targets := initramfs_data.cpio.gz initramfs_data.cpio.bz2 initramfs_data.cpio.lzma initramfs_data.cpio.lzo initramfs_data.cpio
 # do not try to update files included in initramfs
 $(deps_initramfs): ;
 
diff --git a/usr/initramfs_data.lzo.S b/usr/initramfs_data.lzo.S
new file mode 100644
index 0000000..5921190
--- /dev/null
+++ b/usr/initramfs_data.lzo.S
@@ -0,0 +1,29 @@
+/*
+  initramfs_data includes the compressed binary that is the
+  filesystem used for early user space.
+  Note: Older versions of "as" (prior to binutils 2.11.90.0.23
+  released on 2001-07-14) did not support .incbin.
+  If you are forced to use binutils older than that, the
+  following trick can be applied to create the resulting binary:
+
+  ld -m elf_i386 --format binary --oformat elf32-i386 -r \
+  -T initramfs_data.scr initramfs_data.cpio.lzo -o initramfs_data.o
+  ld -m elf_i386 -r -o built-in.o initramfs_data.o
+
+  initramfs_data.scr looks like this:
+SECTIONS
+{
+       .init.ramfs : { *(.data) }
+}
+
+  The above example is for i386 - the parameters vary between architectures.
+  If necessary, look up LDFLAGS_BLOB in an older version of the
+  arch/$(ARCH)/Makefile to see the flags used before .incbin was introduced.
+
+  Using .incbin has the advantage over ld that the correct flags are set
+  in the ELF header, as required by certain architectures.
+*/
+
+.section .init.ramfs,"a"
+.incbin "usr/initramfs_data.cpio.lzo"