Merge tag 'v3.6-rc7' into drm-intel-next-queued

Manual backmerge of -rc7 to resolve a silent conflict leading to
compile failure in drivers/gpu/drm/i915/intel_hdmi.c.

This is due to the bugfix in -rc7:

commit b98b60167279df3acac9422c3c9820d9ebbcf9fb
Author: Wang Xingchao <xingchao.wang@intel.com>
Date:   Thu Sep 13 07:43:22 2012 +0800

    drm/i915: HDMI - Clear Audio Enable bit for Hot Plug

Since this code moved around a lot in -next, git put that snippet in
the wrong spot. I've tried to fix this by making the conflict explicit
and merging a version for -next with:

commit 3cce574f0190dd149472059fb69267cf83d290f9
Author: Wang Xingchao <xingchao.wang@intel.com>
Date:   Thu Sep 13 11:19:00 2012 +0800

    drm/i915: HDMI - Clear Audio Enable bit for Hot Plug unconditionally

But that failed to solve the entire problem. To avoid pushing out
another broken -nightly branch to our QA, do the backmerge and manually
apply to -next the missing pieces from the patch in -fixes.

Note that this doesn't show up in git's merge diff (and hence is also
not handled by git rerere), which adds to the reasons why I'd like to
fix this with a verbose backmerge. The git merge diff only shows a
bunch of trivial conflicts of the "code changed in lines next to each
other" kind.

Signed-off-by: Daniel Vetter <daniel.vetter@ffwll.ch>
diff --git a/Documentation/DocBook/drm.tmpl b/Documentation/DocBook/drm.tmpl
index 196b8b9..b030052 100644
--- a/Documentation/DocBook/drm.tmpl
+++ b/Documentation/DocBook/drm.tmpl
@@ -6,11 +6,36 @@
   <bookinfo>
     <title>Linux DRM Developer's Guide</title>
 
+    <authorgroup>
+      <author>
+	<firstname>Jesse</firstname>
+	<surname>Barnes</surname>
+	<contrib>Initial version</contrib>
+	<affiliation>
+	  <orgname>Intel Corporation</orgname>
+	  <address>
+	    <email>jesse.barnes@intel.com</email>
+	  </address>
+	</affiliation>
+      </author>
+      <author>
+	<firstname>Laurent</firstname>
+	<surname>Pinchart</surname>
+	<contrib>Driver internals</contrib>
+	<affiliation>
+	  <orgname>Ideas on board SPRL</orgname>
+	  <address>
+	    <email>laurent.pinchart@ideasonboard.com</email>
+	  </address>
+	</affiliation>
+      </author>
+    </authorgroup>
+
     <copyright>
       <year>2008-2009</year>
-      <holder>
-	Intel Corporation (Jesse Barnes &lt;jesse.barnes@intel.com&gt;)
-      </holder>
+      <year>2012</year>
+      <holder>Intel Corporation</holder>
+      <holder>Laurent Pinchart</holder>
     </copyright>
 
     <legalnotice>
@@ -20,6 +45,17 @@
 	the kernel source COPYING file.
       </para>
     </legalnotice>
+
+    <revhistory>
+      <!-- Put document revisions here, newest first. -->
+      <revision>
+	<revnumber>1.0</revnumber>
+	<date>2012-07-13</date>
+	<authorinitials>LP</authorinitials>
+	<revremark>Added extensive documentation about driver internals.
+	</revremark>
+      </revision>
+    </revhistory>
   </bookinfo>
 
 <toc></toc>
@@ -72,342 +108,361 @@
       submission &amp; fencing, suspend/resume support, and DMA
       services.
     </para>
-    <para>
-      The core of every DRM driver is struct drm_driver.  Drivers
-      typically statically initialize a drm_driver structure,
-      then pass it to drm_init() at load time.
-    </para>
 
   <!-- Internals: driver init -->
 
   <sect1>
-    <title>Driver initialization</title>
+    <title>Driver Initialization</title>
     <para>
-      Before calling the DRM initialization routines, the driver must
-      first create and fill out a struct drm_driver structure.
+      At the core of every DRM driver is a <structname>drm_driver</structname>
+      structure. Drivers typically statically initialize a drm_driver structure,
+      and then pass it to one of the <function>drm_*_init()</function> functions
+      to register it with the DRM subsystem.
     </para>
-    <programlisting>
-      static struct drm_driver driver = {
-	/* Don't use MTRRs here; the Xserver or userspace app should
-	 * deal with them for Intel hardware.
-	 */
-	.driver_features =
-	    DRIVER_USE_AGP | DRIVER_REQUIRE_AGP |
-	    DRIVER_HAVE_IRQ | DRIVER_IRQ_SHARED | DRIVER_MODESET,
-	.load = i915_driver_load,
-	.unload = i915_driver_unload,
-	.firstopen = i915_driver_firstopen,
-	.lastclose = i915_driver_lastclose,
-	.preclose = i915_driver_preclose,
-	.save = i915_save,
-	.restore = i915_restore,
-	.device_is_agp = i915_driver_device_is_agp,
-	.get_vblank_counter = i915_get_vblank_counter,
-	.enable_vblank = i915_enable_vblank,
-	.disable_vblank = i915_disable_vblank,
-	.irq_preinstall = i915_driver_irq_preinstall,
-	.irq_postinstall = i915_driver_irq_postinstall,
-	.irq_uninstall = i915_driver_irq_uninstall,
-	.irq_handler = i915_driver_irq_handler,
-	.reclaim_buffers = drm_core_reclaim_buffers,
-	.get_map_ofs = drm_core_get_map_ofs,
-	.get_reg_ofs = drm_core_get_reg_ofs,
-	.fb_probe = intelfb_probe,
-	.fb_remove = intelfb_remove,
-	.fb_resize = intelfb_resize,
-	.master_create = i915_master_create,
-	.master_destroy = i915_master_destroy,
-#if defined(CONFIG_DEBUG_FS)
-	.debugfs_init = i915_debugfs_init,
-	.debugfs_cleanup = i915_debugfs_cleanup,
-#endif
-	.gem_init_object = i915_gem_init_object,
-	.gem_free_object = i915_gem_free_object,
-	.gem_vm_ops = &amp;i915_gem_vm_ops,
-	.ioctls = i915_ioctls,
-	.fops = {
-		.owner = THIS_MODULE,
-		.open = drm_open,
-		.release = drm_release,
-		.ioctl = drm_ioctl,
-		.mmap = drm_mmap,
-		.poll = drm_poll,
-		.fasync = drm_fasync,
-#ifdef CONFIG_COMPAT
-		.compat_ioctl = i915_compat_ioctl,
-#endif
-		.llseek = noop_llseek,
-		},
-	.pci_driver = {
-		.name = DRIVER_NAME,
-		.id_table = pciidlist,
-		.probe = probe,
-		.remove = __devexit_p(drm_cleanup_pci),
-		},
-	.name = DRIVER_NAME,
-	.desc = DRIVER_DESC,
-	.date = DRIVER_DATE,
-	.major = DRIVER_MAJOR,
-	.minor = DRIVER_MINOR,
-	.patchlevel = DRIVER_PATCHLEVEL,
-      };
-    </programlisting>
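+    <para>
+      As a minimal sketch (not a complete driver; the foo_* names are purely
+      illustrative), a KMS-enabled driver for a PCI device could be declared
+      and registered as follows:
+    </para>
+    <programlisting>
+static struct drm_driver foo_driver = {
+	.driver_features = DRIVER_HAVE_IRQ | DRIVER_IRQ_SHARED |
+			   DRIVER_GEM | DRIVER_MODESET,
+	.load = foo_driver_load,
+	.unload = foo_driver_unload,
+	.gem_free_object = foo_gem_free_object,
+	.fops = &amp;foo_driver_fops,
+	.name = DRIVER_NAME,
+	.desc = DRIVER_DESC,
+	.date = DRIVER_DATE,
+	.major = DRIVER_MAJOR,
+	.minor = DRIVER_MINOR,
+};
+
+static int __init foo_init(void)
+{
+	/* Registers the driver with the DRM subsystem. */
+	return drm_pci_init(&amp;foo_driver, &amp;foo_pci_driver);
+}
+    </programlisting>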
     <para>
-      In the example above, taken from the i915 DRM driver, the driver
-      sets several flags indicating what core features it supports;
-      we go over the individual callbacks in later sections.  Since
-      flags indicate which features your driver supports to the DRM
-      core, you need to set most of them prior to calling drm_init().  Some,
-      like DRIVER_MODESET can be set later based on user supplied parameters,
-      but that's the exception rather than the rule.
+      The <structname>drm_driver</structname> structure contains static
+      information that describes the driver and features it supports, and
+      pointers to methods that the DRM core will call to implement the DRM API.
+      We will first go through the <structname>drm_driver</structname> static
+      information fields, and then describe individual operations in detail as
+      they are used in later sections.
     </para>
-    <variablelist>
-      <title>Driver flags</title>
-      <varlistentry>
-	<term>DRIVER_USE_AGP</term>
-	<listitem><para>
-	    Driver uses AGP interface
-	</para></listitem>
-      </varlistentry>
-      <varlistentry>
-	<term>DRIVER_REQUIRE_AGP</term>
-	<listitem><para>
-	    Driver needs AGP interface to function.
-	</para></listitem>
-      </varlistentry>
-      <varlistentry>
-	<term>DRIVER_USE_MTRR</term>
-	<listitem>
-	  <para>
-	    Driver uses MTRR interface for mapping memory.  Deprecated.
-	  </para>
-	</listitem>
-      </varlistentry>
-      <varlistentry>
-	<term>DRIVER_PCI_DMA</term>
-	<listitem><para>
-	    Driver is capable of PCI DMA.  Deprecated.
-	</para></listitem>
-      </varlistentry>
-      <varlistentry>
-	<term>DRIVER_SG</term>
-	<listitem><para>
-	    Driver can perform scatter/gather DMA.  Deprecated.
-	</para></listitem>
-      </varlistentry>
-      <varlistentry>
-	<term>DRIVER_HAVE_DMA</term>
-	<listitem><para>Driver supports DMA.  Deprecated.</para></listitem>
-      </varlistentry>
-      <varlistentry>
-	<term>DRIVER_HAVE_IRQ</term><term>DRIVER_IRQ_SHARED</term>
-	<listitem>
-	  <para>
-	    DRIVER_HAVE_IRQ indicates whether the driver has an IRQ
-	    handler.  DRIVER_IRQ_SHARED indicates whether the device &amp;
-	    handler support shared IRQs (note that this is required of
-	    PCI drivers).
-	  </para>
-	</listitem>
-      </varlistentry>
-      <varlistentry>
-	<term>DRIVER_DMA_QUEUE</term>
-	<listitem>
-	  <para>
-	    Should be set if the driver queues DMA requests and completes them
-	    asynchronously.  Deprecated.
-	  </para>
-	</listitem>
-      </varlistentry>
-      <varlistentry>
-	<term>DRIVER_FB_DMA</term>
-	<listitem>
-	  <para>
-	    Driver supports DMA to/from the framebuffer.  Deprecated.
-	  </para>
-	</listitem>
-      </varlistentry>
-      <varlistentry>
-	<term>DRIVER_MODESET</term>
-	<listitem>
-	  <para>
-	    Driver supports mode setting interfaces.
-	  </para>
-	</listitem>
-      </varlistentry>
-    </variablelist>
-    <para>
-      In this specific case, the driver requires AGP and supports
-      IRQs.  DMA, as discussed later, is handled by device-specific ioctls
-      in this case.  It also supports the kernel mode setting APIs, though
-      unlike in the actual i915 driver source, this example unconditionally
-      exports KMS capability.
-    </para>
+    <sect2>
+      <title>Driver Information</title>
+      <sect3>
+        <title>Driver Features</title>
+        <para>
+          Drivers inform the DRM core about their requirements and supported
+          features by setting appropriate flags in the
+          <structfield>driver_features</structfield> field. Since those flags
+          influence the DRM core behaviour from registration time onwards, most
+          of them must be set before registering the
+          <structname>drm_driver</structname> instance.
+        </para>
+        <synopsis>u32 driver_features;</synopsis>
+        <variablelist>
+          <title>Driver Feature Flags</title>
+          <varlistentry>
+            <term>DRIVER_USE_AGP</term>
+            <listitem><para>
+              Driver uses the AGP interface; the DRM core will manage AGP resources.
+            </para></listitem>
+          </varlistentry>
+          <varlistentry>
+            <term>DRIVER_REQUIRE_AGP</term>
+            <listitem><para>
+              Driver needs AGP interface to function. AGP initialization failure
+              will become a fatal error.
+            </para></listitem>
+          </varlistentry>
+          <varlistentry>
+            <term>DRIVER_USE_MTRR</term>
+            <listitem><para>
+              Driver uses the MTRR interface for mapping memory; the DRM core
+              will manage MTRR resources. Deprecated.
+            </para></listitem>
+          </varlistentry>
+          <varlistentry>
+            <term>DRIVER_PCI_DMA</term>
+            <listitem><para>
+              Driver is capable of PCI DMA; mapping of PCI DMA buffers to
+              userspace will be enabled. Deprecated.
+            </para></listitem>
+          </varlistentry>
+          <varlistentry>
+            <term>DRIVER_SG</term>
+            <listitem><para>
+              Driver can perform scatter/gather DMA; allocation and mapping of
+              scatter/gather buffers will be enabled. Deprecated.
+            </para></listitem>
+          </varlistentry>
+          <varlistentry>
+            <term>DRIVER_HAVE_DMA</term>
+            <listitem><para>
+              Driver supports DMA; the userspace DMA API will be supported.
+              Deprecated.
+            </para></listitem>
+          </varlistentry>
+          <varlistentry>
+            <term>DRIVER_HAVE_IRQ</term><term>DRIVER_IRQ_SHARED</term>
+            <listitem><para>
+              DRIVER_HAVE_IRQ indicates whether the driver has an IRQ handler. The
+              DRM core will automatically register an interrupt handler when the
+              flag is set. DRIVER_IRQ_SHARED indicates whether the device &amp;
+              handler support shared IRQs (note that this is required of PCI
+              drivers).
+            </para></listitem>
+          </varlistentry>
+          <varlistentry>
+            <term>DRIVER_IRQ_VBL</term>
+            <listitem><para>Unused. Deprecated.</para></listitem>
+          </varlistentry>
+          <varlistentry>
+            <term>DRIVER_DMA_QUEUE</term>
+            <listitem><para>
+              Should be set if the driver queues DMA requests and completes them
+              asynchronously.  Deprecated.
+            </para></listitem>
+          </varlistentry>
+          <varlistentry>
+            <term>DRIVER_FB_DMA</term>
+            <listitem><para>
+              Driver supports DMA to/from the framebuffer; mapping of
+              framebuffer DMA buffers to userspace will be supported. Deprecated.
+            </para></listitem>
+          </varlistentry>
+          <varlistentry>
+            <term>DRIVER_IRQ_VBL2</term>
+            <listitem><para>Unused. Deprecated.</para></listitem>
+          </varlistentry>
+          <varlistentry>
+            <term>DRIVER_GEM</term>
+            <listitem><para>
+              Driver uses the GEM memory manager.
+            </para></listitem>
+          </varlistentry>
+          <varlistentry>
+            <term>DRIVER_MODESET</term>
+            <listitem><para>
+              Driver supports mode setting interfaces (KMS).
+            </para></listitem>
+          </varlistentry>
+          <varlistentry>
+            <term>DRIVER_PRIME</term>
+            <listitem><para>
+              Driver implements DRM PRIME buffer sharing.
+            </para></listitem>
+          </varlistentry>
+        </variablelist>
+      </sect3>
+      <sect3>
+        <title>Major, Minor and Patchlevel</title>
+        <synopsis>int major;
+int minor;
+int patchlevel;</synopsis>
+        <para>
+          The DRM core identifies driver versions by a major, minor and patch
+          level triplet. The information is printed to the kernel log at
+          initialization time and passed to userspace through the
+          DRM_IOCTL_VERSION ioctl.
+        </para>
+        <para>
+          The major and minor numbers are also used to verify the requested driver
+          API version passed to DRM_IOCTL_SET_VERSION. When the driver API changes
+          between minor versions, applications can call DRM_IOCTL_SET_VERSION to
+          select a specific version of the API. If the requested major isn't equal
+          to the driver major, or the requested minor is larger than the driver
+          minor, the DRM_IOCTL_SET_VERSION call will return an error. Otherwise
+          the driver's set_version() method will be called with the requested
+          version.
+        </para>
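+        <para>
+          For instance, userspace can retrieve the version triplet with the
+          DRM_IOCTL_VERSION ioctl (a sketch, error handling omitted):
+        </para>
+        <programlisting>
+	struct drm_version version;
+
+	memset(&amp;version, 0, sizeof(version));
+	ioctl(fd, DRM_IOCTL_VERSION, &amp;version);
+	printf("driver version %d.%d.%d\n", version.version_major,
+	       version.version_minor, version.version_patchlevel);
+        </programlisting>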
+      </sect3>
+      <sect3>
+        <title>Name, Description and Date</title>
+        <synopsis>char *name;
+char *desc;
+char *date;</synopsis>
+        <para>
+          The driver name is printed to the kernel log at initialization time,
+          used for IRQ registration and passed to userspace through
+          DRM_IOCTL_VERSION.
+        </para>
+        <para>
+          The driver description is a purely informative string passed to
+          userspace through the DRM_IOCTL_VERSION ioctl and otherwise unused by
+          the kernel.
+        </para>
+        <para>
+          The driver date, formatted as YYYYMMDD, is meant to identify the date of
+          the latest modification to the driver. However, as most drivers fail to
+          update it, its value is mostly useless. The DRM core prints it to the
+          kernel log at initialization time and passes it to userspace through the
+          DRM_IOCTL_VERSION ioctl.
+        </para>
+      </sect3>
+    </sect2>
+    <sect2>
+      <title>Driver Load</title>
+      <para>
+        The <methodname>load</methodname> method is the driver and device
+        initialization entry point. The method is responsible for allocating and
+        initializing driver private data, specifying supported performance
+        counters, performing resource allocation and mapping (e.g. acquiring
+        clocks, mapping registers or allocating command buffers), initializing
+        the memory manager (<xref linkend="drm-memory-management"/>), installing
+        the IRQ handler (<xref linkend="drm-irq-registration"/>), setting up
+        vertical blanking handling (<xref linkend="drm-vertical-blank"/>), mode
+	setting (<xref linkend="drm-mode-setting"/>) and initial output
+	configuration (<xref linkend="drm-kms-init"/>).
+      </para>
+      <note><para>
+        If compatibility is a concern (e.g. with drivers converted over from
+        User Mode Setting to Kernel Mode Setting), care must be taken to prevent
+        device initialization and control that is incompatible with currently
+        active userspace drivers. For instance, if user level mode setting
+        drivers are in use, it would be problematic to perform output discovery
+        &amp; configuration at load time. Likewise, if user-level drivers
+        unaware of memory management are in use, memory management and command
+        buffer setup may need to be omitted. These requirements are
+        driver-specific, and care needs to be taken to keep both old and new
+        applications and libraries working.
+      </para></note>
+      <synopsis>int (*load) (struct drm_device *, unsigned long flags);</synopsis>
+      <para>
+        The method takes two arguments, a pointer to the newly created
+	<structname>drm_device</structname> and flags. The flags are used to
+	pass the <structfield>driver_data</structfield> field of the device id
+	corresponding to the device passed to <function>drm_*_init()</function>.
+	Only PCI devices currently use this; USB and platform DRM drivers have
+	their <methodname>load</methodname> method called with flags set to 0.
+      </para>
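+      <para>
+        The following sketch outlines a typical <methodname>load</methodname>
+        implementation (the foo_* names are purely illustrative):
+      </para>
+      <programlisting>
+static int foo_driver_load(struct drm_device *dev, unsigned long flags)
+{
+	struct foo_device *priv;
+	int ret;
+
+	/* Allocate the driver private structure. */
+	priv = kzalloc(sizeof(*priv), GFP_KERNEL);
+	if (!priv)
+		return -ENOMEM;
+	dev->dev_private = priv;
+
+	/* Map registers, initialize the memory manager, install the
+	 * IRQ handler and set up mode setting here, unwinding on
+	 * failure.
+	 */
+	ret = foo_device_init(dev);
+	if (ret) {
+		kfree(priv);
+		dev->dev_private = NULL;
+		return ret;
+	}
+
+	return 0;
+}
+      </programlisting>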
+      <sect3>
+        <title>Driver Private &amp; Performance Counters</title>
+        <para>
+          The driver private hangs off the main
+          <structname>drm_device</structname> structure and can be used for
+          tracking various device-specific bits of information, like register
+          offsets, command buffer status, register state for suspend/resume, etc.
+          At load time, a driver may simply allocate one and set
+          <structname>drm_device</structname>.<structfield>dev_priv</structfield>
+          appropriately; it should be freed and
+          <structname>drm_device</structname>.<structfield>dev_priv</structfield>
+          set to NULL when the driver is unloaded.
+        </para>
+        <para>
+          DRM supports several counters which were used for rough performance
+          characterization. This stat counter system is deprecated and should not
+          be used. If performance monitoring is desired, the developer should
+          investigate and potentially enhance the kernel perf and tracing
+          infrastructure to export GPU related performance information for
+          consumption by performance monitoring tools and applications.
+        </para>
+      </sect3>
+      <sect3 id="drm-irq-registration">
+        <title>IRQ Registration</title>
+        <para>
+          The DRM core tries to facilitate IRQ handler registration and
+          unregistration by providing <function>drm_irq_install</function> and
+          <function>drm_irq_uninstall</function> functions. Those functions only
+          support a single interrupt per device.
+        </para>
+  <!--!Fdrivers/char/drm/drm_irq.c drm_irq_install-->
+        <para>
+          Both functions get the device IRQ by calling
+          <function>drm_dev_to_irq</function>. This inline function will call a
+          bus-specific operation to retrieve the IRQ number. For platform devices,
+          <function>platform_get_irq</function>(..., 0) is used to retrieve the
+          IRQ number.
+        </para>
+        <para>
+          <function>drm_irq_install</function> starts by calling the
+          <methodname>irq_preinstall</methodname> driver operation. The operation
+          is optional and must make sure that the interrupt will not get fired by
+          clearing all pending interrupt flags or disabling the interrupt.
+        </para>
+        <para>
+          The IRQ will then be requested by a call to
+          <function>request_irq</function>. If the DRIVER_IRQ_SHARED driver
+          feature flag is set, a shared (IRQF_SHARED) IRQ handler will be
+          requested.
+        </para>
+        <para>
+          The IRQ handler function must be provided as the mandatory irq_handler
+          driver operation. It will get passed directly to
+          <function>request_irq</function> and thus has the same prototype as all
+          IRQ handlers. It will get called with a pointer to the DRM device as the
+          second argument.
+        </para>
+        <para>
+          Finally the function calls the optional
+          <methodname>irq_postinstall</methodname> driver operation. The operation
+          usually enables interrupts (excluding the vblank interrupt, which is
+          enabled separately), but drivers may choose to enable/disable interrupts
+          at a different time.
+        </para>
+        <para>
+          <function>drm_irq_uninstall</function> is similarly used to uninstall an
+          IRQ handler. It starts by waking up all processes waiting on a vblank
+          interrupt to make sure they don't hang, and then calls the optional
+          <methodname>irq_uninstall</methodname> driver operation. The operation
+          must disable all hardware interrupts. Finally the function frees the IRQ
+          by calling <function>free_irq</function>.
+        </para>
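+        <para>
+          In practice a driver thus only needs to provide the IRQ-related
+          operations and call the two functions, typically from its
+          <methodname>load</methodname> and <methodname>unload</methodname>
+          methods (a sketch):
+        </para>
+        <programlisting>
+	/* Invokes the irq_preinstall operation, requests the IRQ and
+	 * then invokes the irq_postinstall operation.
+	 */
+	ret = drm_irq_install(dev);
+	if (ret)
+		return ret;
+
+	/* At unload time, or on a later error path. */
+	drm_irq_uninstall(dev);
+        </programlisting>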
+      </sect3>
+      <sect3>
+        <title>Memory Manager Initialization</title>
+        <para>
+          Every DRM driver requires a memory manager which must be initialized at
+          load time. DRM currently contains two memory managers, the Translation
+          Table Manager (TTM) and the Graphics Execution Manager (GEM).
+          This document describes the use of the GEM memory manager only. See
+          <xref linkend="drm-memory-management"/> for details.
+        </para>
+      </sect3>
+      <sect3>
+        <title>Miscellaneous Device Configuration</title>
+        <para>
+          Another task that may be necessary for PCI devices during configuration
+          is mapping the video BIOS. On many devices, the VBIOS describes device
+          configuration, LCD panel timings (if any), and contains flags indicating
+          device state. Mapping the BIOS can be done using the pci_map_rom() call,
+          a convenience function that takes care of mapping the actual ROM,
+          whether it has been shadowed into memory (typically at address 0xc0000)
+          or exists on the PCI device in the ROM BAR. Note that after the ROM has
+          been mapped and any necessary information has been extracted, it should
+          be unmapped; on many devices, the ROM address decoder is shared with
+          other BARs, so leaving it mapped could cause undesired behaviour like
+          hangs or memory corruption.
+  <!--!Fdrivers/pci/rom.c pci_map_rom-->
+        </para>
+      </sect3>
+    </sect2>
   </sect1>
 
-  <!-- Internals: driver load -->
+  <!-- Internals: memory management -->
 
-  <sect1>
-    <title>Driver load</title>
+  <sect1 id="drm-memory-management">
+    <title>Memory management</title>
     <para>
-      In the previous section, we saw what a typical drm_driver
-      structure might look like.  One of the more important fields in
-      the structure is the hook for the load function.
-    </para>
-    <programlisting>
-      static struct drm_driver driver = {
-      	...
-      	.load = i915_driver_load,
-        ...
-      };
-    </programlisting>
-    <para>
-      The load function has many responsibilities: allocating a driver
-      private structure, specifying supported performance counters,
-      configuring the device (e.g. mapping registers &amp; command
-      buffers), initializing the memory manager, and setting up the
-      initial output configuration.
+      Modern Linux systems require large amounts of graphics memory to store
+      frame buffers, textures, vertices and other graphics-related data. Given
+      the very dynamic nature of much of that data, managing graphics memory
+      efficiently is thus crucial for the graphics stack and plays a central
+      role in the DRM infrastructure.
     </para>
     <para>
-      If compatibility is a concern (e.g. with drivers converted over
-      to the new interfaces from the old ones), care must be taken to
-      prevent device initialization and control that is incompatible with
-      currently active userspace drivers.  For instance, if user
-      level mode setting drivers are in use, it would be problematic
-      to perform output discovery &amp; configuration at load time.
-      Likewise, if user-level drivers unaware of memory management are
-      in use, memory management and command buffer setup may need to
-      be omitted.  These requirements are driver-specific, and care
-      needs to be taken to keep both old and new applications and
-      libraries working.  The i915 driver supports the "modeset"
-      module parameter to control whether advanced features are
-      enabled at load time or in legacy fashion.
+      The DRM core includes two memory managers, namely Translation Table Maps
+      (TTM) and Graphics Execution Manager (GEM). TTM was the first DRM memory
+      manager to be developed and tried to be a one-size-fits-all
+      solution. It provides a single userspace API to accommodate the needs of
+      all hardware, supporting both Unified Memory Architecture (UMA) devices
+      and devices with dedicated video RAM (i.e. most discrete video cards).
+      This resulted in a large, complex piece of code that turned out to be
+      hard to use for driver development.
     </para>
-
+    <para>
+      GEM started as an Intel-sponsored project in reaction to TTM's
+      complexity. Its design philosophy is completely different: instead of
+      providing a solution to every graphics memory-related problems, GEM
+      identified common code between drivers and created a support library to
+      share it. GEM has simpler initialization and execution requirements than
+      TTM, but has no video RAM management capabilities and is thus limited to
+      UMA devices.
+    </para>
     <sect2>
-      <title>Driver private &amp; performance counters</title>
+      <title>The Translation Table Manager (TTM)</title>
       <para>
-	The driver private hangs off the main drm_device structure and
-	can be used for tracking various device-specific bits of
-	information, like register offsets, command buffer status,
-	register state for suspend/resume, etc.  At load time, a
-	driver may simply allocate one and set drm_device.dev_priv
-	appropriately; it should be freed and drm_device.dev_priv set
-	to NULL when the driver is unloaded.
-      </para>
-      <para>
-	The DRM supports several counters which may be used for rough
-	performance characterization.  Note that the DRM stat counter
-	system is not often used by applications, and supporting
-	additional counters is completely optional.
-      </para>
-      <para>
-	These interfaces are deprecated and should not be used.  If performance
-	monitoring is desired, the developer should investigate and
-	potentially enhance the kernel perf and tracing infrastructure to export
-	GPU related performance information for consumption by performance
-	monitoring tools and applications.
-      </para>
-    </sect2>
-
-    <sect2>
-      <title>Configuring the device</title>
-      <para>
-	Obviously, device configuration is device-specific.
-	However, there are several common operations: finding a
-	device's PCI resources, mapping them, and potentially setting
-	up an IRQ handler.
-      </para>
-      <para>
-	Finding &amp; mapping resources is fairly straightforward.  The
-	DRM wrapper functions, drm_get_resource_start() and
-	drm_get_resource_len(), may be used to find BARs on the given
-	drm_device struct.  Once those values have been retrieved, the
-	driver load function can call drm_addmap() to create a new
-	mapping for the BAR in question.  Note that you probably want a
-	drm_local_map_t in your driver private structure to track any
-	mappings you create.
-<!-- !Fdrivers/gpu/drm/drm_bufs.c drm_get_resource_* -->
-<!-- !Finclude/drm/drmP.h drm_local_map_t -->
-      </para>
-      <para>
-	if compatibility with other operating systems isn't a concern
-	(DRM drivers can run under various BSD variants and OpenSolaris),
-	native Linux calls may be used for the above, e.g. pci_resource_*
-	and iomap*/iounmap.  See the Linux device driver book for more
-	info.
-      </para>
-      <para>
-	Once you have a register map, you may use the DRM_READn() and
-	DRM_WRITEn() macros to access the registers on your device, or
-	use driver-specific versions to offset into your MMIO space
-	relative to a driver-specific base pointer (see I915_READ for
-	an example).
-      </para>
-      <para>
-	If your device supports interrupt generation, you may want to
-	set up an interrupt handler when the driver is loaded.  This
-	is done using the drm_irq_install() function.  If your device
-	supports vertical blank interrupts, it should call
-	drm_vblank_init() to initialize the core vblank handling code before
-	enabling interrupts on your device.  This ensures the vblank related
-	structures are allocated and allows the core to handle vblank events.
-      </para>
-<!--!Fdrivers/char/drm/drm_irq.c drm_irq_install-->
-      <para>
-	Once your interrupt handler is registered (it uses your
-	drm_driver.irq_handler as the actual interrupt handling
-	function), you can safely enable interrupts on your device,
-	assuming any other state your interrupt handler uses is also
-	initialized.
-      </para>
-      <para>
-	Another task that may be necessary during configuration is
-	mapping the video BIOS.  On many devices, the VBIOS describes
-	device configuration, LCD panel timings (if any), and contains
-	flags indicating device state.  Mapping the BIOS can be done
-	using the pci_map_rom() call, a convenience function that
-	takes care of mapping the actual ROM, whether it has been
-	shadowed into memory (typically at address 0xc0000) or exists
-	on the PCI device in the ROM BAR.  Note that after the ROM
-	has been mapped and any necessary information has been extracted,
-	it should be unmapped; on many devices, the ROM address decoder is
-	shared with other BARs, so leaving it mapped could cause
-	undesired behavior like hangs or memory corruption.
-<!--!Fdrivers/pci/rom.c pci_map_rom-->
-      </para>
-    </sect2>
-
-    <sect2>
-      <title>Memory manager initialization</title>
-      <para>
-	In order to allocate command buffers, cursor memory, scanout
-	buffers, etc., as well as support the latest features provided
-	by packages like Mesa and the X.Org X server, your driver
-	should support a memory manager.
-      </para>
-      <para>
-	If your driver supports memory management (it should!), you
-	need to set that up at load time as well.  How you initialize
-	it depends on which memory manager you're using: TTM or GEM.
+	TTM design background and information belongs here.
       </para>
       <sect3>
 	<title>TTM initialization</title>
-	<para>
-	  TTM (for Translation Table Manager) manages video memory and
-	  aperture space for graphics devices. TTM supports both UMA devices
-	  and devices with dedicated video RAM (VRAM), i.e. most discrete
-	  graphics devices.  If your device has dedicated RAM, supporting
-	  TTM is desirable.  TTM also integrates tightly with your
-	  driver-specific buffer execution function.  See the radeon
-	  driver for examples.
-	</para>
-	<para>
-	  The core TTM structure is the ttm_bo_driver struct.  It contains
-	  several fields with function pointers for initializing the TTM,
-	  allocating and freeing memory, waiting for command completion
-	  and fence synchronization, and memory migration.  See the
-	  radeon_ttm.c file for an example of usage.
+        <warning><para>This section is outdated.</para></warning>
+        <para>
+          Drivers wishing to support TTM must fill out a ttm_bo_driver
+          structure. The structure contains several fields with function
+          pointers for initializing the TTM, allocating and freeing memory,
+          waiting for command completion and fence synchronization, and memory
+          migration. See the radeon_ttm.c file for an example of usage.
 	</para>
 	<para>
 	  The ttm_global_reference structure is made up of several fields:
@@ -445,82 +500,1081 @@
 	  count for the TTM, which will call your initialization function.
 	</para>
       </sect3>
-      <sect3>
-	<title>GEM initialization</title>
-	<para>
-	  GEM is an alternative to TTM, designed specifically for UMA
-	  devices.  It has simpler initialization and execution requirements
-	  than TTM, but has no VRAM management capability.  Core GEM
-	  is initialized by calling drm_mm_init() to create
-	  a GTT DRM MM object, which provides an address space pool for
-	  object allocation.  In a KMS configuration, the driver
-	  needs to allocate and initialize a command ring buffer following
-	  core GEM initialization.  A UMA device usually has what is called a
-	  "stolen" memory region, which provides space for the initial
-	  framebuffer and large, contiguous memory regions required by the
-	  device.  This space is not typically managed by GEM, and it must
-	  be initialized separately into its own DRM MM object.
-	</para>
-	<para>
-	  Initialization is driver-specific. In the case of Intel
-	  integrated graphics chips like 965GM, GEM initialization can
-	  be done by calling the internal GEM init function,
-	  i915_gem_do_init().  Since the 965GM is a UMA device
-	  (i.e. it doesn't have dedicated VRAM), GEM manages
-	  making regular RAM available for GPU operations.  Memory set
-	  aside by the BIOS (called "stolen" memory by the i915
-	  driver) is managed by the DRM memrange allocator; the
-	  rest of the aperture is managed by GEM.
-	  <programlisting>
-	    /* Basic memrange allocator for stolen space (aka vram) */
-	    drm_memrange_init(&amp;dev_priv->vram, 0, prealloc_size);
-	    /* Let GEM Manage from end of prealloc space to end of aperture */
-	    i915_gem_do_init(dev, prealloc_size, agp_size);
-	  </programlisting>
-<!--!Edrivers/char/drm/drm_memrange.c-->
-	</para>
-	<para>
-	  Once the memory manager has been set up, we may allocate the
-	  command buffer.  In the i915 case, this is also done with a
-	  GEM function, i915_gem_init_ringbuffer().
-	</para>
-      </sect3>
     </sect2>
-
-    <sect2>
-      <title>Output configuration</title>
+    <sect2 id="drm-gem">
+      <title>The Graphics Execution Manager (GEM)</title>
       <para>
-	The final initialization task is output configuration.  This involves:
+        The GEM design approach has resulted in a memory manager that doesn't
+        provide full coverage of all (or even all common) use cases in its
+        userspace or kernel API. GEM exposes a set of standard memory-related
+        operations to userspace and a set of helper functions to drivers, and
+        lets drivers implement hardware-specific operations with their own
+        private API.
+      </para>
+      <para>
+        The GEM userspace API is described in the
+        <ulink url="http://lwn.net/Articles/283798/"><citetitle>GEM - the Graphics
+        Execution Manager</citetitle></ulink> article on LWN. While slightly
+        outdated, the document provides a good overview of the GEM API principles.
+        Buffer allocation and read and write operations, described as part of the
+        common GEM API, are currently implemented using driver-specific ioctls.
+      </para>
+      <para>
+        GEM is data-agnostic. It manages abstract buffer objects without knowing
+        what individual buffers contain. APIs that require knowledge of buffer
+        contents or purpose, such as buffer allocation or synchronization
+        primitives, are thus outside of the scope of GEM and must be implemented
+        using driver-specific ioctls.
+      </para>
+      <para>
+	On a fundamental level, GEM involves several operations:
 	<itemizedlist>
-	  <listitem>
-	    Finding and initializing the CRTCs, encoders, and connectors
-	    for the device.
-	  </listitem>
-	  <listitem>
-	    Creating an initial configuration.
-	  </listitem>
-	  <listitem>
-	    Registering a framebuffer console driver.
-	  </listitem>
+	  <listitem>Memory allocation and freeing</listitem>
+	  <listitem>Command execution</listitem>
+	  <listitem>Aperture management at command execution time</listitem>
 	</itemizedlist>
+	Buffer object allocation is relatively straightforward and largely
+        provided by Linux's shmem layer, which provides memory to back each
+        object.
+      </para>
+      <para>
+        Device-specific operations, such as command execution, pinning, buffer
+	read &amp; write, mapping, and domain ownership transfers are left to
+        driver-specific ioctls.
       </para>
       <sect3>
-	<title>Output discovery and initialization</title>
-	<para>
-	  Several core functions exist to create CRTCs, encoders, and
-	  connectors, namely: drm_crtc_init(), drm_connector_init(), and
-	  drm_encoder_init(), along with several "helper" functions to
-	  perform common tasks.
+        <title>GEM Initialization</title>
+        <para>
+          Drivers that use GEM must set the DRIVER_GEM bit in the struct
+          <structname>drm_driver</structname>
+          <structfield>driver_features</structfield> field. The DRM core will
+          then automatically initialize the GEM core before calling the
+          <methodname>load</methodname> operation. Behind the scenes, this will
+          create a DRM Memory Manager object which provides an address space
+          pool for object allocation.
+        </para>
+        <para>
+          In a KMS configuration, drivers need to allocate and initialize a
+          command ring buffer following core GEM initialization if required by
+          the hardware. UMA devices usually have what is called a "stolen"
+          memory region, which provides space for the initial framebuffer and
+          large, contiguous memory regions required by the device. This space is
+          typically not managed by GEM, and must be initialized separately into
+          its own DRM MM object.
+        </para>
+      </sect3>
+      <sect3>
+        <title>GEM Objects Creation</title>
+        <para>
+          GEM splits creation of GEM objects and allocation of the memory that
+          backs them into two distinct operations.
+        </para>
+        <para>
+          GEM objects are represented by an instance of struct
+          <structname>drm_gem_object</structname>. Drivers usually need to extend
+          GEM objects with private information and thus create a driver-specific
+          GEM object structure type that embeds an instance of struct
+          <structname>drm_gem_object</structname>.
+        </para>
+        <para>
+          To create a GEM object, a driver allocates memory for an instance of its
+          specific GEM object type and initializes the embedded struct
+          <structname>drm_gem_object</structname> with a call to
+          <function>drm_gem_object_init</function>. The function takes a pointer to
+          the DRM device, a pointer to the GEM object and the buffer object size
+          in bytes.
+        </para>
+        <para>
+          GEM uses shmem to allocate anonymous pageable memory.
+          <function>drm_gem_object_init</function> will create a shmfs file of
+          the requested size and store it into the struct
+          <structname>drm_gem_object</structname> <structfield>filp</structfield>
+          field. The memory is used as either main storage for the object when the
+          graphics hardware uses system memory directly or as a backing store
+          otherwise.
+        </para>
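+        <para>
+          A sketch of object creation for a hypothetical foo driver (the
+          foo_* names are illustrative):
+        </para>
+        <programlisting>
+struct foo_gem_object {
+	struct drm_gem_object base;
+	/* driver-specific fields */
+};
+
+static struct foo_gem_object *foo_gem_create(struct drm_device *dev,
+					     size_t size)
+{
+	struct foo_gem_object *obj;
+
+	obj = kzalloc(sizeof(*obj), GFP_KERNEL);
+	if (!obj)
+		return NULL;
+
+	/* Creates the shmfs backing store for the object. */
+	if (drm_gem_object_init(dev, &amp;obj->base, PAGE_ALIGN(size))) {
+		kfree(obj);
+		return NULL;
+	}
+
+	return obj;
+}
+        </programlisting>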
+        <para>
+          Drivers are responsible for the actual physical page allocation by
+          calling <function>shmem_read_mapping_page_gfp</function> for each page.
+          Note that they can decide to allocate pages when initializing the GEM
+          object, or to delay allocation until the memory is needed (for instance
+          when a page fault occurs as a result of a userspace memory access or
+          when the driver needs to start a DMA transfer involving the memory).
+        </para>
+        <para>
+          Anonymous pageable memory allocation is not always desired, for instance
+          when the hardware requires physically contiguous system memory as is
+          often the case in embedded devices. Drivers can create GEM objects with
+          no shmfs backing (called private GEM objects) by initializing them with
+          a call to <function>drm_gem_private_object_init</function> instead of
+          <function>drm_gem_object_init</function>. Storage for private GEM
+          objects must be managed by drivers.
+        </para>
+        <para>
+          Drivers that do not need to extend GEM objects with private information
+          can call the <function>drm_gem_object_alloc</function> function to
+          allocate and initialize a struct <structname>drm_gem_object</structname>
+          instance. The GEM core will call the optional driver
+          <methodname>gem_init_object</methodname> operation after initializing
+          the GEM object with <function>drm_gem_object_init</function>.
+          <synopsis>int (*gem_init_object) (struct drm_gem_object *obj);</synopsis>
+        </para>
+        <para>
+          No alloc-and-init function exists for private GEM objects.
+        </para>
+      </sect3>
+      <sect3>
+        <title>GEM Objects Lifetime</title>
+        <para>
+          All GEM objects are reference-counted by the GEM core. References can be
+          acquired and released by calling <function>drm_gem_object_reference</function>
+          and <function>drm_gem_object_unreference</function> respectively. The
+          caller must hold the <structname>drm_device</structname>
+          <structfield>struct_mutex</structfield> lock. As a convenience, GEM
+          provides the <function>drm_gem_object_reference_unlocked</function> and
+          <function>drm_gem_object_unreference_unlocked</function> functions that
+          can be called without holding the lock.
+        </para>
+        <para>
+          When the last reference to a GEM object is released the GEM core calls
+          the <structname>drm_driver</structname>
+          <methodname>gem_free_object</methodname> operation. That operation is
+          mandatory for GEM-enabled drivers and must free the GEM object and all
+          associated resources.
+        </para>
+        <para>
+          <synopsis>void (*gem_free_object) (struct drm_gem_object *obj);</synopsis>
+          Drivers are responsible for freeing all GEM object resources, including
+          the resources created by the GEM core. If an mmap offset has been
+          created for the object (in which case
+          <structname>drm_gem_object</structname>::<structfield>map_list</structfield>::<structfield>map</structfield>
+          is not NULL) it must be freed by a call to
+          <function>drm_gem_free_mmap_offset</function>. The shmfs backing store
+          must be released by calling <function>drm_gem_object_release</function>
+          (that function can safely be called if no shmfs backing store has been
+          created).
+        </para>
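+        <para>
+          Continuing the hypothetical foo driver above, a sketch of the
+          corresponding <methodname>gem_free_object</methodname>
+          implementation:
+        </para>
+        <programlisting>
+static void foo_gem_free_object(struct drm_gem_object *gem_obj)
+{
+	struct foo_gem_object *obj =
+		container_of(gem_obj, struct foo_gem_object, base);
+
+	/* Free the mmap offset if one has been created. */
+	if (gem_obj->map_list.map)
+		drm_gem_free_mmap_offset(gem_obj);
+
+	/* Safe to call even without a shmfs backing store. */
+	drm_gem_object_release(gem_obj);
+	kfree(obj);
+}
+        </programlisting>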
+      </sect3>
+      <sect3>
+        <title>GEM Objects Naming</title>
+        <para>
+          Communication between userspace and the kernel refers to GEM objects
+          using local handles, global names or, more recently, file descriptors.
+          All of those are 32-bit integer values; the usual Linux kernel limits
+          apply to the file descriptors.
+        </para>
+        <para>
+          GEM handles are local to a DRM file. Applications get a handle to a GEM
+          object through a driver-specific ioctl, and can use that handle to refer
+          to the GEM object in other standard or driver-specific ioctls. Closing a
+          DRM file handle frees all its GEM handles and dereferences the
+          associated GEM objects.
+        </para>
+        <para>
+          To create a handle for a GEM object drivers call
+          <function>drm_gem_handle_create</function>. The function takes a pointer
+          to the DRM file and the GEM object and returns a locally unique handle.
+          When the handle is no longer needed drivers delete it with a call to
+          <function>drm_gem_handle_delete</function>. Finally the GEM object
+          associated with a handle can be retrieved by a call to
+          <function>drm_gem_object_lookup</function>.
+        </para>
+        <para>
+          Handles don't take ownership of GEM objects, they only take a reference
+          to the object that will be dropped when the handle is destroyed. To
+          avoid leaking GEM objects, drivers must make sure they drop the
+          reference(s) they own (such as the initial reference taken at object
+          creation time) as appropriate, without any special consideration for the
+          handle. For example, in the particular case of combined GEM object and
+          handle creation in the implementation of the
+          <methodname>dumb_create</methodname> operation, drivers must drop the
+          initial reference to the GEM object before returning the handle.
+        </para>
+        <para>
+          GEM names are similar in purpose to handles but are not local to DRM
+          files. They can be passed between processes to reference a GEM object
+          globally. Names can't be used directly to refer to objects in the DRM
+          API, applications must convert handles to names and names to handles
+          using the DRM_IOCTL_GEM_FLINK and DRM_IOCTL_GEM_OPEN ioctls
+          respectively. The conversion is handled by the DRM core without any
+          driver-specific support.
+        </para>
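+        <para>
+          For instance, a process can share an object by name with another
+          process as follows (a sketch, error handling omitted):
+        </para>
+        <programlisting>
+	/* In the exporting process: convert a handle to a global name. */
+	struct drm_gem_flink flink = { .handle = handle };
+	ioctl(fd, DRM_IOCTL_GEM_FLINK, &amp;flink);
+	/* Pass flink.name to the other process. */
+
+	/* In the importing process: convert the name to a local handle. */
+	struct drm_gem_open args = { .name = name };
+	ioctl(fd, DRM_IOCTL_GEM_OPEN, &amp;args);
+	/* args.handle now references the shared object. */
+        </programlisting>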
+        <para>
+          Similar to global names, GEM file descriptors are also used to share GEM
+          objects across processes. They offer additional security: as file
+          descriptors must be explicitly sent over UNIX domain sockets to be shared
+          between applications, they can't be guessed like the globally unique GEM
+          names.
+        </para>
+        <para>
+          Drivers that support GEM file descriptors, also known as the DRM PRIME
+          API, must set the DRIVER_PRIME bit in the struct
+          <structname>drm_driver</structname>
+          <structfield>driver_features</structfield> field, and implement the
+          <methodname>prime_handle_to_fd</methodname> and
+          <methodname>prime_fd_to_handle</methodname> operations.
+        </para>
+        <para>
+          <synopsis>int (*prime_handle_to_fd)(struct drm_device *dev,
+                            struct drm_file *file_priv, uint32_t handle,
+                            uint32_t flags, int *prime_fd);
+  int (*prime_fd_to_handle)(struct drm_device *dev,
+                            struct drm_file *file_priv, int prime_fd,
+                            uint32_t *handle);</synopsis>
+          Those two operations convert a handle to a PRIME file descriptor and
+          vice versa. Drivers must use the kernel dma-buf buffer sharing framework
+          to manage the PRIME file descriptors.
+        </para>
+        <para>
+          While non-GEM drivers must implement the operations themselves, GEM
+          drivers must use the <function>drm_gem_prime_handle_to_fd</function>
+          and <function>drm_gem_prime_fd_to_handle</function> helper functions.
+          Those helpers rely on the driver
+          <methodname>gem_prime_export</methodname> and
+          <methodname>gem_prime_import</methodname> operations to create a dma-buf
+          instance from a GEM object (dma-buf exporter role) and to create a GEM
+          object from a dma-buf instance (dma-buf importer role).
+        </para>
+        <para>
+          <synopsis>struct dma_buf * (*gem_prime_export)(struct drm_device *dev,
+                                       struct drm_gem_object *obj,
+                                       int flags);
+  struct drm_gem_object * (*gem_prime_import)(struct drm_device *dev,
+                                              struct dma_buf *dma_buf);</synopsis>
+          These two operations are mandatory for GEM drivers that support DRM
+          PRIME.
+        </para>
+      </sect3>
+      <sect3 id="drm-gem-objects-mapping">
+        <title>GEM Objects Mapping</title>
+        <para>
+          Because mapping operations are fairly heavyweight, GEM favours
+          read/write-like access to buffers, implemented through driver-specific
+          ioctls, over mapping buffers to userspace. However, when random access
+          to the buffer is needed (to perform software rendering for instance),
+          direct access to the object can be more efficient.
+        </para>
+        <para>
+          The mmap system call can't be used directly to map GEM objects, as they
+          don't have their own file handle. Two alternative methods currently
+          co-exist to map GEM objects to userspace. The first method uses a
+          driver-specific ioctl to perform the mapping operation, calling
+          <function>do_mmap</function> under the hood. This is often considered
+          dubious, seems to be discouraged for new GEM-enabled drivers, and will
+          thus not be described here.
+        </para>
+        <para>
+          The second method uses the mmap system call on the DRM file handle.
+          <synopsis>void *mmap(void *addr, size_t length, int prot, int flags, int fd,
+             off_t offset);</synopsis>
+          DRM identifies the GEM object to be mapped by a fake offset passed
+          through the mmap offset argument. Prior to being mapped, a GEM object
+          must thus be associated with a fake offset. To do so, drivers must call
+          <function>drm_gem_create_mmap_offset</function> on the object. The
+          function allocates a fake offset range from a pool and stores the
+          offset divided by PAGE_SIZE in
+          <literal>obj-&gt;map_list.hash.key</literal>. Care must be taken not to
+          call <function>drm_gem_create_mmap_offset</function> if a fake offset
+          has already been allocated for the object. This can be tested by
+          <literal>obj-&gt;map_list.map</literal> being non-NULL.
+        </para>
+        <para>
+          Once allocated, the fake offset value
+          (<literal>obj-&gt;map_list.hash.key &lt;&lt; PAGE_SHIFT</literal>)
+          must be passed to the application in a driver-specific way and can then
+          be used as the mmap offset argument.
+        </para>
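+        <para>
+          A sketch of the corresponding code in a driver-specific ioctl (the
+          args structure is illustrative):
+        </para>
+        <programlisting>
+	/* Only allocate a fake offset if none exists yet. */
+	if (!obj->map_list.map) {
+		ret = drm_gem_create_mmap_offset(obj);
+		if (ret)
+			return ret;
+	}
+
+	/* Returned to the application as the mmap offset. */
+	args->offset = (u64)obj->map_list.hash.key &lt;&lt; PAGE_SHIFT;
+        </programlisting>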
+        <para>
+          The GEM core provides a helper method <function>drm_gem_mmap</function>
+          to handle object mapping. The method can be set directly as the mmap
+          file operation handler. It will look up the GEM object based on the
+          offset value and set the VMA operations to the
+          <structname>drm_driver</structname> <structfield>gem_vm_ops</structfield>
+          field. Note that <function>drm_gem_mmap</function> doesn't map memory to
+          userspace, but relies on the driver-provided fault handler to map pages
+          individually.
+        </para>
+        <para>
+          To use <function>drm_gem_mmap</function>, drivers must fill the struct
+          <structname>drm_driver</structname> <structfield>gem_vm_ops</structfield>
+          field with a pointer to VM operations.
+        </para>
+        <para>
+          <synopsis>struct vm_operations_struct *gem_vm_ops
+
+  struct vm_operations_struct {
+          void (*open)(struct vm_area_struct * area);
+          void (*close)(struct vm_area_struct * area);
+          int (*fault)(struct vm_area_struct *vma, struct vm_fault *vmf);
+  };</synopsis>
+        </para>
+        <para>
+          The <methodname>open</methodname> and <methodname>close</methodname>
+          operations must update the GEM object reference count. Drivers can use
+          the <function>drm_gem_vm_open</function> and
+          <function>drm_gem_vm_close</function> helper functions directly as open
+          and close handlers.
+        </para>
+        <para>
+          The fault operation handler is responsible for mapping individual pages
+          to userspace when a page fault occurs. Depending on the memory
+          allocation scheme, drivers can allocate pages at fault time, or can
+          decide to allocate memory for the GEM object at the time the object is
+          created.
+        </para>
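+        <para>
+          A sketch of a fault handler that allocates pages at fault time for a
+          shmfs-backed object (illustrative, not taken from an actual driver):
+        </para>
+        <programlisting>
+static int foo_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
+{
+	struct drm_gem_object *obj = vma->vm_private_data;
+	unsigned long address = (unsigned long)vmf->virtual_address;
+	pgoff_t page_offset = (address - vma->vm_start) >> PAGE_SHIFT;
+	struct page *page;
+	int ret;
+
+	/* Allocate (or look up) the backing page through shmem. */
+	page = shmem_read_mapping_page_gfp(obj->filp->f_mapping,
+					   page_offset, GFP_KERNEL);
+	if (IS_ERR(page))
+		return VM_FAULT_SIGBUS;
+
+	ret = vm_insert_page(vma, address, page);
+	switch (ret) {
+	case 0:
+	case -EBUSY:
+		return VM_FAULT_NOPAGE;
+	case -ENOMEM:
+		return VM_FAULT_OOM;
+	default:
+		return VM_FAULT_SIGBUS;
+	}
+}
+        </programlisting>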
+        <para>
+          Drivers that want to map the GEM object upfront instead of handling page
+          faults can implement their own mmap file operation handler.
+        </para>
+      </sect3>
+      <sect3>
+        <title>Dumb GEM Objects</title>
+        <para>
+          The GEM API doesn't standardize GEM object creation and leaves it to
+          driver-specific ioctls. While not an issue for full-fledged graphics
+          stacks that include device-specific userspace components (in libdrm for
+          instance), this limitation makes DRM-based early boot graphics unnecessarily
+          complex.
+        </para>
+        <para>
+          Dumb GEM objects partly alleviate the problem by providing a standard
+          API to create dumb buffers suitable for scanout, which can then be used
+          to create KMS frame buffers.
+        </para>
+        <para>
+          To support dumb GEM objects drivers must implement the
+          <methodname>dumb_create</methodname>,
+          <methodname>dumb_destroy</methodname> and
+          <methodname>dumb_map_offset</methodname> operations.
+        </para>
+        <itemizedlist>
+          <listitem>
+            <synopsis>int (*dumb_create)(struct drm_file *file_priv, struct drm_device *dev,
+                     struct drm_mode_create_dumb *args);</synopsis>
+            <para>
+              The <methodname>dumb_create</methodname> operation creates a GEM
+              object suitable for scanout based on the width, height and depth
+              from the struct <structname>drm_mode_create_dumb</structname>
+              argument. It fills the argument's <structfield>handle</structfield>,
+              <structfield>pitch</structfield> and <structfield>size</structfield>
+              fields with a handle for the newly created GEM object and its line
+              pitch and size in bytes.
+            </para>
+          </listitem>
+          <listitem>
+            <synopsis>int (*dumb_destroy)(struct drm_file *file_priv, struct drm_device *dev,
+                      uint32_t handle);</synopsis>
+            <para>
+              The <methodname>dumb_destroy</methodname> operation destroys a dumb
+              GEM object created by <methodname>dumb_create</methodname>.
+            </para>
+          </listitem>
+          <listitem>
+            <synopsis>int (*dumb_map_offset)(struct drm_file *file_priv, struct drm_device *dev,
+                         uint32_t handle, uint64_t *offset);</synopsis>
+            <para>
+              The <methodname>dumb_map_offset</methodname> operation associates an
+              mmap fake offset with the GEM object given by the handle and returns
+              it. Drivers must use the
+              <function>drm_gem_create_mmap_offset</function> function to
+              associate the fake offset as described in
+              <xref linkend="drm-gem-objects-mapping"/>.
+            </para>
+          </listitem>
+        </itemizedlist>
+      </sect3>
+      <sect3>
+        <title>Memory Coherency</title>
+        <para>
+          When mapped to the device or used in a command buffer, backing pages
+          for an object are flushed to memory and marked write combined so as to
+          be coherent with the GPU. Likewise, if the CPU accesses an object
+          after the GPU has finished rendering to the object, then the object
+          must be made coherent with the CPU's view of memory, usually involving
+          GPU cache flushing of various kinds. This core CPU&lt;-&gt;GPU
+          coherency management is provided by a device-specific ioctl, which
+          evaluates an object's current domain and performs any necessary
+          flushing or synchronization to put the object into the desired
+          coherency domain (note that the object may be busy, i.e. an active
+          render target; in that case, setting the domain blocks the client and
+          waits for rendering to complete before performing any necessary
+          flushing operations).
+        </para>
+      </sect3>
+      <sect3>
+        <title>Command Execution</title>
+        <para>
+	  Perhaps the most important GEM function for GPU devices is providing a
+          command execution interface to clients. Client programs construct
+          command buffers containing references to previously allocated memory
+          objects, and then submit them to GEM. At that point, GEM takes care to
+          bind all the objects into the GTT, execute the buffer, and provide
+          necessary synchronization between clients accessing the same buffers.
+          This often involves evicting some objects from the GTT and re-binding
+          others (a fairly expensive operation), and providing relocation
+          support which hides fixed GTT offsets from clients. Clients must take
+          care not to submit command buffers that reference more objects than
+          can fit in the GTT; otherwise, GEM will reject them and no rendering
+          will occur. Similarly, if several objects in the buffer require fence
+          registers to be allocated for correct rendering (e.g. 2D blits on
+          pre-965 chips), care must be taken not to require more fence registers
+          than are available to the client. Such resource management should be
+          abstracted from the client in libdrm.
+        </para>
+      </sect3>
+    </sect2>
+  </sect1>
+
+  <!-- Internals: mode setting -->
+
+  <sect1 id="drm-mode-setting">
+    <title>Mode Setting</title>
+    <para>
+      Drivers must initialize the mode setting core by calling
+      <function>drm_mode_config_init</function> on the DRM device. The function
+      initializes the <structname>drm_device</structname>
+      <structfield>mode_config</structfield> field and never fails. Once done,
+      mode configuration must be set up by initializing the following fields.
+    </para>
+    <itemizedlist>
+      <listitem>
+        <synopsis>int min_width, min_height;
+int max_width, max_height;</synopsis>
+        <para>
+	  Minimum and maximum width and height of the frame buffers in pixel
+	  units.
 	</para>
-	<para>
-	  Connectors should be registered with sysfs once they've been
-	  detected and initialized, using the
-	  drm_sysfs_connector_add() function.  Likewise, when they're
-	  removed from the system, they should be destroyed with
-	  drm_sysfs_connector_remove().
-	</para>
-	<programlisting>
-<![CDATA[
+      </listitem>
+      <listitem>
+        <synopsis>struct drm_mode_config_funcs *funcs;</synopsis>
+	<para>Mode setting functions.</para>
+      </listitem>
+    </itemizedlist>
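+    <para>
+      A minimal initialization sequence along those lines, typically run from
+      the driver <methodname>load</methodname> operation, is sketched below.
+      The limits and the <varname>foo_mode_funcs</varname> structure are
+      hypothetical, device-specific values.
+    </para>
+    <programlisting><![CDATA[
+static int foo_load(struct drm_device *dev, unsigned long flags)
+{
+	drm_mode_config_init(dev);
+
+	dev->mode_config.min_width = 0;
+	dev->mode_config.min_height = 0;
+	dev->mode_config.max_width = 4096;	/* device-dependent limits */
+	dev->mode_config.max_height = 4096;
+	dev->mode_config.funcs = &foo_mode_funcs;
+
+	/* ... create CRTCs, encoders and connectors here ... */
+
+	return 0;
+}]]></programlisting>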
+    <sect2>
+      <title>Frame Buffer Creation</title>
+      <synopsis>struct drm_framebuffer *(*fb_create)(struct drm_device *dev,
+				     struct drm_file *file_priv,
+				     struct drm_mode_fb_cmd2 *mode_cmd);</synopsis>
+      <para>
+        Frame buffers are abstract memory objects that provide a source of
+        pixels to scan out to a CRTC. Applications explicitly request the
+        creation of frame buffers through the DRM_IOCTL_MODE_ADDFB and
+        DRM_IOCTL_MODE_ADDFB2 ioctls and receive an opaque handle that can be
+        passed to the KMS CRTC control, plane configuration and page flip
+        functions.
+      </para>
+      <para>
+        Frame buffers rely on the underlying memory manager for low-level
+        memory operations. When creating a frame buffer applications pass a
+        memory handle (or a list of memory handles for multi-planar formats)
+        through the <parameter>drm_mode_fb_cmd2</parameter> argument. This
+        document assumes that the driver uses GEM, in which case those handles
+        reference GEM objects.
+      </para>
+      <para>
+        Drivers must first validate the requested frame buffer parameters passed
+        through the mode_cmd argument. In particular this is where invalid
+        sizes, pixel formats or pitches can be caught.
+      </para>
+      <para>
+        If the parameters are deemed valid, drivers then create, initialize and
+        return an instance of struct <structname>drm_framebuffer</structname>.
+        If desired the instance can be embedded in a larger driver-specific
+        structure. The new instance is initialized with a call to
+        <function>drm_framebuffer_init</function> which takes a pointer to DRM
+        frame buffer operations (struct
+        <structname>drm_framebuffer_funcs</structname>). Frame buffer operations are
+        <itemizedlist>
+          <listitem>
+            <synopsis>int (*create_handle)(struct drm_framebuffer *fb,
+		     struct drm_file *file_priv, unsigned int *handle);</synopsis>
+            <para>
+              Create a handle to the frame buffer underlying memory object. If
+              the frame buffer uses a multi-plane format, the handle will
+              reference the memory object associated with the first plane.
+            </para>
+            <para>
+              Drivers call <function>drm_gem_handle_create</function> to create
+              the handle.
+            </para>
+          </listitem>
+          <listitem>
+            <synopsis>void (*destroy)(struct drm_framebuffer *framebuffer);</synopsis>
+            <para>
+              Destroy the frame buffer object and free all associated
+              resources. Drivers must call
+              <function>drm_framebuffer_cleanup</function> to free resources
+              allocated by the DRM core for the frame buffer object, and must
+              make sure to unreference all memory objects associated with the
+              frame buffer. Handles created by the
+              <methodname>create_handle</methodname> operation are released by
+              the DRM core.
+            </para>
+          </listitem>
+          <listitem>
+            <synopsis>int (*dirty)(struct drm_framebuffer *framebuffer,
+	     struct drm_file *file_priv, unsigned flags, unsigned color,
+	     struct drm_clip_rect *clips, unsigned num_clips);</synopsis>
+            <para>
+              This optional operation notifies the driver that a region of the
+              frame buffer has changed in response to a DRM_IOCTL_MODE_DIRTYFB
+              ioctl call.
+            </para>
+          </listitem>
+        </itemizedlist>
+      </para>
+      <para>
+        After initializing the <structname>drm_framebuffer</structname>
+        instance drivers must fill its <structfield>width</structfield>,
+        <structfield>height</structfield>, <structfield>pitches</structfield>,
+        <structfield>offsets</structfield>, <structfield>depth</structfield>,
+        <structfield>bits_per_pixel</structfield> and
+        <structfield>pixel_format</structfield> fields from the values passed
+        through the <parameter>drm_mode_fb_cmd2</parameter> argument. They
+        should call the <function>drm_helper_mode_fill_fb_struct</function>
+        helper function to do so.
+      </para>
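+      <para>
+        The sequence is sketched below for a GEM-based driver. The
+        <structname>foo_framebuffer</structname> structure and the
+        <varname>foo_framebuffer_funcs</varname> operations are hypothetical
+        driver-specific names, and a single-plane format is assumed for
+        simplicity.
+      </para>
+      <programlisting><![CDATA[
+struct foo_framebuffer {
+	struct drm_framebuffer base;
+	struct drm_gem_object *obj;
+};
+
+static struct drm_framebuffer *
+foo_fb_create(struct drm_device *dev, struct drm_file *file_priv,
+	      struct drm_mode_fb_cmd2 *mode_cmd)
+{
+	struct foo_framebuffer *foo_fb;
+	struct drm_gem_object *obj;
+	int ret;
+
+	/* Validation of sizes, pixel format and pitches goes here. */
+
+	obj = drm_gem_object_lookup(dev, file_priv, mode_cmd->handles[0]);
+	if (!obj)
+		return ERR_PTR(-ENOENT);
+
+	foo_fb = kzalloc(sizeof(*foo_fb), GFP_KERNEL);
+	if (!foo_fb) {
+		drm_gem_object_unreference_unlocked(obj);
+		return ERR_PTR(-ENOMEM);
+	}
+
+	ret = drm_framebuffer_init(dev, &foo_fb->base, &foo_framebuffer_funcs);
+	if (ret) {
+		drm_gem_object_unreference_unlocked(obj);
+		kfree(foo_fb);
+		return ERR_PTR(ret);
+	}
+
+	/* Fill width, height, pitches, offsets, depth, bpp and format. */
+	drm_helper_mode_fill_fb_struct(&foo_fb->base, mode_cmd);
+	foo_fb->obj = obj;
+
+	return &foo_fb->base;
+}]]></programlisting>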
+    </sect2>
+    <sect2>
+      <title>Output Polling</title>
+      <synopsis>void (*output_poll_changed)(struct drm_device *dev);</synopsis>
+      <para>
+        This operation notifies the driver that the status of one or more
+        connectors has changed. Drivers that use the fb helper can just call the
+        <function>drm_fb_helper_hotplug_event</function> function to handle this
+        operation.
+      </para>
+    </sect2>
+  </sect1>
+
+  <!-- Internals: kms initialization and cleanup -->
+
+  <sect1 id="drm-kms-init">
+    <title>KMS Initialization and Cleanup</title>
+    <para>
+      A KMS device is abstracted and exposed as a set of planes, CRTCs, encoders
+      and connectors. KMS drivers must thus create and initialize all those
+      objects at load time after initializing mode setting.
+    </para>
+    <sect2>
+      <title>CRTCs (struct <structname>drm_crtc</structname>)</title>
+      <para>
+        A CRTC is an abstraction representing a part of the chip that contains a
+	pointer to a scanout buffer. Therefore, the number of CRTCs available
+	determines how many independent scanout buffers can be active at any
+	given time. The CRTC structure contains several fields to support this:
+	a pointer to some video memory (abstracted as a frame buffer object), a
+	display mode, and an (x, y) offset into the video memory to support
+	panning or configurations where one piece of video memory spans multiple
+	CRTCs.
+      </para>
+      <sect3>
+        <title>CRTC Initialization</title>
+        <para>
+          A KMS device must create and register at least one struct
+          <structname>drm_crtc</structname> instance. The instance is allocated
+          and zeroed by the driver, possibly as part of a larger structure, and
+          registered with a call to <function>drm_crtc_init</function> with a
+          pointer to CRTC functions.
+        </para>
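+        <para>
+          A possible sketch, with <structname>foo_crtc</structname> and
+          <varname>foo_crtc_funcs</varname> as hypothetical driver-specific
+          names:
+        </para>
+        <programlisting><![CDATA[
+struct foo_crtc {
+	struct drm_crtc base;
+	/* driver-private CRTC state goes here */
+};
+
+static int foo_crtc_create(struct drm_device *dev)
+{
+	struct foo_crtc *foo_crtc;
+
+	foo_crtc = kzalloc(sizeof(*foo_crtc), GFP_KERNEL);
+	if (!foo_crtc)
+		return -ENOMEM;
+
+	/* foo_crtc_funcs is the driver's struct drm_crtc_funcs instance. */
+	drm_crtc_init(dev, &foo_crtc->base, &foo_crtc_funcs);
+	return 0;
+}]]></programlisting>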
+      </sect3>
+      <sect3>
+        <title>CRTC Operations</title>
+        <sect4>
+          <title>Set Configuration</title>
+          <synopsis>int (*set_config)(struct drm_mode_set *set);</synopsis>
+          <para>
+            Apply a new CRTC configuration to the device. The configuration
+            specifies a CRTC, a frame buffer to scan out from, an (x,y) position in
+            the frame buffer, a display mode and an array of connectors to drive
+            with the CRTC if possible.
+          </para>
+          <para>
+            If the frame buffer specified in the configuration is NULL, the driver
+            must detach all encoders connected to the CRTC and all connectors
+            attached to those encoders and disable them.
+          </para>
+          <para>
+            This operation is called with the mode config lock held.
+          </para>
+          <note><para>
+            FIXME: How should set_config interact with DPMS? If the CRTC is
+            suspended, should it be resumed?
+          </para></note>
+        </sect4>
+        <sect4>
+          <title>Page Flipping</title>
+          <synopsis>int (*page_flip)(struct drm_crtc *crtc, struct drm_framebuffer *fb,
+                   struct drm_pending_vblank_event *event);</synopsis>
+          <para>
+            Schedule a page flip to the given frame buffer for the CRTC. This
+            operation is called with the mode config mutex held.
+          </para>
+          <para>
+            Page flipping is a synchronization mechanism that replaces the frame
+            buffer being scanned out by the CRTC with a new frame buffer during
+            vertical blanking, avoiding tearing. When an application requests a page
+            flip the DRM core verifies that the new frame buffer is large enough to
+            be scanned out by the CRTC in the currently configured mode and then
+            calls the CRTC <methodname>page_flip</methodname> operation with a
+            pointer to the new frame buffer.
+          </para>
+          <para>
+            The <methodname>page_flip</methodname> operation schedules a page flip.
+            Once any pending rendering targeting the new frame buffer has
+            completed, the CRTC will be reprogrammed to display that frame buffer
+            after the next vertical refresh. The operation must return immediately
+            without waiting for rendering or page flip to complete and must block
+            any new rendering to the frame buffer until the page flip completes.
+          </para>
+          <para>
+            If a page flip is already pending, the
+            <methodname>page_flip</methodname> operation must return
+            -<errorname>EBUSY</errorname>.
+          </para>
+          <para>
+            To synchronize page flip to vertical blanking the driver will likely
+            need to enable vertical blanking interrupts. It should call
+            <function>drm_vblank_get</function> for that purpose, and call
+            <function>drm_vblank_put</function> after the page flip completes.
+          </para>
+          <para>
+            If the application has requested to be notified when page flip completes
+            the <methodname>page_flip</methodname> operation will be called with a
+            non-NULL <parameter>event</parameter> argument pointing to a
+            <structname>drm_pending_vblank_event</structname> instance. Upon page
+            flip completion the driver must fill the
+            <parameter>event</parameter>::<structfield>event</structfield>
+            <structfield>sequence</structfield>, <structfield>tv_sec</structfield>
+            and <structfield>tv_usec</structfield> fields with the associated
+            vertical blanking count and timestamp, add the event to the
+            <parameter>drm_file</parameter> list of events to be signaled, and wake
+            up any waiting process. This can be performed with
+            <programlisting><![CDATA[
+            struct timeval now;
+
+            event->event.sequence = drm_vblank_count_and_time(..., &now);
+            event->event.tv_sec = now.tv_sec;
+            event->event.tv_usec = now.tv_usec;
+
+            spin_lock_irqsave(&dev->event_lock, flags);
+            list_add_tail(&event->base.link, &event->base.file_priv->event_list);
+            wake_up_interruptible(&event->base.file_priv->event_wait);
+            spin_unlock_irqrestore(&dev->event_lock, flags);
+            ]]></programlisting>
+          </para>
+          <note><para>
+            FIXME: Could drivers that don't need to wait for rendering to complete
+            just add the event to <literal>dev-&gt;vblank_event_list</literal> and
+            let the DRM core handle everything, as for "normal" vertical blanking
+            events?
+          </para></note>
+          <para>
+            While waiting for the page flip to complete, the
+            <literal>event-&gt;base.link</literal> list head can be used freely by
+            the driver to store the pending event in a driver-specific list.
+          </para>
+          <para>
+            If the file handle is closed before the event is signaled, drivers must
+            take care to destroy the event in their
+            <methodname>preclose</methodname> operation (and, if needed, call
+            <function>drm_vblank_put</function>).
+          </para>
+        </sect4>
+        <sect4>
+          <title>Miscellaneous</title>
+          <itemizedlist>
+            <listitem>
+              <synopsis>void (*gamma_set)(struct drm_crtc *crtc, u16 *r, u16 *g, u16 *b,
+                        uint32_t start, uint32_t size);</synopsis>
+              <para>
+                Apply a gamma table to the device. The operation is optional.
+              </para>
+            </listitem>
+            <listitem>
+              <synopsis>void (*destroy)(struct drm_crtc *crtc);</synopsis>
+              <para>
+                Destroy the CRTC when not needed anymore. See
+                <xref linkend="drm-kms-init"/>.
+              </para>
+            </listitem>
+          </itemizedlist>
+        </sect4>
+      </sect3>
+    </sect2>
+    <sect2>
+      <title>Planes (struct <structname>drm_plane</structname>)</title>
+      <para>
+        A plane represents an image source that can be blended with or overlaid
+	on top of a CRTC during the scanout process. Planes are associated with
+	a frame buffer to crop a portion of the image memory (source) and
+	optionally scale it to a destination size. The result is then blended
+	with or overlaid on top of a CRTC.
+      </para>
+      <sect3>
+        <title>Plane Initialization</title>
+        <para>
+          Planes are optional. To create a plane, a KMS driver allocates and
+          zeroes an instance of struct <structname>drm_plane</structname>
+          (possibly as part of a larger structure) and registers it with a call
+          to <function>drm_plane_init</function>. The function takes a bitmask
+          of the CRTCs that can be associated with the plane, a pointer to the
+          plane functions and a list of supported formats.
+        </para>
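+        <para>
+          A hypothetical registration sketch follows; the format list, the
+          CRTC mask and <varname>foo_plane_funcs</varname> are driver-specific
+          assumptions, and the last argument marks the plane as public rather
+          than driver-private.
+        </para>
+        <programlisting><![CDATA[
+static const uint32_t foo_plane_formats[] = {
+	DRM_FORMAT_XRGB8888,
+	DRM_FORMAT_YUYV,
+};
+
+static int foo_plane_create(struct drm_device *dev, struct drm_plane *plane)
+{
+	/* This example plane can only be connected to CRTC 0. */
+	return drm_plane_init(dev, plane, BIT(0), &foo_plane_funcs,
+			      foo_plane_formats,
+			      ARRAY_SIZE(foo_plane_formats), false);
+}]]></programlisting>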
+      </sect3>
+      <sect3>
+        <title>Plane Operations</title>
+        <itemizedlist>
+          <listitem>
+            <synopsis>int (*update_plane)(struct drm_plane *plane, struct drm_crtc *crtc,
+                        struct drm_framebuffer *fb, int crtc_x, int crtc_y,
+                        unsigned int crtc_w, unsigned int crtc_h,
+                        uint32_t src_x, uint32_t src_y,
+                        uint32_t src_w, uint32_t src_h);</synopsis>
+            <para>
+              Enable and configure the plane to use the given CRTC and frame buffer.
+            </para>
+            <para>
+              The source rectangle in frame buffer memory coordinates is given by
+              the <parameter>src_x</parameter>, <parameter>src_y</parameter>,
+              <parameter>src_w</parameter> and <parameter>src_h</parameter>
+              parameters (as 16.16 fixed point values). Devices that don't support
+              subpixel plane coordinates can ignore the fractional part.
+            </para>
+            <para>
+              The destination rectangle in CRTC coordinates is given by the
+              <parameter>crtc_x</parameter>, <parameter>crtc_y</parameter>,
+              <parameter>crtc_w</parameter> and <parameter>crtc_h</parameter>
+              parameters (as integer values). Devices scale the source rectangle to
+              the destination rectangle. If scaling is not supported, and the source
+              rectangle size doesn't match the destination rectangle size, the
+              driver must return a -<errorname>EINVAL</errorname> error.
+            </para>
+          </listitem>
+          <listitem>
+            <synopsis>int (*disable_plane)(struct drm_plane *plane);</synopsis>
+            <para>
+              Disable the plane. The DRM core calls this method in response to a
+              DRM_IOCTL_MODE_SETPLANE ioctl call with the frame buffer ID set to 0.
+              Disabled planes must not be processed by the CRTC.
+            </para>
+          </listitem>
+          <listitem>
+            <synopsis>void (*destroy)(struct drm_plane *plane);</synopsis>
+            <para>
+              Destroy the plane when not needed anymore. See
+              <xref linkend="drm-kms-init"/>.
+            </para>
+          </listitem>
+        </itemizedlist>
+      </sect3>
+    </sect2>
+    <sect2>
+      <title>Encoders (struct <structname>drm_encoder</structname>)</title>
+      <para>
+        An encoder takes pixel data from a CRTC and converts it to a format
+	suitable for any attached connectors. On some devices, it may be
+	possible to have a CRTC send data to more than one encoder. In that
+	case, both encoders would receive data from the same scanout buffer,
+	resulting in a "cloned" display configuration across the connectors
+	attached to each encoder.
+      </para>
+      <sect3>
+        <title>Encoder Initialization</title>
+        <para>
+          As for CRTCs, a KMS driver must create, initialize and register at
+          least one struct <structname>drm_encoder</structname> instance. The
+          instance is allocated and zeroed by the driver, possibly as part of a
+          larger structure.
+        </para>
+        <para>
+          Drivers must initialize the struct <structname>drm_encoder</structname>
+          <structfield>possible_crtcs</structfield> and
+          <structfield>possible_clones</structfield> fields before registering the
+          encoder. The fields are bitmasks of, respectively, the CRTCs that the
+          encoder can be connected to, and the sibling encoders that are
+          candidates for cloning.
+        </para>
+        <para>
+          After being initialized, the encoder must be registered with a call to
+          <function>drm_encoder_init</function>. The function takes a pointer to
+          the encoder functions and an encoder type. Supported types are
+          <itemizedlist>
+            <listitem>
+              DRM_MODE_ENCODER_DAC for VGA and analog on DVI-I/DVI-A
+              </listitem>
+            <listitem>
+              DRM_MODE_ENCODER_TMDS for DVI, HDMI and (embedded) DisplayPort
+            </listitem>
+            <listitem>
+              DRM_MODE_ENCODER_LVDS for display panels
+            </listitem>
+            <listitem>
+              DRM_MODE_ENCODER_TVDAC for TV output (Composite, S-Video, Component,
+              SCART)
+            </listitem>
+            <listitem>
+              DRM_MODE_ENCODER_VIRTUAL for virtual machine displays
+            </listitem>
+          </itemizedlist>
+        </para>
+        <para>
+          Encoders must be attached to a CRTC to be used. DRM drivers leave
+          encoders unattached at initialization time. Applications (or the fbdev
+          compatibility layer when implemented) are responsible for attaching the
+          encoders they want to use to a CRTC.
+        </para>
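+        <para>
+          A sketch for a TMDS encoder that can be driven by two CRTCs and
+          doesn't support cloning; <varname>foo_encoder_funcs</varname> is a
+          hypothetical driver-provided functions structure.
+        </para>
+        <programlisting><![CDATA[
+static int foo_encoder_create(struct drm_device *dev)
+{
+	struct drm_encoder *encoder;
+
+	encoder = kzalloc(sizeof(*encoder), GFP_KERNEL);
+	if (!encoder)
+		return -ENOMEM;
+
+	/* Both bitmasks must be set before registering the encoder. */
+	encoder->possible_crtcs = 0x3;	/* CRTCs 0 and 1 */
+	encoder->possible_clones = 0;	/* no cloning support */
+
+	drm_encoder_init(dev, encoder, &foo_encoder_funcs,
+			 DRM_MODE_ENCODER_TMDS);
+	return 0;
+}]]></programlisting>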
+      </sect3>
+      <sect3>
+        <title>Encoder Operations</title>
+        <itemizedlist>
+          <listitem>
+            <synopsis>void (*destroy)(struct drm_encoder *encoder);</synopsis>
+            <para>
+              Called to destroy the encoder when not needed anymore. See
+              <xref linkend="drm-kms-init"/>.
+            </para>
+          </listitem>
+        </itemizedlist>
+      </sect3>
+    </sect2>
+    <sect2>
+      <title>Connectors (struct <structname>drm_connector</structname>)</title>
+      <para>
+        A connector is the final destination for pixel data on a device, and
+	usually connects directly to an external display device like a monitor
+	or laptop panel. A connector can only be attached to one encoder at a
+	time. The connector is also the structure where information about the
+	attached display is kept, so it contains fields for display data, EDID
+	data, DPMS &amp; connection status, and information about modes
+	supported on the attached displays.
+      </para>
+      <sect3>
+        <title>Connector Initialization</title>
+        <para>
+          Finally a KMS driver must create, initialize, register and attach at
+          least one struct <structname>drm_connector</structname> instance. The
+          instance is created like other KMS objects and initialized by setting the
+          following fields.
+        </para>
+        <variablelist>
+          <varlistentry>
+            <term><structfield>interlace_allowed</structfield></term>
+            <listitem><para>
+              Whether the connector can handle interlaced modes.
+            </para></listitem>
+          </varlistentry>
+          <varlistentry>
+            <term><structfield>doublescan_allowed</structfield></term>
+            <listitem><para>
+              Whether the connector can handle doublescan.
+            </para></listitem>
+          </varlistentry>
+          <varlistentry>
+            <term><structfield>display_info</structfield></term>
+            <listitem><para>
+              Display information is filled from EDID information when a display
+              is detected. For non hot-pluggable displays such as flat panels in
+              embedded systems, the driver should initialize the
+              <structfield>display_info</structfield>.<structfield>width_mm</structfield>
+              and
+              <structfield>display_info</structfield>.<structfield>height_mm</structfield>
+              fields with the physical size of the display.
+            </para></listitem>
+          </varlistentry>
+          <varlistentry>
+            <term id="drm-kms-connector-polled"><structfield>polled</structfield></term>
+            <listitem><para>
+              Connector polling mode, a combination of
+              <variablelist>
+                <varlistentry>
+                  <term>DRM_CONNECTOR_POLL_HPD</term>
+                  <listitem><para>
+                    The connector generates hotplug events and doesn't need to be
+                    periodically polled. The CONNECT and DISCONNECT flags must not
+                    be set together with the HPD flag.
+                  </para></listitem>
+                </varlistentry>
+                <varlistentry>
+                  <term>DRM_CONNECTOR_POLL_CONNECT</term>
+                  <listitem><para>
+                    Periodically poll the connector for connection.
+                  </para></listitem>
+                </varlistentry>
+                <varlistentry>
+                  <term>DRM_CONNECTOR_POLL_DISCONNECT</term>
+                  <listitem><para>
+                    Periodically poll the connector for disconnection.
+                  </para></listitem>
+                </varlistentry>
+              </variablelist>
+              Set to 0 for connectors that don't support connection status
+              discovery.
+            </para></listitem>
+          </varlistentry>
+        </variablelist>
+        <para>
+          The connector is then registered with a call to
+          <function>drm_connector_init</function> with a pointer to the connector
+          functions and a connector type, and exposed through sysfs with a call to
+          <function>drm_sysfs_connector_add</function>.
+        </para>
+        <para>
+          Supported connector types are
+          <itemizedlist>
+            <listitem>DRM_MODE_CONNECTOR_VGA</listitem>
+            <listitem>DRM_MODE_CONNECTOR_DVII</listitem>
+            <listitem>DRM_MODE_CONNECTOR_DVID</listitem>
+            <listitem>DRM_MODE_CONNECTOR_DVIA</listitem>
+            <listitem>DRM_MODE_CONNECTOR_Composite</listitem>
+            <listitem>DRM_MODE_CONNECTOR_SVIDEO</listitem>
+            <listitem>DRM_MODE_CONNECTOR_LVDS</listitem>
+            <listitem>DRM_MODE_CONNECTOR_Component</listitem>
+            <listitem>DRM_MODE_CONNECTOR_9PinDIN</listitem>
+            <listitem>DRM_MODE_CONNECTOR_DisplayPort</listitem>
+            <listitem>DRM_MODE_CONNECTOR_HDMIA</listitem>
+            <listitem>DRM_MODE_CONNECTOR_HDMIB</listitem>
+            <listitem>DRM_MODE_CONNECTOR_TV</listitem>
+            <listitem>DRM_MODE_CONNECTOR_eDP</listitem>
+            <listitem>DRM_MODE_CONNECTOR_VIRTUAL</listitem>
+          </itemizedlist>
+        </para>
+        <para>
+          Connectors must be attached to an encoder to be used. For devices that
+          map connectors to encoders 1:1, the connector should be attached at
+          initialization time with a call to
+          <function>drm_mode_connector_attach_encoder</function>. The driver must
+          also set the <structname>drm_connector</structname>
+          <structfield>encoder</structfield> field to point to the attached
+          encoder.
+        </para>
+        <para>
+          Finally, drivers must initialize the connector state change detection
+          with a call to <function>drm_kms_helper_poll_init</function>. If at
+          least one connector is pollable but can't generate hotplug interrupts
+          (indicated by the DRM_CONNECTOR_POLL_CONNECT and
+          DRM_CONNECTOR_POLL_DISCONNECT connector flags), a delayed work will
+          automatically be queued to periodically poll for changes. Connectors
+          that can generate hotplug interrupts must be marked with the
+          DRM_CONNECTOR_POLL_HPD flag instead, and their interrupt handler must
+          call <function>drm_helper_hpd_irq_event</function>. The function will
+          queue a delayed work to check the state of all connectors, but no
+          periodic polling will be done.
+        </para>
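+        <para>
+          Putting the above together for a hotplug-capable HDMI connector
+          attached 1:1 to an encoder could look like the following sketch;
+          <varname>foo_connector_funcs</varname> is a hypothetical
+          driver-provided functions structure.
+        </para>
+        <programlisting><![CDATA[
+static int foo_connector_create(struct drm_device *dev,
+				struct drm_connector *connector,
+				struct drm_encoder *encoder)
+{
+	connector->interlace_allowed = true;
+	connector->polled = DRM_CONNECTOR_POLL_HPD;
+
+	drm_connector_init(dev, connector, &foo_connector_funcs,
+			   DRM_MODE_CONNECTOR_HDMIA);
+	drm_sysfs_connector_add(connector);
+
+	/* 1:1 mapping: attach the connector at initialization time. */
+	drm_mode_connector_attach_encoder(connector, encoder);
+	connector->encoder = encoder;
+
+	return 0;
+}]]></programlisting>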
+      </sect3>
+      <sect3>
+        <title>Connector Operations</title>
+        <note><para>
+          Unless otherwise stated, all operations are mandatory.
+        </para></note>
+        <sect4>
+          <title>DPMS</title>
+          <synopsis>void (*dpms)(struct drm_connector *connector, int mode);</synopsis>
+          <para>
+            The DPMS operation sets the power state of a connector. The mode
+            argument is one of
+            <itemizedlist>
+              <listitem><para>DRM_MODE_DPMS_ON</para></listitem>
+              <listitem><para>DRM_MODE_DPMS_STANDBY</para></listitem>
+              <listitem><para>DRM_MODE_DPMS_SUSPEND</para></listitem>
+              <listitem><para>DRM_MODE_DPMS_OFF</para></listitem>
+            </itemizedlist>
+          </para>
+          <para>
+            In all but DPMS_ON mode the encoder to which the connector is attached
+            should put the display in low-power mode by driving its signals
+            appropriately. If more than one connector is attached to the encoder
+            care should be taken not to change the power state of other displays as
+            a side effect. Low-power mode should be propagated to the encoders and
+            CRTCs when all related connectors are put in low-power mode.
+          </para>
+        </sect4>
+        <sect4>
+          <title>Modes</title>
+          <synopsis>int (*fill_modes)(struct drm_connector *connector, uint32_t max_width,
+                      uint32_t max_height);</synopsis>
+          <para>
+            Fill the mode list with all supported modes for the connector. If the
+            <parameter>max_width</parameter> and <parameter>max_height</parameter>
+            arguments are non-zero, the implementation must ignore all modes wider
+            than <parameter>max_width</parameter> or higher than
+            <parameter>max_height</parameter>.
+          </para>
+          <para>
+            In this operation the connector must also fill its
+            <structfield>display_info</structfield>
+            <structfield>width_mm</structfield> and
+            <structfield>height_mm</structfield> fields with the physical size
+            of the connected display in millimeters. The fields should be set
+            to 0 if the value isn't known or is not applicable (for instance
+            for projector devices).
+          </para>
+        </sect4>
+        <sect4>
+          <title>Connection Status</title>
+          <para>
+            The connection status is updated through polling or hotplug events when
+            supported (see <xref linkend="drm-kms-connector-polled"/>). The status
+            value is reported to userspace through ioctls and must not be used
+            inside the driver, as it only gets initialized by a call to
+            <function>drm_mode_getconnector</function> from userspace.
+          </para>
+          <synopsis>enum drm_connector_status (*detect)(struct drm_connector *connector,
+                                        bool force);</synopsis>
+          <para>
+            Check to see if anything is attached to the connector. The
+            <parameter>force</parameter> parameter is set to false whilst polling or
+            to true when checking the connector due to user request.
+            <parameter>force</parameter> can be used by the driver to avoid
+            expensive, destructive operations during automated probing.
+          </para>
+          <para>
+            Return connector_status_connected if something is connected to the
+            connector, connector_status_disconnected if nothing is connected and
+            connector_status_unknown if the connection state isn't known.
+          </para>
+          <para>
+            Drivers should only return connector_status_connected if the connection
+            status has really been probed as connected. Connectors that can't detect
+            the connection status, or failed connection status probes, should return
+            connector_status_unknown.
+          </para>
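+          <para>
+            A sketch reading a device-specific hotplug status register;
+            <function>foo_read</function> and the FOO_HPD_* names are made up
+            for the example.
+          </para>
+          <programlisting><![CDATA[
+static enum drm_connector_status
+foo_detect(struct drm_connector *connector, bool force)
+{
+	struct drm_device *dev = connector->dev;
+
+	/* force is ignored: this probe is cheap and non-destructive. */
+	if (foo_read(dev, FOO_HPD_STATUS) & FOO_HPD_CONNECTED)
+		return connector_status_connected;
+
+	return connector_status_disconnected;
+}]]></programlisting>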
+        </sect4>
+        <sect4>
+          <title>Miscellaneous</title>
+          <itemizedlist>
+            <listitem>
+              <synopsis>void (*destroy)(struct drm_connector *connector);</synopsis>
+              <para>
+                Destroy the connector when not needed anymore. See
+                <xref linkend="drm-kms-init"/>.
+              </para>
+            </listitem>
+          </itemizedlist>
+        </sect4>
+      </sect3>
+    </sect2>
+    <sect2>
+      <title>Cleanup</title>
+      <para>
+        The DRM core manages its objects' lifetime. When an object is not needed
+	anymore the core calls its destroy function, which must clean up and
+	free every resource allocated for the object. Every
+	<function>drm_*_init</function> call must be matched with a
+	corresponding <function>drm_*_cleanup</function> call to clean up CRTCs
+	(<function>drm_crtc_cleanup</function>), planes
+	(<function>drm_plane_cleanup</function>), encoders
+	(<function>drm_encoder_cleanup</function>) and connectors
+	(<function>drm_connector_cleanup</function>). Furthermore, connectors
+	that have been added to sysfs must be removed by a call to
+	<function>drm_sysfs_connector_remove</function> before calling
+	<function>drm_connector_cleanup</function>.
+      </para>
+      <para>
+        Connector state change detection must be cleaned up with a call to
+	<function>drm_kms_helper_poll_fini</function>.
+      </para>
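+      <para>
+        A connector <methodname>destroy</methodname> implementation following
+        these rules could look like this sketch, assuming the connector was
+        allocated with <function>kzalloc</function>:
+      </para>
+      <programlisting><![CDATA[
+static void foo_connector_destroy(struct drm_connector *connector)
+{
+	/* Remove the connector from sysfs before cleaning it up. */
+	drm_sysfs_connector_remove(connector);
+	drm_connector_cleanup(connector);
+	kfree(connector);
+}]]></programlisting>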
+    </sect2>
+    <sect2>
+      <title>Output discovery and initialization example</title>
+      <programlisting><![CDATA[
 void intel_crt_init(struct drm_device *dev)
 {
 	struct drm_connector *connector;
@@ -556,252 +1610,741 @@
 	drm_connector_helper_add(connector, &intel_crt_connector_helper_funcs);
 
 	drm_sysfs_connector_add(connector);
-}
-]]>
+}]]></programlisting>
+      <para>
+        In the example above (taken from the i915 driver), a CRTC, connector and
+        encoder combination is created. A device-specific i2c bus is also
+        created for fetching EDID data and performing monitor detection. Once
+        the process is complete, the new connector is registered with sysfs to
+        make its properties available to applications.
+      </para>
+    </sect2>
+  </sect1>
+
+  <!-- Internals: mid-layer helper functions -->
+
+  <sect1>
+    <title>Mid-layer Helper Functions</title>
+    <para>
+      The CRTC, encoder and connector functions provided by the drivers
+      implement the DRM API. They're called by the DRM core and ioctl handlers
+      to handle device state changes and configuration requests. As implementing
+      those functions often requires logic not specific to drivers, mid-layer
+      helper functions are available to avoid duplicating boilerplate code.
+    </para>
+    <para>
+      The DRM core contains one mid-layer implementation. The mid-layer provides
+      implementations of several CRTC, encoder and connector functions (called
+      from the top of the mid-layer) that pre-process requests and call
+      lower-level functions provided by the driver (at the bottom of the
+      mid-layer). For instance, the
+      <function>drm_crtc_helper_set_config</function> function can be used to
+      fill the struct <structname>drm_crtc_funcs</structname>
+      <structfield>set_config</structfield> field. When called, it will split
+      the <methodname>set_config</methodname> operation into smaller, simpler
+      operations and call the driver to handle them.
+    </para>
+    <para>
+      To use the mid-layer, drivers call <function>drm_crtc_helper_add</function>,
+      <function>drm_encoder_helper_add</function> and
+      <function>drm_connector_helper_add</function> functions to install their
+      mid-layer bottom operations handlers, and fill the
+      <structname>drm_crtc_funcs</structname>,
+      <structname>drm_encoder_funcs</structname> and
+      <structname>drm_connector_funcs</structname> structures with pointers to
+      the mid-layer top API functions. Installing the mid-layer bottom operation
+      handlers is best done right after registering the corresponding KMS object.
+    </para>
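+    <para>
+      For a CRTC this wiring could look as sketched below; all
+      <function>foo_crtc_*</function> handlers are hypothetical
+      driver-provided functions.
+    </para>
+    <programlisting><![CDATA[
+static void foo_crtc_destroy(struct drm_crtc *crtc)
+{
+	drm_crtc_cleanup(crtc);
+	kfree(crtc);
+}
+
+static const struct drm_crtc_funcs foo_crtc_funcs = {
+	.set_config = drm_crtc_helper_set_config,	/* mid-layer top */
+	.destroy = foo_crtc_destroy,
+};
+
+static const struct drm_crtc_helper_funcs foo_crtc_helper_funcs = {
+	/* mid-layer bottom operations */
+	.dpms = foo_crtc_dpms,
+	.mode_fixup = foo_crtc_mode_fixup,
+	.prepare = foo_crtc_prepare,
+	.mode_set = foo_crtc_mode_set,
+	.commit = foo_crtc_commit,
+};
+
+static void foo_crtc_register(struct drm_device *dev, struct drm_crtc *crtc)
+{
+	drm_crtc_init(dev, crtc, &foo_crtc_funcs);
+	drm_crtc_helper_add(crtc, &foo_crtc_helper_funcs);
+}]]></programlisting>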
+    <para>
+      The mid-layer is not split between CRTC, encoder and connector operations.
+      To use it, a driver must provide bottom functions for all three KMS
+      entities.
+    </para>
+    <sect2>
+      <title>Helper Functions</title>
+      <itemizedlist>
+        <listitem>
+          <synopsis>int drm_crtc_helper_set_config(struct drm_mode_set *set);</synopsis>
+          <para>
+            The <function>drm_crtc_helper_set_config</function> helper function
+            is a CRTC <methodname>set_config</methodname> implementation. It
+            first tries to locate the best encoder for each connector by calling
+            the connector <methodname>best_encoder</methodname> helper
+            operation.
+          </para>
+          <para>
+            After locating the appropriate encoders, the helper function will
+            call the <methodname>mode_fixup</methodname> encoder and CRTC helper
+            operations to adjust the requested mode, or reject it completely in
+            which case an error will be returned to the application. If the new
+            configuration after mode adjustment is identical to the current
+            configuration the helper function will return without performing any
+            other operation.
+          </para>
+          <para>
+            If the adjusted mode is identical to the current mode but changes to
+            the frame buffer need to be applied, the
+            <function>drm_crtc_helper_set_config</function> function will call
+            the CRTC <methodname>mode_set_base</methodname> helper operation. If
+            the adjusted mode differs from the current mode, or if the
+            <methodname>mode_set_base</methodname> helper operation is not
+            provided, the helper function performs a full mode set sequence by
+            calling the <methodname>prepare</methodname>,
+            <methodname>mode_set</methodname> and
+            <methodname>commit</methodname> CRTC and encoder helper operations,
+            in that order.
+          </para>
+        </listitem>
+        <listitem>
+          <synopsis>void drm_helper_connector_dpms(struct drm_connector *connector, int mode);</synopsis>
+          <para>
+            The <function>drm_helper_connector_dpms</function> helper function
+            is a connector <methodname>dpms</methodname> implementation that
+            tracks power state of connectors. To use the function, drivers must
+            provide <methodname>dpms</methodname> helper operations for CRTCs
+            and encoders to apply the DPMS state to the device.
+          </para>
+          <para>
+            The mid-layer doesn't track the power state of CRTCs and encoders.
+            The <methodname>dpms</methodname> helper operations can thus be
+            called with a mode identical to the currently active mode.
+          </para>
+        </listitem>
+        <listitem>
+          <synopsis>int drm_helper_probe_single_connector_modes(struct drm_connector *connector,
+                                            uint32_t maxX, uint32_t maxY);</synopsis>
+          <para>
+            The <function>drm_helper_probe_single_connector_modes</function> helper
+            function is a connector <methodname>fill_modes</methodname>
+            implementation that updates the connection status for the connector
+            and then retrieves a list of modes by calling the connector
+            <methodname>get_modes</methodname> helper operation.
+          </para>
+          <para>
+            The function filters out modes larger than
+            <parameter>maxX</parameter> and <parameter>maxY</parameter> if
+            specified. It then calls the connector
+            <methodname>mode_valid</methodname> helper operation for each mode
+            in the probed list to check whether the mode is valid for the
+            connector.
+          </para>
+        </listitem>
+      </itemizedlist>
+    </sect2>
+    <sect2>
+      <title>CRTC Helper Operations</title>
+      <itemizedlist>
+        <listitem id="drm-helper-crtc-mode-fixup">
+          <synopsis>bool (*mode_fixup)(struct drm_crtc *crtc,
+                       const struct drm_display_mode *mode,
+                       struct drm_display_mode *adjusted_mode);</synopsis>
+          <para>
+            Let CRTCs adjust the requested mode or reject it completely. This
+            operation returns true if the mode is accepted (possibly after being
+            adjusted) or false if it is rejected.
+          </para>
+          <para>
+            The <methodname>mode_fixup</methodname> operation should reject the
+            mode if it can't reasonably use it. The definition of "reasonable"
+            is currently fuzzy in this context. One possible behaviour would be
+            to set the adjusted mode to the panel timings when a fixed-mode
+            panel is used with hardware capable of scaling. Another behaviour
+            would be to accept any input mode and adjust it to the closest mode
+            supported by the hardware (FIXME: This needs to be clarified).
+          </para>
+        </listitem>
+        <listitem>
+          <synopsis>int (*mode_set_base)(struct drm_crtc *crtc, int x, int y,
+                     struct drm_framebuffer *old_fb)</synopsis>
+          <para>
+            Move the CRTC on the current frame buffer (stored in
+            <literal>crtc-&gt;fb</literal>) to position (x,y). Any of the frame
+            buffer, x position or y position may have been modified.
+          </para>
+          <para>
+            This helper operation is optional. If not provided, the
+            <function>drm_crtc_helper_set_config</function> function will fall
+            back to the <methodname>mode_set</methodname> helper operation.
+          </para>
+          <note><para>
+            FIXME: Why are x and y passed as arguments, as they can be accessed
+            through <literal>crtc-&gt;x</literal> and
+            <literal>crtc-&gt;y</literal>?
+          </para></note>
+        </listitem>
+        <listitem>
+          <synopsis>void (*prepare)(struct drm_crtc *crtc);</synopsis>
+          <para>
+            Prepare the CRTC for mode setting. This operation is called after
+            validating the requested mode. Drivers use it to perform
+            device-specific operations required before setting the new mode.
+          </para>
+        </listitem>
+        <listitem>
+          <synopsis>int (*mode_set)(struct drm_crtc *crtc, struct drm_display_mode *mode,
+                struct drm_display_mode *adjusted_mode, int x, int y,
+                struct drm_framebuffer *old_fb);</synopsis>
+          <para>
+            Set a new mode, position and frame buffer. Depending on the device
+            requirements, the mode can be stored internally by the driver and
+            applied in the <methodname>commit</methodname> operation, or
+            programmed to the hardware immediately.
+          </para>
+          <para>
+            The <methodname>mode_set</methodname> operation returns 0 on success
+	    or a negative error code if an error occurs.
+          </para>
+        </listitem>
+        <listitem>
+          <synopsis>void (*commit)(struct drm_crtc *crtc);</synopsis>
+          <para>
+            Commit a mode. This operation is called after setting the new mode.
+            Upon return the device must use the new mode and be fully
+            operational.
+          </para>
+        </listitem>
+      </itemizedlist>
+    </sect2>
+    <sect2>
+      <title>Encoder Helper Operations</title>
+      <itemizedlist>
+        <listitem>
+          <synopsis>bool (*mode_fixup)(struct drm_encoder *encoder,
+                       const struct drm_display_mode *mode,
+                       struct drm_display_mode *adjusted_mode);</synopsis>
+          <note><para>
+            FIXME: The mode argument should be const, but the i915 driver modifies
+            mode-&gt;clock in <function>intel_dp_mode_fixup</function>.
+          </para></note>
+          <para>
+            Let encoders adjust the requested mode or reject it completely. This
+            operation returns true if the mode is accepted (possibly after being
+            adjusted) or false if it is rejected. See the
+            <link linkend="drm-helper-crtc-mode-fixup">mode_fixup CRTC helper
+            operation</link> for an explanation of the allowed adjustments.
+          </para>
+        </listitem>
+        <listitem>
+          <synopsis>void (*prepare)(struct drm_encoder *encoder);</synopsis>
+          <para>
+            Prepare the encoder for mode setting. This operation is called after
+            validating the requested mode. Drivers use it to perform
+            device-specific operations required before setting the new mode.
+          </para>
+        </listitem>
+        <listitem>
+          <synopsis>void (*mode_set)(struct drm_encoder *encoder,
+                 struct drm_display_mode *mode,
+                 struct drm_display_mode *adjusted_mode);</synopsis>
+          <para>
+            Set a new mode. Depending on the device requirements, the mode can
+            be stored internally by the driver and applied in the
+            <methodname>commit</methodname> operation, or programmed to the
+            hardware immediately.
+          </para>
+        </listitem>
+        <listitem>
+          <synopsis>void (*commit)(struct drm_encoder *encoder);</synopsis>
+          <para>
+            Commit a mode. This operation is called after setting the new mode.
+            Upon return the device must use the new mode and be fully
+            operational.
+          </para>
+        </listitem>
+      </itemizedlist>
+    </sect2>
+    <sect2>
+      <title>Connector Helper Operations</title>
+      <itemizedlist>
+        <listitem>
+          <synopsis>struct drm_encoder *(*best_encoder)(struct drm_connector *connector);</synopsis>
+          <para>
+            Return a pointer to the best encoder for the connector. Devices
+            that map connectors to encoders 1:1 simply return the pointer to
+            the associated encoder. This operation is mandatory.
+          </para>
+        </listitem>
+        <listitem>
+          <synopsis>int (*get_modes)(struct drm_connector *connector);</synopsis>
+          <para>
+            Fill the connector's <structfield>probed_modes</structfield> list
+            by parsing EDID data with <function>drm_add_edid_modes</function> or
+            calling <function>drm_mode_probed_add</function> directly for every
+            supported mode and return the number of modes it has detected. This
+            operation is mandatory.
+          </para>
+          <para>
+            When adding modes manually the driver creates each mode with a call to
+            <function>drm_mode_create</function> and must fill the following fields.
+            <itemizedlist>
+              <listitem>
+                <synopsis>__u32 type;</synopsis>
+                <para>
+                  Mode type bitmask, a combination of
+                  <variablelist>
+                    <varlistentry>
+                      <term>DRM_MODE_TYPE_BUILTIN</term>
+                      <listitem><para>not used?</para></listitem>
+                    </varlistentry>
+                    <varlistentry>
+                      <term>DRM_MODE_TYPE_CLOCK_C</term>
+                      <listitem><para>not used?</para></listitem>
+                    </varlistentry>
+                    <varlistentry>
+                      <term>DRM_MODE_TYPE_CRTC_C</term>
+                      <listitem><para>not used?</para></listitem>
+                    </varlistentry>
+                    <varlistentry>
+                      <term>DRM_MODE_TYPE_PREFERRED</term>
+                      <listitem><para>
+                        The preferred mode for the connector
+                      </para></listitem>
+                    </varlistentry>
+                    <varlistentry>
+                      <term>DRM_MODE_TYPE_DEFAULT</term>
+                      <listitem><para>not used?</para></listitem>
+                    </varlistentry>
+                    <varlistentry>
+                      <term>DRM_MODE_TYPE_USERDEF</term>
+                      <listitem><para>not used?</para></listitem>
+                    </varlistentry>
+                    <varlistentry>
+                      <term>DRM_MODE_TYPE_DRIVER</term>
+                      <listitem>
+                        <para>
+                          The mode has been created by the driver (as opposed
+                          to user-created modes).
+                        </para>
+                      </listitem>
+                    </varlistentry>
+                  </variablelist>
+                  Drivers must set the DRM_MODE_TYPE_DRIVER bit for all modes they
+                  create, and set the DRM_MODE_TYPE_PREFERRED bit for the preferred
+                  mode.
+                </para>
+              </listitem>
+              <listitem>
+                <synopsis>__u32 clock;</synopsis>
+                <para>Pixel clock frequency in kHz</para>
+              </listitem>
+              <listitem>
+                <synopsis>__u16 hdisplay, hsync_start, hsync_end, htotal;
+    __u16 vdisplay, vsync_start, vsync_end, vtotal;</synopsis>
+                <para>Horizontal and vertical timing information</para>
+                <screen><![CDATA[
+             Active                 Front           Sync           Back
+             Region                 Porch                          Porch
+    <-----------------------><----------------><-------------><-------------->
+
+      //////////////////////|
+     ////////////////////// |
+    //////////////////////  |..................               ................
+                                               _______________
+
+    <----- [hv]display ----->
+    <------------- [hv]sync_start ------------>
+    <--------------------- [hv]sync_end --------------------->
+    <-------------------------------- [hv]total ----------------------------->
+]]></screen>
+              </listitem>
+              <listitem>
+                <synopsis>__u16 hskew;
+    __u16 vscan;</synopsis>
+                <para>Unknown</para>
+              </listitem>
+              <listitem>
+                <synopsis>__u32 flags;</synopsis>
+                <para>
+                  Mode flags, a combination of
+                  <variablelist>
+                    <varlistentry>
+                      <term>DRM_MODE_FLAG_PHSYNC</term>
+                      <listitem><para>
+                        Horizontal sync is active high
+                      </para></listitem>
+                    </varlistentry>
+                    <varlistentry>
+                      <term>DRM_MODE_FLAG_NHSYNC</term>
+                      <listitem><para>
+                        Horizontal sync is active low
+                      </para></listitem>
+                    </varlistentry>
+                    <varlistentry>
+                      <term>DRM_MODE_FLAG_PVSYNC</term>
+                      <listitem><para>
+                        Vertical sync is active high
+                      </para></listitem>
+                    </varlistentry>
+                    <varlistentry>
+                      <term>DRM_MODE_FLAG_NVSYNC</term>
+                      <listitem><para>
+                        Vertical sync is active low
+                      </para></listitem>
+                    </varlistentry>
+                    <varlistentry>
+                      <term>DRM_MODE_FLAG_INTERLACE</term>
+                      <listitem><para>
+                        Mode is interlaced
+                      </para></listitem>
+                    </varlistentry>
+                    <varlistentry>
+                      <term>DRM_MODE_FLAG_DBLSCAN</term>
+                      <listitem><para>
+                        Mode uses doublescan
+                      </para></listitem>
+                    </varlistentry>
+                    <varlistentry>
+                      <term>DRM_MODE_FLAG_CSYNC</term>
+                      <listitem><para>
+                        Mode uses composite sync
+                      </para></listitem>
+                    </varlistentry>
+                    <varlistentry>
+                      <term>DRM_MODE_FLAG_PCSYNC</term>
+                      <listitem><para>
+                        Composite sync is active high
+                      </para></listitem>
+                    </varlistentry>
+                    <varlistentry>
+                      <term>DRM_MODE_FLAG_NCSYNC</term>
+                      <listitem><para>
+                        Composite sync is active low
+                      </para></listitem>
+                    </varlistentry>
+                    <varlistentry>
+                      <term>DRM_MODE_FLAG_HSKEW</term>
+                      <listitem><para>
+                        hskew provided (not used?)
+                      </para></listitem>
+                    </varlistentry>
+                    <varlistentry>
+                      <term>DRM_MODE_FLAG_BCAST</term>
+                      <listitem><para>
+                        not used?
+                      </para></listitem>
+                    </varlistentry>
+                    <varlistentry>
+                      <term>DRM_MODE_FLAG_PIXMUX</term>
+                      <listitem><para>
+                        not used?
+                      </para></listitem>
+                    </varlistentry>
+                    <varlistentry>
+                      <term>DRM_MODE_FLAG_DBLCLK</term>
+                      <listitem><para>
+                        not used?
+                      </para></listitem>
+                    </varlistentry>
+                    <varlistentry>
+                      <term>DRM_MODE_FLAG_CLKDIV2</term>
+                      <listitem><para>
+                        ?
+                      </para></listitem>
+                    </varlistentry>
+                  </variablelist>
+                </para>
+                <para>
+                  Note that modes marked with the INTERLACE or DBLSCAN flags will be
+                  filtered out by
+                  <function>drm_helper_probe_single_connector_modes</function> if
+                  the connector's <structfield>interlace_allowed</structfield> or
+                  <structfield>doublescan_allowed</structfield> field is set to 0.
+                </para>
+              </listitem>
+              <listitem>
+                <synopsis>char name[DRM_DISPLAY_MODE_LEN];</synopsis>
+                <para>
+                  Mode name. The driver must call
+                  <function>drm_mode_set_name</function> to fill the mode name from
+                  <structfield>hdisplay</structfield>,
+                  <structfield>vdisplay</structfield> and interlace flag after
+                  filling the corresponding fields.
+                </para>
+              </listitem>
+            </itemizedlist>
+          </para>
+          <para>
+            The <structfield>vrefresh</structfield> value is computed by
+            <function>drm_helper_probe_single_connector_modes</function>.
+          </para>
+          <para>
+            When parsing EDID data, <function>drm_add_edid_modes</function> fills the
+            connector <structfield>display_info</structfield>
+            <structfield>width_mm</structfield> and
+            <structfield>height_mm</structfield> fields. When creating modes
+            manually the <methodname>get_modes</methodname> helper operation must
+            set the <structfield>display_info</structfield>
+            <structfield>width_mm</structfield> and
+            <structfield>height_mm</structfield> fields if they haven't been set
+            already (for instance at initialization time when a fixed-size panel is
+            attached to the connector). The mode <structfield>width_mm</structfield>
+            and <structfield>height_mm</structfield> fields are only used internally
+            during EDID parsing and should not be set when creating modes manually.
+          </para>
+        </listitem>
+        <listitem>
+          <synopsis>int (*mode_valid)(struct drm_connector *connector,
+		  struct drm_display_mode *mode);</synopsis>
+          <para>
+            Verify whether a mode is valid for the connector. Return MODE_OK for
+            supported modes and one of the enum drm_mode_status values (MODE_*)
+            for unsupported modes. This operation is mandatory.
+          </para>
+          <para>
+            As the mode rejection reason is currently not used beyond
+            immediately removing the unsupported mode, an implementation can
+            return MODE_BAD regardless of the exact reason why the mode is not
+            valid. A minimal sketch of this operation is shown after the list.
+          </para>
+          <note><para>
+            Note that the <methodname>mode_valid</methodname> helper operation is
+            only called for modes detected by the device, and
+            <emphasis>not</emphasis> for modes set by the user through the CRTC
+            <methodname>set_config</methodname> operation.
+          </para></note>
+        </listitem>
+      </itemizedlist>
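+      <para>
+        The following is a minimal, hypothetical sketch of a
+        <methodname>mode_valid</methodname> implementation that rejects all
+        modes above a fixed pixel clock; the driver name and the 165 MHz
+        limit are made up for illustration.
+        <programlisting>
+static int foo_connector_mode_valid(struct drm_connector *connector,
+				    struct drm_display_mode *mode)
+{
+	/* mode->clock is expressed in kHz. */
+	if (mode->clock > 165000)
+		return MODE_CLOCK_HIGH;
+
+	return MODE_OK;
+}
+        </programlisting>
+      </para>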
+    </sect2>
+  </sect1>
+
+  <!-- Internals: vertical blanking -->
+
+  <sect1 id="drm-vertical-blank">
+    <title>Vertical Blanking</title>
+    <para>
+      Vertical blanking plays a major role in graphics rendering. To achieve
+      tear-free display, users must synchronize page flips and/or rendering to
+      vertical blanking. The DRM API offers ioctls to perform page flips
+      synchronized to vertical blanking and wait for vertical blanking.
+    </para>
+    <para>
+      The DRM core handles most of the vertical blanking management logic, which
+      involves filtering out spurious interrupts, keeping race-free blanking
+      counters, coping with counter wrap-around and resets, and keeping use
+      counts. It relies on the driver to generate vertical blanking interrupts
+      and optionally provide a hardware vertical blanking counter. Drivers must
+      implement the following operations; a minimal sketch follows the list.
+    </para>
+    <itemizedlist>
+      <listitem>
+        <synopsis>int (*enable_vblank) (struct drm_device *dev, int crtc);
+void (*disable_vblank) (struct drm_device *dev, int crtc);</synopsis>
+        <para>
+	  Enable or disable vertical blanking interrupts for the given CRTC.
+	</para>
+      </listitem>
+      <listitem>
+        <synopsis>u32 (*get_vblank_counter) (struct drm_device *dev, int crtc);</synopsis>
+        <para>
+	  Retrieve the value of the vertical blanking counter for the given
+	  CRTC. If the hardware maintains a vertical blanking counter, its value
+	  should be returned. Otherwise, drivers can use the
+	  <function>drm_vblank_count</function> helper function to handle this
+	  operation.
+	</para>
+      </listitem>
+    </itemizedlist>
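+    <para>
+      A driver for hardware with a per-CRTC interrupt enable register but no
+      hardware counter could wire these operations up roughly as follows; the
+      <function>foo_write</function> helper and the register names are
+      hypothetical.
+      <programlisting>
+static int foo_enable_vblank(struct drm_device *dev, int crtc)
+{
+	foo_write(dev, FOO_VBLANK_IRQ(crtc), FOO_VBLANK_IRQ_ENABLE);
+	return 0;
+}
+
+static void foo_disable_vblank(struct drm_device *dev, int crtc)
+{
+	foo_write(dev, FOO_VBLANK_IRQ(crtc), 0);
+}
+
+static u32 foo_get_vblank_counter(struct drm_device *dev, int crtc)
+{
+	/* No hardware counter on this device: fall back to the helper. */
+	return drm_vblank_count(dev, crtc);
+}
+      </programlisting>
+    </para>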
+    <para>
+      Drivers must initialize the vertical blanking handling core with a call to
+      <function>drm_vblank_init</function> in their
+      <methodname>load</methodname> operation. The function will set the struct
+      <structname>drm_device</structname>
+      <structfield>vblank_disable_allowed</structfield> field to 0. This will
+      keep vertical blanking interrupts enabled permanently until the first mode
+      set operation, where <structfield>vblank_disable_allowed</structfield> is
+      set to 1. The reason behind this is not clear. Drivers can set the field
+      to 1 after calling <function>drm_vblank_init</function> to make vertical
+      blanking interrupts dynamically managed from the beginning.
+    </para>
+    <para>
+      Vertical blanking interrupts can be enabled by the DRM core or by drivers
+      themselves (for instance to handle page flipping operations). The DRM core
+      maintains a vertical blanking use count to ensure that the interrupts are
+      not disabled while a user still needs them. To increment the use count,
+      drivers call <function>drm_vblank_get</function>. Upon return vertical
+      blanking interrupts are guaranteed to be enabled.
+    </para>
+    <para>
+      To decrement the use count drivers call
+      <function>drm_vblank_put</function>. Only when the use count drops to zero
+      will the DRM core disable the vertical blanking interrupts, after a delay
+      enforced by a scheduled timer. The delay is accessible through the vblankoffdelay
+      module parameter or the <varname>drm_vblank_offdelay</varname> global
+      variable and expressed in milliseconds. Its default value is 5000 ms.
+    </para>
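+    <para>
+      For instance, a page flip implementation would typically hold a vertical
+      blanking reference for as long as the flip is pending. A rough sketch,
+      with the foo_* helpers being hypothetical:
+      <programlisting>
+static int foo_begin_page_flip(struct drm_device *dev, int pipe)
+{
+	int ret;
+
+	/* Hold a vblank reference while the flip is pending. */
+	ret = drm_vblank_get(dev, pipe);
+	if (ret)
+		return ret;
+
+	foo_queue_flip_to_hardware(dev, pipe);	/* hypothetical helper */
+	return 0;
+}
+
+static void foo_finish_page_flip(struct drm_device *dev, int pipe)
+{
+	/* Called from the flip completion interrupt handler. */
+	drm_vblank_put(dev, pipe);
+}
+      </programlisting>
+    </para>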
+    <para>
+      When a vertical blanking interrupt occurs drivers only need to call the
+      <function>drm_handle_vblank</function> function to account for the
+      interrupt.
+    </para>
+    <para>
+      Resources allocated by <function>drm_vblank_init</function> must be freed
+      with a call to <function>drm_vblank_cleanup</function> in the driver
+      <methodname>unload</methodname> operation handler.
+    </para>
+  </sect1>
+
+  <!-- Internals: open/close, file operations and ioctls -->
+
+  <sect1>
+    <title>Open/Close, File Operations and IOCTLs</title>
+    <sect2>
+      <title>Open and Close</title>
+      <synopsis>int (*firstopen) (struct drm_device *);
+void (*lastclose) (struct drm_device *);
+int (*open) (struct drm_device *, struct drm_file *);
+void (*preclose) (struct drm_device *, struct drm_file *);
+void (*postclose) (struct drm_device *, struct drm_file *);</synopsis>
+      <abstract>Open and close handlers. None of those methods are mandatory.
+      </abstract>
+      <para>
+        The <methodname>firstopen</methodname> method is called by the DRM core
+	when an application opens a device that has no other opened file handle.
+	Similarly the <methodname>lastclose</methodname> method is called when
+	the last application holding a file handle opened on the device closes
+	it. Both methods are mostly used by UMS (User Mode Setting) drivers to
+	acquire and release device resources; KMS drivers should do this in the
+	<methodname>load</methodname> and <methodname>unload</methodname>
+	methods instead.
+      </para>
+      <para>
+        Note that the <methodname>lastclose</methodname> method is also called
+	at module unload time or, for hot-pluggable devices, when the device is
+	unplugged. The <methodname>firstopen</methodname> and
+	<methodname>lastclose</methodname> calls can thus be unbalanced.
+      </para>
+      <para>
+        The <methodname>open</methodname> method is called every time the device
+	is opened by an application. Drivers can allocate per-file private data
+	in this method and store it in the struct
+	<structname>drm_file</structname> <structfield>driver_priv</structfield>
+	field. Note that the <methodname>open</methodname> method is called
+	before <methodname>firstopen</methodname>.
+      </para>
+      <para>
+        The close operation is split into <methodname>preclose</methodname> and
+	<methodname>postclose</methodname> methods. Drivers must stop and
+	clean up all per-file operations in the <methodname>preclose</methodname>
+	method. For instance pending vertical blanking and page flip events must
+	be cancelled. No per-file operation is allowed on the file handle after
+	returning from the <methodname>preclose</methodname> method.
+      </para>
+      <para>
+        Finally the <methodname>postclose</methodname> method is called as the
+	last step of the close operation, right before calling the
+	<methodname>lastclose</methodname> method if no other open file handle
+	exists for the device. Drivers that have allocated per-file private data
+	in the <methodname>open</methodname> method should free it here.
+      </para>
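+      <para>
+        A driver that keeps per-file private data would thus pair the two
+        methods roughly as follows; the <structname>foo_file_priv</structname>
+        structure is hypothetical.
+        <programlisting>
+static int foo_open(struct drm_device *dev, struct drm_file *file)
+{
+	struct foo_file_priv *priv;
+
+	priv = kzalloc(sizeof(*priv), GFP_KERNEL);
+	if (!priv)
+		return -ENOMEM;
+
+	file->driver_priv = priv;
+	return 0;
+}
+
+static void foo_postclose(struct drm_device *dev, struct drm_file *file)
+{
+	kfree(file->driver_priv);
+}
+        </programlisting>
+      </para>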
+      <para>
+        The <methodname>lastclose</methodname> method should restore CRTC and
+	plane properties to their default values, so that a subsequent open of the
+	device will not inherit state from the previous user.
+      </para>
+    </sect2>
+    <sect2>
+      <title>File Operations</title>
+      <synopsis>const struct file_operations *fops</synopsis>
+      <abstract>File operations for the DRM device node.</abstract>
+      <para>
+        Drivers must define the file operations structure that forms the DRM
+	userspace API entry point, even though most of those operations are
+	implemented in the DRM core. The <methodname>open</methodname>,
+	<methodname>release</methodname> and <methodname>ioctl</methodname>
+	operations are handled by
+	<programlisting>
+	.owner = THIS_MODULE,
+	.open = drm_open,
+	.release = drm_release,
+	.unlocked_ioctl = drm_ioctl,
+  #ifdef CONFIG_COMPAT
+	.compat_ioctl = drm_compat_ioctl,
+  #endif
+        </programlisting>
+      </para>
+      <para>
+        Drivers that implement private ioctls that require 32/64-bit
+	compatibility support must provide their own
+	<methodname>compat_ioctl</methodname> handler that processes private
+	ioctls and calls <function>drm_compat_ioctl</function> for core ioctls.
+      </para>
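+      <para>
+        Such a handler typically dispatches on the ioctl number; a sketch,
+        with <function>foo_translate_compat_ioctl</function> being a
+        hypothetical translation routine:
+        <programlisting>
+static long foo_compat_ioctl(struct file *filp, unsigned int cmd,
+			     unsigned long arg)
+{
+	unsigned int nr = DRM_IOCTL_NR(cmd);
+
+	/* Core ioctls live below DRM_COMMAND_BASE, hand them to the DRM core. */
+	if (nr &lt; DRM_COMMAND_BASE)
+		return drm_compat_ioctl(filp, cmd, arg);
+
+	/* Translate the 32-bit layout of driver-private ioctls here. */
+	return foo_translate_compat_ioctl(filp, cmd, arg);
+}
+        </programlisting>
+      </para>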
+      <para>
+        The <methodname>read</methodname> and <methodname>poll</methodname>
+	operations provide support for reading DRM events and polling them. They
+	are implemented by
+	<programlisting>
+	.poll = drm_poll,
+	.read = drm_read,
+	.fasync = drm_fasync,
+	.llseek = no_llseek,
 	</programlisting>
+      </para>
+      <para>
+        The memory mapping implementation varies depending on how the driver
+	manages memory. Pre-GEM drivers will use <function>drm_mmap</function>,
+	while GEM-aware drivers will use <function>drm_gem_mmap</function>. See
+	<xref linkend="drm-gem"/>.
+	<programlisting>
+	.mmap = drm_gem_mmap,
+	</programlisting>
+      </para>
+      <para>
+        No other file operation is supported by the DRM API.
+      </para>
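+      <para>
+        Putting the pieces above together, a typical GEM-based driver would
+        end up with a file operations structure along these lines (the
+        structure name is arbitrary):
+        <programlisting>
+static const struct file_operations foo_driver_fops = {
+	.owner		= THIS_MODULE,
+	.open		= drm_open,
+	.release	= drm_release,
+	.unlocked_ioctl	= drm_ioctl,
+#ifdef CONFIG_COMPAT
+	.compat_ioctl	= drm_compat_ioctl,
+#endif
+	.poll		= drm_poll,
+	.read		= drm_read,
+	.fasync		= drm_fasync,
+	.llseek		= no_llseek,
+	.mmap		= drm_gem_mmap,
+};
+        </programlisting>
+      </para>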
+    </sect2>
+    <sect2>
+      <title>IOCTLs</title>
+      <synopsis>struct drm_ioctl_desc *ioctls;
+int num_ioctls;</synopsis>
+      <abstract>Driver-specific ioctl descriptors table.</abstract>
+      <para>
+        Driver-specific ioctl numbers start at DRM_COMMAND_BASE. The ioctl
+	descriptors table is indexed by the ioctl number offset from the base
+	value. Drivers can use the DRM_IOCTL_DEF_DRV() macro to initialize the
+	table entries.
+      </para>
+      <para>
+        <programlisting>DRM_IOCTL_DEF_DRV(ioctl, func, flags)</programlisting>
 	<para>
-	  In the example above (again, taken from the i915 driver), a
-	  CRT connector and encoder combination is created.  A device-specific
-	  i2c bus is also created for fetching EDID data and
-	  performing monitor detection.  Once the process is complete,
-	  the new connector is registered with sysfs to make its
-	  properties available to applications.
+	  <parameter>ioctl</parameter> is the ioctl name. Drivers must define
+	  the DRM_##ioctl and DRM_IOCTL_##ioctl macros to the ioctl number
+	  offset from DRM_COMMAND_BASE and the ioctl number respectively. The
+	  first macro is private to the device while the second must be exposed
+	  to userspace in a public header.
 	</para>
-	<sect4>
-	  <title>Helper functions and core functions</title>
-	  <para>
-	    Since many PC-class graphics devices have similar display output
-	    designs, the DRM provides a set of helper functions to make
-	    output management easier.  The core helper routines handle
-	    encoder re-routing and the disabling of unused functions following
-	    mode setting.  Using the helpers is optional, but recommended for
-	    devices with PC-style architectures (i.e. a set of display planes
-	    for feeding pixels to encoders which are in turn routed to
-	    connectors).  Devices with more complex requirements needing
-	    finer grained management may opt to use the core callbacks
-	    directly.
-	  </para>
-	  <para>
-	    [Insert typical diagram here.]  [Insert OMAP style config here.]
-	  </para>
-	</sect4>
 	<para>
-	  Each encoder object needs to provide:
+	  <parameter>func</parameter> is a pointer to the ioctl handler function
+	  compatible with the <type>drm_ioctl_t</type> type.
+	  <programlisting>typedef int drm_ioctl_t(struct drm_device *dev, void *data,
+		struct drm_file *file_priv);</programlisting>
+	</para>
+	<para>
+	  <parameter>flags</parameter> is a bitmask combination of the following
+	  values. It restricts how the ioctl is allowed to be called.
 	  <itemizedlist>
-	    <listitem>
-	      A DPMS (basically on/off) function.
-	    </listitem>
-	    <listitem>
-	      A mode-fixup function (for converting requested modes into
-	      native hardware timings).
-	    </listitem>
-	    <listitem>
-	      Functions (prepare, set, and commit) for use by the core DRM
-	      helper functions.
-	    </listitem>
+	    <listitem><para>
+	      DRM_AUTH - Only authenticated callers allowed
+	    </para></listitem>
+	    <listitem><para>
+	      DRM_MASTER - The ioctl can only be called on the master file
+	      handle
+	    </para></listitem>
+            <listitem><para>
+	      DRM_ROOT_ONLY - Only callers with the CAP_SYS_ADMIN capability allowed
+	    </para></listitem>
+            <listitem><para>
+	      DRM_CONTROL_ALLOW - The ioctl can only be called on a control
+	      device
+	    </para></listitem>
+            <listitem><para>
+	      DRM_UNLOCKED - The ioctl handler will be called without locking
+	      the DRM global mutex
+	    </para></listitem>
 	  </itemizedlist>
-	  Connector helpers need to provide functions (mode-fetch, validity,
-	  and encoder-matching) for returning an ideal encoder for a given
-	  connector.  The core connector functions include a DPMS callback,
-	  save/restore routines (deprecated), detection, mode probing,
-	  property handling, and cleanup functions.
 	</para>
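+	<para>
+	  For a hypothetical driver foo with a single private ioctl, the pieces
+	  could look as follows; all foo_* names are made up for illustration.
+	  <programlisting>
+/* In a driver-private header: ioctl number offset from DRM_COMMAND_BASE. */
+#define DRM_FOO_DO_SOMETHING		0x00
+
+/* In the public userspace header: the full ioctl number. */
+#define DRM_IOCTL_FOO_DO_SOMETHING	DRM_IOW(DRM_COMMAND_BASE + \
+		DRM_FOO_DO_SOMETHING, struct drm_foo_do_something)
+
+static struct drm_ioctl_desc foo_ioctls[] = {
+	DRM_IOCTL_DEF_DRV(FOO_DO_SOMETHING, foo_do_something_ioctl,
+			  DRM_AUTH | DRM_UNLOCKED),
+};
+	  </programlisting>
+	  The driver then points its <structfield>ioctls</structfield> field at
+	  this table and sets <structfield>num_ioctls</structfield> to
+	  ARRAY_SIZE(foo_ioctls).
+	</para>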
-<!--!Edrivers/char/drm/drm_crtc.h-->
-<!--!Edrivers/char/drm/drm_crtc.c-->
-<!--!Edrivers/char/drm/drm_crtc_helper.c-->
-      </sect3>
-    </sect2>
-  </sect1>
-
-  <!-- Internals: vblank handling -->
-
-  <sect1>
-    <title>VBlank event handling</title>
-    <para>
-      The DRM core exposes two vertical blank related ioctls:
-      <variablelist>
-        <varlistentry>
-          <term>DRM_IOCTL_WAIT_VBLANK</term>
-          <listitem>
-            <para>
-              This takes a struct drm_wait_vblank structure as its argument,
-              and it is used to block or request a signal when a specified
-              vblank event occurs.
-            </para>
-          </listitem>
-        </varlistentry>
-        <varlistentry>
-          <term>DRM_IOCTL_MODESET_CTL</term>
-          <listitem>
-            <para>
-              This should be called by application level drivers before and
-              after mode setting, since on many devices the vertical blank
-              counter is reset at that time.  Internally, the DRM snapshots
-              the last vblank count when the ioctl is called with the
-              _DRM_PRE_MODESET command, so that the counter won't go backwards
-              (which is dealt with when _DRM_POST_MODESET is used).
-            </para>
-          </listitem>
-        </varlistentry>
-      </variablelist>
-<!--!Edrivers/char/drm/drm_irq.c-->
-    </para>
-    <para>
-      To support the functions above, the DRM core provides several
-      helper functions for tracking vertical blank counters, and
-      requires drivers to provide several callbacks:
-      get_vblank_counter(), enable_vblank() and disable_vblank().  The
-      core uses get_vblank_counter() to keep the counter accurate
-      across interrupt disable periods.  It should return the current
-      vertical blank event count, which is often tracked in a device
-      register.  The enable and disable vblank callbacks should enable
-      and disable vertical blank interrupts, respectively.  In the
-      absence of DRM clients waiting on vblank events, the core DRM
-      code uses the disable_vblank() function to disable
-      interrupts, which saves power.  They are re-enabled again when
-      a client calls the vblank wait ioctl above.
-    </para>
-    <para>
-      A device that doesn't provide a count register may simply use an
-      internal atomic counter incremented on every vertical blank
-      interrupt (and then treat the enable_vblank() and disable_vblank()
-      callbacks as no-ops).
-    </para>
-  </sect1>
-
-  <sect1>
-    <title>Memory management</title>
-    <para>
-      The memory manager lies at the heart of many DRM operations; it
-      is required to support advanced client features like OpenGL
-      pbuffers.  The DRM currently contains two memory managers: TTM
-      and GEM.
-    </para>
-
-    <sect2>
-      <title>The Translation Table Manager (TTM)</title>
-      <para>
-	TTM was developed by Tungsten Graphics, primarily by Thomas
-	Hellström, and is intended to be a flexible, high performance
-	graphics memory manager.
-      </para>
-      <para>
-	Drivers wishing to support TTM must fill out a drm_bo_driver
-	structure.
-      </para>
-      <para>
-	TTM design background and information belongs here.
       </para>
     </sect2>
-
-    <sect2>
-      <title>The Graphics Execution Manager (GEM)</title>
-      <para>
-	GEM is an Intel project, authored by Eric Anholt and Keith
-	Packard.  It provides simpler interfaces than TTM, and is well
-	suited for UMA devices.
-      </para>
-      <para>
-	GEM-enabled drivers must provide gem_init_object() and
-	gem_free_object() callbacks to support the core memory
-	allocation routines.  They should also provide several driver-specific
-	ioctls to support command execution, pinning, buffer
-	read &amp; write, mapping, and domain ownership transfers.
-      </para>
-      <para>
-	On a fundamental level, GEM involves several operations:
-	<itemizedlist>
-	  <listitem>Memory allocation and freeing</listitem>
-	  <listitem>Command execution</listitem>
-	  <listitem>Aperture management at command execution time</listitem>
-	</itemizedlist>
-	Buffer object allocation is relatively
-	straightforward and largely provided by Linux's shmem layer, which
-	provides memory to back each object.  When mapped into the GTT
-	or used in a command buffer, the backing pages for an object are
-	flushed to memory and marked write combined so as to be coherent
-	with the GPU.  Likewise, if the CPU accesses an object after the GPU
-	has finished rendering to the object, then the object must be made
-	coherent with the CPU's view
-	of memory, usually involving GPU cache flushing of various kinds.
-	This core CPU&lt;-&gt;GPU coherency management is provided by a
-	device-specific ioctl, which evaluates an object's current domain and
-	performs any necessary flushing or synchronization to put the object
-	into the desired coherency domain (note that the object may be busy,
-	i.e. an active render target; in that case, setting the domain
-	blocks the client and waits for rendering to complete before
-	performing any necessary flushing operations).
-      </para>
-      <para>
-	Perhaps the most important GEM function is providing a command
-	execution interface to clients.  Client programs construct command
-	buffers containing references to previously allocated memory objects,
-	and then submit them to GEM.  At that point, GEM takes care to bind
-	all the objects into the GTT, execute the buffer, and provide
-	necessary synchronization between clients accessing the same buffers.
-	This often involves evicting some objects from the GTT and re-binding
-	others (a fairly expensive operation), and providing relocation
-	support which hides fixed GTT offsets from clients.  Clients must
-	take care not to submit command buffers that reference more objects
-	than can fit in the GTT; otherwise, GEM will reject them and no rendering
-	will occur.  Similarly, if several objects in the buffer require
-	fence registers to be allocated for correct rendering (e.g. 2D blits
-	on pre-965 chips), care must be taken not to require more fence
-	registers than are available to the client.  Such resource management
-	should be abstracted from the client in libdrm.
-      </para>
-    </sect2>
-
-  </sect1>
-
-  <!-- Output management -->
-  <sect1>
-    <title>Output management</title>
-    <para>
-      At the core of the DRM output management code is a set of
-      structures representing CRTCs, encoders, and connectors.
-    </para>
-    <para>
-      A CRTC is an abstraction representing a part of the chip that
-      contains a pointer to a scanout buffer.  Therefore, the number
-      of CRTCs available determines how many independent scanout
-      buffers can be active at any given time.  The CRTC structure
-      contains several fields to support this: a pointer to some video
-      memory, a display mode, and an (x, y) offset into the video
-      memory to support panning or configurations where one piece of
-      video memory spans multiple CRTCs.
-    </para>
-    <para>
-      An encoder takes pixel data from a CRTC and converts it to a
-      format suitable for any attached connectors.  On some devices,
-      it may be possible to have a CRTC send data to more than one
-      encoder.  In that case, both encoders would receive data from
-      the same scanout buffer, resulting in a "cloned" display
-      configuration across the connectors attached to each encoder.
-    </para>
-    <para>
-      A connector is the final destination for pixel data on a device,
-      and usually connects directly to an external display device like
-      a monitor or laptop panel.  A connector can only be attached to
-      one encoder at a time.  The connector is also the structure
-      where information about the attached display is kept, so it
-      contains fields for display data, EDID data, DPMS &amp;
-      connection status, and information about modes supported on the
-      attached displays.
-    </para>
-<!--!Edrivers/char/drm/drm_crtc.c-->
-  </sect1>
-
-  <sect1>
-    <title>Framebuffer management</title>
-    <para>
-      Clients need to provide a framebuffer object which provides a source
-      of pixels for a CRTC to deliver to the encoder(s) and ultimately the
-      connector(s). A framebuffer is fundamentally a driver-specific memory
-      object, made into an opaque handle by the DRM's addfb() function.
-      Once a framebuffer has been created this way, it may be passed to the
-      KMS mode setting routines for use in a completed configuration.
-    </para>
   </sect1>
 
   <sect1>
@@ -812,15 +2355,24 @@
     </para>
   </sect1>
 
+  <!-- Internals: suspend/resume -->
+
   <sect1>
-    <title>Suspend/resume</title>
+    <title>Suspend/Resume</title>
     <para>
-      The DRM core provides some suspend/resume code, but drivers
-      wanting full suspend/resume support should provide save() and
-      restore() functions.  These are called at suspend,
-      hibernate, or resume time, and should perform any state save or
-      restore required by your device across suspend or hibernate
-      states.
+      The DRM core provides some suspend/resume code, but drivers wanting full
+      suspend/resume support should provide save() and restore() functions.
+      These are called at suspend, hibernate, or resume time, and should perform
+      any state save or restore required by your device across suspend or
+      hibernate states.
+    </para>
+    <synopsis>int (*suspend) (struct drm_device *, pm_message_t state);
+int (*resume) (struct drm_device *);</synopsis>
+    <para>
+      Those are legacy suspend and resume methods. New drivers should use the
+      power management interface provided by their bus type (usually through
+      the struct <structname>device_driver</structname> dev_pm_ops) and set
+      these methods to NULL.
     </para>
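+    <para>
+      For a PCI device this could, for instance, look roughly as follows; the
+      foo_* names are hypothetical and the suspend/resume bodies are left out.
+      <programlisting>
+static int foo_pm_suspend(struct device *dev)
+{
+	/* Save device state. */
+	return 0;
+}
+
+static int foo_pm_resume(struct device *dev)
+{
+	/* Restore device state. */
+	return 0;
+}
+
+static SIMPLE_DEV_PM_OPS(foo_pm_ops, foo_pm_suspend, foo_pm_resume);
+
+static struct pci_driver foo_pci_driver = {
+	.name		= "foo",
+	/* .id_table, .probe, .remove, ... */
+	.driver.pm	= &amp;foo_pm_ops,
+};
+      </programlisting>
+    </para>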
   </sect1>
 
@@ -833,6 +2385,35 @@
   </sect1>
   </chapter>
 
+<!-- TODO
+
+- Add a glossary
+- Document the struct_mutex catch-all lock
+- Document connector properties
+
+- Why is the load method optional?
+- What are drivers supposed to set the initial display state to, and how?
+  Connector's DPMS states are not initialized and are thus equal to
+  DRM_MODE_DPMS_ON. The fbcon compatibility layer calls
+  drm_helper_disable_unused_functions(), which disables unused encoders and
+  CRTCs, but doesn't touch the connectors' DPMS state, and
+  drm_helper_connector_dpms() in reaction to fbdev blanking events. Do drivers
+  that don't implement (or just don't use) fbcon compatibility need to call
+  those functions themselves?
+- KMS drivers must call drm_vblank_pre_modeset() and drm_vblank_post_modeset()
+  around mode setting. Should this be done in the DRM core?
+- vblank_disable_allowed is set to 1 in the first drm_vblank_post_modeset()
+  call and never set back to 0. It seems to be safe to permanently set it to 1
+  in drm_vblank_init() for KMS driver, and it might be safe for UMS drivers as
+  well. This should be investigated.
+- crtc and connector .save and .restore operations are only used internally in
+  drivers, should they be removed from the core?
+- encoder mid-layer .save and .restore operations are only used internally in
+  drivers, should they be removed from the core?
+- encoder mid-layer .detect operation is only used internally in drivers,
+  should it be removed from the core?
+-->
+
   <!-- External interfaces -->
 
   <chapter id="drmExternals">
@@ -853,6 +2434,42 @@
       Cover generic ioctls and sysfs layout here.  We only need high-level
       info, since man pages should cover the rest.
     </para>
+
+  <!-- External: vblank handling -->
+
+    <sect1>
+      <title>VBlank event handling</title>
+      <para>
+        The DRM core exposes two vertical blank related ioctls:
+        <variablelist>
+          <varlistentry>
+            <term>DRM_IOCTL_WAIT_VBLANK</term>
+            <listitem>
+              <para>
+                This takes a struct drm_wait_vblank structure as its argument,
+                and it is used to block or request a signal when a specified
+                vblank event occurs.
+              </para>
+            </listitem>
+          </varlistentry>
+          <varlistentry>
+            <term>DRM_IOCTL_MODESET_CTL</term>
+            <listitem>
+              <para>
+                This should be called by application level drivers before and
+                after mode setting, since on many devices the vertical blank
+                counter is reset at that time.  Internally, the DRM snapshots
+                the last vblank count when the ioctl is called with the
+                _DRM_PRE_MODESET command, so that the counter won't go backwards
+                (which is dealt with when _DRM_POST_MODESET is used).
+              </para>
+            </listitem>
+          </varlistentry>
+        </variablelist>
+<!--!Edrivers/char/drm/drm_irq.c-->
+      </para>
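+      <para>
+        From userspace, waiting for the next vertical blanking period on the
+        first CRTC boils down to the following sketch (error handling elided;
+        note that drm.h declares drm_wait_vblank as a request/reply union):
+        <programlisting>
+#include &lt;string.h&gt;
+#include &lt;sys/ioctl.h&gt;
+#include &lt;drm/drm.h&gt;
+
+/* fd is an open DRM device file descriptor. */
+static int wait_next_vblank(int fd)
+{
+	union drm_wait_vblank vbl;
+
+	memset(&amp;vbl, 0, sizeof vbl);
+	vbl.request.type = _DRM_VBLANK_RELATIVE;
+	vbl.request.sequence = 1;	/* one vertical blank from now */
+
+	return ioctl(fd, DRM_IOCTL_WAIT_VBLANK, &amp;vbl);
+}
+        </programlisting>
+      </para>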
+    </sect1>
+
   </chapter>
 
   <!-- API reference -->
diff --git a/drivers/char/agp/intel-gtt.c b/drivers/char/agp/intel-gtt.c
index 58e32f7..e01f5ea 100644
--- a/drivers/char/agp/intel-gtt.c
+++ b/drivers/char/agp/intel-gtt.c
@@ -84,40 +84,33 @@
 #define IS_IRONLAKE	intel_private.driver->is_ironlake
 #define HAS_PGTBL_EN	intel_private.driver->has_pgtbl_enable
 
-int intel_gtt_map_memory(struct page **pages, unsigned int num_entries,
-			 struct scatterlist **sg_list, int *num_sg)
+static int intel_gtt_map_memory(struct page **pages,
+				unsigned int num_entries,
+				struct sg_table *st)
 {
-	struct sg_table st;
 	struct scatterlist *sg;
 	int i;
 
-	if (*sg_list)
-		return 0; /* already mapped (for e.g. resume */
-
 	DBG("try mapping %lu pages\n", (unsigned long)num_entries);
 
-	if (sg_alloc_table(&st, num_entries, GFP_KERNEL))
+	if (sg_alloc_table(st, num_entries, GFP_KERNEL))
 		goto err;
 
-	*sg_list = sg = st.sgl;
-
-	for (i = 0 ; i < num_entries; i++, sg = sg_next(sg))
+	for_each_sg(st->sgl, sg, num_entries, i)
 		sg_set_page(sg, pages[i], PAGE_SIZE, 0);
 
-	*num_sg = pci_map_sg(intel_private.pcidev, *sg_list,
-				 num_entries, PCI_DMA_BIDIRECTIONAL);
-	if (unlikely(!*num_sg))
+	if (!pci_map_sg(intel_private.pcidev,
+			st->sgl, st->nents, PCI_DMA_BIDIRECTIONAL))
 		goto err;
 
 	return 0;
 
 err:
-	sg_free_table(&st);
+	sg_free_table(st);
 	return -ENOMEM;
 }
-EXPORT_SYMBOL(intel_gtt_map_memory);
 
-void intel_gtt_unmap_memory(struct scatterlist *sg_list, int num_sg)
+static void intel_gtt_unmap_memory(struct scatterlist *sg_list, int num_sg)
 {
 	struct sg_table st;
 	DBG("try unmapping %lu pages\n", (unsigned long)mem->page_count);
@@ -130,7 +123,6 @@
 
 	sg_free_table(&st);
 }
-EXPORT_SYMBOL(intel_gtt_unmap_memory);
 
 static void intel_fake_agp_enable(struct agp_bridge_data *bridge, u32 mode)
 {
@@ -674,9 +666,14 @@
 
 	gtt_map_size = intel_private.base.gtt_total_entries * 4;
 
-	intel_private.gtt = ioremap(intel_private.gtt_bus_addr,
-				    gtt_map_size);
-	if (!intel_private.gtt) {
+	intel_private.gtt = NULL;
+	if (INTEL_GTT_GEN < 6)
+		intel_private.gtt = ioremap_wc(intel_private.gtt_bus_addr,
+					       gtt_map_size);
+	if (intel_private.gtt == NULL)
+		intel_private.gtt = ioremap(intel_private.gtt_bus_addr,
+					    gtt_map_size);
+	if (intel_private.gtt == NULL) {
 		intel_private.driver->cleanup();
 		iounmap(intel_private.registers);
 		return -ENOMEM;
@@ -879,8 +876,7 @@
 	return false;
 }
 
-void intel_gtt_insert_sg_entries(struct scatterlist *sg_list,
-				 unsigned int sg_len,
+void intel_gtt_insert_sg_entries(struct sg_table *st,
 				 unsigned int pg_start,
 				 unsigned int flags)
 {
@@ -892,12 +888,11 @@
 
 	/* sg may merge pages, but we have to separate
 	 * per-page addr for GTT */
-	for_each_sg(sg_list, sg, sg_len, i) {
+	for_each_sg(st->sgl, sg, st->nents, i) {
 		len = sg_dma_len(sg) >> PAGE_SHIFT;
 		for (m = 0; m < len; m++) {
 			dma_addr_t addr = sg_dma_address(sg) + (m << PAGE_SHIFT);
-			intel_private.driver->write_entry(addr,
-							  j, flags);
+			intel_private.driver->write_entry(addr, j, flags);
 			j++;
 		}
 	}
@@ -905,8 +900,10 @@
 }
 EXPORT_SYMBOL(intel_gtt_insert_sg_entries);
 
-void intel_gtt_insert_pages(unsigned int first_entry, unsigned int num_entries,
-			    struct page **pages, unsigned int flags)
+static void intel_gtt_insert_pages(unsigned int first_entry,
+				   unsigned int num_entries,
+				   struct page **pages,
+				   unsigned int flags)
 {
 	int i, j;
 
@@ -917,7 +914,6 @@
 	}
 	readl(intel_private.gtt+j-1);
 }
-EXPORT_SYMBOL(intel_gtt_insert_pages);
 
 static int intel_fake_agp_insert_entries(struct agp_memory *mem,
 					 off_t pg_start, int type)
@@ -953,13 +949,15 @@
 		global_cache_flush();
 
 	if (intel_private.base.needs_dmar) {
-		ret = intel_gtt_map_memory(mem->pages, mem->page_count,
-					   &mem->sg_list, &mem->num_sg);
+		struct sg_table st;
+
+		ret = intel_gtt_map_memory(mem->pages, mem->page_count, &st);
 		if (ret != 0)
 			return ret;
 
-		intel_gtt_insert_sg_entries(mem->sg_list, mem->num_sg,
-					    pg_start, type);
+		intel_gtt_insert_sg_entries(&st, pg_start, type);
+		mem->sg_list = st.sgl;
+		mem->num_sg = st.nents;
 	} else
 		intel_gtt_insert_pages(pg_start, mem->page_count, mem->pages,
 				       type);
diff --git a/drivers/gpu/drm/Kconfig b/drivers/gpu/drm/Kconfig
index 90e2808..18321b68b 100644
--- a/drivers/gpu/drm/Kconfig
+++ b/drivers/gpu/drm/Kconfig
@@ -22,7 +22,7 @@
 config DRM_USB
 	tristate
 	depends on DRM
-	depends on USB_ARCH_HAS_HCD
+	depends on USB_SUPPORT && USB_ARCH_HAS_HCD
 	select USB
 
 config DRM_KMS_HELPER
@@ -54,6 +54,21 @@
 	  GPU memory types. Will be enabled automatically if a device driver
 	  uses it.
 
+config DRM_GEM_CMA_HELPER
+	bool
+	depends on DRM
+	help
+	  Choose this if you need the GEM CMA helper functions
+
+config DRM_KMS_CMA_HELPER
+	bool
+	select DRM_GEM_CMA_HELPER
+	select FB_SYS_FILLRECT
+	select FB_SYS_COPYAREA
+	select FB_SYS_IMAGEBLIT
+	help
+	  Choose this if you need the KMS CMA helper functions
+
 config DRM_TDFX
 	tristate "3dfx Banshee/Voodoo3+"
 	depends on DRM && PCI
@@ -193,3 +208,5 @@
 source "drivers/gpu/drm/mgag200/Kconfig"
 
 source "drivers/gpu/drm/cirrus/Kconfig"
+
+source "drivers/gpu/drm/shmobile/Kconfig"
diff --git a/drivers/gpu/drm/Makefile b/drivers/gpu/drm/Makefile
index f65f65e..2ff5cef 100644
--- a/drivers/gpu/drm/Makefile
+++ b/drivers/gpu/drm/Makefile
@@ -15,11 +15,13 @@
 		drm_trace_points.o drm_global.o drm_prime.o
 
 drm-$(CONFIG_COMPAT) += drm_ioc32.o
+drm-$(CONFIG_DRM_GEM_CMA_HELPER) += drm_gem_cma_helper.o
 
 drm-usb-y   := drm_usb.o
 
 drm_kms_helper-y := drm_fb_helper.o drm_crtc_helper.o drm_dp_i2c_helper.o
 drm_kms_helper-$(CONFIG_DRM_LOAD_EDID_FIRMWARE) += drm_edid_load.o
+drm_kms_helper-$(CONFIG_DRM_KMS_CMA_HELPER) += drm_fb_cma_helper.o
 
 obj-$(CONFIG_DRM_KMS_HELPER) += drm_kms_helper.o
 
@@ -45,4 +47,5 @@
 obj-$(CONFIG_DRM_GMA500) += gma500/
 obj-$(CONFIG_DRM_UDL) += udl/
 obj-$(CONFIG_DRM_AST) += ast/
+obj-$(CONFIG_DRM_SHMOBILE) +=shmobile/
 obj-y			+= i2c/
diff --git a/drivers/gpu/drm/ast/ast_drv.h b/drivers/gpu/drm/ast/ast_drv.h
index d4af9ed..20a437f 100644
--- a/drivers/gpu/drm/ast/ast_drv.h
+++ b/drivers/gpu/drm/ast/ast_drv.h
@@ -94,7 +94,6 @@
 		struct drm_global_reference mem_global_ref;
 		struct ttm_bo_global_ref bo_global_ref;
 		struct ttm_bo_device bdev;
-		atomic_t validate_sequence;
 	} ttm;
 
 	struct drm_gem_object *cursor_cache;
diff --git a/drivers/gpu/drm/ast/ast_mode.c b/drivers/gpu/drm/ast/ast_mode.c
index a712caf..f3b2a7c 100644
--- a/drivers/gpu/drm/ast/ast_mode.c
+++ b/drivers/gpu/drm/ast/ast_mode.c
@@ -737,6 +737,7 @@
 	if (edid) {
 		drm_mode_connector_update_edid_property(&ast_connector->base, edid);
 		ret = drm_add_edid_modes(connector, edid);
+		kfree(edid);
 		return ret;
 	} else
 		drm_mode_connector_update_edid_property(&ast_connector->base, NULL);
diff --git a/drivers/gpu/drm/cirrus/cirrus_drv.h b/drivers/gpu/drm/cirrus/cirrus_drv.h
index 64ea597..5d04564 100644
--- a/drivers/gpu/drm/cirrus/cirrus_drv.h
+++ b/drivers/gpu/drm/cirrus/cirrus_drv.h
@@ -143,7 +143,6 @@
 		struct drm_global_reference mem_global_ref;
 		struct ttm_bo_global_ref bo_global_ref;
 		struct ttm_bo_device bdev;
-		atomic_t validate_sequence;
 	} ttm;
 	bool mm_inited;
 };
diff --git a/drivers/gpu/drm/drm_cache.c b/drivers/gpu/drm/drm_cache.c
index 08758e0..4a4274b 100644
--- a/drivers/gpu/drm/drm_cache.c
+++ b/drivers/gpu/drm/drm_cache.c
@@ -37,12 +37,13 @@
 {
 	uint8_t *page_virtual;
 	unsigned int i;
+	const int size = boot_cpu_data.x86_clflush_size;
 
 	if (unlikely(page == NULL))
 		return;
 
 	page_virtual = kmap_atomic(page);
-	for (i = 0; i < PAGE_SIZE; i += boot_cpu_data.x86_clflush_size)
+	for (i = 0; i < PAGE_SIZE; i += size)
 		clflush(page_virtual + i);
 	kunmap_atomic(page_virtual);
 }
@@ -100,6 +101,31 @@
 EXPORT_SYMBOL(drm_clflush_pages);
 
 void
+drm_clflush_sg(struct sg_table *st)
+{
+#if defined(CONFIG_X86)
+	if (cpu_has_clflush) {
+		struct scatterlist *sg;
+		int i;
+
+		mb();
+		for_each_sg(st->sgl, sg, st->nents, i)
+			drm_clflush_page(sg_page(sg));
+		mb();
+
+		return;
+	}
+
+	if (on_each_cpu(drm_clflush_ipi_handler, NULL, 1) != 0)
+		printk(KERN_ERR "Timed out waiting for cache flush.\n");
+#else
+	printk(KERN_ERR "Architecture has no drm_cache.c support\n");
+	WARN_ON_ONCE(1);
+#endif
+}
+EXPORT_SYMBOL(drm_clflush_sg);
+
+void
 drm_clflush_virt_range(char *addr, unsigned long length)
 {
 #if defined(CONFIG_X86)
diff --git a/drivers/gpu/drm/drm_crtc.c b/drivers/gpu/drm/drm_crtc.c
index 6fbfc24..c317f72 100644
--- a/drivers/gpu/drm/drm_crtc.c
+++ b/drivers/gpu/drm/drm_crtc.c
@@ -2169,6 +2169,8 @@
 	case DRM_FORMAT_NV21:
 	case DRM_FORMAT_NV16:
 	case DRM_FORMAT_NV61:
+	case DRM_FORMAT_NV24:
+	case DRM_FORMAT_NV42:
 	case DRM_FORMAT_YUV410:
 	case DRM_FORMAT_YVU410:
 	case DRM_FORMAT_YUV411:
@@ -3718,6 +3720,8 @@
 	case DRM_FORMAT_NV21:
 	case DRM_FORMAT_NV16:
 	case DRM_FORMAT_NV61:
+	case DRM_FORMAT_NV24:
+	case DRM_FORMAT_NV42:
 		return 2;
 	default:
 		return 1;
@@ -3751,6 +3755,8 @@
 	case DRM_FORMAT_NV21:
 	case DRM_FORMAT_NV16:
 	case DRM_FORMAT_NV61:
+	case DRM_FORMAT_NV24:
+	case DRM_FORMAT_NV42:
 		return plane ? 2 : 1;
 	case DRM_FORMAT_YUV410:
 	case DRM_FORMAT_YVU410:
diff --git a/drivers/gpu/drm/drm_drv.c b/drivers/gpu/drm/drm_drv.c
index 9238de4..1490e76 100644
--- a/drivers/gpu/drm/drm_drv.c
+++ b/drivers/gpu/drm/drm_drv.c
@@ -156,14 +156,14 @@
 	DRM_IOCTL_DEF(DRM_IOCTL_MODE_SETPROPERTY, drm_mode_connector_property_set_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
 	DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETPROPBLOB, drm_mode_getblob_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
 	DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETFB, drm_mode_getfb, DRM_CONTROL_ALLOW|DRM_UNLOCKED),
-	DRM_IOCTL_DEF(DRM_IOCTL_MODE_ADDFB, drm_mode_addfb, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
-	DRM_IOCTL_DEF(DRM_IOCTL_MODE_ADDFB2, drm_mode_addfb2, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
-	DRM_IOCTL_DEF(DRM_IOCTL_MODE_RMFB, drm_mode_rmfb, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
+	DRM_IOCTL_DEF(DRM_IOCTL_MODE_ADDFB, drm_mode_addfb, DRM_CONTROL_ALLOW|DRM_UNLOCKED),
+	DRM_IOCTL_DEF(DRM_IOCTL_MODE_ADDFB2, drm_mode_addfb2, DRM_CONTROL_ALLOW|DRM_UNLOCKED),
+	DRM_IOCTL_DEF(DRM_IOCTL_MODE_RMFB, drm_mode_rmfb, DRM_CONTROL_ALLOW|DRM_UNLOCKED),
 	DRM_IOCTL_DEF(DRM_IOCTL_MODE_PAGE_FLIP, drm_mode_page_flip_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
 	DRM_IOCTL_DEF(DRM_IOCTL_MODE_DIRTYFB, drm_mode_dirtyfb_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
-	DRM_IOCTL_DEF(DRM_IOCTL_MODE_CREATE_DUMB, drm_mode_create_dumb_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
-	DRM_IOCTL_DEF(DRM_IOCTL_MODE_MAP_DUMB, drm_mode_mmap_dumb_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
-	DRM_IOCTL_DEF(DRM_IOCTL_MODE_DESTROY_DUMB, drm_mode_destroy_dumb_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
+	DRM_IOCTL_DEF(DRM_IOCTL_MODE_CREATE_DUMB, drm_mode_create_dumb_ioctl, DRM_CONTROL_ALLOW|DRM_UNLOCKED),
+	DRM_IOCTL_DEF(DRM_IOCTL_MODE_MAP_DUMB, drm_mode_mmap_dumb_ioctl, DRM_CONTROL_ALLOW|DRM_UNLOCKED),
+	DRM_IOCTL_DEF(DRM_IOCTL_MODE_DESTROY_DUMB, drm_mode_destroy_dumb_ioctl, DRM_CONTROL_ALLOW|DRM_UNLOCKED),
 	DRM_IOCTL_DEF(DRM_IOCTL_MODE_OBJ_GETPROPERTIES, drm_mode_obj_get_properties_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
 	DRM_IOCTL_DEF(DRM_IOCTL_MODE_OBJ_SETPROPERTY, drm_mode_obj_set_property_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
 };
diff --git a/drivers/gpu/drm/drm_edid.c b/drivers/gpu/drm/drm_edid.c
index b7ee230..289c9b1 100644
--- a/drivers/gpu/drm/drm_edid.c
+++ b/drivers/gpu/drm/drm_edid.c
@@ -161,7 +161,7 @@
  * Sanity check the EDID block (base or extension).  Return 0 if the block
  * doesn't check out, or 1 if it's valid.
  */
-bool drm_edid_block_valid(u8 *raw_edid, int block)
+bool drm_edid_block_valid(u8 *raw_edid, int block, bool print_bad_edid)
 {
 	int i;
 	u8 csum = 0;
@@ -184,7 +184,9 @@
 	for (i = 0; i < EDID_LENGTH; i++)
 		csum += raw_edid[i];
 	if (csum) {
-		DRM_ERROR("EDID checksum is invalid, remainder is %d\n", csum);
+		if (print_bad_edid) {
+			DRM_ERROR("EDID checksum is invalid, remainder is %d\n", csum);
+		}
 
 		/* allow CEA to slide through, switches mangle this */
 		if (raw_edid[0] != 0x02)
@@ -210,7 +212,7 @@
 	return 1;
 
 bad:
-	if (raw_edid) {
+	if (raw_edid && print_bad_edid) {
 		printk(KERN_ERR "Raw EDID:\n");
 		print_hex_dump(KERN_ERR, " \t", DUMP_PREFIX_NONE, 16, 1,
 			       raw_edid, EDID_LENGTH, false);
@@ -234,7 +236,7 @@
 		return false;
 
 	for (i = 0; i <= edid->extensions; i++)
-		if (!drm_edid_block_valid(raw + i * EDID_LENGTH, i))
+		if (!drm_edid_block_valid(raw + i * EDID_LENGTH, i, true))
 			return false;
 
 	return true;
@@ -257,6 +259,8 @@
 		      int block, int len)
 {
 	unsigned char start = block * EDID_LENGTH;
+	unsigned char segment = block >> 1;
+	unsigned char xfers = segment ? 3 : 2;
 	int ret, retries = 5;
 
 	/* The core i2c driver will automatically retry the transfer if the
@@ -268,6 +272,11 @@
 	do {
 		struct i2c_msg msgs[] = {
 			{
+				.addr	= DDC_SEGMENT_ADDR,
+				.flags	= 0,
+				.len	= 1,
+				.buf	= &segment,
+			}, {
 				.addr	= DDC_ADDR,
 				.flags	= 0,
 				.len	= 1,
@@ -279,15 +288,21 @@
 				.buf	= buf,
 			}
 		};
-		ret = i2c_transfer(adapter, msgs, 2);
+
+	/*
+	 * Avoid sending the segment addr to not upset non-compliant ddc
+	 * monitors.
+	 */
+		ret = i2c_transfer(adapter, &msgs[3 - xfers], xfers);
+
 		if (ret == -ENXIO) {
 			DRM_DEBUG_KMS("drm: skipping non-existent adapter %s\n",
 					adapter->name);
 			break;
 		}
-	} while (ret != 2 && --retries);
+	} while (ret != xfers && --retries);
 
-	return ret == 2 ? 0 : -1;
+	return ret == xfers ? 0 : -1;
 }
 
 static bool drm_edid_is_zero(u8 *in_edid, int length)
@@ -306,6 +321,7 @@
 {
 	int i, j = 0, valid_extensions = 0;
 	u8 *block, *new;
+	bool print_bad_edid = !connector->bad_edid_counter || (drm_debug & DRM_UT_KMS);
 
 	if ((block = kmalloc(EDID_LENGTH, GFP_KERNEL)) == NULL)
 		return NULL;
@@ -314,7 +330,7 @@
 	for (i = 0; i < 4; i++) {
 		if (drm_do_probe_ddc_edid(adapter, block, 0, EDID_LENGTH))
 			goto out;
-		if (drm_edid_block_valid(block, 0))
+		if (drm_edid_block_valid(block, 0, print_bad_edid))
 			break;
 		if (i == 0 && drm_edid_is_zero(block, EDID_LENGTH)) {
 			connector->null_edid_counter++;
@@ -339,7 +355,7 @@
 				  block + (valid_extensions + 1) * EDID_LENGTH,
 				  j, EDID_LENGTH))
 				goto out;
-			if (drm_edid_block_valid(block + (valid_extensions + 1) * EDID_LENGTH, j)) {
+			if (drm_edid_block_valid(block + (valid_extensions + 1) * EDID_LENGTH, j, print_bad_edid)) {
 				valid_extensions++;
 				break;
 			}
@@ -362,8 +378,11 @@
 	return block;
 
 carp:
-	dev_warn(connector->dev->dev, "%s: EDID block %d invalid.\n",
-		 drm_get_connector_name(connector), j);
+	if (print_bad_edid) {
+		dev_warn(connector->dev->dev, "%s: EDID block %d invalid.\n",
+			 drm_get_connector_name(connector), j);
+	}
+	connector->bad_edid_counter++;
 
 out:
 	kfree(block);
@@ -402,10 +421,7 @@
 	if (drm_probe_ddc(adapter))
 		edid = (struct edid *)drm_do_get_edid(connector, adapter);
 
-	connector->display_info.raw_edid = (char *)edid;
-
 	return edid;
-
 }
 EXPORT_SYMBOL(drm_get_edid);
 
@@ -1523,16 +1539,57 @@
 }
 
 static int
+cea_db_payload_len(const u8 *db)
+{
+	return db[0] & 0x1f;
+}
+
+static int
+cea_db_tag(const u8 *db)
+{
+	return db[0] >> 5;
+}
+
+static int
+cea_revision(const u8 *cea)
+{
+	return cea[1];
+}
+
+static int
+cea_db_offsets(const u8 *cea, int *start, int *end)
+{
+	/* Data block offset in CEA extension block */
+	*start = 4;
+	*end = cea[2];
+	if (*end == 0)
+		*end = 127;
+	if (*end < 4 || *end > 127)
+		return -ERANGE;
+	return 0;
+}
+
+#define for_each_cea_db(cea, i, start, end) \
+	for ((i) = (start); (i) < (end) && (i) + cea_db_payload_len(&(cea)[(i)]) < (end); (i) += cea_db_payload_len(&(cea)[(i)]) + 1)
+
+static int
 add_cea_modes(struct drm_connector *connector, struct edid *edid)
 {
 	u8 * cea = drm_find_cea_extension(edid);
 	u8 * db, dbl;
 	int modes = 0;
 
-	if (cea && cea[1] >= 3) {
-		for (db = cea + 4; db < cea + cea[2]; db += dbl + 1) {
-			dbl = db[0] & 0x1f;
-			if (((db[0] & 0xe0) >> 5) == VIDEO_BLOCK)
+	if (cea && cea_revision(cea) >= 3) {
+		int i, start, end;
+
+		if (cea_db_offsets(cea, &start, &end))
+			return 0;
+
+		for_each_cea_db(cea, i, start, end) {
+			db = &cea[i];
+			dbl = cea_db_payload_len(db);
+
+			if (cea_db_tag(db) == VIDEO_BLOCK)
 				modes += do_cea_modes (connector, db+1, dbl);
 		}
 	}
@@ -1541,19 +1598,28 @@
 }
 
 static void
-parse_hdmi_vsdb(struct drm_connector *connector, uint8_t *db)
+parse_hdmi_vsdb(struct drm_connector *connector, const u8 *db)
 {
-	connector->eld[5] |= (db[6] >> 7) << 1;  /* Supports_AI */
+	u8 len = cea_db_payload_len(db);
 
-	connector->dvi_dual = db[6] & 1;
-	connector->max_tmds_clock = db[7] * 5;
-
-	connector->latency_present[0] = db[8] >> 7;
-	connector->latency_present[1] = (db[8] >> 6) & 1;
-	connector->video_latency[0] = db[9];
-	connector->audio_latency[0] = db[10];
-	connector->video_latency[1] = db[11];
-	connector->audio_latency[1] = db[12];
+	if (len >= 6) {
+		connector->eld[5] |= (db[6] >> 7) << 1;  /* Supports_AI */
+		connector->dvi_dual = db[6] & 1;
+	}
+	if (len >= 7)
+		connector->max_tmds_clock = db[7] * 5;
+	if (len >= 8) {
+		connector->latency_present[0] = db[8] >> 7;
+		connector->latency_present[1] = (db[8] >> 6) & 1;
+	}
+	if (len >= 9)
+		connector->video_latency[0] = db[9];
+	if (len >= 10)
+		connector->audio_latency[0] = db[10];
+	if (len >= 11)
+		connector->video_latency[1] = db[11];
+	if (len >= 12)
+		connector->audio_latency[1] = db[12];
 
 	DRM_LOG_KMS("HDMI: DVI dual %d, "
 		    "max TMDS clock %d, "
@@ -1577,6 +1643,21 @@
 		*(u8 **)data = t->data.other_data.data.str.str;
 }
 
+static bool cea_db_is_hdmi_vsdb(const u8 *db)
+{
+	int hdmi_id;
+
+	if (cea_db_tag(db) != VENDOR_BLOCK)
+		return false;
+
+	if (cea_db_payload_len(db) < 5)
+		return false;
+
+	hdmi_id = db[1] | (db[2] << 8) | (db[3] << 16);
+
+	return hdmi_id == HDMI_IDENTIFIER;
+}
+
 /**
  * drm_edid_to_eld - build ELD from EDID
  * @connector: connector corresponding to the HDMI/DP sink
@@ -1623,29 +1704,40 @@
 	eld[18] = edid->prod_code[0];
 	eld[19] = edid->prod_code[1];
 
-	if (cea[1] >= 3)
-		for (db = cea + 4; db < cea + cea[2]; db += dbl + 1) {
-			dbl = db[0] & 0x1f;
-			
-			switch ((db[0] & 0xe0) >> 5) {
+	if (cea_revision(cea) >= 3) {
+		int i, start, end;
+
+		if (cea_db_offsets(cea, &start, &end)) {
+			start = 0;
+			end = 0;
+		}
+
+		for_each_cea_db(cea, i, start, end) {
+			db = &cea[i];
+			dbl = cea_db_payload_len(db);
+
+			switch (cea_db_tag(db)) {
 			case AUDIO_BLOCK:
 				/* Audio Data Block, contains SADs */
 				sad_count = dbl / 3;
-				memcpy(eld + 20 + mnl, &db[1], dbl);
+				if (dbl >= 1)
+					memcpy(eld + 20 + mnl, &db[1], dbl);
 				break;
 			case SPEAKER_BLOCK:
-                                /* Speaker Allocation Data Block */
-				eld[7] = db[1];
+				/* Speaker Allocation Data Block */
+				if (dbl >= 1)
+					eld[7] = db[1];
 				break;
 			case VENDOR_BLOCK:
 				/* HDMI Vendor-Specific Data Block */
-				if (db[1] == 0x03 && db[2] == 0x0c && db[3] == 0)
+				if (cea_db_is_hdmi_vsdb(db))
 					parse_hdmi_vsdb(connector, db);
 				break;
 			default:
 				break;
 			}
 		}
+	}
 	eld[5] |= sad_count << 4;
 	eld[2] = (20 + mnl + sad_count * 3 + 3) / 4;
 
@@ -1723,38 +1815,26 @@
 bool drm_detect_hdmi_monitor(struct edid *edid)
 {
 	u8 *edid_ext;
-	int i, hdmi_id;
+	int i;
 	int start_offset, end_offset;
-	bool is_hdmi = false;
 
 	edid_ext = drm_find_cea_extension(edid);
 	if (!edid_ext)
-		goto end;
+		return false;
 
-	/* Data block offset in CEA extension block */
-	start_offset = 4;
-	end_offset = edid_ext[2];
+	if (cea_db_offsets(edid_ext, &start_offset, &end_offset))
+		return false;
 
 	/*
 	 * Because HDMI identifier is in Vendor Specific Block,
 	 * search it from all data blocks of CEA extension.
 	 */
-	for (i = start_offset; i < end_offset;
-		/* Increased by data block len */
-		i += ((edid_ext[i] & 0x1f) + 1)) {
-		/* Find vendor specific block */
-		if ((edid_ext[i] >> 5) == VENDOR_BLOCK) {
-			hdmi_id = edid_ext[i + 1] | (edid_ext[i + 2] << 8) |
-				  edid_ext[i + 3] << 16;
-			/* Find HDMI identifier */
-			if (hdmi_id == HDMI_IDENTIFIER)
-				is_hdmi = true;
-			break;
-		}
+	for_each_cea_db(edid_ext, i, start_offset, end_offset) {
+		if (cea_db_is_hdmi_vsdb(&edid_ext[i]))
+			return true;
 	}
 
-end:
-	return is_hdmi;
+	return false;
 }
 EXPORT_SYMBOL(drm_detect_hdmi_monitor);
 
@@ -1786,15 +1866,13 @@
 		goto end;
 	}
 
-	/* Data block offset in CEA extension block */
-	start_offset = 4;
-	end_offset = edid_ext[2];
+	if (cea_db_offsets(edid_ext, &start_offset, &end_offset))
+		goto end;
 
-	for (i = start_offset; i < end_offset;
-			i += ((edid_ext[i] & 0x1f) + 1)) {
-		if ((edid_ext[i] >> 5) == AUDIO_BLOCK) {
+	for_each_cea_db(edid_ext, i, start_offset, end_offset) {
+		if (cea_db_tag(&edid_ext[i]) == AUDIO_BLOCK) {
 			has_audio = true;
-			for (j = 1; j < (edid_ext[i] & 0x1f); j += 3)
+			for (j = 1; j < cea_db_payload_len(&edid_ext[i]) + 1; j += 3)
 				DRM_DEBUG_KMS("CEA audio format %d\n",
 					      (edid_ext[i + j] >> 3) & 0xf);
 			goto end;
diff --git a/drivers/gpu/drm/drm_edid_load.c b/drivers/gpu/drm/drm_edid_load.c
index 0303935..ea9cdab 100644
--- a/drivers/gpu/drm/drm_edid_load.c
+++ b/drivers/gpu/drm/drm_edid_load.c
@@ -114,8 +114,8 @@
 	},
 };
 
-static int edid_load(struct drm_connector *connector, char *name,
-		     char *connector_name)
+static u8 *edid_load(struct drm_connector *connector, char *name,
+			char *connector_name)
 {
 	const struct firmware *fw;
 	struct platform_device *pdev;
@@ -123,6 +123,7 @@
 	int fwsize, expected;
 	int builtin = 0, err = 0;
 	int i, valid_extensions = 0;
+	bool print_bad_edid = !connector->bad_edid_counter || (drm_debug & DRM_UT_KMS);
 
 	pdev = platform_device_register_simple(connector_name, -1, NULL, 0);
 	if (IS_ERR(pdev)) {
@@ -173,7 +174,8 @@
 	}
 	memcpy(edid, fwdata, fwsize);
 
-	if (!drm_edid_block_valid(edid, 0)) {
+	if (!drm_edid_block_valid(edid, 0, print_bad_edid)) {
+		connector->bad_edid_counter++;
 		DRM_ERROR("Base block of EDID firmware \"%s\" is invalid ",
 		    name);
 		kfree(edid);
@@ -185,7 +187,7 @@
 		if (i != valid_extensions + 1)
 			memcpy(edid + (valid_extensions + 1) * EDID_LENGTH,
 			    edid + i * EDID_LENGTH, EDID_LENGTH);
-		if (drm_edid_block_valid(edid + i * EDID_LENGTH, i))
+		if (drm_edid_block_valid(edid + i * EDID_LENGTH, i, print_bad_edid))
 			valid_extensions++;
 	}
 
@@ -205,7 +207,6 @@
 		edid = new_edid;
 	}
 
-	connector->display_info.raw_edid = edid;
 	DRM_INFO("Got %s EDID base block and %d extension%s from "
 	    "\"%s\" for connector \"%s\"\n", builtin ? "built-in" :
 	    "external", valid_extensions, valid_extensions == 1 ? "" : "s",
@@ -215,7 +216,10 @@
 	release_firmware(fw);
 
 out:
-	return err;
+	if (err)
+		return ERR_PTR(err);
+
+	return edid;
 }
 
 int drm_load_edid_firmware(struct drm_connector *connector)
@@ -223,6 +227,7 @@
 	char *connector_name = drm_get_connector_name(connector);
 	char *edidname = edid_firmware, *last, *colon;
 	int ret;
+	struct edid *edid;
 
 	if (*edidname == '\0')
 		return 0;
@@ -240,13 +245,13 @@
 	if (*last == '\n')
 		*last = '\0';
 
-	ret = edid_load(connector, edidname, connector_name);
-	if (ret)
+	edid = (struct edid *) edid_load(connector, edidname, connector_name);
+	if (IS_ERR_OR_NULL(edid))
 		return 0;
 
-	drm_mode_connector_update_edid_property(connector,
-	    (struct edid *) connector->display_info.raw_edid);
+	drm_mode_connector_update_edid_property(connector, edid);
+	ret = drm_add_edid_modes(connector, edid);
+	kfree(edid);
 
-	return drm_add_edid_modes(connector, (struct edid *)
-	    connector->display_info.raw_edid);
+	return ret;
 }
diff --git a/drivers/gpu/drm/drm_edid_modes.h b/drivers/gpu/drm/drm_edid_modes.h
index ff98a7e..57459b3 100644
--- a/drivers/gpu/drm/drm_edid_modes.h
+++ b/drivers/gpu/drm/drm_edid_modes.h
@@ -89,7 +89,7 @@
 		   976, 1088, 0, 480, 486, 494, 517, 0,
 		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
 	/* 1024x768@43Hz, interlace */
-	{ DRM_MODE("1024x768", DRM_MODE_TYPE_DRIVER, 44900, 1024, 1032,
+	{ DRM_MODE("1024x768i", DRM_MODE_TYPE_DRIVER, 44900, 1024, 1032,
 		   1208, 1264, 0, 768, 768, 772, 817, 0,
 		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC |
 			DRM_MODE_FLAG_INTERLACE) },
@@ -395,7 +395,7 @@
 	{ DRM_MODE("1024x768", DRM_MODE_TYPE_DRIVER, 65000, 1024, 1048,
 		   1184, 1344, 0,  768, 771, 777, 806, 0,
 		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) }, /* 1024x768@60Hz */
-	{ DRM_MODE("1024x768", DRM_MODE_TYPE_DRIVER,44900, 1024, 1032,
+	{ DRM_MODE("1024x768i", DRM_MODE_TYPE_DRIVER,44900, 1024, 1032,
 		   1208, 1264, 0, 768, 768, 776, 817, 0,
 		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC | DRM_MODE_FLAG_INTERLACE) }, /* 1024x768@43Hz */
 	{ DRM_MODE("832x624", DRM_MODE_TYPE_DRIVER, 57284, 832, 864,
@@ -506,17 +506,17 @@
 		   1430, 1650, 0, 720, 725, 730, 750, 0,
 		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
 	/* 5 - 1920x1080i@60Hz */
-	{ DRM_MODE("1920x1080", DRM_MODE_TYPE_DRIVER, 74250, 1920, 2008,
+	{ DRM_MODE("1920x1080i", DRM_MODE_TYPE_DRIVER, 74250, 1920, 2008,
 		   2052, 2200, 0, 1080, 1084, 1094, 1125, 0,
 		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC |
 			DRM_MODE_FLAG_INTERLACE) },
 	/* 6 - 1440x480i@60Hz */
-	{ DRM_MODE("1440x480", DRM_MODE_TYPE_DRIVER, 27000, 1440, 1478,
+	{ DRM_MODE("1440x480i", DRM_MODE_TYPE_DRIVER, 27000, 1440, 1478,
 		   1602, 1716, 0, 480, 488, 494, 525, 0,
 		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
 			DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_DBLCLK) },
 	/* 7 - 1440x480i@60Hz */
-	{ DRM_MODE("1440x480", DRM_MODE_TYPE_DRIVER, 27000, 1440, 1478,
+	{ DRM_MODE("1440x480i", DRM_MODE_TYPE_DRIVER, 27000, 1440, 1478,
 		   1602, 1716, 0, 480, 488, 494, 525, 0,
 		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
 			DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_DBLCLK) },
@@ -531,12 +531,12 @@
 		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
 			DRM_MODE_FLAG_DBLCLK) },
 	/* 10 - 2880x480i@60Hz */
-	{ DRM_MODE("2880x480", DRM_MODE_TYPE_DRIVER, 54000, 2880, 2956,
+	{ DRM_MODE("2880x480i", DRM_MODE_TYPE_DRIVER, 54000, 2880, 2956,
 		   3204, 3432, 0, 480, 488, 494, 525, 0,
 		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
 			DRM_MODE_FLAG_INTERLACE) },
 	/* 11 - 2880x480i@60Hz */
-	{ DRM_MODE("2880x480", DRM_MODE_TYPE_DRIVER, 54000, 2880, 2956,
+	{ DRM_MODE("2880x480i", DRM_MODE_TYPE_DRIVER, 54000, 2880, 2956,
 		   3204, 3432, 0, 480, 488, 494, 525, 0,
 		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
 			DRM_MODE_FLAG_INTERLACE) },
@@ -573,17 +573,17 @@
 		   1760, 1980, 0, 720, 725, 730, 750, 0,
 		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
 	/* 20 - 1920x1080i@50Hz */
-	{ DRM_MODE("1920x1080", DRM_MODE_TYPE_DRIVER, 74250, 1920, 2448,
+	{ DRM_MODE("1920x1080i", DRM_MODE_TYPE_DRIVER, 74250, 1920, 2448,
 		   2492, 2640, 0, 1080, 1084, 1094, 1125, 0,
 		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC |
 			DRM_MODE_FLAG_INTERLACE) },
 	/* 21 - 1440x576i@50Hz */
-	{ DRM_MODE("1440x576", DRM_MODE_TYPE_DRIVER, 27000, 1440, 1464,
+	{ DRM_MODE("1440x576i", DRM_MODE_TYPE_DRIVER, 27000, 1440, 1464,
 		   1590, 1728, 0, 576, 580, 586, 625, 0,
 		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
 			DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_DBLCLK) },
 	/* 22 - 1440x576i@50Hz */
-	{ DRM_MODE("1440x576", DRM_MODE_TYPE_DRIVER, 27000, 1440, 1464,
+	{ DRM_MODE("1440x576i", DRM_MODE_TYPE_DRIVER, 27000, 1440, 1464,
 		   1590, 1728, 0, 576, 580, 586, 625, 0,
 		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
 			DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_DBLCLK) },
@@ -598,12 +598,12 @@
 		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
 			DRM_MODE_FLAG_DBLCLK) },
 	/* 25 - 2880x576i@50Hz */
-	{ DRM_MODE("2880x576", DRM_MODE_TYPE_DRIVER, 54000, 2880, 2928,
+	{ DRM_MODE("2880x576i", DRM_MODE_TYPE_DRIVER, 54000, 2880, 2928,
 		   3180, 3456, 0, 576, 580, 586, 625, 0,
 		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
 			DRM_MODE_FLAG_INTERLACE) },
 	/* 26 - 2880x576i@50Hz */
-	{ DRM_MODE("2880x576", DRM_MODE_TYPE_DRIVER, 54000, 2880, 2928,
+	{ DRM_MODE("2880x576i", DRM_MODE_TYPE_DRIVER, 54000, 2880, 2928,
 		   3180, 3456, 0, 576, 580, 586, 625, 0,
 		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
 			DRM_MODE_FLAG_INTERLACE) },
@@ -656,12 +656,12 @@
 		   3184, 3456, 0, 576, 581, 586, 625, 0,
 		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
 	/* 39 - 1920x1080i@50Hz */
-	{ DRM_MODE("1920x1080", DRM_MODE_TYPE_DRIVER, 72000, 1920, 1952,
+	{ DRM_MODE("1920x1080i", DRM_MODE_TYPE_DRIVER, 72000, 1920, 1952,
 		   2120, 2304, 0, 1080, 1126, 1136, 1250, 0,
 		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC |
 			DRM_MODE_FLAG_INTERLACE) },
 	/* 40 - 1920x1080i@100Hz */
-	{ DRM_MODE("1920x1080", DRM_MODE_TYPE_DRIVER, 148500, 1920, 2448,
+	{ DRM_MODE("1920x1080i", DRM_MODE_TYPE_DRIVER, 148500, 1920, 2448,
 		   2492, 2640, 0, 1080, 1084, 1094, 1125, 0,
 		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC |
 			DRM_MODE_FLAG_INTERLACE) },
@@ -688,7 +688,7 @@
 		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
 			DRM_MODE_FLAG_DBLCLK) },
 	/* 46 - 1920x1080i@120Hz */
-	{ DRM_MODE("1920x1080", DRM_MODE_TYPE_DRIVER, 148500, 1920, 2008,
+	{ DRM_MODE("1920x1080i", DRM_MODE_TYPE_DRIVER, 148500, 1920, 2008,
 		   2052, 2200, 0, 1080, 1084, 1094, 1125, 0,
 		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC |
 			DRM_MODE_FLAG_INTERLACE) },
@@ -705,12 +705,12 @@
 		   798, 858, 0, 480, 489, 495, 525, 0,
 		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
 	/* 50 - 1440x480i@120Hz */
-	{ DRM_MODE("1440x480", DRM_MODE_TYPE_DRIVER, 54000, 1440, 1478,
+	{ DRM_MODE("1440x480i", DRM_MODE_TYPE_DRIVER, 54000, 1440, 1478,
 		   1602, 1716, 0, 480, 488, 494, 525, 0,
 		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
 			DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_DBLCLK) },
 	/* 51 - 1440x480i@120Hz */
-	{ DRM_MODE("1440x480", DRM_MODE_TYPE_DRIVER, 54000, 1440, 1478,
+	{ DRM_MODE("1440x480i", DRM_MODE_TYPE_DRIVER, 54000, 1440, 1478,
 		   1602, 1716, 0, 480, 488, 494, 525, 0,
 		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
 			DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_DBLCLK) },
@@ -723,12 +723,12 @@
 		   796, 864, 0, 576, 581, 586, 625, 0,
 		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
 	/* 54 - 1440x576i@200Hz */
-	{ DRM_MODE("1440x576", DRM_MODE_TYPE_DRIVER, 108000, 1440, 1464,
+	{ DRM_MODE("1440x576i", DRM_MODE_TYPE_DRIVER, 108000, 1440, 1464,
 		   1590, 1728, 0, 576, 580, 586, 625, 0,
 		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
 			DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_DBLCLK) },
 	/* 55 - 1440x576i@200Hz */
-	{ DRM_MODE("1440x576", DRM_MODE_TYPE_DRIVER, 108000, 1440, 1464,
+	{ DRM_MODE("1440x576i", DRM_MODE_TYPE_DRIVER, 108000, 1440, 1464,
 		   1590, 1728, 0, 576, 580, 586, 625, 0,
 		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
 			DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_DBLCLK) },
@@ -741,12 +741,12 @@
 		   798, 858, 0, 480, 489, 495, 525, 0,
 		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
 	/* 58 - 1440x480i@240 */
-	{ DRM_MODE("1440x480", DRM_MODE_TYPE_DRIVER, 108000, 1440, 1478,
+	{ DRM_MODE("1440x480i", DRM_MODE_TYPE_DRIVER, 108000, 1440, 1478,
 		   1602, 1716, 0, 480, 488, 494, 525, 0,
 		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
 			DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_DBLCLK) },
 	/* 59 - 1440x480i@240 */
-	{ DRM_MODE("1440x480", DRM_MODE_TYPE_DRIVER, 108000, 1440, 1478,
+	{ DRM_MODE("1440x480i", DRM_MODE_TYPE_DRIVER, 108000, 1440, 1478,
 		   1602, 1716, 0, 480, 488, 494, 525, 0,
 		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
 			DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_DBLCLK) },
diff --git a/drivers/gpu/drm/drm_fb_cma_helper.c b/drivers/gpu/drm/drm_fb_cma_helper.c
new file mode 100644
index 0000000..09e11a5
--- /dev/null
+++ b/drivers/gpu/drm/drm_fb_cma_helper.c
@@ -0,0 +1,406 @@
+/*
+ * drm kms/fb cma (contiguous memory allocator) helper functions
+ *
+ * Copyright (C) 2012 Analog Device Inc.
+ *   Author: Lars-Peter Clausen <lars@metafoo.de>
+ *
+ * Based on udl_fbdev.c
+ *  Copyright (C) 2012 Red Hat
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <drm/drmP.h>
+#include <drm/drm_crtc.h>
+#include <drm/drm_fb_helper.h>
+#include <drm/drm_crtc_helper.h>
+#include <drm/drm_gem_cma_helper.h>
+#include <drm/drm_fb_cma_helper.h>
+#include <linux/module.h>
+
+struct drm_fb_cma {
+	struct drm_framebuffer		fb;
+	struct drm_gem_cma_object	*obj[4];
+};
+
+struct drm_fbdev_cma {
+	struct drm_fb_helper	fb_helper;
+	struct drm_fb_cma	*fb;
+};
+
+static inline struct drm_fbdev_cma *to_fbdev_cma(struct drm_fb_helper *helper)
+{
+	return container_of(helper, struct drm_fbdev_cma, fb_helper);
+}
+
+static inline struct drm_fb_cma *to_fb_cma(struct drm_framebuffer *fb)
+{
+	return container_of(fb, struct drm_fb_cma, fb);
+}
+
+static void drm_fb_cma_destroy(struct drm_framebuffer *fb)
+{
+	struct drm_fb_cma *fb_cma = to_fb_cma(fb);
+	int i;
+
+	for (i = 0; i < 4; i++) {
+		if (fb_cma->obj[i])
+			drm_gem_object_unreference_unlocked(&fb_cma->obj[i]->base);
+	}
+
+	drm_framebuffer_cleanup(fb);
+	kfree(fb_cma);
+}
+
+static int drm_fb_cma_create_handle(struct drm_framebuffer *fb,
+	struct drm_file *file_priv, unsigned int *handle)
+{
+	struct drm_fb_cma *fb_cma = to_fb_cma(fb);
+
+	return drm_gem_handle_create(file_priv,
+			&fb_cma->obj[0]->base, handle);
+}
+
+static struct drm_framebuffer_funcs drm_fb_cma_funcs = {
+	.destroy	= drm_fb_cma_destroy,
+	.create_handle	= drm_fb_cma_create_handle,
+};
+
+static struct drm_fb_cma *drm_fb_cma_alloc(struct drm_device *dev,
+	struct drm_mode_fb_cmd2 *mode_cmd, struct drm_gem_cma_object **obj,
+	unsigned int num_planes)
+{
+	struct drm_fb_cma *fb_cma;
+	int ret;
+	int i;
+
+	fb_cma = kzalloc(sizeof(*fb_cma), GFP_KERNEL);
+	if (!fb_cma)
+		return ERR_PTR(-ENOMEM);
+
+	ret = drm_framebuffer_init(dev, &fb_cma->fb, &drm_fb_cma_funcs);
+	if (ret) {
+		dev_err(dev->dev, "Failed to initalize framebuffer: %d\n", ret);
+		kfree(fb_cma);
+		return ERR_PTR(ret);
+	}
+
+	drm_helper_mode_fill_fb_struct(&fb_cma->fb, mode_cmd);
+
+	for (i = 0; i < num_planes; i++)
+		fb_cma->obj[i] = obj[i];
+
+	return fb_cma;
+}
+
+/**
+ * drm_fb_cma_create() - (struct drm_mode_config_funcs *)->fb_create callback function
+ *
+ * If your hardware has special alignment or pitch requirements these should be
+ * checked before calling this function.
+ */
+struct drm_framebuffer *drm_fb_cma_create(struct drm_device *dev,
+	struct drm_file *file_priv, struct drm_mode_fb_cmd2 *mode_cmd)
+{
+	struct drm_fb_cma *fb_cma;
+	struct drm_gem_cma_object *objs[4];
+	struct drm_gem_object *obj;
+	unsigned int hsub;
+	unsigned int vsub;
+	int ret;
+	int i;
+
+	hsub = drm_format_horz_chroma_subsampling(mode_cmd->pixel_format);
+	vsub = drm_format_vert_chroma_subsampling(mode_cmd->pixel_format);
+
+	for (i = 0; i < drm_format_num_planes(mode_cmd->pixel_format); i++) {
+		unsigned int width = mode_cmd->width / (i ? hsub : 1);
+		unsigned int height = mode_cmd->height / (i ? vsub : 1);
+		unsigned int min_size;
+
+		obj = drm_gem_object_lookup(dev, file_priv, mode_cmd->handles[i]);
+		if (!obj) {
+			dev_err(dev->dev, "Failed to lookup GEM object\n");
+			ret = -ENXIO;
+			goto err_gem_object_unreference;
+		}
+
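+		/*
+		 * Minimum buffer that can back this plane: a full pitch for
+		 * all but the last line, one line of pixels, plus the plane
+		 * offset. E.g. a 1920x1080 XRGB8888 plane with a 7680 byte
+		 * pitch and offset 0 needs 1079 * 7680 + 1920 * 4 = 8294400
+		 * bytes.
+		 */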
+		min_size = (height - 1) * mode_cmd->pitches[i]
+			 + width * drm_format_plane_cpp(mode_cmd->pixel_format, i)
+			 + mode_cmd->offsets[i];
+
+		if (obj->size < min_size) {
+			drm_gem_object_unreference_unlocked(obj);
+			ret = -EINVAL;
+			goto err_gem_object_unreference;
+		}
+		objs[i] = to_drm_gem_cma_obj(obj);
+	}
+
+	fb_cma = drm_fb_cma_alloc(dev, mode_cmd, objs, i);
+	if (IS_ERR(fb_cma)) {
+		ret = PTR_ERR(fb_cma);
+		goto err_gem_object_unreference;
+	}
+
+	return &fb_cma->fb;
+
+err_gem_object_unreference:
+	for (i--; i >= 0; i--)
+		drm_gem_object_unreference_unlocked(&objs[i]->base);
+	return ERR_PTR(ret);
+}
+EXPORT_SYMBOL_GPL(drm_fb_cma_create);
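+
+/*
+ * Illustrative sketch (not part of this patch): a CMA-based driver would
+ * typically wire this up as its fb_create callback, e.g.
+ *
+ *	static const struct drm_mode_config_funcs foo_mode_config_funcs = {
+ *		.fb_create = drm_fb_cma_create,
+ *	};
+ *
+ * where "foo" is a hypothetical driver name.
+ */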
+
+/**
+ * drm_fb_cma_get_gem_obj() - Get CMA GEM object for framebuffer
+ * @fb: The framebuffer
+ * @plane: Which plane
+ *
+ * Return the CMA GEM object for given framebuffer.
+ *
+ * This function will usually be called from the CRTC callback functions.
+ */
+struct drm_gem_cma_object *drm_fb_cma_get_gem_obj(struct drm_framebuffer *fb,
+	unsigned int plane)
+{
+	struct drm_fb_cma *fb_cma = to_fb_cma(fb);
+
+	if (plane >= 4)
+		return NULL;
+
+	return fb_cma->obj[plane];
+}
+EXPORT_SYMBOL_GPL(drm_fb_cma_get_gem_obj);
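+
+/*
+ * Illustrative sketch of a CRTC callback using the helper above; the
+ * foo_* names, priv->mmio and FOO_SCANOUT_BASE are hypothetical:
+ *
+ *	struct drm_gem_cma_object *obj;
+ *
+ *	obj = drm_fb_cma_get_gem_obj(crtc->fb, 0);
+ *	if (!obj)
+ *		return -EINVAL;
+ *	writel(obj->paddr + crtc->y * crtc->fb->pitches[0],
+ *	       priv->mmio + FOO_SCANOUT_BASE);
+ */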
+
+static struct fb_ops drm_fbdev_cma_ops = {
+	.owner		= THIS_MODULE,
+	.fb_fillrect	= sys_fillrect,
+	.fb_copyarea	= sys_copyarea,
+	.fb_imageblit	= sys_imageblit,
+	.fb_check_var	= drm_fb_helper_check_var,
+	.fb_set_par	= drm_fb_helper_set_par,
+	.fb_blank	= drm_fb_helper_blank,
+	.fb_pan_display	= drm_fb_helper_pan_display,
+	.fb_setcmap	= drm_fb_helper_setcmap,
+};
+
+static int drm_fbdev_cma_create(struct drm_fb_helper *helper,
+	struct drm_fb_helper_surface_size *sizes)
+{
+	struct drm_fbdev_cma *fbdev_cma = to_fbdev_cma(helper);
+	struct drm_mode_fb_cmd2 mode_cmd = { 0 };
+	struct drm_device *dev = helper->dev;
+	struct drm_gem_cma_object *obj;
+	struct drm_framebuffer *fb;
+	unsigned int bytes_per_pixel;
+	unsigned long offset;
+	struct fb_info *fbi;
+	size_t size;
+	int ret;
+
+	DRM_DEBUG_KMS("surface width(%d), height(%d) and bpp(%d\n",
+			sizes->surface_width, sizes->surface_height,
+			sizes->surface_bpp);
+
+	bytes_per_pixel = DIV_ROUND_UP(sizes->surface_bpp, 8);
+
+	mode_cmd.width = sizes->surface_width;
+	mode_cmd.height = sizes->surface_height;
+	mode_cmd.pitches[0] = sizes->surface_width * bytes_per_pixel;
+	mode_cmd.pixel_format = drm_mode_legacy_fb_format(sizes->surface_bpp,
+		sizes->surface_depth);
+
+	size = mode_cmd.pitches[0] * mode_cmd.height;
+	obj = drm_gem_cma_create(dev, size);
+	if (!obj)
+		return -ENOMEM;
+
+	fbi = framebuffer_alloc(0, dev->dev);
+	if (!fbi) {
+		dev_err(dev->dev, "Failed to allocate framebuffer info.\n");
+		ret = -ENOMEM;
+		goto err_drm_gem_cma_free_object;
+	}
+
+	fbdev_cma->fb = drm_fb_cma_alloc(dev, &mode_cmd, &obj, 1);
+	if (IS_ERR(fbdev_cma->fb)) {
+		dev_err(dev->dev, "Failed to allocate DRM framebuffer.\n");
+		ret = PTR_ERR(fbdev_cma->fb);
+		goto err_framebuffer_release;
+	}
+
+	fb = &fbdev_cma->fb->fb;
+	helper->fb = fb;
+	helper->fbdev = fbi;
+
+	fbi->par = helper;
+	fbi->flags = FBINFO_FLAG_DEFAULT;
+	fbi->fbops = &drm_fbdev_cma_ops;
+
+	ret = fb_alloc_cmap(&fbi->cmap, 256, 0);
+	if (ret) {
+		dev_err(dev->dev, "Failed to allocate color map.\n");
+		goto err_drm_fb_cma_destroy;
+	}
+
+	drm_fb_helper_fill_fix(fbi, fb->pitches[0], fb->depth);
+	drm_fb_helper_fill_var(fbi, helper, fb->width, fb->height);
+
+	offset = fbi->var.xoffset * bytes_per_pixel;
+	offset += fbi->var.yoffset * fb->pitches[0];
+
+	dev->mode_config.fb_base = (resource_size_t)obj->paddr;
+	fbi->screen_base = obj->vaddr + offset;
+	fbi->fix.smem_start = (unsigned long)(obj->paddr + offset);
+	fbi->screen_size = size;
+	fbi->fix.smem_len = size;
+
+	return 0;
+
+err_drm_fb_cma_destroy:
+	drm_fb_cma_destroy(fb);
+err_framebuffer_release:
+	framebuffer_release(fbi);
+err_drm_gem_cma_free_object:
+	drm_gem_cma_free_object(&obj->base);
+	return ret;
+}
+
+static int drm_fbdev_cma_probe(struct drm_fb_helper *helper,
+	struct drm_fb_helper_surface_size *sizes)
+{
+	int ret = 0;
+
+	if (!helper->fb) {
+		ret = drm_fbdev_cma_create(helper, sizes);
+		if (ret < 0)
+			return ret;
+		ret = 1;
+	}
+
+	return ret;
+}
+
+static struct drm_fb_helper_funcs drm_fb_cma_helper_funcs = {
+	.fb_probe = drm_fbdev_cma_probe,
+};
+
+/**
+ * drm_fbdev_cma_init() - Allocate and initialize a drm_fbdev_cma struct
+ * @dev: DRM device
+ * @preferred_bpp: Preferred bits per pixel for the device
+ * @num_crtc: Number of CRTCs
+ * @max_conn_count: Maximum number of connectors
+ *
+ * Returns a newly allocated drm_fbdev_cma struct or an ERR_PTR-encoded error.
+ */
+struct drm_fbdev_cma *drm_fbdev_cma_init(struct drm_device *dev,
+	unsigned int preferred_bpp, unsigned int num_crtc,
+	unsigned int max_conn_count)
+{
+	struct drm_fbdev_cma *fbdev_cma;
+	struct drm_fb_helper *helper;
+	int ret;
+
+	fbdev_cma = kzalloc(sizeof(*fbdev_cma), GFP_KERNEL);
+	if (!fbdev_cma) {
+		dev_err(dev->dev, "Failed to allocate drm fbdev.\n");
+		return ERR_PTR(-ENOMEM);
+	}
+
+	fbdev_cma->fb_helper.funcs = &drm_fb_cma_helper_funcs;
+	helper = &fbdev_cma->fb_helper;
+
+	ret = drm_fb_helper_init(dev, helper, num_crtc, max_conn_count);
+	if (ret < 0) {
+		dev_err(dev->dev, "Failed to initialize drm fb helper.\n");
+		goto err_free;
+	}
+
+	ret = drm_fb_helper_single_add_all_connectors(helper);
+	if (ret < 0) {
+		dev_err(dev->dev, "Failed to add connectors.\n");
+		goto err_drm_fb_helper_fini;
+	}
+
+	ret = drm_fb_helper_initial_config(helper, preferred_bpp);
+	if (ret < 0) {
+		dev_err(dev->dev, "Failed to set inital hw configuration.\n");
+		goto err_drm_fb_helper_fini;
+	}
+
+	return fbdev_cma;
+
+err_drm_fb_helper_fini:
+	drm_fb_helper_fini(helper);
+err_free:
+	kfree(fbdev_cma);
+
+	return ERR_PTR(ret);
+}
+EXPORT_SYMBOL_GPL(drm_fbdev_cma_init);
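+
+/*
+ * Illustrative usage sketch for a driver with one CRTC and one connector
+ * ("priv" and "drm" are hypothetical driver variables):
+ *
+ *	priv->fbdev = drm_fbdev_cma_init(drm, 16, 1, 1);
+ *	if (IS_ERR(priv->fbdev))
+ *		return PTR_ERR(priv->fbdev);
+ *
+ * with a matching drm_fbdev_cma_fini(priv->fbdev) in the unload path.
+ */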
+
+/**
+ * drm_fbdev_cma_fini() - Free drm_fbdev_cma struct
+ * @fbdev_cma: The drm_fbdev_cma struct
+ */
+void drm_fbdev_cma_fini(struct drm_fbdev_cma *fbdev_cma)
+{
+	if (fbdev_cma->fb_helper.fbdev) {
+		struct fb_info *info;
+		int ret;
+
+		info = fbdev_cma->fb_helper.fbdev;
+		ret = unregister_framebuffer(info);
+		if (ret < 0)
+			DRM_DEBUG_KMS("failed unregister_framebuffer()\n");
+
+		if (info->cmap.len)
+			fb_dealloc_cmap(&info->cmap);
+
+		framebuffer_release(info);
+	}
+
+	if (fbdev_cma->fb)
+		drm_fb_cma_destroy(&fbdev_cma->fb->fb);
+
+	drm_fb_helper_fini(&fbdev_cma->fb_helper);
+	kfree(fbdev_cma);
+}
+EXPORT_SYMBOL_GPL(drm_fbdev_cma_fini);
+
+/**
+ * drm_fbdev_cma_restore_mode() - Restores initial framebuffer mode
+ * @fbdev_cma: The drm_fbdev_cma struct, may be NULL
+ *
+ * This function is usually called from the DRM driver's lastclose callback.
+ */
+void drm_fbdev_cma_restore_mode(struct drm_fbdev_cma *fbdev_cma)
+{
+	if (fbdev_cma)
+		drm_fb_helper_restore_fbdev_mode(&fbdev_cma->fb_helper);
+}
+EXPORT_SYMBOL_GPL(drm_fbdev_cma_restore_mode);
+
+/**
+ * drm_fbdev_cma_hotplug_event() - Poll for hotplug events
+ * @fbdev_cma: The drm_fbdev_cma struct, may be NULL
+ *
+ * This function is usually called from the DRM driver's output_poll_changed
+ * callback.
+ */
+void drm_fbdev_cma_hotplug_event(struct drm_fbdev_cma *fbdev_cma)
+{
+	if (fbdev_cma)
+		drm_fb_helper_hotplug_event(&fbdev_cma->fb_helper);
+}
+EXPORT_SYMBOL_GPL(drm_fbdev_cma_hotplug_event);
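+
+/*
+ * Illustrative sketch of the two callbacks the helpers above are meant
+ * for, assuming a hypothetical foo driver:
+ *
+ *	static void foo_lastclose(struct drm_device *drm)
+ *	{
+ *		struct foo_private *priv = drm->dev_private;
+ *
+ *		drm_fbdev_cma_restore_mode(priv->fbdev);
+ *	}
+ *
+ *	static void foo_output_poll_changed(struct drm_device *drm)
+ *	{
+ *		struct foo_private *priv = drm->dev_private;
+ *
+ *		drm_fbdev_cma_hotplug_event(priv->fbdev);
+ *	}
+ */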
diff --git a/drivers/gpu/drm/drm_fb_helper.c b/drivers/gpu/drm/drm_fb_helper.c
index f546d1e..b5d05f5 100644
--- a/drivers/gpu/drm/drm_fb_helper.c
+++ b/drivers/gpu/drm/drm_fb_helper.c
@@ -236,7 +236,7 @@
 }
 EXPORT_SYMBOL(drm_fb_helper_restore_fbdev_mode);
 
-bool drm_fb_helper_force_kernel_mode(void)
+static bool drm_fb_helper_force_kernel_mode(void)
 {
 	bool ret, error = false;
 	struct drm_fb_helper *helper;
@@ -330,7 +330,7 @@
 		/* Walk the connectors & encoders on this fb turning them on/off */
 		for (j = 0; j < fb_helper->connector_count; j++) {
 			connector = fb_helper->connector_info[j]->connector;
-			drm_helper_connector_dpms(connector, dpms_mode);
+			connector->funcs->dpms(connector, dpms_mode);
 			drm_connector_property_set_value(connector,
 				dev->mode_config.dpms_property, dpms_mode);
 		}
@@ -1230,7 +1230,6 @@
 	struct drm_device *dev = fb_helper->dev;
 	struct drm_fb_helper_crtc **crtcs;
 	struct drm_display_mode **modes;
-	struct drm_encoder *encoder;
 	struct drm_mode_set *modeset;
 	bool *enabled;
 	int width, height;
@@ -1241,11 +1240,6 @@
 	width = dev->mode_config.max_width;
 	height = dev->mode_config.max_height;
 
-	/* clean out all the encoder/crtc combos */
-	list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
-		encoder->crtc = NULL;
-	}
-
 	crtcs = kcalloc(dev->mode_config.num_connector,
 			sizeof(struct drm_fb_helper_crtc *), GFP_KERNEL);
 	modes = kcalloc(dev->mode_config.num_connector,
diff --git a/drivers/gpu/drm/drm_gem_cma_helper.c b/drivers/gpu/drm/drm_gem_cma_helper.c
new file mode 100644
index 0000000..1aa8fee
--- /dev/null
+++ b/drivers/gpu/drm/drm_gem_cma_helper.c
@@ -0,0 +1,251 @@
+/*
+ * drm gem CMA (contiguous memory allocator) helper functions
+ *
+ * Copyright (C) 2012 Sascha Hauer, Pengutronix
+ *
+ * Based on Samsung Exynos code
+ *
+ * Copyright (c) 2011 Samsung Electronics Co., Ltd.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/mm.h>
+#include <linux/slab.h>
+#include <linux/mutex.h>
+#include <linux/export.h>
+#include <linux/dma-mapping.h>
+
+#include <drm/drmP.h>
+#include <drm/drm.h>
+#include <drm/drm_gem_cma_helper.h>
+
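+/* The fake mmap offset handed out to userspace is the map-list hash key,
+ * which is stored in units of pages; convert it to a byte offset here.
+ */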
+static unsigned int get_gem_mmap_offset(struct drm_gem_object *obj)
+{
+	return (unsigned int)obj->map_list.hash.key << PAGE_SHIFT;
+}
+
+static void drm_gem_cma_buf_destroy(struct drm_device *drm,
+		struct drm_gem_cma_object *cma_obj)
+{
+	dma_free_writecombine(drm->dev, cma_obj->base.size, cma_obj->vaddr,
+			cma_obj->paddr);
+}
+
+/*
+ * drm_gem_cma_create - allocate an object with the given size
+ *
+ * returns a struct drm_gem_cma_object* on success or ERR_PTR values
+ * on failure.
+ */
+struct drm_gem_cma_object *drm_gem_cma_create(struct drm_device *drm,
+		unsigned int size)
+{
+	struct drm_gem_cma_object *cma_obj;
+	struct drm_gem_object *gem_obj;
+	int ret;
+
+	size = round_up(size, PAGE_SIZE);
+
+	cma_obj = kzalloc(sizeof(*cma_obj), GFP_KERNEL);
+	if (!cma_obj)
+		return ERR_PTR(-ENOMEM);
+
+	cma_obj->vaddr = dma_alloc_writecombine(drm->dev, size,
+			&cma_obj->paddr, GFP_KERNEL | __GFP_NOWARN);
+	if (!cma_obj->vaddr) {
+		dev_err(drm->dev, "failed to allocate buffer with size %d\n", size);
+		ret = -ENOMEM;
+		goto err_dma_alloc;
+	}
+
+	gem_obj = &cma_obj->base;
+
+	ret = drm_gem_object_init(drm, gem_obj, size);
+	if (ret)
+		goto err_obj_init;
+
+	ret = drm_gem_create_mmap_offset(gem_obj);
+	if (ret)
+		goto err_create_mmap_offset;
+
+	return cma_obj;
+
+err_create_mmap_offset:
+	drm_gem_object_release(gem_obj);
+
+err_obj_init:
+	drm_gem_cma_buf_destroy(drm, cma_obj);
+
+err_dma_alloc:
+	kfree(cma_obj);
+
+	return ERR_PTR(ret);
+}
+EXPORT_SYMBOL_GPL(drm_gem_cma_create);
+
+/*
+ * drm_gem_cma_create_with_handle - allocate an object with the given
+ * size and create a gem handle on it
+ *
+ * returns a struct drm_gem_cma_object* on success or ERR_PTR values
+ * on failure.
+ */
+static struct drm_gem_cma_object *drm_gem_cma_create_with_handle(
+		struct drm_file *file_priv,
+		struct drm_device *drm, unsigned int size,
+		unsigned int *handle)
+{
+	struct drm_gem_cma_object *cma_obj;
+	struct drm_gem_object *gem_obj;
+	int ret;
+
+	cma_obj = drm_gem_cma_create(drm, size);
+	if (IS_ERR(cma_obj))
+		return cma_obj;
+
+	gem_obj = &cma_obj->base;
+
+	/*
+	 * Allocate an id in the idr table where the obj is registered;
+	 * the handle holds the id that userspace can see.
+	 */
+	ret = drm_gem_handle_create(file_priv, gem_obj, handle);
+	if (ret)
+		goto err_handle_create;
+
+	/* drop reference from allocate - handle holds it now. */
+	drm_gem_object_unreference_unlocked(gem_obj);
+
+	return cma_obj;
+
+err_handle_create:
+	drm_gem_cma_free_object(gem_obj);
+
+	return ERR_PTR(ret);
+}
+
+/*
+ * drm_gem_cma_free_object - (struct drm_driver)->gem_free_object callback
+ * function
+ */
+void drm_gem_cma_free_object(struct drm_gem_object *gem_obj)
+{
+	struct drm_gem_cma_object *cma_obj;
+
+	if (gem_obj->map_list.map)
+		drm_gem_free_mmap_offset(gem_obj);
+
+	drm_gem_object_release(gem_obj);
+
+	cma_obj = to_drm_gem_cma_obj(gem_obj);
+
+	drm_gem_cma_buf_destroy(gem_obj->dev, cma_obj);
+
+	kfree(cma_obj);
+}
+EXPORT_SYMBOL_GPL(drm_gem_cma_free_object);
+
+/*
+ * drm_gem_cma_dumb_create - (struct drm_driver)->dumb_create callback
+ * function
+ *
+ * This aligns the pitch and size arguments to the minimum required. Wrap
+ * it in your own function if you need bigger alignment.
+ */
+int drm_gem_cma_dumb_create(struct drm_file *file_priv,
+		struct drm_device *dev, struct drm_mode_create_dumb *args)
+{
+	struct drm_gem_cma_object *cma_obj;
+	int min_pitch = DIV_ROUND_UP(args->width * args->bpp, 8);
+
+	if (args->pitch < min_pitch)
+		args->pitch = min_pitch;
+
+	if (args->size < args->pitch * args->height)
+		args->size = args->pitch * args->height;
+
+	cma_obj = drm_gem_cma_create_with_handle(file_priv, dev,
+			args->size, &args->handle);
+	if (IS_ERR(cma_obj))
+		return PTR_ERR(cma_obj);
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(drm_gem_cma_dumb_create);
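+
+/*
+ * Worked example for the alignment above: a 1024x768 request at 32 bpp
+ * yields min_pitch = (1024 * 32 + 7) / 8 = 4096 bytes and a size of at
+ * least 4096 * 768 = 3145728 bytes.
+ */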
+
+/*
+ * drm_gem_cma_dumb_map_offset - (struct drm_driver)->dumb_map_offset callback
+ * function
+ */
+int drm_gem_cma_dumb_map_offset(struct drm_file *file_priv,
+		struct drm_device *drm, uint32_t handle, uint64_t *offset)
+{
+	struct drm_gem_object *gem_obj;
+
+	mutex_lock(&drm->struct_mutex);
+
+	gem_obj = drm_gem_object_lookup(drm, file_priv, handle);
+	if (!gem_obj) {
+		dev_err(drm->dev, "failed to lookup gem object\n");
+		mutex_unlock(&drm->struct_mutex);
+		return -EINVAL;
+	}
+
+	*offset = get_gem_mmap_offset(gem_obj);
+
+	drm_gem_object_unreference(gem_obj);
+
+	mutex_unlock(&drm->struct_mutex);
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(drm_gem_cma_dumb_map_offset);
+
+const struct vm_operations_struct drm_gem_cma_vm_ops = {
+	.open = drm_gem_vm_open,
+	.close = drm_gem_vm_close,
+};
+EXPORT_SYMBOL_GPL(drm_gem_cma_vm_ops);
+
+/*
+ * drm_gem_cma_mmap - (struct file_operation)->mmap callback function
+ */
+int drm_gem_cma_mmap(struct file *filp, struct vm_area_struct *vma)
+{
+	struct drm_gem_object *gem_obj;
+	struct drm_gem_cma_object *cma_obj;
+	int ret;
+
+	ret = drm_gem_mmap(filp, vma);
+	if (ret)
+		return ret;
+
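+	/*
+	 * drm_gem_mmap() has set up the VMA and stored the GEM object in
+	 * vm_private_data; since CMA buffers are physically contiguous, the
+	 * whole range can be remapped up-front instead of faulted in page
+	 * by page.
+	 */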
+	gem_obj = vma->vm_private_data;
+	cma_obj = to_drm_gem_cma_obj(gem_obj);
+
+	ret = remap_pfn_range(vma, vma->vm_start, cma_obj->paddr >> PAGE_SHIFT,
+			vma->vm_end - vma->vm_start, vma->vm_page_prot);
+	if (ret)
+		drm_gem_vm_close(vma);
+
+	return ret;
+}
+EXPORT_SYMBOL_GPL(drm_gem_cma_mmap);
+
+/*
+ * drm_gem_cma_dumb_destroy - (struct drm_driver)->dumb_destroy callback function
+ */
+int drm_gem_cma_dumb_destroy(struct drm_file *file_priv,
+		struct drm_device *drm, unsigned int handle)
+{
+	return drm_gem_handle_delete(file_priv, handle);
+}
+EXPORT_SYMBOL_GPL(drm_gem_cma_dumb_destroy);
diff --git a/drivers/gpu/drm/drm_irq.c b/drivers/gpu/drm/drm_irq.c
index 03f16f3..076c4a8 100644
--- a/drivers/gpu/drm/drm_irq.c
+++ b/drivers/gpu/drm/drm_irq.c
@@ -1236,7 +1236,7 @@
 	return ret;
 }
 
-void drm_handle_vblank_events(struct drm_device *dev, int crtc)
+static void drm_handle_vblank_events(struct drm_device *dev, int crtc)
 {
 	struct drm_pending_vblank_event *e, *t;
 	struct timeval now;
diff --git a/drivers/gpu/drm/drm_vm.c b/drivers/gpu/drm/drm_vm.c
index 961ee08..6fed215 100644
--- a/drivers/gpu/drm/drm_vm.c
+++ b/drivers/gpu/drm/drm_vm.c
@@ -62,7 +62,7 @@
 		tmp = pgprot_writecombine(tmp);
 	else
 		tmp = pgprot_noncached(tmp);
-#elif defined(__sparc__) || defined(__arm__)
+#elif defined(__sparc__) || defined(__arm__) || defined(__mips__)
 	tmp = pgprot_noncached(tmp);
 #endif
 	return tmp;
@@ -619,20 +619,11 @@
 		offset = drm_core_get_reg_ofs(dev);
 		vma->vm_flags |= VM_IO;	/* not in core dump */
 		vma->vm_page_prot = drm_io_prot(map->type, vma);
-#if !defined(__arm__)
 		if (io_remap_pfn_range(vma, vma->vm_start,
 				       (map->offset + offset) >> PAGE_SHIFT,
 				       vma->vm_end - vma->vm_start,
 				       vma->vm_page_prot))
 			return -EAGAIN;
-#else
-		if (remap_pfn_range(vma, vma->vm_start,
-					(map->offset + offset) >> PAGE_SHIFT,
-					vma->vm_end - vma->vm_start,
-					vma->vm_page_prot))
-			return -EAGAIN;
-#endif
-
 		DRM_DEBUG("   Type = %d; start = 0x%lx, end = 0x%lx,"
 			  " offset = 0x%llx\n",
 			  map->type,
diff --git a/drivers/gpu/drm/exynos/exynos_drm_connector.c b/drivers/gpu/drm/exynos/exynos_drm_connector.c
index d956819..9dce3b9 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_connector.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_connector.c
@@ -147,9 +147,7 @@
 
 		drm_mode_connector_update_edid_property(connector, edid);
 		count = drm_add_edid_modes(connector, edid);
-
-		kfree(connector->display_info.raw_edid);
-		connector->display_info.raw_edid = edid;
+		kfree(edid);
 	} else {
 		struct drm_display_mode *mode = drm_mode_create(connector->dev);
 		struct exynos_drm_panel_info *panel;
diff --git a/drivers/gpu/drm/exynos/exynos_drm_vidi.c b/drivers/gpu/drm/exynos/exynos_drm_vidi.c
index 537027a..e364165 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_vidi.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_vidi.c
@@ -102,7 +102,6 @@
 				u8 *edid, int len)
 {
 	struct vidi_context *ctx = get_vidi_context(dev);
-	struct edid *raw_edid;
 
 	DRM_DEBUG_KMS("%s\n", __FILE__);
 
@@ -115,18 +114,6 @@
 		return -EFAULT;
 	}
 
-	raw_edid = kzalloc(len, GFP_KERNEL);
-	if (!raw_edid) {
-		DRM_DEBUG_KMS("failed to allocate raw_edid.\n");
-		return -ENOMEM;
-	}
-
-	memcpy(raw_edid, ctx->raw_edid, min((1 + ctx->raw_edid->extensions)
-						* EDID_LENGTH, len));
-
-	/* attach the edid data to connector. */
-	connector->display_info.raw_edid = (char *)raw_edid;
-
 	memcpy(edid, ctx->raw_edid, min((1 + ctx->raw_edid->extensions)
 					* EDID_LENGTH, len));
 
diff --git a/drivers/gpu/drm/gma500/Makefile b/drivers/gpu/drm/gma500/Makefile
index abfa2a93..7a2d40a 100644
--- a/drivers/gpu/drm/gma500/Makefile
+++ b/drivers/gpu/drm/gma500/Makefile
@@ -3,7 +3,7 @@
 #
 ccflags-y += -I$(srctree)/include/drm
 
-gma500_gfx-y += gem_glue.o \
+gma500_gfx-y += \
 	  accel_2d.o \
 	  backlight.o \
 	  framebuffer.o \
@@ -30,7 +30,8 @@
 	  cdv_intel_crt.o \
 	  cdv_intel_display.o \
 	  cdv_intel_hdmi.o \
-	  cdv_intel_lvds.o
+	  cdv_intel_lvds.o \
+	  cdv_intel_dp.o
 
 gma500_gfx-$(CONFIG_DRM_GMA600) += oaktrail_device.o \
 	  oaktrail_crtc.o \
diff --git a/drivers/gpu/drm/gma500/backlight.c b/drivers/gpu/drm/gma500/backlight.c
index 2079395..143eba3 100644
--- a/drivers/gpu/drm/gma500/backlight.c
+++ b/drivers/gpu/drm/gma500/backlight.c
@@ -26,10 +26,55 @@
 #include "intel_bios.h"
 #include "power.h"
 
+static void do_gma_backlight_set(struct drm_device *dev)
+{
+#ifdef CONFIG_BACKLIGHT_CLASS_DEVICE
+	struct drm_psb_private *dev_priv = dev->dev_private;
+	backlight_update_status(dev_priv->backlight_device);
+#endif
+}
+
+void gma_backlight_enable(struct drm_device *dev)
+{
+#ifdef CONFIG_BACKLIGHT_CLASS_DEVICE
+	struct drm_psb_private *dev_priv = dev->dev_private;
+	dev_priv->backlight_enabled = true;
+	if (dev_priv->backlight_device) {
+		dev_priv->backlight_device->props.brightness = dev_priv->backlight_level;
+		do_gma_backlight_set(dev);
+	}
+#endif
+}
+
+void gma_backlight_disable(struct drm_device *dev)
+{
+#ifdef CONFIG_BACKLIGHT_CLASS_DEVICE
+	struct drm_psb_private *dev_priv = dev->dev_private;
+	dev_priv->backlight_enabled = false;
+	if (dev_priv->backlight_device) {
+		dev_priv->backlight_device->props.brightness = 0;
+		do_gma_backlight_set(dev);
+	}
+#endif
+}
+
+void gma_backlight_set(struct drm_device *dev, int v)
+{
+#ifdef CONFIG_BACKLIGHT_CLASS_DEVICE
+	struct drm_psb_private *dev_priv = dev->dev_private;
+	dev_priv->backlight_level = v;
+	if (dev_priv->backlight_device && dev_priv->backlight_enabled) {
+		dev_priv->backlight_device->props.brightness = v;
+		do_gma_backlight_set(dev);
+	}
+#endif
+}
+
 int gma_backlight_init(struct drm_device *dev)
 {
 #ifdef CONFIG_BACKLIGHT_CLASS_DEVICE
 	struct drm_psb_private *dev_priv = dev->dev_private;
+	dev_priv->backlight_enabled = true;
 	return dev_priv->ops->backlight_init(dev);
 #else
 	return 0;
diff --git a/drivers/gpu/drm/gma500/cdv_device.c b/drivers/gpu/drm/gma500/cdv_device.c
index b7e7b49..bfc2f39 100644
--- a/drivers/gpu/drm/gma500/cdv_device.c
+++ b/drivers/gpu/drm/gma500/cdv_device.c
@@ -58,10 +58,17 @@
 	cdv_intel_lvds_init(dev, &dev_priv->mode_dev);
 
 	/* These bits indicate HDMI not SDVO on CDV */
-	if (REG_READ(SDVOB) & SDVO_DETECTED)
+	if (REG_READ(SDVOB) & SDVO_DETECTED) {
 		cdv_hdmi_init(dev, &dev_priv->mode_dev, SDVOB);
-	if (REG_READ(SDVOC) & SDVO_DETECTED)
+		if (REG_READ(DP_B) & DP_DETECTED)
+			cdv_intel_dp_init(dev, &dev_priv->mode_dev, DP_B);
+	}
+
+	if (REG_READ(SDVOC) & SDVO_DETECTED) {
 		cdv_hdmi_init(dev, &dev_priv->mode_dev, SDVOC);
+		if (REG_READ(DP_C) & DP_DETECTED)
+			cdv_intel_dp_init(dev, &dev_priv->mode_dev, DP_C);
+	}
 	return 0;
 }
 
@@ -163,6 +170,7 @@
 			cdv_get_brightness(cdv_backlight_device);
 	backlight_update_status(cdv_backlight_device);
 	dev_priv->backlight_device = cdv_backlight_device;
+	dev_priv->backlight_enabled = true;
 	return 0;
 }
 
@@ -449,6 +457,7 @@
 	case 6:
 	case 7:
 		dev_priv->core_freq = 266;
+		break;
 	default:
 		dev_priv->core_freq = 0;
 	}
@@ -488,6 +497,65 @@
 	}	
 }
 
+static const char *force_audio_names[] = {
+	"off",
+	"auto",
+	"on",
+};
+
+void cdv_intel_attach_force_audio_property(struct drm_connector *connector)
+{
+	struct drm_device *dev = connector->dev;
+	struct drm_psb_private *dev_priv = dev->dev_private;
+	struct drm_property *prop;
+	int i;
+
+	prop = dev_priv->force_audio_property;
+	if (prop == NULL) {
+		prop = drm_property_create(dev, DRM_MODE_PROP_ENUM,
+					   "audio",
+					   ARRAY_SIZE(force_audio_names));
+		if (prop == NULL)
+			return;
+
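+		/*
+		 * The enum values are shifted by one so that "off", "auto"
+		 * and "on" map to -1, 0 and 1 respectively.
+		 */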
+		for (i = 0; i < ARRAY_SIZE(force_audio_names); i++)
+			drm_property_add_enum(prop, i, i-1, force_audio_names[i]);
+
+		dev_priv->force_audio_property = prop;
+	}
+	drm_connector_attach_property(connector, prop, 0);
+}
+
+
+static const char *broadcast_rgb_names[] = {
+	"Full",
+	"Limited 16:235",
+};
+
+void cdv_intel_attach_broadcast_rgb_property(struct drm_connector *connector)
+{
+	struct drm_device *dev = connector->dev;
+	struct drm_psb_private *dev_priv = dev->dev_private;
+	struct drm_property *prop;
+	int i;
+
+	prop = dev_priv->broadcast_rgb_property;
+	if (prop == NULL) {
+		prop = drm_property_create(dev, DRM_MODE_PROP_ENUM,
+					   "Broadcast RGB",
+					   ARRAY_SIZE(broadcast_rgb_names));
+		if (prop == NULL)
+			return;
+
+		for (i = 0; i < ARRAY_SIZE(broadcast_rgb_names); i++)
+			drm_property_add_enum(prop, i, i, broadcast_rgb_names[i]);
+
+		dev_priv->broadcast_rgb_property = prop;
+	}
+
+	drm_connector_attach_property(connector, prop, 0);
+}
+
 /* Cedarview */
 static const struct psb_offset cdv_regmap[2] = {
 	{
diff --git a/drivers/gpu/drm/gma500/cdv_intel_display.c b/drivers/gpu/drm/gma500/cdv_intel_display.c
index a68509b..3cfd093 100644
--- a/drivers/gpu/drm/gma500/cdv_intel_display.c
+++ b/drivers/gpu/drm/gma500/cdv_intel_display.c
@@ -57,15 +57,26 @@
 struct cdv_intel_limit_t {
 	struct cdv_intel_range_t dot, vco, n, m, m1, m2, p, p1;
 	struct cdv_intel_p2_t p2;
+	bool (*find_pll)(const struct cdv_intel_limit_t *, struct drm_crtc *,
+			int, int, struct cdv_intel_clock_t *);
 };
 
+static bool cdv_intel_find_best_PLL(const struct cdv_intel_limit_t *limit,
+	struct drm_crtc *crtc, int target, int refclk,
+	struct cdv_intel_clock_t *best_clock);
+static bool cdv_intel_find_dp_pll(const struct cdv_intel_limit_t *limit, struct drm_crtc *crtc, int target,
+				int refclk,
+				struct cdv_intel_clock_t *best_clock);
+
 #define CDV_LIMIT_SINGLE_LVDS_96	0
 #define CDV_LIMIT_SINGLE_LVDS_100	1
 #define CDV_LIMIT_DAC_HDMI_27		2
 #define CDV_LIMIT_DAC_HDMI_96		3
+#define CDV_LIMIT_DP_27			4
+#define CDV_LIMIT_DP_100		5
 
 static const struct cdv_intel_limit_t cdv_intel_limits[] = {
-	{			/* CDV_SIGNLE_LVDS_96MHz */
+	{			/* CDV_SINGLE_LVDS_96MHz */
 	 .dot = {.min = 20000, .max = 115500},
 	 .vco = {.min = 1800000, .max = 3600000},
 	 .n = {.min = 2, .max = 6},
@@ -76,6 +87,7 @@
 	 .p1 = {.min = 2, .max = 10},
 	 .p2 = {.dot_limit = 200000,
 		.p2_slow = 14, .p2_fast = 14},
+		.find_pll = cdv_intel_find_best_PLL,
 	 },
 	{			/* CDV_SINGLE_LVDS_100MHz */
 	 .dot = {.min = 20000, .max = 115500},
@@ -90,6 +102,7 @@
 	  * is 80-224Mhz.  Prefer single channel as much as possible.
 	  */
 	 .p2 = {.dot_limit = 200000, .p2_slow = 14, .p2_fast = 14},
+	.find_pll = cdv_intel_find_best_PLL,
 	 },
 	{			/* CDV_DAC_HDMI_27MHz */
 	 .dot = {.min = 20000, .max = 400000},
@@ -101,6 +114,7 @@
 	 .p = {.min = 5, .max = 90},
 	 .p1 = {.min = 1, .max = 9},
 	 .p2 = {.dot_limit = 225000, .p2_slow = 10, .p2_fast = 5},
+	.find_pll = cdv_intel_find_best_PLL,
 	 },
 	{			/* CDV_DAC_HDMI_96MHz */
 	 .dot = {.min = 20000, .max = 400000},
@@ -112,7 +126,32 @@
 	 .p = {.min = 5, .max = 100},
 	 .p1 = {.min = 1, .max = 10},
 	 .p2 = {.dot_limit = 225000, .p2_slow = 10, .p2_fast = 5},
+	.find_pll = cdv_intel_find_best_PLL,
 	 },
+	{			/* CDV_DP_27MHz */
+	 .dot = {.min = 160000, .max = 272000},
+	 .vco = {.min = 1809000, .max = 3564000},
+	 .n = {.min = 1, .max = 1},
+	 .m = {.min = 67, .max = 132},
+	 .m1 = {.min = 0, .max = 0},
+	 .m2 = {.min = 65, .max = 130},
+	 .p = {.min = 5, .max = 90},
+	 .p1 = {.min = 1, .max = 9},
+	 .p2 = {.dot_limit = 225000, .p2_slow = 10, .p2_fast = 10},
+	 .find_pll = cdv_intel_find_dp_pll,
+	 },
+	{			/* CDV_DP_100MHz */
+	 .dot = {.min = 160000, .max = 272000},
+	 .vco = {.min = 1800000, .max = 3600000},
+	 .n = {.min = 2, .max = 6},
+	 .m = {.min = 60, .max = 164},
+	 .m1 = {.min = 0, .max = 0},
+	 .m2 = {.min = 58, .max = 162},
+	 .p = {.min = 5, .max = 100},
+	 .p1 = {.min = 1, .max = 10},
+	 .p2 = {.dot_limit = 225000, .p2_slow = 10, .p2_fast = 10},
+	 .find_pll = cdv_intel_find_dp_pll,
+	 }
 };
 
 #define _wait_for(COND, MS, W) ({ \
@@ -132,7 +171,7 @@
 #define wait_for(COND, MS) _wait_for(COND, MS, 1)
 
 
-static int cdv_sb_read(struct drm_device *dev, u32 reg, u32 *val)
+int cdv_sb_read(struct drm_device *dev, u32 reg, u32 *val)
 {
 	int ret;
 
@@ -159,7 +198,7 @@
 	return 0;
 }
 
-static int cdv_sb_write(struct drm_device *dev, u32 reg, u32 val)
+int cdv_sb_write(struct drm_device *dev, u32 reg, u32 val)
 {
 	int ret;
 	static bool dpio_debug = true;
@@ -201,7 +240,7 @@
 /* Reset the DPIO configuration register.  The BIOS does this at every
  * mode set.
  */
-static void cdv_sb_reset(struct drm_device *dev)
+void cdv_sb_reset(struct drm_device *dev)
 {
 
 	REG_WRITE(DPIO_CFG, 0);
@@ -216,7 +255,7 @@
  */
 static int
 cdv_dpll_set_clock_cdv(struct drm_device *dev, struct drm_crtc *crtc,
-			       struct cdv_intel_clock_t *clock, bool is_lvds)
+			       struct cdv_intel_clock_t *clock, bool is_lvds, u32 ddi_select)
 {
 	struct psb_intel_crtc *psb_crtc = to_psb_intel_crtc(crtc);
 	int pipe = psb_crtc->pipe;
@@ -259,7 +298,7 @@
 	ref_value &= ~(REF_CLK_MASK);
 
 	/* use DPLL_A for pipeB on CRT/HDMI */
-	if (pipe == 1 && !is_lvds) {
+	if (pipe == 1 && !is_lvds && !(ddi_select & DP_MASK)) {
 		DRM_DEBUG_KMS("use DPLLA for pipe B\n");
 		ref_value |= REF_CLK_DPLLA;
 	} else {
@@ -336,30 +375,33 @@
 	if (ret)
 		return ret;
 
-	lane_reg = PSB_LANE0;
-	cdv_sb_read(dev, lane_reg, &lane_value);
-	lane_value &= ~(LANE_PLL_MASK);
-	lane_value |= LANE_PLL_ENABLE | LANE_PLL_PIPE(pipe);
-	cdv_sb_write(dev, lane_reg, lane_value);
+	if (ddi_select) {
+		if ((ddi_select & DDI_MASK) == DDI0_SELECT) {
+			lane_reg = PSB_LANE0;
+			cdv_sb_read(dev, lane_reg, &lane_value);
+			lane_value &= ~(LANE_PLL_MASK);
+			lane_value |= LANE_PLL_ENABLE | LANE_PLL_PIPE(pipe);
+			cdv_sb_write(dev, lane_reg, lane_value);
 
-	lane_reg = PSB_LANE1;
-	cdv_sb_read(dev, lane_reg, &lane_value);
-	lane_value &= ~(LANE_PLL_MASK);
-	lane_value |= LANE_PLL_ENABLE | LANE_PLL_PIPE(pipe);
-	cdv_sb_write(dev, lane_reg, lane_value);
+			lane_reg = PSB_LANE1;
+			cdv_sb_read(dev, lane_reg, &lane_value);
+			lane_value &= ~(LANE_PLL_MASK);
+			lane_value |= LANE_PLL_ENABLE | LANE_PLL_PIPE(pipe);
+			cdv_sb_write(dev, lane_reg, lane_value);
+		} else {
+			lane_reg = PSB_LANE2;
+			cdv_sb_read(dev, lane_reg, &lane_value);
+			lane_value &= ~(LANE_PLL_MASK);
+			lane_value |= LANE_PLL_ENABLE | LANE_PLL_PIPE(pipe);
+			cdv_sb_write(dev, lane_reg, lane_value);
 
-	lane_reg = PSB_LANE2;
-	cdv_sb_read(dev, lane_reg, &lane_value);
-	lane_value &= ~(LANE_PLL_MASK);
-	lane_value |= LANE_PLL_ENABLE | LANE_PLL_PIPE(pipe);
-	cdv_sb_write(dev, lane_reg, lane_value);
-
-	lane_reg = PSB_LANE3;
-	cdv_sb_read(dev, lane_reg, &lane_value);
-	lane_value &= ~(LANE_PLL_MASK);
-	lane_value |= LANE_PLL_ENABLE | LANE_PLL_PIPE(pipe);
-	cdv_sb_write(dev, lane_reg, lane_value);
-
+			lane_reg = PSB_LANE3;
+			cdv_sb_read(dev, lane_reg, &lane_value);
+			lane_value &= ~(LANE_PLL_MASK);
+			lane_value |= LANE_PLL_ENABLE | LANE_PLL_PIPE(pipe);
+			cdv_sb_write(dev, lane_reg, lane_value);
+		}
+	}
 	return 0;
 }
 
@@ -396,6 +438,12 @@
 			limit = &cdv_intel_limits[CDV_LIMIT_SINGLE_LVDS_96];
 		else
 			limit = &cdv_intel_limits[CDV_LIMIT_SINGLE_LVDS_100];
+	} else if (psb_intel_pipe_has_type(crtc, INTEL_OUTPUT_DISPLAYPORT) ||
+			psb_intel_pipe_has_type(crtc, INTEL_OUTPUT_EDP)) {
+		if (refclk == 27000)
+			limit = &cdv_intel_limits[CDV_LIMIT_DP_27];
+		else
+			limit = &cdv_intel_limits[CDV_LIMIT_DP_100];
 	} else {
 		if (refclk == 27000)
 			limit = &cdv_intel_limits[CDV_LIMIT_DAC_HDMI_27];
@@ -438,13 +486,12 @@
 	return true;
 }
 
-static bool cdv_intel_find_best_PLL(struct drm_crtc *crtc, int target,
-				int refclk,
-				struct cdv_intel_clock_t *best_clock)
+static bool cdv_intel_find_best_PLL(const struct cdv_intel_limit_t *limit,
+	struct drm_crtc *crtc, int target, int refclk,
+	struct cdv_intel_clock_t *best_clock)
 {
 	struct drm_device *dev = crtc->dev;
 	struct cdv_intel_clock_t clock;
-	const struct cdv_intel_limit_t *limit = cdv_intel_limit(crtc, refclk);
 	int err = target;
 
 
@@ -498,6 +545,49 @@
 	return err != target;
 }
 
+static bool cdv_intel_find_dp_pll(const struct cdv_intel_limit_t *limit, struct drm_crtc *crtc, int target,
+				int refclk,
+				struct cdv_intel_clock_t *best_clock)
+{
+	struct cdv_intel_clock_t clock;
+	if (refclk == 27000) {
+		if (target < 200000) {
+			clock.p1 = 2;
+			clock.p2 = 10;
+			clock.n = 1;
+			clock.m1 = 0;
+			clock.m2 = 118;
+		} else {
+			clock.p1 = 1;
+			clock.p2 = 10;
+			clock.n = 1;
+			clock.m1 = 0;
+			clock.m2 = 98;
+		}
+	} else if (refclk == 100000) {
+		if (target < 200000) {
+			clock.p1 = 2;
+			clock.p2 = 10;
+			clock.n = 5;
+			clock.m1 = 0;
+			clock.m2 = 160;
+		} else {
+			clock.p1 = 1;
+			clock.p2 = 10;
+			clock.n = 5;
+			clock.m1 = 0;
+			clock.m2 = 133;
+		}
+	} else
+		return false;
+	clock.m = clock.m2 + 2;
+	clock.p = clock.p1 * clock.p2;
+	clock.vco = (refclk * clock.m) / clock.n;
+	clock.dot = clock.vco / clock.p;
+	memcpy(best_clock, &clock, sizeof(struct cdv_intel_clock_t));
+	return true;
+}
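+
+/*
+ * Sanity check of the fixed dividers above: for refclk == 27000 and
+ * target < 200000, m = m2 + 2 = 120, vco = 27000 * 120 / 1 = 3240000 and
+ * dot = 3240000 / (2 * 10) = 162000 kHz, the symbol clock of the
+ * 1.62 Gbps DP link rate; the other three cases likewise come out at
+ * 162000 or 270000 kHz.
+ */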
+
 static int cdv_intel_pipe_set_base(struct drm_crtc *crtc,
 			    int x, int y, struct drm_framebuffer *old_fb)
 {
@@ -791,7 +881,7 @@
 	case DRM_MODE_DPMS_STANDBY:
 	case DRM_MODE_DPMS_SUSPEND:
 		if (psb_intel_crtc->active)
-			return;
+			break;
 
 		psb_intel_crtc->active = true;
 
@@ -835,17 +925,15 @@
 		REG_WRITE(map->status, temp);
 		REG_READ(map->status);
 
-		cdv_intel_update_watermark(dev, crtc);
 		cdv_intel_crtc_load_lut(crtc);
 
 		/* Give the overlay scaler a chance to enable
 		 * if it's on this pipe */
 		/* psb_intel_crtc_dpms_video(crtc, true); TODO */
-		psb_intel_crtc->crtc_enable = true;
 		break;
 	case DRM_MODE_DPMS_OFF:
 		if (!psb_intel_crtc->active)
-			return;
+			break;
 
 		psb_intel_crtc->active = false;
 
@@ -892,10 +980,9 @@
 
 		/* Wait for the clocks to turn off. */
 		udelay(150);
-		cdv_intel_update_watermark(dev, crtc);
-		psb_intel_crtc->crtc_enable = false;
 		break;
 	}
+	cdv_intel_update_watermark(dev, crtc);
 	/*Set FIFO Watermarks*/
 	REG_WRITE(DSPARB, 0x3F3E);
 }
@@ -952,9 +1039,12 @@
 	u32 dpll = 0, dspcntr, pipeconf;
 	bool ok;
 	bool is_crt = false, is_lvds = false, is_tv = false;
-	bool is_hdmi = false;
+	bool is_hdmi = false, is_dp = false;
 	struct drm_mode_config *mode_config = &dev->mode_config;
 	struct drm_connector *connector;
+	const struct cdv_intel_limit_t *limit;
+	u32 ddi_select = 0;
+	bool is_edp = false;
 
 	list_for_each_entry(connector, &mode_config->connector_list, head) {
 		struct psb_intel_encoder *psb_intel_encoder =
@@ -964,6 +1054,7 @@
 		    || connector->encoder->crtc != crtc)
 			continue;
 
+		ddi_select = psb_intel_encoder->ddi_select;
 		switch (psb_intel_encoder->type) {
 		case INTEL_OUTPUT_LVDS:
 			is_lvds = true;
@@ -977,6 +1068,15 @@
 		case INTEL_OUTPUT_HDMI:
 			is_hdmi = true;
 			break;
+		case INTEL_OUTPUT_DISPLAYPORT:
+			is_dp = true;
+			break;
+		case INTEL_OUTPUT_EDP:
+			is_edp = true;
+			break;
+		default:
+			DRM_ERROR("invalid output type.\n");
+			return 0;
 		}
 	}
 
@@ -986,6 +1086,20 @@
 	else
 		/* high-end sku, 27/100 mhz */
 		refclk = 27000;
+	if (is_dp || is_edp) {
+		/*
+		 * Based on the spec, the low-end SKU has only CRT/LVDS, so
+		 * it is unnecessary to consider it for DP/eDP. The high-end
+		 * SKU uses a 27/100 MHz reference clock for DP/eDP: 100 MHz
+		 * when using the SSC clock, 27 MHz otherwise. From the VBIOS
+		 * code it seems that pipe A chooses 27 MHz for DP/eDP while
+		 * pipe B chooses 100 MHz.
+		 */
+		if (pipe == 0)
+			refclk = 27000;
+		else
+			refclk = 100000;
+	}
 
 	if (is_lvds && dev_priv->lvds_use_ssc) {
 		refclk = dev_priv->lvds_ssc_freq * 1000;
@@ -993,8 +1107,10 @@
 	}
 
 	drm_mode_debug_printmodeline(adjusted_mode);
+
+	limit = cdv_intel_limit(crtc, refclk);
 
-	ok = cdv_intel_find_best_PLL(crtc, adjusted_mode->clock, refclk,
+	ok = limit->find_pll(limit, crtc, adjusted_mode->clock, refclk,
 				 &clock);
 	if (!ok) {
 		dev_err(dev->dev, "Couldn't find PLL settings for mode!\n");
@@ -1009,6 +1125,15 @@
 	}
 /*		dpll |= PLL_REF_INPUT_DREFCLK; */
 
+	if (is_dp || is_edp) {
+		cdv_intel_dp_set_m_n(crtc, mode, adjusted_mode);
+	} else {
+		REG_WRITE(PIPE_GMCH_DATA_M(pipe), 0);
+		REG_WRITE(PIPE_GMCH_DATA_N(pipe), 0);
+		REG_WRITE(PIPE_DP_LINK_M(pipe), 0);
+		REG_WRITE(PIPE_DP_LINK_N(pipe), 0);
+	}
+
 	dpll |= DPLL_SYNCLOCK_ENABLE;
 /*	if (is_lvds)
 		dpll |= DPLLB_MODE_LVDS;
@@ -1019,6 +1144,31 @@
 	/* setup pipeconf */
 	pipeconf = REG_READ(map->conf);
 
+	pipeconf &= ~(PIPE_BPC_MASK);
+	if (is_edp) {
+		switch (dev_priv->edp.bpp) {
+		case 24:
+			pipeconf |= PIPE_8BPC;
+			break;
+		case 18:
+			pipeconf |= PIPE_6BPC;
+			break;
+		case 30:
+			pipeconf |= PIPE_10BPC;
+			break;
+		default:
+			pipeconf |= PIPE_8BPC;
+			break;
+		}
+	} else if (is_lvds) {
+		/* the BPC will be 6 if it is 18-bit LVDS panel */
+		if ((REG_READ(LVDS) & LVDS_A3_POWER_MASK) == LVDS_A3_POWER_UP)
+			pipeconf |= PIPE_8BPC;
+		else
+			pipeconf |= PIPE_6BPC;
+	} else
+		pipeconf |= PIPE_8BPC;
+
 	/* Set up the display plane register */
 	dspcntr = DISPPLANE_GAMMA_ENABLE;
 
@@ -1033,7 +1183,7 @@
 	REG_WRITE(map->dpll, dpll | DPLL_VGA_MODE_DIS | DPLL_SYNCLOCK_ENABLE);
 	REG_READ(map->dpll);
 
-	cdv_dpll_set_clock_cdv(dev, crtc, &clock, is_lvds);
+	cdv_dpll_set_clock_cdv(dev, crtc, &clock, is_lvds, ddi_select);
 
 	udelay(150);
 
diff --git a/drivers/gpu/drm/gma500/cdv_intel_dp.c b/drivers/gpu/drm/gma500/cdv_intel_dp.c
new file mode 100644
index 0000000..c9abc06
--- /dev/null
+++ b/drivers/gpu/drm/gma500/cdv_intel_dp.c
@@ -0,0 +1,1951 @@
+/*
+ * Copyright © 2012 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ *
+ * Authors:
+ *    Keith Packard <keithp@keithp.com>
+ *
+ */
+
+#include <linux/i2c.h>
+#include <linux/slab.h>
+#include "drmP.h"
+#include "drm.h"
+#include "drm_crtc.h"
+#include "drm_crtc_helper.h"
+#include "psb_drv.h"
+#include "psb_intel_drv.h"
+#include "psb_intel_reg.h"
+#include "drm_dp_helper.h"
+
+#define _wait_for(COND, MS, W) ({ \
+        unsigned long timeout__ = jiffies + msecs_to_jiffies(MS);       \
+        int ret__ = 0;                                                  \
+        while (! (COND)) {                                              \
+                if (time_after(jiffies, timeout__)) {                   \
+                        ret__ = -ETIMEDOUT;                             \
+                        break;                                          \
+                }                                                       \
+                if (W && !in_dbg_master()) msleep(W);                   \
+        }                                                               \
+        ret__;                                                          \
+})
+
+#define wait_for(COND, MS) _wait_for(COND, MS, 1)
+
+#define DP_LINK_STATUS_SIZE	6
+#define DP_LINK_CHECK_TIMEOUT	(10 * 1000)
+
+#define DP_LINK_CONFIGURATION_SIZE	9
+
+#define CDV_FAST_LINK_TRAIN	1
+
+struct cdv_intel_dp {
+	uint32_t output_reg;
+	uint32_t DP;
+	uint8_t  link_configuration[DP_LINK_CONFIGURATION_SIZE];
+	bool has_audio;
+	int force_audio;
+	uint32_t color_range;
+	uint8_t link_bw;
+	uint8_t lane_count;
+	uint8_t dpcd[4];
+	struct psb_intel_encoder *encoder;
+	struct i2c_adapter adapter;
+	struct i2c_algo_dp_aux_data algo;
+	uint8_t	train_set[4];
+	uint8_t link_status[DP_LINK_STATUS_SIZE];
+	int panel_power_up_delay;
+	int panel_power_down_delay;
+	int panel_power_cycle_delay;
+	int backlight_on_delay;
+	int backlight_off_delay;
+	struct drm_display_mode *panel_fixed_mode;  /* for eDP */
+	bool panel_on;
+};
+
+struct ddi_regoff {
+	uint32_t	PreEmph1;
+	uint32_t	PreEmph2;
+	uint32_t	VSwing1;
+	uint32_t	VSwing2;
+	uint32_t	VSwing3;
+	uint32_t	VSwing4;
+	uint32_t	VSwing5;
+};
+
+static struct ddi_regoff ddi_DP_train_table[] = {
+	{.PreEmph1 = 0x812c, .PreEmph2 = 0x8124, .VSwing1 = 0x8154,
+	.VSwing2 = 0x8148, .VSwing3 = 0x814C, .VSwing4 = 0x8150,
+	.VSwing5 = 0x8158,},
+	{.PreEmph1 = 0x822c, .PreEmph2 = 0x8224, .VSwing1 = 0x8254,
+	.VSwing2 = 0x8248, .VSwing3 = 0x824C, .VSwing4 = 0x8250,
+	.VSwing5 = 0x8258,},
+};
+
+static uint32_t dp_vswing_premph_table[] = {
+        0x55338954,	0x4000,
+        0x554d8954,	0x2000,
+        0x55668954,	0,
+        0x559ac0d4,	0x6000,
+};
+/**
+ * is_edp - is the given encoder attached to an eDP panel
+ * @encoder: the encoder to check
+ *
+ * Returns true if the encoder drives an eDP panel, false otherwise.
+ */
+static bool is_edp(struct psb_intel_encoder *encoder)
+{
+	return encoder->type == INTEL_OUTPUT_EDP;
+}
+
+
+static void cdv_intel_dp_start_link_train(struct psb_intel_encoder *encoder);
+static void cdv_intel_dp_complete_link_train(struct psb_intel_encoder *encoder);
+static void cdv_intel_dp_link_down(struct psb_intel_encoder *encoder);
+
+static int
+cdv_intel_dp_max_lane_count(struct psb_intel_encoder *encoder)
+{
+	struct cdv_intel_dp *intel_dp = encoder->dev_priv;
+	int max_lane_count = 4;
+
+	if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11) {
+		max_lane_count = intel_dp->dpcd[DP_MAX_LANE_COUNT] & 0x1f;
+		switch (max_lane_count) {
+		case 1: case 2: case 4:
+			break;
+		default:
+			max_lane_count = 4;
+		}
+	}
+	return max_lane_count;
+}
+
+static int
+cdv_intel_dp_max_link_bw(struct psb_intel_encoder *encoder)
+{
+	struct cdv_intel_dp *intel_dp = encoder->dev_priv;
+	int max_link_bw = intel_dp->dpcd[DP_MAX_LINK_RATE];
+
+	switch (max_link_bw) {
+	case DP_LINK_BW_1_62:
+	case DP_LINK_BW_2_7:
+		break;
+	default:
+		max_link_bw = DP_LINK_BW_1_62;
+		break;
+	}
+	return max_link_bw;
+}
+
+static int
+cdv_intel_dp_link_clock(uint8_t link_bw)
+{
+	if (link_bw == DP_LINK_BW_2_7)
+		return 270000;
+	else
+		return 162000;
+}
+
+static int
+cdv_intel_dp_link_required(int pixel_clock, int bpp)
+{
+	return (pixel_clock * bpp + 7) / 8;
+}
+
+static int
+cdv_intel_dp_max_data_rate(int max_link_clock, int max_lanes)
+{
+	return (max_link_clock * max_lanes * 19) / 20;
+}
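+
+/*
+ * Each lane carries one data byte per link (symbol) clock after 8b/10b
+ * coding; the 19/20 factor leaves a 5% margin. E.g. a 2.7 GHz link on
+ * four lanes gives 270000 * 4 * 19 / 20 = 1026000, well above the
+ * 148500 * 24 / 8 = 445500 that 1080p at 24 bpp requires.
+ */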
+
+static void cdv_intel_edp_panel_vdd_on(struct psb_intel_encoder *intel_encoder)
+{
+	struct drm_device *dev = intel_encoder->base.dev;
+	struct cdv_intel_dp *intel_dp = intel_encoder->dev_priv;
+	u32 pp;
+
+	if (intel_dp->panel_on) {
+		DRM_DEBUG_KMS("Skip VDD on because of panel on\n");
+		return;
+	}	
+	DRM_DEBUG_KMS("\n");
+
+	pp = REG_READ(PP_CONTROL);
+
+	pp |= EDP_FORCE_VDD;
+	REG_WRITE(PP_CONTROL, pp);
+	REG_READ(PP_CONTROL);
+	msleep(intel_dp->panel_power_up_delay);
+}
+
+static void cdv_intel_edp_panel_vdd_off(struct psb_intel_encoder *intel_encoder)
+{
+	struct drm_device *dev = intel_encoder->base.dev;
+	u32 pp;
+
+	DRM_DEBUG_KMS("\n");
+	pp = REG_READ(PP_CONTROL);
+
+	pp &= ~EDP_FORCE_VDD;
+	REG_WRITE(PP_CONTROL, pp);
+	REG_READ(PP_CONTROL);
+
+}
+
+/* Returns true if the panel was already on when called */
+static bool cdv_intel_edp_panel_on(struct psb_intel_encoder *intel_encoder)
+{
+	struct drm_device *dev = intel_encoder->base.dev;
+	struct cdv_intel_dp *intel_dp = intel_encoder->dev_priv;
+	u32 pp, idle_on_mask = PP_ON | PP_SEQUENCE_NONE;
+
+	if (intel_dp->panel_on)
+		return true;
+
+	DRM_DEBUG_KMS("\n");
+	pp = REG_READ(PP_CONTROL);
+	pp &= ~PANEL_UNLOCK_MASK;
+
+	pp |= (PANEL_UNLOCK_REGS | POWER_TARGET_ON);
+	REG_WRITE(PP_CONTROL, pp);
+	REG_READ(PP_CONTROL);
+
+	if (wait_for(((REG_READ(PP_STATUS) & idle_on_mask) == idle_on_mask), 1000)) {
+		DRM_DEBUG_KMS("Error in Powering up eDP panel, status %x\n", REG_READ(PP_STATUS));
+		intel_dp->panel_on = false;
+	} else
+		intel_dp->panel_on = true;	
+	msleep(intel_dp->panel_power_up_delay);
+
+	return false;
+}
+
+static void cdv_intel_edp_panel_off(struct psb_intel_encoder *intel_encoder)
+{
+	struct drm_device *dev = intel_encoder->base.dev;
+	u32 pp, idle_off_mask = PP_ON;
+	struct cdv_intel_dp *intel_dp = intel_encoder->dev_priv;
+
+	DRM_DEBUG_KMS("\n");
+
+	pp = REG_READ(PP_CONTROL);
+
+	if ((pp & POWER_TARGET_ON) == 0) 
+		return;
+
+	intel_dp->panel_on = false;
+	pp &= ~PANEL_UNLOCK_MASK;
+	/* ILK workaround: disable reset around power sequence */
+
+	pp &= ~POWER_TARGET_ON;
+	pp &= ~EDP_FORCE_VDD;
+	pp &= ~EDP_BLC_ENABLE;
+	REG_WRITE(PP_CONTROL, pp);
+	REG_READ(PP_CONTROL);
+	DRM_DEBUG_KMS("PP_STATUS %x\n", REG_READ(PP_STATUS));
+
+	if (wait_for((REG_READ(PP_STATUS) & idle_off_mask) == 0, 1000)) {
+		DRM_DEBUG_KMS("Error in turning off Panel\n");	
+	}
+
+	msleep(intel_dp->panel_power_cycle_delay);
+	DRM_DEBUG_KMS("Over\n");
+}
+
+static void cdv_intel_edp_backlight_on(struct psb_intel_encoder *intel_encoder)
+{
+	struct drm_device *dev = intel_encoder->base.dev;
+	u32 pp;
+
+	DRM_DEBUG_KMS("\n");
+	/*
+	 * If we enable the backlight right away following a panel power
+	 * on, we may see slight flicker as the panel syncs with the eDP
+	 * link.  So delay a bit to make sure the image is solid before
+	 * allowing it to appear.
+	 */
+	msleep(300);
+	pp = REG_READ(PP_CONTROL);
+
+	pp |= EDP_BLC_ENABLE;
+	REG_WRITE(PP_CONTROL, pp);
+	gma_backlight_enable(dev);
+}
+
+static void cdv_intel_edp_backlight_off(struct psb_intel_encoder *intel_encoder)
+{
+	struct drm_device *dev = intel_encoder->base.dev;
+	struct cdv_intel_dp *intel_dp = intel_encoder->dev_priv;
+	u32 pp;
+
+	DRM_DEBUG_KMS("\n");
+	gma_backlight_disable(dev);
+	msleep(10);
+	pp = REG_READ(PP_CONTROL);
+
+	pp &= ~EDP_BLC_ENABLE;
+	REG_WRITE(PP_CONTROL, pp);
+	msleep(intel_dp->backlight_off_delay);
+}
+
+static int
+cdv_intel_dp_mode_valid(struct drm_connector *connector,
+		    struct drm_display_mode *mode)
+{
+	struct psb_intel_encoder *encoder = psb_intel_attached_encoder(connector);
+	struct cdv_intel_dp *intel_dp = encoder->dev_priv;
+	int max_link_clock = cdv_intel_dp_link_clock(cdv_intel_dp_max_link_bw(encoder));
+	int max_lanes = cdv_intel_dp_max_lane_count(encoder);
+	struct drm_psb_private *dev_priv = connector->dev->dev_private;
+
+	if (is_edp(encoder) && intel_dp->panel_fixed_mode) {
+		if (mode->hdisplay > intel_dp->panel_fixed_mode->hdisplay)
+			return MODE_PANEL;
+		if (mode->vdisplay > intel_dp->panel_fixed_mode->vdisplay)
+			return MODE_PANEL;
+	}
+
+	/* only refuse the mode on non eDP since we have seen some weird eDP panels
+	   which are outside spec tolerances but somehow work by magic */
+	if (!is_edp(encoder) &&
+	    (cdv_intel_dp_link_required(mode->clock, dev_priv->edp.bpp)
+	     > cdv_intel_dp_max_data_rate(max_link_clock, max_lanes)))
+		return MODE_CLOCK_HIGH;
+
+	if (is_edp(encoder)) {
+	    if (cdv_intel_dp_link_required(mode->clock, 24)
+	     	> cdv_intel_dp_max_data_rate(max_link_clock, max_lanes))
+		return MODE_CLOCK_HIGH;
+		
+	}
+	if (mode->clock < 10000)
+		return MODE_CLOCK_LOW;
+
+	return MODE_OK;
+}
+
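+/*
+ * pack_aux()/unpack_aux() marshal bytes to and from the 32-bit AUX data
+ * registers MSB-first; e.g. packing the two bytes 0x12, 0x34 yields
+ * 0x12340000.
+ */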
+static uint32_t
+pack_aux(uint8_t *src, int src_bytes)
+{
+	int	i;
+	uint32_t v = 0;
+
+	if (src_bytes > 4)
+		src_bytes = 4;
+	for (i = 0; i < src_bytes; i++)
+		v |= ((uint32_t) src[i]) << ((3-i) * 8);
+	return v;
+}
+
+static void
+unpack_aux(uint32_t src, uint8_t *dst, int dst_bytes)
+{
+	int i;
+	if (dst_bytes > 4)
+		dst_bytes = 4;
+	for (i = 0; i < dst_bytes; i++)
+		dst[i] = src >> ((3-i) * 8);
+}
+
+static int
+cdv_intel_dp_aux_ch(struct psb_intel_encoder *encoder,
+		uint8_t *send, int send_bytes,
+		uint8_t *recv, int recv_size)
+{
+	struct cdv_intel_dp *intel_dp = encoder->dev_priv;
+	uint32_t output_reg = intel_dp->output_reg;
+	struct drm_device *dev = encoder->base.dev;
+	uint32_t ch_ctl = output_reg + 0x10;
+	uint32_t ch_data = ch_ctl + 4;
+	int i;
+	int recv_bytes;
+	uint32_t status;
+	uint32_t aux_clock_divider;
+	int try, precharge;
+
+	/* The clock divider is based off the hrawclk, and the AUX clock
+	 * should run at 2MHz, so take the hrawclk value and divide by 2.
+	 * The CDV platform uses 200MHz as the hrawclk.
+	 */
+	aux_clock_divider = 200 / 2;
+
+	precharge = 4;
+	if (is_edp(encoder))
+		precharge = 10;
+
+	if (REG_READ(ch_ctl) & DP_AUX_CH_CTL_SEND_BUSY) {
+		DRM_ERROR("dp_aux_ch not started status 0x%08x\n",
+			  REG_READ(ch_ctl));
+		return -EBUSY;
+	}
+
+	/* Must try at least 3 times according to DP spec */
+	for (try = 0; try < 5; try++) {
+		/* Load the send data into the aux channel data registers */
+		for (i = 0; i < send_bytes; i += 4)
+			REG_WRITE(ch_data + i,
+				   pack_aux(send + i, send_bytes - i));
+	
+		/* Send the command and wait for it to complete */
+		REG_WRITE(ch_ctl,
+			   DP_AUX_CH_CTL_SEND_BUSY |
+			   DP_AUX_CH_CTL_TIME_OUT_400us |
+			   (send_bytes << DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT) |
+			   (precharge << DP_AUX_CH_CTL_PRECHARGE_2US_SHIFT) |
+			   (aux_clock_divider << DP_AUX_CH_CTL_BIT_CLOCK_2X_SHIFT) |
+			   DP_AUX_CH_CTL_DONE |
+			   DP_AUX_CH_CTL_TIME_OUT_ERROR |
+			   DP_AUX_CH_CTL_RECEIVE_ERROR);
+		for (;;) {
+			status = REG_READ(ch_ctl);
+			if ((status & DP_AUX_CH_CTL_SEND_BUSY) == 0)
+				break;
+			udelay(100);
+		}
+	
+		/* Clear done status and any errors */
+		REG_WRITE(ch_ctl,
+			   status |
+			   DP_AUX_CH_CTL_DONE |
+			   DP_AUX_CH_CTL_TIME_OUT_ERROR |
+			   DP_AUX_CH_CTL_RECEIVE_ERROR);
+		if (status & DP_AUX_CH_CTL_DONE)
+			break;
+	}
+
+	if ((status & DP_AUX_CH_CTL_DONE) == 0) {
+		DRM_ERROR("dp_aux_ch not done status 0x%08x\n", status);
+		return -EBUSY;
+	}
+
+	/* Check for timeout or receive error.
+	 * Timeouts occur when the sink is not connected
+	 */
+	if (status & DP_AUX_CH_CTL_RECEIVE_ERROR) {
+		DRM_ERROR("dp_aux_ch receive error status 0x%08x\n", status);
+		return -EIO;
+	}
+
+	/* Timeouts occur when the device isn't connected, so they're
+	 * "normal" -- don't fill the kernel log with these */
+	if (status & DP_AUX_CH_CTL_TIME_OUT_ERROR) {
+		DRM_DEBUG_KMS("dp_aux_ch timeout status 0x%08x\n", status);
+		return -ETIMEDOUT;
+	}
+
+	/* Unload any bytes sent back from the other side */
+	recv_bytes = ((status & DP_AUX_CH_CTL_MESSAGE_SIZE_MASK) >>
+		      DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT);
+	if (recv_bytes > recv_size)
+		recv_bytes = recv_size;
+	
+	for (i = 0; i < recv_bytes; i += 4)
+		unpack_aux(REG_READ(ch_data + i),
+			   recv + i, recv_bytes - i);
+
+	return recv_bytes;
+}
+
+/* Write data to the aux channel in native mode */
+static int
+cdv_intel_dp_aux_native_write(struct psb_intel_encoder *encoder,
+			  uint16_t address, uint8_t *send, int send_bytes)
+{
+	int ret;
+	uint8_t	msg[20];
+	int msg_bytes;
+	uint8_t	ack;
+
+	if (send_bytes > 16)
+		return -1;
+	msg[0] = AUX_NATIVE_WRITE << 4;
+	msg[1] = address >> 8;
+	msg[2] = address & 0xff;
+	msg[3] = send_bytes - 1;
+	memcpy(&msg[4], send, send_bytes);
+	msg_bytes = send_bytes + 4;
+	for (;;) {
+		ret = cdv_intel_dp_aux_ch(encoder, msg, msg_bytes, &ack, 1);
+		if (ret < 0)
+			return ret;
+		if ((ack & AUX_NATIVE_REPLY_MASK) == AUX_NATIVE_REPLY_ACK)
+			break;
+		else if ((ack & AUX_NATIVE_REPLY_MASK) == AUX_NATIVE_REPLY_DEFER)
+			udelay(100);
+		else
+			return -EIO;
+	}
+	return send_bytes;
+}
+
+/* Write a single byte to the aux channel in native mode */
+static int
+cdv_intel_dp_aux_native_write_1(struct psb_intel_encoder *encoder,
+			    uint16_t address, uint8_t byte)
+{
+	return cdv_intel_dp_aux_native_write(encoder, address, &byte, 1);
+}
+
+/* read bytes from a native aux channel */
+static int
+cdv_intel_dp_aux_native_read(struct psb_intel_encoder *encoder,
+			 uint16_t address, uint8_t *recv, int recv_bytes)
+{
+	uint8_t msg[4];
+	int msg_bytes;
+	uint8_t reply[20];
+	int reply_bytes;
+	uint8_t ack;
+	int ret;
+
+	msg[0] = AUX_NATIVE_READ << 4;
+	msg[1] = address >> 8;
+	msg[2] = address & 0xff;
+	msg[3] = recv_bytes - 1;
+
+	msg_bytes = 4;
+	reply_bytes = recv_bytes + 1;
+
+	for (;;) {
+		ret = cdv_intel_dp_aux_ch(encoder, msg, msg_bytes,
+				      reply, reply_bytes);
+		if (ret == 0)
+			return -EPROTO;
+		if (ret < 0)
+			return ret;
+		ack = reply[0];
+		if ((ack & AUX_NATIVE_REPLY_MASK) == AUX_NATIVE_REPLY_ACK) {
+			memcpy(recv, reply + 1, ret - 1);
+			return ret - 1;
+		}
+		else if ((ack & AUX_NATIVE_REPLY_MASK) == AUX_NATIVE_REPLY_DEFER)
+			udelay(100);
+		else
+			return -EIO;
+	}
+}
+
+static int
+cdv_intel_dp_i2c_aux_ch(struct i2c_adapter *adapter, int mode,
+		    uint8_t write_byte, uint8_t *read_byte)
+{
+	struct i2c_algo_dp_aux_data *algo_data = adapter->algo_data;
+	struct cdv_intel_dp *intel_dp = container_of(adapter,
+						struct cdv_intel_dp,
+						adapter);
+	struct psb_intel_encoder *encoder = intel_dp->encoder;
+	uint16_t address = algo_data->address;
+	uint8_t msg[5];
+	uint8_t reply[2];
+	unsigned retry;
+	int msg_bytes;
+	int reply_bytes;
+	int ret;
+
+	/* Set up the command byte */
+	if (mode & MODE_I2C_READ)
+		msg[0] = AUX_I2C_READ << 4;
+	else
+		msg[0] = AUX_I2C_WRITE << 4;
+
+	if (!(mode & MODE_I2C_STOP))
+		msg[0] |= AUX_I2C_MOT << 4;
+
+	msg[1] = address >> 8;
+	msg[2] = address;
+
+	switch (mode) {
+	case MODE_I2C_WRITE:
+		msg[3] = 0;
+		msg[4] = write_byte;
+		msg_bytes = 5;
+		reply_bytes = 1;
+		break;
+	case MODE_I2C_READ:
+		msg[3] = 0;
+		msg_bytes = 4;
+		reply_bytes = 2;
+		break;
+	default:
+		msg_bytes = 3;
+		reply_bytes = 1;
+		break;
+	}
+
+	for (retry = 0; retry < 5; retry++) {
+		ret = cdv_intel_dp_aux_ch(encoder,
+				      msg, msg_bytes,
+				      reply, reply_bytes);
+		if (ret < 0) {
+			DRM_DEBUG_KMS("aux_ch failed %d\n", ret);
+			return ret;
+		}
+
+		switch (reply[0] & AUX_NATIVE_REPLY_MASK) {
+		case AUX_NATIVE_REPLY_ACK:
+			/* I2C-over-AUX Reply field is only valid
+			 * when paired with AUX ACK.
+			 */
+			break;
+		case AUX_NATIVE_REPLY_NACK:
+			DRM_DEBUG_KMS("aux_ch native nack\n");
+			return -EREMOTEIO;
+		case AUX_NATIVE_REPLY_DEFER:
+			udelay(100);
+			continue;
+		default:
+			DRM_ERROR("aux_ch invalid native reply 0x%02x\n",
+				  reply[0]);
+			return -EREMOTEIO;
+		}
+
+		switch (reply[0] & AUX_I2C_REPLY_MASK) {
+		case AUX_I2C_REPLY_ACK:
+			if (mode == MODE_I2C_READ) {
+				*read_byte = reply[1];
+			}
+			return reply_bytes - 1;
+		case AUX_I2C_REPLY_NACK:
+			DRM_DEBUG_KMS("aux_i2c nack\n");
+			return -EREMOTEIO;
+		case AUX_I2C_REPLY_DEFER:
+			DRM_DEBUG_KMS("aux_i2c defer\n");
+			udelay(100);
+			break;
+		default:
+			DRM_ERROR("aux_i2c invalid reply 0x%02x\n", reply[0]);
+			return -EREMOTEIO;
+		}
+	}
+
+	DRM_ERROR("too many retries, giving up\n");
+	return -EREMOTEIO;
+}
+
+static int
+cdv_intel_dp_i2c_init(struct psb_intel_connector *connector, struct psb_intel_encoder *encoder, const char *name)
+{
+	struct cdv_intel_dp *intel_dp = encoder->dev_priv;
+	int ret;
+
+	DRM_DEBUG_KMS("i2c_init %s\n", name);
+
+	intel_dp->algo.running = false;
+	intel_dp->algo.address = 0;
+	intel_dp->algo.aux_ch = cdv_intel_dp_i2c_aux_ch;
+
+	memset(&intel_dp->adapter, '\0', sizeof (intel_dp->adapter));
+	intel_dp->adapter.owner = THIS_MODULE;
+	intel_dp->adapter.class = I2C_CLASS_DDC;
+	strncpy (intel_dp->adapter.name, name, sizeof(intel_dp->adapter.name) - 1);
+	intel_dp->adapter.name[sizeof(intel_dp->adapter.name) - 1] = '\0';
+	intel_dp->adapter.algo_data = &intel_dp->algo;
+	intel_dp->adapter.dev.parent = &connector->base.kdev;
+
+	if (is_edp(encoder))
+		cdv_intel_edp_panel_vdd_on(encoder);
+	ret = i2c_dp_aux_add_bus(&intel_dp->adapter);
+	if (is_edp(encoder))
+		cdv_intel_edp_panel_vdd_off(encoder);
+	
+	return ret;
+}
+
+void cdv_intel_fixed_panel_mode(struct drm_display_mode *fixed_mode,
+	struct drm_display_mode *adjusted_mode)
+{
+	adjusted_mode->hdisplay = fixed_mode->hdisplay;
+	adjusted_mode->hsync_start = fixed_mode->hsync_start;
+	adjusted_mode->hsync_end = fixed_mode->hsync_end;
+	adjusted_mode->htotal = fixed_mode->htotal;
+
+	adjusted_mode->vdisplay = fixed_mode->vdisplay;
+	adjusted_mode->vsync_start = fixed_mode->vsync_start;
+	adjusted_mode->vsync_end = fixed_mode->vsync_end;
+	adjusted_mode->vtotal = fixed_mode->vtotal;
+
+	adjusted_mode->clock = fixed_mode->clock;
+
+	drm_mode_set_crtcinfo(adjusted_mode, CRTC_INTERLACE_HALVE_V);
+}
+
+static bool
+cdv_intel_dp_mode_fixup(struct drm_encoder *encoder, const struct drm_display_mode *mode,
+		    struct drm_display_mode *adjusted_mode)
+{
+	struct drm_psb_private *dev_priv = encoder->dev->dev_private;
+	struct psb_intel_encoder *intel_encoder = to_psb_intel_encoder(encoder);
+	struct cdv_intel_dp *intel_dp = intel_encoder->dev_priv;
+	int lane_count, clock;
+	int max_lane_count = cdv_intel_dp_max_lane_count(intel_encoder);
+	int max_clock = cdv_intel_dp_max_link_bw(intel_encoder) == DP_LINK_BW_2_7 ? 1 : 0;
+	static int bws[2] = { DP_LINK_BW_1_62, DP_LINK_BW_2_7 };
+	int refclock = mode->clock;
+	int bpp = 24;
+
+	if (is_edp(intel_encoder) && intel_dp->panel_fixed_mode) {
+		cdv_intel_fixed_panel_mode(intel_dp->panel_fixed_mode, adjusted_mode);
+		refclock = intel_dp->panel_fixed_mode->clock;
+		bpp = dev_priv->edp.bpp;
+	}
+
+	for (lane_count = 1; lane_count <= max_lane_count; lane_count <<= 1) {
+		for (clock = max_clock; clock >= 0; clock--) {
+			int link_avail = cdv_intel_dp_max_data_rate(cdv_intel_dp_link_clock(bws[clock]), lane_count);
+
+			if (cdv_intel_dp_link_required(refclock, bpp) <= link_avail) {
+				intel_dp->link_bw = bws[clock];
+				intel_dp->lane_count = lane_count;
+				adjusted_mode->clock = cdv_intel_dp_link_clock(intel_dp->link_bw);
+				DRM_DEBUG_KMS("Display port link bw %02x lane "
+						"count %d clock %d\n",
+				       intel_dp->link_bw, intel_dp->lane_count,
+				       adjusted_mode->clock);
+				return true;
+			}
+		}
+	}
+	if (is_edp(intel_encoder)) {
+		/* okay we failed just pick the highest */
+		intel_dp->lane_count = max_lane_count;
+		intel_dp->link_bw = bws[max_clock];
+		adjusted_mode->clock = cdv_intel_dp_link_clock(intel_dp->link_bw);
+		DRM_DEBUG_KMS("Force picking display port link bw %02x lane "
+			      "count %d clock %d\n",
+			      intel_dp->link_bw, intel_dp->lane_count,
+			      adjusted_mode->clock);
+
+		return true;
+	}
+	return false;
+}
+
+struct cdv_intel_dp_m_n {
+	uint32_t	tu;
+	uint32_t	gmch_m;
+	uint32_t	gmch_n;
+	uint32_t	link_m;
+	uint32_t	link_n;
+};
+
+static void
+cdv_intel_reduce_ratio(uint32_t *num, uint32_t *den)
+{
+	/*
+	while (*num > 0xffffff || *den > 0xffffff) {
+		*num >>= 1;
+		*den >>= 1;
+	}*/
+	uint64_t value, m;
+	m = *num;
+	value = m * (0x800000);
+	m = do_div(value, *den);
+	*num = value;
+	*den = 0x800000;
+}
+
+static void
+cdv_intel_dp_compute_m_n(int bpp,
+		     int nlanes,
+		     int pixel_clock,
+		     int link_clock,
+		     struct cdv_intel_dp_m_n *m_n)
+{
+	m_n->tu = 64;
+	m_n->gmch_m = (pixel_clock * bpp + 7) >> 3;
+	m_n->gmch_n = link_clock * nlanes;
+	cdv_intel_reduce_ratio(&m_n->gmch_m, &m_n->gmch_n);
+	m_n->link_m = pixel_clock;
+	m_n->link_n = link_clock;
+	cdv_intel_reduce_ratio(&m_n->link_m, &m_n->link_n);
+}
+
+void
+cdv_intel_dp_set_m_n(struct drm_crtc *crtc, struct drm_display_mode *mode,
+		 struct drm_display_mode *adjusted_mode)
+{
+	struct drm_device *dev = crtc->dev;
+	struct drm_psb_private *dev_priv = dev->dev_private;
+	struct drm_mode_config *mode_config = &dev->mode_config;
+	struct drm_encoder *encoder;
+	struct psb_intel_crtc *intel_crtc = to_psb_intel_crtc(crtc);
+	int lane_count = 4, bpp = 24;
+	struct cdv_intel_dp_m_n m_n;
+	int pipe = intel_crtc->pipe;
+
+	/*
+	 * Find the lane count in the intel_encoder private
+	 */
+	list_for_each_entry(encoder, &mode_config->encoder_list, head) {
+		struct psb_intel_encoder *intel_encoder;
+		struct cdv_intel_dp *intel_dp;
+
+		if (encoder->crtc != crtc)
+			continue;
+
+		intel_encoder = to_psb_intel_encoder(encoder);
+		intel_dp = intel_encoder->dev_priv;
+		if (intel_encoder->type == INTEL_OUTPUT_DISPLAYPORT) {
+			lane_count = intel_dp->lane_count;
+			break;
+		} else if (is_edp(intel_encoder)) {
+			lane_count = intel_dp->lane_count;
+			bpp = dev_priv->edp.bpp;
+			break;
+		}
+	}
+
+	/*
+	 * Compute the GMCH and Link ratios. The '3' here is
+	 * the number of bytes_per_pixel post-LUT, which we always
+	 * set up for 8-bits of R/G/B, or 3 bytes total.
+	 */
+	cdv_intel_dp_compute_m_n(bpp, lane_count,
+			     mode->clock, adjusted_mode->clock, &m_n);
+
+	{
+		REG_WRITE(PIPE_GMCH_DATA_M(pipe),
+			   ((m_n.tu - 1) << PIPE_GMCH_DATA_M_TU_SIZE_SHIFT) |
+			   m_n.gmch_m);
+		REG_WRITE(PIPE_GMCH_DATA_N(pipe), m_n.gmch_n);
+		REG_WRITE(PIPE_DP_LINK_M(pipe), m_n.link_m);
+		REG_WRITE(PIPE_DP_LINK_N(pipe), m_n.link_n);
+	}
+}
+
+static void
+cdv_intel_dp_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode,
+		  struct drm_display_mode *adjusted_mode)
+{
+	struct psb_intel_encoder *intel_encoder = to_psb_intel_encoder(encoder);
+	struct drm_crtc *crtc = encoder->crtc;
+	struct psb_intel_crtc *intel_crtc = to_psb_intel_crtc(crtc);
+	struct cdv_intel_dp *intel_dp = intel_encoder->dev_priv;
+	struct drm_device *dev = encoder->dev;
+
+	intel_dp->DP = DP_VOLTAGE_0_4 | DP_PRE_EMPHASIS_0;
+	intel_dp->DP |= intel_dp->color_range;
+
+	if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC)
+		intel_dp->DP |= DP_SYNC_HS_HIGH;
+	if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC)
+		intel_dp->DP |= DP_SYNC_VS_HIGH;
+
+	intel_dp->DP |= DP_LINK_TRAIN_OFF;
+
+	switch (intel_dp->lane_count) {
+	case 1:
+		intel_dp->DP |= DP_PORT_WIDTH_1;
+		break;
+	case 2:
+		intel_dp->DP |= DP_PORT_WIDTH_2;
+		break;
+	case 4:
+		intel_dp->DP |= DP_PORT_WIDTH_4;
+		break;
+	}
+	if (intel_dp->has_audio)
+		intel_dp->DP |= DP_AUDIO_OUTPUT_ENABLE;
+
+	memset(intel_dp->link_configuration, 0, DP_LINK_CONFIGURATION_SIZE);
+	intel_dp->link_configuration[0] = intel_dp->link_bw;
+	intel_dp->link_configuration[1] = intel_dp->lane_count;
+
+	/*
+	 * Check for DPCD version > 1.1 and enhanced framing support
+	 */
+	if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11 &&
+	    (intel_dp->dpcd[DP_MAX_LANE_COUNT] & DP_ENHANCED_FRAME_CAP)) {
+		intel_dp->link_configuration[1] |= DP_LANE_COUNT_ENHANCED_FRAME_EN;
+		intel_dp->DP |= DP_ENHANCED_FRAMING;
+	}
+
+	/* CPT DP's pipe select is decided in TRANS_DP_CTL */
+	if (intel_crtc->pipe == 1)
+		intel_dp->DP |= DP_PIPEB_SELECT;
+
+	REG_WRITE(intel_dp->output_reg, (intel_dp->DP | DP_PORT_EN));
+	DRM_DEBUG_KMS("DP expected reg is %x\n", intel_dp->DP);
+	if (is_edp(intel_encoder)) {
+		uint32_t pfit_control;
+		cdv_intel_edp_panel_on(intel_encoder);
+
+		if (mode->hdisplay != adjusted_mode->hdisplay ||
+			    mode->vdisplay != adjusted_mode->vdisplay)
+			pfit_control = PFIT_ENABLE;
+		else
+			pfit_control = 0;
+
+		pfit_control |= intel_crtc->pipe << PFIT_PIPE_SHIFT;
+
+		REG_WRITE(PFIT_CONTROL, pfit_control);
+	}
+}
+
+
+/* If the sink supports it, try to set the power state appropriately */
+static void cdv_intel_dp_sink_dpms(struct psb_intel_encoder *encoder, int mode)
+{
+	struct cdv_intel_dp *intel_dp = encoder->dev_priv;
+	int ret, i;
+
+	/* Should have a valid DPCD by this point */
+	if (intel_dp->dpcd[DP_DPCD_REV] < 0x11)
+		return;
+
+	if (mode != DRM_MODE_DPMS_ON) {
+		ret = cdv_intel_dp_aux_native_write_1(encoder, DP_SET_POWER,
+						  DP_SET_POWER_D3);
+		if (ret != 1)
+			DRM_DEBUG_DRIVER("failed to write sink power state\n");
+	} else {
+		/*
+		 * When turning on, we need to retry for 1ms to give the sink
+		 * time to wake up.
+		 */
+		for (i = 0; i < 3; i++) {
+			ret = cdv_intel_dp_aux_native_write_1(encoder,
+							  DP_SET_POWER,
+							  DP_SET_POWER_D0);
+			if (ret == 1)
+				break;
+			udelay(1000);
+		}
+	}
+}
+
+static void cdv_intel_dp_prepare(struct drm_encoder *encoder)
+{
+	struct psb_intel_encoder *intel_encoder = to_psb_intel_encoder(encoder);
+	int edp = is_edp(intel_encoder);
+
+	if (edp) {
+		cdv_intel_edp_backlight_off(intel_encoder);
+		cdv_intel_edp_panel_off(intel_encoder);
+		cdv_intel_edp_panel_vdd_on(intel_encoder);
+        }
+	/* Wake up the sink first */
+	cdv_intel_dp_sink_dpms(intel_encoder, DRM_MODE_DPMS_ON);
+	cdv_intel_dp_link_down(intel_encoder);
+	if (edp)
+		cdv_intel_edp_panel_vdd_off(intel_encoder);
+}
+
+static void cdv_intel_dp_commit(struct drm_encoder *encoder)
+{
+	struct psb_intel_encoder *intel_encoder = to_psb_intel_encoder(encoder);
+	int edp = is_edp(intel_encoder);
+
+	if (edp)
+		cdv_intel_edp_panel_on(intel_encoder);
+	cdv_intel_dp_start_link_train(intel_encoder);
+	cdv_intel_dp_complete_link_train(intel_encoder);
+	if (edp)
+		cdv_intel_edp_backlight_on(intel_encoder);
+}
+
+static void
+cdv_intel_dp_dpms(struct drm_encoder *encoder, int mode)
+{
+	struct psb_intel_encoder *intel_encoder = to_psb_intel_encoder(encoder);
+	struct cdv_intel_dp *intel_dp = intel_encoder->dev_priv;
+	struct drm_device *dev = encoder->dev;
+	uint32_t dp_reg = REG_READ(intel_dp->output_reg);
+	int edp = is_edp(intel_encoder);
+
+	if (mode != DRM_MODE_DPMS_ON) {
+		if (edp) {
+			cdv_intel_edp_backlight_off(intel_encoder);
+			cdv_intel_edp_panel_vdd_on(intel_encoder);
+		}
+		cdv_intel_dp_sink_dpms(intel_encoder, mode);
+		cdv_intel_dp_link_down(intel_encoder);
+		if (edp) {
+			cdv_intel_edp_panel_vdd_off(intel_encoder);
+			cdv_intel_edp_panel_off(intel_encoder);
+		}
+	} else {
+        	if (edp)
+			cdv_intel_edp_panel_on(intel_encoder);
+		cdv_intel_dp_sink_dpms(intel_encoder, mode);
+		if (!(dp_reg & DP_PORT_EN)) {
+			cdv_intel_dp_start_link_train(intel_encoder);
+			cdv_intel_dp_complete_link_train(intel_encoder);
+		}
+		if (edp)
+        		cdv_intel_edp_backlight_on(intel_encoder);
+	}
+}
+
+/*
+ * Native read with retry for link status and receiver capability reads for
+ * cases where the sink may still be asleep.
+ */
+static bool
+cdv_intel_dp_aux_native_read_retry(struct psb_intel_encoder *encoder, uint16_t address,
+			       uint8_t *recv, int recv_bytes)
+{
+	int ret, i;
+
+	/*
+	 * Sinks are *supposed* to come up within 1ms from an off state,
+	 * but we're also supposed to retry 3 times per the spec.
+	 */
+	for (i = 0; i < 3; i++) {
+		ret = cdv_intel_dp_aux_native_read(encoder, address, recv,
+					       recv_bytes);
+		if (ret == recv_bytes)
+			return true;
+		udelay(1000);
+	}
+
+	return false;
+}
+
+/*
+ * Fetch AUX CH registers 0x202 - 0x207 which contain
+ * link status information
+ */
+static bool
+cdv_intel_dp_get_link_status(struct psb_intel_encoder *encoder)
+{
+	struct cdv_intel_dp *intel_dp = encoder->dev_priv;
+	return cdv_intel_dp_aux_native_read_retry(encoder,
+					      DP_LANE0_1_STATUS,
+					      intel_dp->link_status,
+					      DP_LINK_STATUS_SIZE);
+}
+
+static uint8_t
+cdv_intel_dp_link_status(uint8_t link_status[DP_LINK_STATUS_SIZE],
+		     int r)
+{
+	return link_status[r - DP_LANE0_1_STATUS];
+}
+
+static uint8_t
+cdv_intel_get_adjust_request_voltage(uint8_t link_status[DP_LINK_STATUS_SIZE],
+				 int lane)
+{
+	int	    i = DP_ADJUST_REQUEST_LANE0_1 + (lane >> 1);
+	int	    s = ((lane & 1) ?
+			 DP_ADJUST_VOLTAGE_SWING_LANE1_SHIFT :
+			 DP_ADJUST_VOLTAGE_SWING_LANE0_SHIFT);
+	uint8_t l = cdv_intel_dp_link_status(link_status, i);
+
+	return ((l >> s) & 3) << DP_TRAIN_VOLTAGE_SWING_SHIFT;
+}
+
+static uint8_t
+cdv_intel_get_adjust_request_pre_emphasis(uint8_t link_status[DP_LINK_STATUS_SIZE],
+				      int lane)
+{
+	int	    i = DP_ADJUST_REQUEST_LANE0_1 + (lane >> 1);
+	int	    s = ((lane & 1) ?
+			 DP_ADJUST_PRE_EMPHASIS_LANE1_SHIFT :
+			 DP_ADJUST_PRE_EMPHASIS_LANE0_SHIFT);
+	uint8_t l = cdv_intel_dp_link_status(link_status, i);
+
+	return ((l >> s) & 3) << DP_TRAIN_PRE_EMPHASIS_SHIFT;
+}
+
+
+#if 0
+static char	*voltage_names[] = {
+	"0.4V", "0.6V", "0.8V", "1.2V"
+};
+static char	*pre_emph_names[] = {
+	"0dB", "3.5dB", "6dB", "9.5dB"
+};
+static char	*link_train_names[] = {
+	"pattern 1", "pattern 2", "idle", "off"
+};
+#endif
+
+#define CDV_DP_VOLTAGE_MAX	    DP_TRAIN_VOLTAGE_SWING_1200
+/*
+static uint8_t
+cdv_intel_dp_pre_emphasis_max(uint8_t voltage_swing)
+{
+	switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
+	case DP_TRAIN_VOLTAGE_SWING_400:
+		return DP_TRAIN_PRE_EMPHASIS_6;
+	case DP_TRAIN_VOLTAGE_SWING_600:
+		return DP_TRAIN_PRE_EMPHASIS_6;
+	case DP_TRAIN_VOLTAGE_SWING_800:
+		return DP_TRAIN_PRE_EMPHASIS_3_5;
+	case DP_TRAIN_VOLTAGE_SWING_1200:
+	default:
+		return DP_TRAIN_PRE_EMPHASIS_0;
+	}
+}
+*/
+static void
+cdv_intel_get_adjust_train(struct psb_intel_encoder *encoder)
+{
+	struct cdv_intel_dp *intel_dp = encoder->dev_priv;
+	uint8_t v = 0;
+	uint8_t p = 0;
+	int lane;
+
+	for (lane = 0; lane < intel_dp->lane_count; lane++) {
+		uint8_t this_v = cdv_intel_get_adjust_request_voltage(intel_dp->link_status, lane);
+		uint8_t this_p = cdv_intel_get_adjust_request_pre_emphasis(intel_dp->link_status, lane);
+
+		if (this_v > v)
+			v = this_v;
+		if (this_p > p)
+			p = this_p;
+	}
+	
+	if (v >= CDV_DP_VOLTAGE_MAX)
+		v = CDV_DP_VOLTAGE_MAX | DP_TRAIN_MAX_SWING_REACHED;
+
+	if (p == DP_TRAIN_PRE_EMPHASIS_MASK)
+		p |= DP_TRAIN_MAX_PRE_EMPHASIS_REACHED;
+		
+	for (lane = 0; lane < 4; lane++)
+		intel_dp->train_set[lane] = v | p;
+}
+
+
+static uint8_t
+cdv_intel_get_lane_status(uint8_t link_status[DP_LINK_STATUS_SIZE],
+		      int lane)
+{
+	int i = DP_LANE0_1_STATUS + (lane >> 1);
+	int s = (lane & 1) * 4;
+	uint8_t l = cdv_intel_dp_link_status(link_status, i);
+
+	return (l >> s) & 0xf;
+}
+
+/* Check for clock recovery is done on all channels */
+static bool
+cdv_intel_clock_recovery_ok(uint8_t link_status[DP_LINK_STATUS_SIZE], int lane_count)
+{
+	int lane;
+	uint8_t lane_status;
+
+	for (lane = 0; lane < lane_count; lane++) {
+		lane_status = cdv_intel_get_lane_status(link_status, lane);
+		if ((lane_status & DP_LANE_CR_DONE) == 0)
+			return false;
+	}
+	return true;
+}
+
+/* Check to see if channel eq is done on all channels */
+#define CHANNEL_EQ_BITS (DP_LANE_CR_DONE|\
+			 DP_LANE_CHANNEL_EQ_DONE|\
+			 DP_LANE_SYMBOL_LOCKED)
+static bool
+cdv_intel_channel_eq_ok(struct psb_intel_encoder *encoder)
+{
+	struct cdv_intel_dp *intel_dp = encoder->dev_priv;
+	uint8_t lane_align;
+	uint8_t lane_status;
+	int lane;
+
+	lane_align = cdv_intel_dp_link_status(intel_dp->link_status,
+					  DP_LANE_ALIGN_STATUS_UPDATED);
+	if ((lane_align & DP_INTERLANE_ALIGN_DONE) == 0)
+		return false;
+	for (lane = 0; lane < intel_dp->lane_count; lane++) {
+		lane_status = cdv_intel_get_lane_status(intel_dp->link_status, lane);
+		if ((lane_status & CHANNEL_EQ_BITS) != CHANNEL_EQ_BITS)
+			return false;
+	}
+	return true;
+}
+
+static bool
+cdv_intel_dp_set_link_train(struct psb_intel_encoder *encoder,
+			uint32_t dp_reg_value,
+			uint8_t dp_train_pat)
+{
+	
+	struct drm_device *dev = encoder->base.dev;
+	int ret;
+	struct cdv_intel_dp *intel_dp = encoder->dev_priv;
+
+	REG_WRITE(intel_dp->output_reg, dp_reg_value);
+	REG_READ(intel_dp->output_reg);
+
+	ret = cdv_intel_dp_aux_native_write_1(encoder,
+				    DP_TRAINING_PATTERN_SET,
+				    dp_train_pat);
+
+	if (ret != 1) {
+		DRM_DEBUG_KMS("Failure in setting link pattern %x\n",
+				dp_train_pat);
+		return false;
+	}
+
+	return true;
+}
+
+
+static bool
+cdv_intel_dplink_set_level(struct psb_intel_encoder *encoder,
+			uint8_t dp_train_pat)
+{
+	
+	int ret;
+	struct cdv_intel_dp *intel_dp = encoder->dev_priv;
+
+	ret = cdv_intel_dp_aux_native_write(encoder,
+					DP_TRAINING_LANE0_SET,
+					intel_dp->train_set,
+					intel_dp->lane_count);
+
+	if (ret != intel_dp->lane_count) {
+		DRM_DEBUG_KMS("Failure in setting level %d, lane_cnt= %d\n",
+				intel_dp->train_set[0], intel_dp->lane_count);
+		return false;
+	}
+	return true;
+}
+
+static void
+cdv_intel_dp_set_vswing_premph(struct psb_intel_encoder *encoder, uint8_t signal_level)
+{
+	struct drm_device *dev = encoder->base.dev;
+	struct cdv_intel_dp *intel_dp = encoder->dev_priv;
+	struct ddi_regoff *ddi_reg;
+	int vswing, premph, index;
+
+	if (intel_dp->output_reg == DP_B)
+		ddi_reg = &ddi_DP_train_table[0];
+	else
+		ddi_reg = &ddi_DP_train_table[1];
+
+	vswing = (signal_level & DP_TRAIN_VOLTAGE_SWING_MASK);
+	premph = ((signal_level & DP_TRAIN_PRE_EMPHASIS_MASK)) >>
+				DP_TRAIN_PRE_EMPHASIS_SHIFT;
+
+	if (vswing + premph > 3)
+		return;
+#ifdef CDV_FAST_LINK_TRAIN
+	return;
+#endif
+	DRM_DEBUG_KMS("Test2\n");
+	//return ;
+	cdv_sb_reset(dev);
+	/* ;Swing voltage programming
+        ;gfx_dpio_set_reg(0xc058, 0x0505313A) */
+	cdv_sb_write(dev, ddi_reg->VSwing5, 0x0505313A);
+
+	/* ;gfx_dpio_set_reg(0x8154, 0x43406055) */
+	cdv_sb_write(dev, ddi_reg->VSwing1, 0x43406055);
+
+	/* ;gfx_dpio_set_reg(0x8148, 0x55338954)
+	 * The VSwing_PreEmph table is also considered based on the vswing/premp
+	 */
+	index = (vswing + premph) * 2;
+	if (premph == 1 && vswing == 1) {
+		cdv_sb_write(dev, ddi_reg->VSwing2, 0x055738954);
+	} else
+		cdv_sb_write(dev, ddi_reg->VSwing2, dp_vswing_premph_table[index]);
+
+	/* ;gfx_dpio_set_reg(0x814c, 0x40802040) */
+	if ((vswing + premph) == DP_TRAIN_VOLTAGE_SWING_1200)
+		cdv_sb_write(dev, ddi_reg->VSwing3, 0x70802040);
+	else
+		cdv_sb_write(dev, ddi_reg->VSwing3, 0x40802040);
+
+	/* ;gfx_dpio_set_reg(0x8150, 0x2b405555) */
+	/* cdv_sb_write(dev, ddi_reg->VSwing4, 0x2b405555); */
+
+	/* ;gfx_dpio_set_reg(0x8154, 0xc3406055) */
+	cdv_sb_write(dev, ddi_reg->VSwing1, 0xc3406055);
+
+	/* ;Pre emphasis programming
+	 * ;gfx_dpio_set_reg(0xc02c, 0x1f030040)
+	 */
+	cdv_sb_write(dev, ddi_reg->PreEmph1, 0x1f030040);
+
+	/* ;gfx_dpio_set_reg(0x8124, 0x00004000) */
+	index = 2 * premph + 1;
+	cdv_sb_write(dev, ddi_reg->PreEmph2, dp_vswing_premph_table[index]);
+	return;	
+}
+
+
+/* Enable corresponding port and start training pattern 1 */
+static void
+cdv_intel_dp_start_link_train(struct psb_intel_encoder *encoder)
+{
+	struct drm_device *dev = encoder->base.dev;
+	struct cdv_intel_dp *intel_dp = encoder->dev_priv;
+	int i;
+	uint8_t voltage;
+	bool clock_recovery = false;
+	int tries;
+	u32 reg;
+	uint32_t DP = intel_dp->DP;
+
+	DP |= DP_PORT_EN;
+	DP &= ~DP_LINK_TRAIN_MASK;
+		
+	reg = DP;	
+	reg |= DP_LINK_TRAIN_PAT_1;
+	/* Enable output, wait for it to become active */
+	REG_WRITE(intel_dp->output_reg, reg);
+	REG_READ(intel_dp->output_reg);
+	psb_intel_wait_for_vblank(dev);
+
+	DRM_DEBUG_KMS("Link config\n");
+	/* Write the link configuration data */
+	cdv_intel_dp_aux_native_write(encoder, DP_LINK_BW_SET,
+				  intel_dp->link_configuration,
+				  2);
+
+	memset(intel_dp->train_set, 0, 4);
+	voltage = 0;
+	tries = 0;
+	clock_recovery = false;
+
+	DRM_DEBUG_KMS("Start train\n");
+		reg = DP | DP_LINK_TRAIN_PAT_1;
+
+
+	for (;;) {
+		/* Use intel_dp->train_set[0] to set the voltage and pre emphasis values */
+		DRM_DEBUG_KMS("DP Link Train Set %x, Link_config %x, %x\n",
+				intel_dp->train_set[0],
+				intel_dp->link_configuration[0],
+				intel_dp->link_configuration[1]);
+
+		if (!cdv_intel_dp_set_link_train(encoder, reg, DP_TRAINING_PATTERN_1)) {
+			DRM_DEBUG_KMS("Failure in aux-transfer setting pattern 1\n");
+		}
+		cdv_intel_dp_set_vswing_premph(encoder, intel_dp->train_set[0]);
+		/* Set training pattern 1 */
+
+		cdv_intel_dplink_set_level(encoder, DP_TRAINING_PATTERN_1);
+
+		udelay(200);
+		if (!cdv_intel_dp_get_link_status(encoder))
+			break;
+
+		DRM_DEBUG_KMS("DP Link status %x, %x, %x, %x, %x, %x\n",
+				intel_dp->link_status[0], intel_dp->link_status[1], intel_dp->link_status[2],
+				intel_dp->link_status[3], intel_dp->link_status[4], intel_dp->link_status[5]);
+
+		if (cdv_intel_clock_recovery_ok(intel_dp->link_status, intel_dp->lane_count)) {
+			DRM_DEBUG_KMS("PT1 train is done\n");
+			clock_recovery = true;
+			break;
+		}
+
+		/* Check to see if we've tried the max voltage */
+		for (i = 0; i < intel_dp->lane_count; i++)
+			if ((intel_dp->train_set[i] & DP_TRAIN_MAX_SWING_REACHED) == 0)
+				break;
+		if (i == intel_dp->lane_count)
+			break;
+
+		/* Check to see if we've tried the same voltage 5 times */
+		if ((intel_dp->train_set[0] & DP_TRAIN_VOLTAGE_SWING_MASK) == voltage) {
+			++tries;
+			if (tries == 5)
+				break;
+		} else
+			tries = 0;
+		voltage = intel_dp->train_set[0] & DP_TRAIN_VOLTAGE_SWING_MASK;
+
+		/* Compute new intel_dp->train_set as requested by target */
+		cdv_intel_get_adjust_train(encoder);
+
+	}
+
+	if (!clock_recovery) {
+		DRM_DEBUG_KMS("failure in DP patter 1 training, train set %x\n", intel_dp->train_set[0]);
+	}
+	
+	intel_dp->DP = DP;
+}
+
+static void
+cdv_intel_dp_complete_link_train(struct psb_intel_encoder *encoder)
+{
+	struct drm_device *dev = encoder->base.dev;
+	struct cdv_intel_dp *intel_dp = encoder->dev_priv;
+	bool channel_eq = false;
+	int tries, cr_tries;
+	u32 reg;
+	uint32_t DP = intel_dp->DP;
+
+	/* channel equalization */
+	tries = 0;
+	cr_tries = 0;
+	channel_eq = false;
+
+	DRM_DEBUG_KMS("\n");
+		reg = DP | DP_LINK_TRAIN_PAT_2;
+
+	for (;;) {
+
+		DRM_DEBUG_KMS("DP Link Train Set %x, Link_config %x, %x\n",
+				intel_dp->train_set[0],
+				intel_dp->link_configuration[0],
+				intel_dp->link_configuration[1]);
+        	/* channel eq pattern */
+
+		if (!cdv_intel_dp_set_link_train(encoder, reg,
+					     DP_TRAINING_PATTERN_2)) {
+			DRM_DEBUG_KMS("Failure in aux-transfer setting pattern 2\n");
+		}
+		/* Use intel_dp->train_set[0] to set the voltage and pre emphasis values */
+
+		if (cr_tries > 5) {
+			DRM_ERROR("failed to train DP, aborting\n");
+			cdv_intel_dp_link_down(encoder);
+			break;
+		}
+
+		cdv_intel_dp_set_vswing_premph(encoder, intel_dp->train_set[0]);
+
+		cdv_intel_dplink_set_level(encoder, DP_TRAINING_PATTERN_2);
+
+		udelay(1000);
+		if (!cdv_intel_dp_get_link_status(encoder))
+			break;
+
+		DRM_DEBUG_KMS("DP Link status %x, %x, %x, %x, %x, %x\n",
+				intel_dp->link_status[0], intel_dp->link_status[1], intel_dp->link_status[2],
+				intel_dp->link_status[3], intel_dp->link_status[4], intel_dp->link_status[5]);
+
+		/* Make sure clock is still ok */
+		if (!cdv_intel_clock_recovery_ok(intel_dp->link_status, intel_dp->lane_count)) {
+			cdv_intel_dp_start_link_train(encoder);
+			cr_tries++;
+			continue;
+		}
+
+		if (cdv_intel_channel_eq_ok(encoder)) {
+			DRM_DEBUG_KMS("PT2 train is done\n");
+			channel_eq = true;
+			break;
+		}
+
+		/* Try 5 times, then try clock recovery if that fails */
+		if (tries > 5) {
+			cdv_intel_dp_link_down(encoder);
+			cdv_intel_dp_start_link_train(encoder);
+			tries = 0;
+			cr_tries++;
+			continue;
+		}
+
+		/* Compute new intel_dp->train_set as requested by target */
+		cdv_intel_get_adjust_train(encoder);
+		++tries;
+
+	}
+
+	reg = DP | DP_LINK_TRAIN_OFF;
+
+	REG_WRITE(intel_dp->output_reg, reg);
+	REG_READ(intel_dp->output_reg);
+	cdv_intel_dp_aux_native_write_1(encoder,
+				    DP_TRAINING_PATTERN_SET, DP_TRAINING_PATTERN_DISABLE);
+}
+
+static void
+cdv_intel_dp_link_down(struct psb_intel_encoder *encoder)
+{
+	struct drm_device *dev = encoder->base.dev;
+	struct cdv_intel_dp *intel_dp = encoder->dev_priv;
+	uint32_t DP = intel_dp->DP;
+
+	if ((REG_READ(intel_dp->output_reg) & DP_PORT_EN) == 0)
+		return;
+
+	DRM_DEBUG_KMS("\n");
+
+
+	{
+		DP &= ~DP_LINK_TRAIN_MASK;
+		REG_WRITE(intel_dp->output_reg, DP | DP_LINK_TRAIN_PAT_IDLE);
+	}
+	REG_READ(intel_dp->output_reg);
+
+	msleep(17);
+
+	REG_WRITE(intel_dp->output_reg, DP & ~DP_PORT_EN);
+	REG_READ(intel_dp->output_reg);
+}
+
+static enum drm_connector_status
+cdv_dp_detect(struct psb_intel_encoder *encoder)
+{
+	struct cdv_intel_dp *intel_dp = encoder->dev_priv;
+	enum drm_connector_status status;
+
+	status = connector_status_disconnected;
+	if (cdv_intel_dp_aux_native_read(encoder, 0x000, intel_dp->dpcd,
+				     sizeof (intel_dp->dpcd)) == sizeof (intel_dp->dpcd))
+	{
+		if (intel_dp->dpcd[DP_DPCD_REV] != 0)
+			status = connector_status_connected;
+	}
+	if (status == connector_status_connected)
+		DRM_DEBUG_KMS("DPCD: Rev=%x LN_Rate=%x LN_CNT=%x LN_DOWNSP=%x\n",
+			intel_dp->dpcd[0], intel_dp->dpcd[1],
+			intel_dp->dpcd[2], intel_dp->dpcd[3]);
+	return status;
+}
+
+/**
+ * Uses CRT_HOTPLUG_EN and CRT_HOTPLUG_STAT to detect DP connection.
+ *
+ * \return true if DP port is connected.
+ * \return false if DP port is disconnected.
+ */
+static enum drm_connector_status
+cdv_intel_dp_detect(struct drm_connector *connector, bool force)
+{
+	struct psb_intel_encoder *encoder = psb_intel_attached_encoder(connector);
+	struct cdv_intel_dp *intel_dp = encoder->dev_priv;
+	enum drm_connector_status status;
+	struct edid *edid = NULL;
+	int edp = is_edp(encoder);
+
+	intel_dp->has_audio = false;
+
+	if (edp)
+		cdv_intel_edp_panel_vdd_on(encoder);
+	status = cdv_dp_detect(encoder);
+	if (status != connector_status_connected) {
+		if (edp)
+			cdv_intel_edp_panel_vdd_off(encoder);
+		return status;
+        }
+
+	if (intel_dp->force_audio) {
+		intel_dp->has_audio = intel_dp->force_audio > 0;
+	} else {
+		edid = drm_get_edid(connector, &intel_dp->adapter);
+		if (edid) {
+			intel_dp->has_audio = drm_detect_monitor_audio(edid);
+			kfree(edid);
+		}
+	}
+	if (edp)
+		cdv_intel_edp_panel_vdd_off(encoder);
+
+	return connector_status_connected;
+}
+
+static int cdv_intel_dp_get_modes(struct drm_connector *connector)
+{
+	struct psb_intel_encoder *intel_encoder = psb_intel_attached_encoder(connector);
+	struct cdv_intel_dp *intel_dp = intel_encoder->dev_priv;
+	struct edid *edid = NULL;
+	int ret = 0;
+	int edp = is_edp(intel_encoder);
+
+
+	edid = drm_get_edid(connector, &intel_dp->adapter);
+	if (edid) {
+		drm_mode_connector_update_edid_property(connector, edid);
+		ret = drm_add_edid_modes(connector, edid);
+		kfree(edid);
+	}
+
+	if (is_edp(intel_encoder)) {
+		struct drm_device *dev = connector->dev;
+		struct drm_psb_private *dev_priv = dev->dev_private;
+		
+		cdv_intel_edp_panel_vdd_off(intel_encoder);
+		if (ret) {
+			if (edp && !intel_dp->panel_fixed_mode) {
+				struct drm_display_mode *newmode;
+				list_for_each_entry(newmode, &connector->probed_modes,
+					    head) {
+					if (newmode->type & DRM_MODE_TYPE_PREFERRED) {
+						intel_dp->panel_fixed_mode =
+							drm_mode_duplicate(dev, newmode);
+						break;
+					}
+				}
+			}
+
+			return ret;
+		}
+		if (!intel_dp->panel_fixed_mode && dev_priv->lfp_lvds_vbt_mode) {
+			intel_dp->panel_fixed_mode =
+				drm_mode_duplicate(dev, dev_priv->lfp_lvds_vbt_mode);
+			if (intel_dp->panel_fixed_mode) {
+				intel_dp->panel_fixed_mode->type |=
+					DRM_MODE_TYPE_PREFERRED;
+			}
+		}
+		if (intel_dp->panel_fixed_mode != NULL) {
+			struct drm_display_mode *mode;
+			mode = drm_mode_duplicate(dev, intel_dp->panel_fixed_mode);
+			drm_mode_probed_add(connector, mode);
+			return 1;
+		}
+	}
+
+	return ret;
+}
+
+static bool
+cdv_intel_dp_detect_audio(struct drm_connector *connector)
+{
+	struct psb_intel_encoder *encoder = psb_intel_attached_encoder(connector);
+	struct cdv_intel_dp *intel_dp = encoder->dev_priv;
+	struct edid *edid;
+	bool has_audio = false;
+	int edp = is_edp(encoder);
+
+	if (edp)
+		cdv_intel_edp_panel_vdd_on(encoder);
+
+	edid = drm_get_edid(connector, &intel_dp->adapter);
+	if (edid) {
+		has_audio = drm_detect_monitor_audio(edid);
+		kfree(edid);
+	}
+	if (edp)
+		cdv_intel_edp_panel_vdd_off(encoder);
+
+	return has_audio;
+}
+
+static int
+cdv_intel_dp_set_property(struct drm_connector *connector,
+		      struct drm_property *property,
+		      uint64_t val)
+{
+	struct drm_psb_private *dev_priv = connector->dev->dev_private;
+	struct psb_intel_encoder *encoder = psb_intel_attached_encoder(connector);
+	struct cdv_intel_dp *intel_dp = encoder->dev_priv;
+	int ret;
+
+	ret = drm_connector_property_set_value(connector, property, val);
+	if (ret)
+		return ret;
+
+	if (property == dev_priv->force_audio_property) {
+		int i = val;
+		bool has_audio;
+
+		if (i == intel_dp->force_audio)
+			return 0;
+
+		intel_dp->force_audio = i;
+
+		if (i == 0)
+			has_audio = cdv_intel_dp_detect_audio(connector);
+		else
+			has_audio = i > 0;
+
+		if (has_audio == intel_dp->has_audio)
+			return 0;
+
+		intel_dp->has_audio = has_audio;
+		goto done;
+	}
+
+	if (property == dev_priv->broadcast_rgb_property) {
+		if (val == !!intel_dp->color_range)
+			return 0;
+
+		intel_dp->color_range = val ? DP_COLOR_RANGE_16_235 : 0;
+		goto done;
+	}
+
+	return -EINVAL;
+
+done:
+	if (encoder->base.crtc) {
+		struct drm_crtc *crtc = encoder->base.crtc;
+		drm_crtc_helper_set_mode(crtc, &crtc->mode,
+					 crtc->x, crtc->y,
+					 crtc->fb);
+	}
+
+	return 0;
+}
+
+static void
+cdv_intel_dp_destroy(struct drm_connector *connector)
+{
+	struct psb_intel_encoder *psb_intel_encoder =
+					psb_intel_attached_encoder(connector);
+	struct cdv_intel_dp *intel_dp = psb_intel_encoder->dev_priv;
+
+	if (is_edp(psb_intel_encoder)) {
+	/*	cdv_intel_panel_destroy_backlight(connector->dev); */
+		if (intel_dp->panel_fixed_mode) {
+			kfree(intel_dp->panel_fixed_mode);
+			intel_dp->panel_fixed_mode = NULL;
+		}
+	}
+	i2c_del_adapter(&intel_dp->adapter);
+	drm_sysfs_connector_remove(connector);
+	drm_connector_cleanup(connector);
+	kfree(connector);
+}
+
+static void cdv_intel_dp_encoder_destroy(struct drm_encoder *encoder)
+{
+	drm_encoder_cleanup(encoder);
+}
+
+static const struct drm_encoder_helper_funcs cdv_intel_dp_helper_funcs = {
+	.dpms = cdv_intel_dp_dpms,
+	.mode_fixup = cdv_intel_dp_mode_fixup,
+	.prepare = cdv_intel_dp_prepare,
+	.mode_set = cdv_intel_dp_mode_set,
+	.commit = cdv_intel_dp_commit,
+};
+
+static const struct drm_connector_funcs cdv_intel_dp_connector_funcs = {
+	.dpms = drm_helper_connector_dpms,
+	.detect = cdv_intel_dp_detect,
+	.fill_modes = drm_helper_probe_single_connector_modes,
+	.set_property = cdv_intel_dp_set_property,
+	.destroy = cdv_intel_dp_destroy,
+};
+
+static const struct drm_connector_helper_funcs cdv_intel_dp_connector_helper_funcs = {
+	.get_modes = cdv_intel_dp_get_modes,
+	.mode_valid = cdv_intel_dp_mode_valid,
+	.best_encoder = psb_intel_best_encoder,
+};
+
+static const struct drm_encoder_funcs cdv_intel_dp_enc_funcs = {
+	.destroy = cdv_intel_dp_encoder_destroy,
+};
+
+
+static void cdv_intel_dp_add_properties(struct drm_connector *connector)
+{
+	cdv_intel_attach_force_audio_property(connector);
+	cdv_intel_attach_broadcast_rgb_property(connector);
+}
+
+/* check the VBT to see whether the eDP is on DP-D port */
+static bool cdv_intel_dpc_is_edp(struct drm_device *dev)
+{
+	struct drm_psb_private *dev_priv = dev->dev_private;
+	struct child_device_config *p_child;
+	int i;
+
+	if (!dev_priv->child_dev_num)
+		return false;
+
+	for (i = 0; i < dev_priv->child_dev_num; i++) {
+		p_child = dev_priv->child_dev + i;
+
+		if (p_child->dvo_port == PORT_IDPC &&
+		    p_child->device_type == DEVICE_TYPE_eDP)
+			return true;
+	}
+	return false;
+}
+
+/* Cedarview display clock gating
+
+   We need this disable dot get correct behaviour while enabling
+   DP/eDP. TODO - investigate if we can turn it back to normality
+   after enabling */
+static void cdv_disable_intel_clock_gating(struct drm_device *dev)
+{
+	u32 reg_value;
+	reg_value = REG_READ(DSPCLK_GATE_D);
+
+	reg_value |= (DPUNIT_PIPEB_GATE_DISABLE |
+			DPUNIT_PIPEA_GATE_DISABLE |
+			DPCUNIT_CLOCK_GATE_DISABLE |
+			DPLSUNIT_CLOCK_GATE_DISABLE |
+			DPOUNIT_CLOCK_GATE_DISABLE |
+		 	DPIOUNIT_CLOCK_GATE_DISABLE);	
+
+	REG_WRITE(DSPCLK_GATE_D, reg_value);
+
+	udelay(500);		
+}
+
+void
+cdv_intel_dp_init(struct drm_device *dev, struct psb_intel_mode_device *mode_dev, int output_reg)
+{
+	struct psb_intel_encoder *psb_intel_encoder;
+	struct psb_intel_connector *psb_intel_connector;
+	struct drm_connector *connector;
+	struct drm_encoder *encoder;
+	struct cdv_intel_dp *intel_dp;
+	const char *name = NULL;
+	int type = DRM_MODE_CONNECTOR_DisplayPort;
+
+	psb_intel_encoder = kzalloc(sizeof(struct psb_intel_encoder), GFP_KERNEL);
+	if (!psb_intel_encoder)
+		return;
+        psb_intel_connector = kzalloc(sizeof(struct psb_intel_connector), GFP_KERNEL);
+        if (!psb_intel_connector)
+                goto err_connector;
+	intel_dp = kzalloc(sizeof(struct cdv_intel_dp), GFP_KERNEL);
+	if (!intel_dp)
+	        goto err_priv;
+
+	if ((output_reg == DP_C) && cdv_intel_dpc_is_edp(dev))
+		type = DRM_MODE_CONNECTOR_eDP;
+
+	connector = &psb_intel_connector->base;
+	encoder = &psb_intel_encoder->base;
+
+	drm_connector_init(dev, connector, &cdv_intel_dp_connector_funcs, type);
+	drm_encoder_init(dev, encoder, &cdv_intel_dp_enc_funcs, DRM_MODE_ENCODER_TMDS);
+
+	psb_intel_connector_attach_encoder(psb_intel_connector, psb_intel_encoder);
+
+	if (type == DRM_MODE_CONNECTOR_DisplayPort)
+        	psb_intel_encoder->type = INTEL_OUTPUT_DISPLAYPORT;
+        else
+		psb_intel_encoder->type = INTEL_OUTPUT_EDP;
+
+
+	psb_intel_encoder->dev_priv=intel_dp;
+	intel_dp->encoder = psb_intel_encoder;
+	intel_dp->output_reg = output_reg;
+	
+	drm_encoder_helper_add(encoder, &cdv_intel_dp_helper_funcs);
+	drm_connector_helper_add(connector, &cdv_intel_dp_connector_helper_funcs);
+
+	connector->polled = DRM_CONNECTOR_POLL_HPD;
+	connector->interlace_allowed = false;
+	connector->doublescan_allowed = false;
+
+	drm_sysfs_connector_add(connector);
+
+	/* Set up the DDC bus. */
+	switch (output_reg) {
+		case DP_B:
+			name = "DPDDC-B";
+			psb_intel_encoder->ddi_select = (DP_MASK | DDI0_SELECT);
+			break;
+		case DP_C:
+			name = "DPDDC-C";
+			psb_intel_encoder->ddi_select = (DP_MASK | DDI1_SELECT);
+			break;
+	}
+
+	cdv_disable_intel_clock_gating(dev);
+
+	cdv_intel_dp_i2c_init(psb_intel_connector, psb_intel_encoder, name);
+        /* FIXME:fail check */
+	cdv_intel_dp_add_properties(connector);
+
+	if (is_edp(psb_intel_encoder)) {
+		int ret;
+		struct edp_power_seq cur;
+                u32 pp_on, pp_off, pp_div;
+		u32 pwm_ctrl;
+
+		pp_on = REG_READ(PP_CONTROL);
+		pp_on &= ~PANEL_UNLOCK_MASK;
+	        pp_on |= PANEL_UNLOCK_REGS;
+		
+		REG_WRITE(PP_CONTROL, pp_on);
+
+		pwm_ctrl = REG_READ(BLC_PWM_CTL2);
+		pwm_ctrl |= PWM_PIPE_B;
+		REG_WRITE(BLC_PWM_CTL2, pwm_ctrl);
+
+                pp_on = REG_READ(PP_ON_DELAYS);
+                pp_off = REG_READ(PP_OFF_DELAYS);
+                pp_div = REG_READ(PP_DIVISOR);
+	
+		/* Pull timing values out of registers */
+                cur.t1_t3 = (pp_on & PANEL_POWER_UP_DELAY_MASK) >>
+                        PANEL_POWER_UP_DELAY_SHIFT;
+
+                cur.t8 = (pp_on & PANEL_LIGHT_ON_DELAY_MASK) >>
+                        PANEL_LIGHT_ON_DELAY_SHIFT;
+
+                cur.t9 = (pp_off & PANEL_LIGHT_OFF_DELAY_MASK) >>
+                        PANEL_LIGHT_OFF_DELAY_SHIFT;
+
+                cur.t10 = (pp_off & PANEL_POWER_DOWN_DELAY_MASK) >>
+                        PANEL_POWER_DOWN_DELAY_SHIFT;
+
+                cur.t11_t12 = ((pp_div & PANEL_POWER_CYCLE_DELAY_MASK) >>
+                               PANEL_POWER_CYCLE_DELAY_SHIFT);
+
+                DRM_DEBUG_KMS("cur t1_t3 %d t8 %d t9 %d t10 %d t11_t12 %d\n",
+                              cur.t1_t3, cur.t8, cur.t9, cur.t10, cur.t11_t12);
+
+
+		intel_dp->panel_power_up_delay = cur.t1_t3 / 10;
+                intel_dp->backlight_on_delay = cur.t8 / 10;
+                intel_dp->backlight_off_delay = cur.t9 / 10;
+                intel_dp->panel_power_down_delay = cur.t10 / 10;
+                intel_dp->panel_power_cycle_delay = (cur.t11_t12 - 1) * 100;
+
+                DRM_DEBUG_KMS("panel power up delay %d, power down delay %d, power cycle delay %d\n",
+                              intel_dp->panel_power_up_delay, intel_dp->panel_power_down_delay,
+                              intel_dp->panel_power_cycle_delay);
+
+                DRM_DEBUG_KMS("backlight on delay %d, off delay %d\n",
+                              intel_dp->backlight_on_delay, intel_dp->backlight_off_delay);
+
+
+		cdv_intel_edp_panel_vdd_on(psb_intel_encoder);
+		ret = cdv_intel_dp_aux_native_read(psb_intel_encoder, DP_DPCD_REV,
+					       intel_dp->dpcd,
+					       sizeof(intel_dp->dpcd));
+		cdv_intel_edp_panel_vdd_off(psb_intel_encoder);
+		if (ret == 0) {
+			/* if this fails, presume the device is a ghost */
+			DRM_INFO("failed to retrieve link info, disabling eDP\n");
+			cdv_intel_dp_encoder_destroy(encoder);
+			cdv_intel_dp_destroy(connector);
+			goto err_priv;
+		} else {
+        		DRM_DEBUG_KMS("DPCD: Rev=%x LN_Rate=%x LN_CNT=%x LN_DOWNSP=%x\n",
+				intel_dp->dpcd[0], intel_dp->dpcd[1], 
+				intel_dp->dpcd[2], intel_dp->dpcd[3]);
+			
+		}
+		/* The CDV reference driver moves pnale backlight setup into the displays that
+		   have a backlight: this is a good idea and one we should probably adopt, however
+		   we need to migrate all the drivers before we can do that */
+                /*cdv_intel_panel_setup_backlight(dev); */
+	}
+	return;
+
+err_priv:
+	kfree(psb_intel_connector);
+err_connector:
+	kfree(psb_intel_encoder);
+}
diff --git a/drivers/gpu/drm/gma500/cdv_intel_hdmi.c b/drivers/gpu/drm/gma500/cdv_intel_hdmi.c
index a86f87b..7272a46 100644
--- a/drivers/gpu/drm/gma500/cdv_intel_hdmi.c
+++ b/drivers/gpu/drm/gma500/cdv_intel_hdmi.c
@@ -139,8 +139,6 @@
 {
 	struct psb_intel_encoder *psb_intel_encoder =
 					psb_intel_attached_encoder(connector);
-	struct psb_intel_connector *psb_intel_connector =
-					to_psb_intel_connector(connector);
 	struct mid_intel_hdmi_priv *hdmi_priv = psb_intel_encoder->dev_priv;
 	struct edid *edid = NULL;
 	enum drm_connector_status status = connector_status_disconnected;
@@ -157,8 +155,6 @@
 			hdmi_priv->has_hdmi_audio =
 						drm_detect_monitor_audio(edid);
 		}
-
-		psb_intel_connector->base.display_info.raw_edid = NULL;
 		kfree(edid);
 	}
 	return status;
@@ -352,9 +348,11 @@
 	switch (reg) {
 	case SDVOB:
 		ddc_bus = GPIOE;
+		psb_intel_encoder->ddi_select = DDI0_SELECT;
 		break;
 	case SDVOC:
 		ddc_bus = GPIOD;
+		psb_intel_encoder->ddi_select = DDI1_SELECT;
 		break;
 	default:
 		DRM_ERROR("unknown reg 0x%x for HDMI\n", reg);
diff --git a/drivers/gpu/drm/gma500/cdv_intel_lvds.c b/drivers/gpu/drm/gma500/cdv_intel_lvds.c
index c7f9468..b362dd3 100644
--- a/drivers/gpu/drm/gma500/cdv_intel_lvds.c
+++ b/drivers/gpu/drm/gma500/cdv_intel_lvds.c
@@ -506,16 +506,8 @@
 							property,
 							value))
 			return -1;
-		else {
-#ifdef CONFIG_BACKLIGHT_CLASS_DEVICE
-			struct drm_psb_private *dev_priv =
-						encoder->dev->dev_private;
-			struct backlight_device *bd =
-						dev_priv->backlight_device;
-			bd->props.brightness = value;
-			backlight_update_status(bd);
-#endif
-		}
+		else
+                        gma_backlight_set(encoder->dev, value);
 	} else if (!strcmp(property->name, "DPMS") && encoder) {
 		struct drm_encoder_helper_funcs *helpers =
 					encoder->helper_private;
diff --git a/drivers/gpu/drm/gma500/framebuffer.c b/drivers/gpu/drm/gma500/framebuffer.c
index 5732b57..884ba73 100644
--- a/drivers/gpu/drm/gma500/framebuffer.c
+++ b/drivers/gpu/drm/gma500/framebuffer.c
@@ -764,6 +764,13 @@
 		        crtc_mask = dev_priv->ops->hdmi_mask;
 			clone_mask = (1 << INTEL_OUTPUT_HDMI);
 			break;
+		case INTEL_OUTPUT_DISPLAYPORT:
+			crtc_mask = (1 << 0) | (1 << 1);
+			clone_mask = (1 << INTEL_OUTPUT_DISPLAYPORT);
+			break;
+		case INTEL_OUTPUT_EDP:
+			crtc_mask = (1 << 1);
+			clone_mask = (1 << INTEL_OUTPUT_EDP);
 		}
 		encoder->possible_crtcs = crtc_mask;
 		encoder->possible_clones =
diff --git a/drivers/gpu/drm/gma500/gem.c b/drivers/gpu/drm/gma500/gem.c
index fc7d144..df20546 100644
--- a/drivers/gpu/drm/gma500/gem.c
+++ b/drivers/gpu/drm/gma500/gem.c
@@ -36,7 +36,12 @@
 void psb_gem_free_object(struct drm_gem_object *obj)
 {
 	struct gtt_range *gtt = container_of(obj, struct gtt_range, gem);
-	drm_gem_object_release_wrap(obj);
+
+	/* Remove the list map if one is present */
+	if (obj->map_list.map)
+		drm_gem_free_mmap_offset(obj);
+	drm_gem_object_release(obj);
+
 	/* This must occur last as it frees up the memory of the GEM object */
 	psb_gtt_free_range(obj->dev, gtt);
 }
@@ -77,7 +82,7 @@
 
 	/* Make it mmapable */
 	if (!obj->map_list.map) {
-		ret = gem_create_mmap_offset(obj);
+		ret = drm_gem_create_mmap_offset(obj);
 		if (ret)
 			goto out;
 	}
diff --git a/drivers/gpu/drm/gma500/gem_glue.c b/drivers/gpu/drm/gma500/gem_glue.c
deleted file mode 100644
index 3c17634..0000000
--- a/drivers/gpu/drm/gma500/gem_glue.c
+++ /dev/null
@@ -1,90 +0,0 @@
-/**************************************************************************
- * Copyright (c) 2011, Intel Corporation.
- * All Rights Reserved.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
- *
- **************************************************************************/
-
-#include <drm/drmP.h>
-#include <drm/drm.h>
-#include "gem_glue.h"
-
-void drm_gem_object_release_wrap(struct drm_gem_object *obj)
-{
-	/* Remove the list map if one is present */
-	if (obj->map_list.map) {
-		struct drm_gem_mm *mm = obj->dev->mm_private;
-		struct drm_map_list *list = &obj->map_list;
-		drm_ht_remove_item(&mm->offset_hash, &list->hash);
-		drm_mm_put_block(list->file_offset_node);
-		kfree(list->map);
-		list->map = NULL;
-	}
-	drm_gem_object_release(obj);
-}
-
-/**
- *	gem_create_mmap_offset		-	invent an mmap offset
- *	@obj: our object
- *
- *	Standard implementation of offset generation for mmap as is
- *	duplicated in several drivers. This belongs in GEM.
- */
-int gem_create_mmap_offset(struct drm_gem_object *obj)
-{
-	struct drm_device *dev = obj->dev;
-	struct drm_gem_mm *mm = dev->mm_private;
-	struct drm_map_list *list;
-	struct drm_local_map *map;
-	int ret;
-
-	list = &obj->map_list;
-	list->map = kzalloc(sizeof(struct drm_map_list), GFP_KERNEL);
-	if (list->map == NULL)
-		return -ENOMEM;
-	map = list->map;
-	map->type = _DRM_GEM;
-	map->size = obj->size;
-	map->handle = obj;
-
-	list->file_offset_node = drm_mm_search_free(&mm->offset_manager,
-					obj->size / PAGE_SIZE, 0, 0);
-	if (!list->file_offset_node) {
-		dev_err(dev->dev, "failed to allocate offset for bo %d\n",
-								obj->name);
-		ret = -ENOSPC;
-		goto free_it;
-	}
-	list->file_offset_node = drm_mm_get_block(list->file_offset_node,
-					obj->size / PAGE_SIZE, 0);
-	if (!list->file_offset_node) {
-		ret = -ENOMEM;
-		goto free_it;
-	}
-	list->hash.key = list->file_offset_node->start;
-	ret = drm_ht_insert_item(&mm->offset_hash, &list->hash);
-	if (ret) {
-		dev_err(dev->dev, "failed to add to map hash\n");
-		goto free_mm;
-	}
-	return 0;
-
-free_mm:
-	drm_mm_put_block(list->file_offset_node);
-free_it:
-	kfree(list->map);
-	list->map = NULL;
-	return ret;
-}
diff --git a/drivers/gpu/drm/gma500/gem_glue.h b/drivers/gpu/drm/gma500/gem_glue.h
deleted file mode 100644
index ce5ce30..0000000
--- a/drivers/gpu/drm/gma500/gem_glue.h
+++ /dev/null
@@ -1,2 +0,0 @@
-extern void drm_gem_object_release_wrap(struct drm_gem_object *obj);
-extern int gem_create_mmap_offset(struct drm_gem_object *obj);
diff --git a/drivers/gpu/drm/gma500/intel_bios.c b/drivers/gpu/drm/gma500/intel_bios.c
index 8d7caf0..4fb79cf 100644
--- a/drivers/gpu/drm/gma500/intel_bios.c
+++ b/drivers/gpu/drm/gma500/intel_bios.c
@@ -54,6 +54,98 @@
 	return NULL;
 }
 
+static void
+parse_edp(struct drm_psb_private *dev_priv, struct bdb_header *bdb)
+{
+	struct bdb_edp *edp;
+	struct edp_power_seq *edp_pps;
+	struct edp_link_params *edp_link_params;
+	uint8_t	panel_type;
+
+	edp = find_section(bdb, BDB_EDP);
+	
+	dev_priv->edp.bpp = 18;
+	if (!edp) {
+		if (dev_priv->edp.support) {
+			DRM_DEBUG_KMS("No eDP BDB found but eDP panel supported, assume %dbpp panel color depth.\n",
+				      dev_priv->edp.bpp);
+		}
+		return;
+	}
+
+	panel_type = dev_priv->panel_type;
+	switch ((edp->color_depth >> (panel_type * 2)) & 3) {
+	case EDP_18BPP:
+		dev_priv->edp.bpp = 18;
+		break;
+	case EDP_24BPP:
+		dev_priv->edp.bpp = 24;
+		break;
+	case EDP_30BPP:
+		dev_priv->edp.bpp = 30;
+		break;
+	}
+
+	/* Get the eDP sequencing and link info */
+	edp_pps = &edp->power_seqs[panel_type];
+	edp_link_params = &edp->link_params[panel_type];
+
+	dev_priv->edp.pps = *edp_pps;
+
+	DRM_DEBUG_KMS("EDP timing in vbt t1_t3 %d t8 %d t9 %d t10 %d t11_t12 %d\n",
+				dev_priv->edp.pps.t1_t3, dev_priv->edp.pps.t8, 
+				dev_priv->edp.pps.t9, dev_priv->edp.pps.t10,
+				dev_priv->edp.pps.t11_t12);
+
+	dev_priv->edp.rate = edp_link_params->rate ? DP_LINK_BW_2_7 :
+		DP_LINK_BW_1_62;
+	switch (edp_link_params->lanes) {
+	case 0:
+		dev_priv->edp.lanes = 1;
+		break;
+	case 1:
+		dev_priv->edp.lanes = 2;
+		break;
+	case 3:
+	default:
+		dev_priv->edp.lanes = 4;
+		break;
+	}
+	DRM_DEBUG_KMS("VBT reports EDP: Lane_count %d, Lane_rate %d, Bpp %d\n",
+			dev_priv->edp.lanes, dev_priv->edp.rate, dev_priv->edp.bpp);
+
+	switch (edp_link_params->preemphasis) {
+	case 0:
+		dev_priv->edp.preemphasis = DP_TRAIN_PRE_EMPHASIS_0;
+		break;
+	case 1:
+		dev_priv->edp.preemphasis = DP_TRAIN_PRE_EMPHASIS_3_5;
+		break;
+	case 2:
+		dev_priv->edp.preemphasis = DP_TRAIN_PRE_EMPHASIS_6;
+		break;
+	case 3:
+		dev_priv->edp.preemphasis = DP_TRAIN_PRE_EMPHASIS_9_5;
+		break;
+	}
+	switch (edp_link_params->vswing) {
+	case 0:
+		dev_priv->edp.vswing = DP_TRAIN_VOLTAGE_SWING_400;
+		break;
+	case 1:
+		dev_priv->edp.vswing = DP_TRAIN_VOLTAGE_SWING_600;
+		break;
+	case 2:
+		dev_priv->edp.vswing = DP_TRAIN_VOLTAGE_SWING_800;
+		break;
+	case 3:
+		dev_priv->edp.vswing = DP_TRAIN_VOLTAGE_SWING_1200;
+		break;
+	}
+	DRM_DEBUG_KMS("VBT reports EDP: VSwing  %d, Preemph %d\n",
+			dev_priv->edp.vswing, dev_priv->edp.preemphasis);
+}
+
 static u16
 get_blocksize(void *p)
 {
@@ -154,6 +246,8 @@
 		return;
 
 	dev_priv->lvds_dither = lvds_options->pixel_dither;
+	dev_priv->panel_type = lvds_options->panel_type;
+
 	if (lvds_options->panel_type == 0xff)
 		return;
 
@@ -340,6 +434,9 @@
 	if (!driver)
 		return;
 
+	if (driver->lvds_config == BDB_DRIVER_FEATURE_EDP)
+		dev_priv->edp.support = 1;
+
 	/* This bit means to use 96Mhz for DPLL_A or not */
 	if (driver->primary_lfp_id)
 		dev_priv->dplla_96mhz = true;
@@ -437,6 +534,9 @@
 	size_t size;
 	int i;
 
+
+	dev_priv->panel_type = 0xff;
+
 	/* XXX Should this validation be moved to intel_opregion.c? */
 	if (dev_priv->opregion.vbt) {
 		struct vbt_header *vbt = dev_priv->opregion.vbt;
@@ -477,6 +577,7 @@
 	parse_sdvo_device_mapping(dev_priv, bdb);
 	parse_device_mapping(dev_priv, bdb);
 	parse_backlight_data(dev_priv, bdb);
+	parse_edp(dev_priv, bdb);
 
 	if (bios)
 		pci_unmap_rom(pdev, bios);
diff --git a/drivers/gpu/drm/gma500/intel_bios.h b/drivers/gpu/drm/gma500/intel_bios.h
index 2e95523..c6267c9 100644
--- a/drivers/gpu/drm/gma500/intel_bios.h
+++ b/drivers/gpu/drm/gma500/intel_bios.h
@@ -23,6 +23,7 @@
 #define _I830_BIOS_H_
 
 #include <drm/drmP.h>
+#include <drm/drm_dp_helper.h>
 
 struct vbt_header {
 	u8 signature[20];		/**< Always starts with 'VBT$' */
@@ -93,6 +94,7 @@
 #define BDB_SDVO_LVDS_PNP_IDS	 24
 #define BDB_SDVO_LVDS_POWER_SEQ	 25
 #define BDB_TV_OPTIONS		 26
+#define BDB_EDP			 27
 #define BDB_LVDS_OPTIONS	 40
 #define BDB_LVDS_LFP_DATA_PTRS	 41
 #define BDB_LVDS_LFP_DATA	 42
@@ -391,6 +393,11 @@
 	u8 panel_misc_bits_4;
 } __attribute__((packed));
 
+#define BDB_DRIVER_FEATURE_NO_LVDS		0
+#define BDB_DRIVER_FEATURE_INT_LVDS		1
+#define BDB_DRIVER_FEATURE_SDVO_LVDS		2
+#define BDB_DRIVER_FEATURE_EDP			3
+
 struct bdb_driver_features {
 	u8 boot_dev_algorithm:1;
 	u8 block_display_switch:1;
@@ -431,6 +438,45 @@
 	u8 custom_vbt_version;
 } __attribute__((packed));
 
+#define EDP_18BPP	0
+#define EDP_24BPP	1
+#define EDP_30BPP	2
+#define EDP_RATE_1_62	0
+#define EDP_RATE_2_7	1
+#define EDP_LANE_1	0
+#define EDP_LANE_2	1
+#define EDP_LANE_4	3
+#define EDP_PREEMPHASIS_NONE	0
+#define EDP_PREEMPHASIS_3_5dB	1
+#define EDP_PREEMPHASIS_6dB	2
+#define EDP_PREEMPHASIS_9_5dB	3
+#define EDP_VSWING_0_4V		0
+#define EDP_VSWING_0_6V		1
+#define EDP_VSWING_0_8V		2
+#define EDP_VSWING_1_2V		3
+
+struct edp_power_seq {
+	u16 t1_t3;
+	u16 t8;
+	u16 t9;
+	u16 t10;
+	u16 t11_t12;
+} __attribute__ ((packed));
+
+struct edp_link_params {
+	u8 rate:4;
+	u8 lanes:4;
+	u8 preemphasis:4;
+	u8 vswing:4;
+} __attribute__ ((packed));
+
+struct bdb_edp {
+	struct edp_power_seq power_seqs[16];
+	u32 color_depth;
+	u32 sdrrs_msa_timing_delay;
+	struct edp_link_params link_params[16];
+} __attribute__ ((packed));
+
 extern int psb_intel_init_bios(struct drm_device *dev);
 extern void psb_intel_destroy_bios(struct drm_device *dev);
 
diff --git a/drivers/gpu/drm/gma500/mdfld_dsi_output.c b/drivers/gpu/drm/gma500/mdfld_dsi_output.c
index 5675d93..32dba2a 100644
--- a/drivers/gpu/drm/gma500/mdfld_dsi_output.c
+++ b/drivers/gpu/drm/gma500/mdfld_dsi_output.c
@@ -299,17 +299,8 @@
 		if (drm_connector_property_set_value(connector, property,
 									value))
 			goto set_prop_error;
-		else {
-#ifdef CONFIG_BACKLIGHT_CLASS_DEVICE
-			struct backlight_device *psb_bd;
-
-			psb_bd = mdfld_get_backlight_device();
-			if (psb_bd) {
-				psb_bd->props.brightness = value;
-				mdfld_set_brightness(psb_bd);
-			}
-#endif
-		}
+		else
+			gma_backlight_set(encoder->dev, value);
 	}
 set_prop_done:
 	return 0;
diff --git a/drivers/gpu/drm/gma500/mid_bios.c b/drivers/gpu/drm/gma500/mid_bios.c
index b2a790b..850cd3f 100644
--- a/drivers/gpu/drm/gma500/mid_bios.c
+++ b/drivers/gpu/drm/gma500/mid_bios.c
@@ -118,20 +118,20 @@
 					dev_priv->platform_rev_id);
 }
 
-struct vbt_header {
+struct mid_vbt_header {
 	u32 signature;
 	u8 revision;
 } __packed;
 
 /* The same for r0 and r1 */
 struct vbt_r0 {
-	struct vbt_header vbt_header;
+	struct mid_vbt_header vbt_header;
 	u8 size;
 	u8 checksum;
 } __packed;
 
 struct vbt_r10 {
-	struct vbt_header vbt_header;
+	struct mid_vbt_header vbt_header;
 	u8 checksum;
 	u16 size;
 	u8 panel_count;
@@ -281,7 +281,7 @@
 	struct drm_device *dev = dev_priv->dev;
 	u32 addr;
 	u8 __iomem *vbt_virtual;
-	struct vbt_header vbt_header;
+	struct mid_vbt_header vbt_header;
 	struct pci_dev *pci_gfx_root = pci_get_bus_and_slot(0, PCI_DEVFN(2, 0));
 	int ret = -1;
 
diff --git a/drivers/gpu/drm/gma500/oaktrail_hdmi.c b/drivers/gpu/drm/gma500/oaktrail_hdmi.c
index 2eb3dc4..69e51e9 100644
--- a/drivers/gpu/drm/gma500/oaktrail_hdmi.c
+++ b/drivers/gpu/drm/gma500/oaktrail_hdmi.c
@@ -252,7 +252,6 @@
 	if (edid) {
 		drm_mode_connector_update_edid_property(connector, edid);
 		ret = drm_add_edid_modes(connector, edid);
-		connector->display_info.raw_edid = NULL;
 	}
 
 	/*
diff --git a/drivers/gpu/drm/gma500/opregion.c b/drivers/gpu/drm/gma500/opregion.c
index c430bd4..ad0d6de 100644
--- a/drivers/gpu/drm/gma500/opregion.c
+++ b/drivers/gpu/drm/gma500/opregion.c
@@ -166,8 +166,7 @@
 
 	if (config_enabled(CONFIG_BACKLIGHT_CLASS_DEVICE)) {
 		int max = bd->props.max_brightness;
-		bd->props.brightness = bclp * max / 255;
-		backlight_update_status(bd);
+		gma_backlight_set(dev, bclp * max / 255);
 	}
 
 	asle->cblv = (bclp * 0x64) / 0xff | ASLE_CBLV_VALID;
diff --git a/drivers/gpu/drm/gma500/psb_device.c b/drivers/gpu/drm/gma500/psb_device.c
index 5971bc8..f1432f0 100644
--- a/drivers/gpu/drm/gma500/psb_device.c
+++ b/drivers/gpu/drm/gma500/psb_device.c
@@ -290,6 +290,7 @@
 	case 6:
 	case 7:
 		dev_priv->core_freq = 266;
+		break;
 	default:
 		dev_priv->core_freq = 0;
 	}
diff --git a/drivers/gpu/drm/gma500/psb_drv.h b/drivers/gpu/drm/gma500/psb_drv.h
index 1bd115e..223ff5b 100644
--- a/drivers/gpu/drm/gma500/psb_drv.h
+++ b/drivers/gpu/drm/gma500/psb_drv.h
@@ -24,10 +24,10 @@
 
 #include <drm/drmP.h>
 #include "drm_global.h"
-#include "gem_glue.h"
 #include "gma_drm.h"
 #include "psb_reg.h"
 #include "psb_intel_drv.h"
+#include "intel_bios.h"
 #include "gtt.h"
 #include "power.h"
 #include "opregion.h"
@@ -613,6 +613,8 @@
 	 */
 	struct backlight_device *backlight_device;
 	struct drm_property *backlight_property;
+	bool backlight_enabled;
+	int backlight_level;
 	uint32_t blc_adj1;
 	uint32_t blc_adj2;
 
@@ -640,6 +642,19 @@
 	int mdfld_panel_id;
 
 	bool dplla_96mhz;	/* DPLL data from the VBT */
+
+	struct {
+		int rate;
+		int lanes;
+		int preemphasis;
+		int vswing;
+
+		bool initialized;
+		bool support;
+		int bpp;
+		struct edp_power_seq pps;
+	} edp;
+	uint8_t panel_type;
 };
 
 
@@ -796,6 +811,9 @@
 /* backlight.c */
 int gma_backlight_init(struct drm_device *dev);
 void gma_backlight_exit(struct drm_device *dev);
+void gma_backlight_disable(struct drm_device *dev);
+void gma_backlight_enable(struct drm_device *dev);
+void gma_backlight_set(struct drm_device *dev, int v);
 
 /* oaktrail_crtc.c */
 extern const struct drm_crtc_helper_funcs oaktrail_helper_funcs;
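
The three backlight helpers declared above are implemented in
backlight.c, which this diff does not touch. Judging from the new
backlight_enabled/backlight_level fields, the setter is roughly (a
sketch, not the literal patch):

	void gma_backlight_set(struct drm_device *dev, int v)
	{
		struct drm_psb_private *dev_priv = dev->dev_private;

		dev_priv->backlight_level = v;
		if (dev_priv->backlight_device && dev_priv->backlight_enabled) {
			dev_priv->backlight_device->props.brightness = v;
			backlight_update_status(dev_priv->backlight_device);
		}
	}

This single entry point is what lets the mdfld, opregion and LVDS
property paths in this patch drop their open-coded
CONFIG_BACKLIGHT_CLASS_DEVICE blocks.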
diff --git a/drivers/gpu/drm/gma500/psb_intel_drv.h b/drivers/gpu/drm/gma500/psb_intel_drv.h
index ebe1a28..90f2d11 100644
--- a/drivers/gpu/drm/gma500/psb_intel_drv.h
+++ b/drivers/gpu/drm/gma500/psb_intel_drv.h
@@ -29,10 +29,6 @@
  * Display related stuff
  */
 
-/* store information about an Ixxx DVO */
-/* The i830->i865 use multiple DVOs with multiple i2cs */
-/* the i915, i945 have a single sDVO i2c bus - which is different */
-#define MAX_OUTPUTS 6
 /* maximum connectors per crtcs in the mode set */
 #define INTELFB_CONN_LIMIT 4
 
@@ -69,6 +65,8 @@
 #define INTEL_OUTPUT_HDMI 6
 #define INTEL_OUTPUT_MIPI 7
 #define INTEL_OUTPUT_MIPI2 8
+#define INTEL_OUTPUT_DISPLAYPORT 9
+#define INTEL_OUTPUT_EDP 10
 
 #define INTEL_DVO_CHIP_NONE 0
 #define INTEL_DVO_CHIP_LVDS 1
@@ -133,6 +131,11 @@
 	void (*hot_plug)(struct psb_intel_encoder *);
 	int crtc_mask;
 	int clone_mask;
+	u32 ddi_select;	/* Channel info */
+#define DDI0_SELECT	0x01
+#define DDI1_SELECT	0x02
+#define DP_MASK		0x8000
+#define DDI_MASK	0x03
 	void *dev_priv; /* For sdvo_priv, lvds_priv, etc... */
 
 	/* FIXME: Either make SDVO and LVDS store it's i2c here or give CDV it's
@@ -190,7 +193,6 @@
 	u32 mode_flags;
 
 	bool active;
-	bool crtc_enable;
 
 	/* Saved Crtc HW states */
 	struct psb_intel_crtc_state *crtc_state;
@@ -285,4 +287,20 @@
 extern void gma_intel_gmbus_force_bit(struct i2c_adapter *adapter, bool force_bit);
 extern void gma_intel_teardown_gmbus(struct drm_device *dev);
 
+/* DP support */
+extern void cdv_intel_dp_init(struct drm_device *dev, struct psb_intel_mode_device *mode_dev, int output_reg);
+extern void cdv_intel_dp_set_m_n(struct drm_crtc *crtc,
+					struct drm_display_mode *mode,
+					struct drm_display_mode *adjusted_mode);
+
+extern void psb_intel_attach_force_audio_property(struct drm_connector *connector);
+extern void psb_intel_attach_broadcast_rgb_property(struct drm_connector *connector);
+
+extern int cdv_sb_read(struct drm_device *dev, u32 reg, u32 *val);
+extern int cdv_sb_write(struct drm_device *dev, u32 reg, u32 val);
+extern void cdv_sb_reset(struct drm_device *dev);
+
+extern void cdv_intel_attach_force_audio_property(struct drm_connector *connector);
+extern void cdv_intel_attach_broadcast_rgb_property(struct drm_connector *connector);
+
 #endif				/* __INTEL_DRV_H__ */
diff --git a/drivers/gpu/drm/gma500/psb_intel_lvds.c b/drivers/gpu/drm/gma500/psb_intel_lvds.c
index 37adc9e..2a4c3a9 100644
--- a/drivers/gpu/drm/gma500/psb_intel_lvds.c
+++ b/drivers/gpu/drm/gma500/psb_intel_lvds.c
@@ -630,17 +630,8 @@
 							property,
 							value))
 			goto set_prop_error;
-		else {
-#ifdef CONFIG_BACKLIGHT_CLASS_DEVICE
-			struct drm_psb_private *devp =
-						encoder->dev->dev_private;
-			struct backlight_device *bd = devp->backlight_device;
-			if (bd) {
-				bd->props.brightness = value;
-				backlight_update_status(bd);
-			}
-#endif
-		}
+		else
+			gma_backlight_set(encoder->dev, value);
 	} else if (!strcmp(property->name, "DPMS")) {
 		struct drm_encoder_helper_funcs *hfuncs
 						= encoder->helper_private;
diff --git a/drivers/gpu/drm/gma500/psb_intel_reg.h b/drivers/gpu/drm/gma500/psb_intel_reg.h
index 8e8c8ef..d914719 100644
--- a/drivers/gpu/drm/gma500/psb_intel_reg.h
+++ b/drivers/gpu/drm/gma500/psb_intel_reg.h
@@ -173,15 +173,46 @@
 #define PP_SEQUENCE_ON			(1 << 28)
 #define PP_SEQUENCE_OFF			(2 << 28)
 #define PP_SEQUENCE_MASK		0x30000000
+#define	PP_CYCLE_DELAY_ACTIVE		(1 << 27)
+#define	PP_SEQUENCE_STATE_ON_IDLE	(1 << 3)
+#define	PP_SEQUENCE_STATE_MASK		0x0000000f
+
 #define PP_CONTROL		0x61204
 #define POWER_TARGET_ON			(1 << 0)
+#define	PANEL_UNLOCK_REGS		(0xabcd << 16)
+#define	PANEL_UNLOCK_MASK		(0xffff << 16)
+#define	EDP_FORCE_VDD			(1 << 3)
+#define	EDP_BLC_ENABLE			(1 << 2)
+#define	PANEL_POWER_RESET		(1 << 1)
+#define	PANEL_POWER_OFF			(0 << 0)
+#define	PANEL_POWER_ON			(1 << 0)
 
+/* Poulsbo/Oaktrail */
 #define LVDSPP_ON		0x61208
 #define LVDSPP_OFF		0x6120c
 #define PP_CYCLE		0x61210
 
+/* Cedartrail */
 #define PP_ON_DELAYS		0x61208		/* Cedartrail */
+#define PANEL_PORT_SELECT_MASK 		(3 << 30)
+#define PANEL_PORT_SELECT_LVDS 		(0 << 30)
+#define PANEL_PORT_SELECT_EDP		(1 << 30)
+#define PANEL_POWER_UP_DELAY_MASK	(0x1fff0000)
+#define PANEL_POWER_UP_DELAY_SHIFT	16
+#define PANEL_LIGHT_ON_DELAY_MASK	(0x1fff)
+#define PANEL_LIGHT_ON_DELAY_SHIFT	0
+
 #define PP_OFF_DELAYS		0x6120c		/* Cedartrail */
+#define PANEL_POWER_DOWN_DELAY_MASK	(0x1fff0000)
+#define PANEL_POWER_DOWN_DELAY_SHIFT	16
+#define PANEL_LIGHT_OFF_DELAY_MASK	(0x1fff)
+#define PANEL_LIGHT_OFF_DELAY_SHIFT	0
+
+#define PP_DIVISOR		0x61210		/* Cedartrail */
+#define  PP_REFERENCE_DIVIDER_MASK	(0xffffff00)
+#define  PP_REFERENCE_DIVIDER_SHIFT	8
+#define  PANEL_POWER_CYCLE_DELAY_MASK	(0x1f)
+#define  PANEL_POWER_CYCLE_DELAY_SHIFT	0
 
 #define PFIT_CONTROL		0x61230
 #define PFIT_ENABLE			(1 << 31)
@@ -1282,6 +1313,10 @@
 # define VRHUNIT_CLOCK_GATE_DISABLE		(1 << 28) /* Fixed value on CDV */
 # define DPOUNIT_CLOCK_GATE_DISABLE		(1 << 11)
 # define DPIOUNIT_CLOCK_GATE_DISABLE		(1 << 6)
+# define DPUNIT_PIPEB_GATE_DISABLE		(1 << 30)
+# define DPUNIT_PIPEA_GATE_DISABLE		(1 << 25)
+# define DPCUNIT_CLOCK_GATE_DISABLE		(1 << 24)
+# define DPLSUNIT_CLOCK_GATE_DISABLE		(1 << 13)
 
 #define RAMCLK_GATE_D		0x6210
 
@@ -1347,5 +1382,165 @@
 #define LANE_PLL_ENABLE		(0x3 << 20)
 #define LANE_PLL_PIPE(p)	(((p) == 0) ? (1 << 21) : (0 << 21))
 
+#define DP_B				0x64100
+#define DP_C				0x64200
+
+#define   DP_PORT_EN			(1 << 31)
+#define   DP_PIPEB_SELECT		(1 << 30)
+#define   DP_PIPE_MASK			(1 << 30)
+
+/* Link training mode - select a suitable mode for each stage */
+#define   DP_LINK_TRAIN_PAT_1		(0 << 28)
+#define   DP_LINK_TRAIN_PAT_2		(1 << 28)
+#define   DP_LINK_TRAIN_PAT_IDLE	(2 << 28)
+#define   DP_LINK_TRAIN_OFF		(3 << 28)
+#define   DP_LINK_TRAIN_MASK		(3 << 28)
+#define   DP_LINK_TRAIN_SHIFT		28
+
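+/* Typical use during link training (illustrative): select PAT_1 for
+ * clock recovery, PAT_2 for channel equalization, then IDLE and
+ * finally OFF once the link is up, e.g.
+ *
+ *	dp &= ~DP_LINK_TRAIN_MASK;
+ *	dp |= DP_LINK_TRAIN_PAT_1;
+ *	REG_WRITE(DP_B, dp);
+ */
+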
+/* Signal voltages. These are mostly controlled by the other end */
+#define   DP_VOLTAGE_0_4		(0 << 25)
+#define   DP_VOLTAGE_0_6		(1 << 25)
+#define   DP_VOLTAGE_0_8		(2 << 25)
+#define   DP_VOLTAGE_1_2		(3 << 25)
+#define   DP_VOLTAGE_MASK		(7 << 25)
+#define   DP_VOLTAGE_SHIFT		25
+
+/* Signal pre-emphasis levels. Like the voltages, the other end tells us
+ * what it wants.
+ */
+#define   DP_PRE_EMPHASIS_0		(0 << 22)
+#define   DP_PRE_EMPHASIS_3_5		(1 << 22)
+#define   DP_PRE_EMPHASIS_6		(2 << 22)
+#define   DP_PRE_EMPHASIS_9_5		(3 << 22)
+#define   DP_PRE_EMPHASIS_MASK		(7 << 22)
+#define   DP_PRE_EMPHASIS_SHIFT		22
+
+/* How many wires to use. I guess 3 was too hard */
+#define   DP_PORT_WIDTH_1		(0 << 19)
+#define   DP_PORT_WIDTH_2		(1 << 19)
+#define   DP_PORT_WIDTH_4		(3 << 19)
+#define   DP_PORT_WIDTH_MASK		(7 << 19)
+
+/* Mystic DPCD version 1.1 special mode */
+#define   DP_ENHANCED_FRAMING		(1 << 18)
+
+/** locked once port is enabled */
+#define   DP_PORT_REVERSAL		(1 << 15)
+
+/** sends the clock on lane 15 of the PEG for debug */
+#define   DP_CLOCK_OUTPUT_ENABLE	(1 << 13)
+
+#define   DP_SCRAMBLING_DISABLE		(1 << 12)
+#define   DP_SCRAMBLING_DISABLE_IRONLAKE	(1 << 7)
+
+/** limit RGB values to avoid confusing TVs */
+#define   DP_COLOR_RANGE_16_235		(1 << 8)
+
+/** Turn on the audio link */
+#define   DP_AUDIO_OUTPUT_ENABLE	(1 << 6)
+
+/** vs and hs sync polarity */
+#define   DP_SYNC_VS_HIGH		(1 << 4)
+#define   DP_SYNC_HS_HIGH		(1 << 3)
+
+/** A fantasy */
+#define   DP_DETECTED			(1 << 2)
+
+/** The aux channel provides a way to talk to the
+ * signal sink for DDC etc. Max packet size supported
+ * is 20 bytes in each direction, hence the 5 fixed
+ * data registers
+ */
+#define DPB_AUX_CH_CTL			0x64110
+#define DPB_AUX_CH_DATA1		0x64114
+#define DPB_AUX_CH_DATA2		0x64118
+#define DPB_AUX_CH_DATA3		0x6411c
+#define DPB_AUX_CH_DATA4		0x64120
+#define DPB_AUX_CH_DATA5		0x64124
+
+#define DPC_AUX_CH_CTL			0x64210
+#define DPC_AUX_CH_DATA1		0x64214
+#define DPC_AUX_CH_DATA2		0x64218
+#define DPC_AUX_CH_DATA3		0x6421c
+#define DPC_AUX_CH_DATA4		0x64220
+#define DPC_AUX_CH_DATA5		0x64224
+
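+/* A transfer packs its payload big-endian, four bytes per DATA register,
+ * so the five registers cover the 20 byte maximum, roughly (helper name
+ * illustrative):
+ *
+ *	for (i = 0; i < send_bytes; i += 4)
+ *		REG_WRITE(ch_data + i, pack_aux(send + i, send_bytes - i));
+ */
+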
+#define   DP_AUX_CH_CTL_SEND_BUSY	    (1 << 31)
+#define   DP_AUX_CH_CTL_DONE		    (1 << 30)
+#define   DP_AUX_CH_CTL_INTERRUPT	    (1 << 29)
+#define   DP_AUX_CH_CTL_TIME_OUT_ERROR	    (1 << 28)
+#define   DP_AUX_CH_CTL_TIME_OUT_400us	    (0 << 26)
+#define   DP_AUX_CH_CTL_TIME_OUT_600us	    (1 << 26)
+#define   DP_AUX_CH_CTL_TIME_OUT_800us	    (2 << 26)
+#define   DP_AUX_CH_CTL_TIME_OUT_1600us	    (3 << 26)
+#define   DP_AUX_CH_CTL_TIME_OUT_MASK	    (3 << 26)
+#define   DP_AUX_CH_CTL_RECEIVE_ERROR	    (1 << 25)
+#define   DP_AUX_CH_CTL_MESSAGE_SIZE_MASK    (0x1f << 20)
+#define   DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT   20
+#define   DP_AUX_CH_CTL_PRECHARGE_2US_MASK   (0xf << 16)
+#define   DP_AUX_CH_CTL_PRECHARGE_2US_SHIFT  16
+#define   DP_AUX_CH_CTL_AUX_AKSV_SELECT	    (1 << 15)
+#define   DP_AUX_CH_CTL_MANCHESTER_TEST	    (1 << 14)
+#define   DP_AUX_CH_CTL_SYNC_TEST	    (1 << 13)
+#define   DP_AUX_CH_CTL_DEGLITCH_TEST	    (1 << 12)
+#define   DP_AUX_CH_CTL_PRECHARGE_TEST	    (1 << 11)
+#define   DP_AUX_CH_CTL_BIT_CLOCK_2X_MASK    (0x7ff)
+#define   DP_AUX_CH_CTL_BIT_CLOCK_2X_SHIFT   0
+
+/*
+ * Computing GMCH M and N values for the Display Port link
+ *
+ * GMCH M/N = (dot clock * bytes per pixel) / (ls_clk * # of lanes)
+ *
+ * ls_clk (we assume) is the DP link clock (1.62 or 2.7 GHz)
+ *
+ * The GMCH value is used internally
+ *
+ * bytes_per_pixel is the number of bytes coming out of the plane,
+ * which is after the LUTs, so we want the bytes for our color format.
+ * For our current usage, this is always 3, one byte for R, G and B.
+ */
+
+#define _PIPEA_GMCH_DATA_M			0x70050
+#define _PIPEB_GMCH_DATA_M			0x71050
+
+/* Transfer unit size for display port - 1, default is 0x3f (for TU size 64) */
+#define   PIPE_GMCH_DATA_M_TU_SIZE_MASK		(0x3f << 25)
+#define   PIPE_GMCH_DATA_M_TU_SIZE_SHIFT	25
+
+#define   PIPE_GMCH_DATA_M_MASK			(0xffffff)
+
+#define _PIPEA_GMCH_DATA_N			0x70054
+#define _PIPEB_GMCH_DATA_N			0x71054
+#define   PIPE_GMCH_DATA_N_MASK			(0xffffff)
+
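+/* Worked example (illustrative): a 65 MHz 1024x768 mode at 24bpp over
+ * two 1.62 GHz lanes gives M/N = (65000 * 3) / (162000 * 2); the driver
+ * reduces this ratio until both values fit the 24-bit fields above.
+ */
+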
+/*
+ * Computing Link M and N values for the Display Port link
+ *
+ * Link M / N = pixel_clock / ls_clk
+ *
+ * (the DP spec calls pixel_clock the 'strm_clk')
+ *
+ * The Link value is transmitted in the Main Stream
+ * Attributes and VB-ID.
+ */
+
+#define _PIPEA_DP_LINK_M				0x70060
+#define _PIPEB_DP_LINK_M				0x71060
+#define   PIPEA_DP_LINK_M_MASK			(0xffffff)
+
+#define _PIPEA_DP_LINK_N				0x70064
+#define _PIPEB_DP_LINK_N				0x71064
+#define   PIPEA_DP_LINK_N_MASK			(0xffffff)
+
+#define PIPE_GMCH_DATA_M(pipe) _PIPE(pipe, _PIPEA_GMCH_DATA_M, _PIPEB_GMCH_DATA_M)
+#define PIPE_GMCH_DATA_N(pipe) _PIPE(pipe, _PIPEA_GMCH_DATA_N, _PIPEB_GMCH_DATA_N)
+#define PIPE_DP_LINK_M(pipe) _PIPE(pipe, _PIPEA_DP_LINK_M, _PIPEB_DP_LINK_M)
+#define PIPE_DP_LINK_N(pipe) _PIPE(pipe, _PIPEA_DP_LINK_N, _PIPEB_DP_LINK_N)
+
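+/* _PIPE() selects the pipe A or pipe B variant of a register, so for
+ * example PIPE_DP_LINK_M(0) resolves to 0x70060 and PIPE_DP_LINK_M(1)
+ * to 0x71060.
+ */
+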
+#define   PIPE_BPC_MASK				(7 << 5)
+#define   PIPE_8BPC				(0 << 5)
+#define   PIPE_10BPC				(1 << 5)
+#define   PIPE_6BPC				(2 << 5)
 
 #endif
diff --git a/drivers/gpu/drm/gma500/psb_intel_sdvo.c b/drivers/gpu/drm/gma500/psb_intel_sdvo.c
index 0466c7b..d35f93b 100644
--- a/drivers/gpu/drm/gma500/psb_intel_sdvo.c
+++ b/drivers/gpu/drm/gma500/psb_intel_sdvo.c
@@ -1292,7 +1292,6 @@
 
 	return drm_get_edid(connector,
 			    &dev_priv->gmbus[dev_priv->crt_ddc_pin].adapter);
-	return NULL;
 }
 
 static enum drm_connector_status
@@ -1343,7 +1342,6 @@
 			}
 		} else
 			status = connector_status_disconnected;
-		connector->display_info.raw_edid = NULL;
 		kfree(edid);
 	}
 
@@ -1404,7 +1402,6 @@
 				ret = connector_status_disconnected;
 			else
 				ret = connector_status_connected;
-			connector->display_info.raw_edid = NULL;
 			kfree(edid);
 		} else
 			ret = connector_status_connected;
@@ -1453,7 +1450,6 @@
 			drm_add_edid_modes(connector, edid);
 		}
 
-		connector->display_info.raw_edid = NULL;
 		kfree(edid);
 	}
 }
diff --git a/drivers/gpu/drm/i2c/ch7006_drv.c b/drivers/gpu/drm/i2c/ch7006_drv.c
index 36d9522..599099f 100644
--- a/drivers/gpu/drm/i2c/ch7006_drv.c
+++ b/drivers/gpu/drm/i2c/ch7006_drv.c
@@ -427,15 +427,10 @@
 	return 0;
 }
 
-static int ch7006_suspend(struct i2c_client *client, pm_message_t mesg)
+static int ch7006_resume(struct device *dev)
 {
-	ch7006_dbg(client, "\n");
+	struct i2c_client *client = to_i2c_client(dev);
 
-	return 0;
-}
-
-static int ch7006_resume(struct i2c_client *client)
-{
 	ch7006_dbg(client, "\n");
 
 	ch7006_write(client, 0x3d, 0x0);
@@ -499,15 +494,18 @@
 };
 MODULE_DEVICE_TABLE(i2c, ch7006_ids);
 
+static const struct dev_pm_ops ch7006_pm_ops = {
+	.resume = ch7006_resume,
+};
+
 static struct drm_i2c_encoder_driver ch7006_driver = {
 	.i2c_driver = {
 		.probe = ch7006_probe,
 		.remove = ch7006_remove,
-		.suspend = ch7006_suspend,
-		.resume = ch7006_resume,
 
 		.driver = {
 			.name = "ch7006",
+			.pm = &ch7006_pm_ops,
 		},
 
 		.id_table = ch7006_ids,
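
The empty ch7006_suspend() simply goes away: a dev_pm_ops callback left
NULL means "nothing to do". A roughly equivalent, more compact form of
the table above (assuming only the one resume hook is wanted) would be

	static SIMPLE_DEV_PM_OPS(ch7006_pm_ops, NULL, ch7006_resume);

which also wires the same callback up for thaw and restore.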
diff --git a/drivers/gpu/drm/i915/Makefile b/drivers/gpu/drm/i915/Makefile
index b0bacdb..0f2c549 100644
--- a/drivers/gpu/drm/i915/Makefile
+++ b/drivers/gpu/drm/i915/Makefile
@@ -40,6 +40,7 @@
 	  dvo_ivch.o \
 	  dvo_tfp410.o \
 	  dvo_sil164.o \
+	  dvo_ns2501.o \
 	  i915_gem_dmabuf.o
 
 i915-$(CONFIG_COMPAT)   += i915_ioc32.o
diff --git a/drivers/gpu/drm/i915/dvo.h b/drivers/gpu/drm/i915/dvo.h
index 5891469..74b5efc 100644
--- a/drivers/gpu/drm/i915/dvo.h
+++ b/drivers/gpu/drm/i915/dvo.h
@@ -58,13 +58,12 @@
 	void (*create_resources)(struct intel_dvo_device *dvo);
 
 	/*
-	 * Turn on/off output or set intermediate power levels if available.
+	 * Turn on/off output.
 	 *
-	 * Unsupported intermediate modes drop to the lower power setting.
-	 * If the  mode is DPMSModeOff, the output must be disabled,
-	 * as the DPLL may be disabled afterwards.
+	 * Because none of our dvo drivers support intermediate power levels,
+	 * we don't expose this in the interface.
 	 */
-	void (*dpms)(struct intel_dvo_device *dvo, int mode);
+	void (*dpms)(struct intel_dvo_device *dvo, bool enable);
 
 	/*
 	 * Callback for testing a video mode for a given output.
@@ -115,6 +114,12 @@
 	 */
 	enum drm_connector_status (*detect)(struct intel_dvo_device *dvo);
 
+	/*
+	 * Probe the current hw status, returning true if the connected output
+	 * is active.
+	 */
+	bool (*get_hw_state)(struct intel_dvo_device *dev);
+
 	/**
 	 * Query the device for the modes it provides.
 	 *
@@ -140,5 +145,6 @@
 extern struct intel_dvo_dev_ops ivch_ops;
 extern struct intel_dvo_dev_ops tfp410_ops;
 extern struct intel_dvo_dev_ops ch7017_ops;
+extern struct intel_dvo_dev_ops ns2501_ops;
 
 #endif /* _INTEL_DVO_H */
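
All the dvo drivers below implement the new hook the same way: read back
the register their dpms() hook writes and report whether the output is
powered. The common shape, with made-up register and bit names, is:

	static bool foo_get_hw_state(struct intel_dvo_device *dvo)
	{
		u8 val;

		if (!foo_readb(dvo, FOO_POWER_REG, &val))
			return false;

		return !(val & FOO_POWER_DOWN);	/* hypothetical bit */
	}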
diff --git a/drivers/gpu/drm/i915/dvo_ch7017.c b/drivers/gpu/drm/i915/dvo_ch7017.c
index 1ca799a..86b27d1 100644
--- a/drivers/gpu/drm/i915/dvo_ch7017.c
+++ b/drivers/gpu/drm/i915/dvo_ch7017.c
@@ -163,7 +163,7 @@
 };
 
 static void ch7017_dump_regs(struct intel_dvo_device *dvo);
-static void ch7017_dpms(struct intel_dvo_device *dvo, int mode);
+static void ch7017_dpms(struct intel_dvo_device *dvo, bool enable);
 
 static bool ch7017_read(struct intel_dvo_device *dvo, u8 addr, u8 *val)
 {
@@ -309,7 +309,7 @@
 	lvds_power_down = CH7017_LVDS_POWER_DOWN_DEFAULT_RESERVED |
 			  (mode->hdisplay & 0x0700) >> 8;
 
-	ch7017_dpms(dvo, DRM_MODE_DPMS_OFF);
+	ch7017_dpms(dvo, false);
 	ch7017_write(dvo, CH7017_HORIZONTAL_ACTIVE_PIXEL_INPUT,
 			horizontal_active_pixel_input);
 	ch7017_write(dvo, CH7017_HORIZONTAL_ACTIVE_PIXEL_OUTPUT,
@@ -331,7 +331,7 @@
 }
 
 /* set the CH7017 power state */
-static void ch7017_dpms(struct intel_dvo_device *dvo, int mode)
+static void ch7017_dpms(struct intel_dvo_device *dvo, bool enable)
 {
 	uint8_t val;
 
@@ -345,7 +345,7 @@
 			CH7017_DAC3_POWER_DOWN |
 			CH7017_TV_POWER_DOWN_EN);
 
-	if (mode == DRM_MODE_DPMS_ON) {
+	if (enable) {
 		/* Turn on the LVDS */
 		ch7017_write(dvo, CH7017_LVDS_POWER_DOWN,
 			     val & ~CH7017_LVDS_POWER_DOWN_EN);
@@ -359,6 +359,18 @@
 	msleep(20);
 }
 
+static bool ch7017_get_hw_state(struct intel_dvo_device *dvo)
+{
+	uint8_t val;
+
+	ch7017_read(dvo, CH7017_LVDS_POWER_DOWN, &val);
+
+	if (val & CH7017_LVDS_POWER_DOWN_EN)
+		return false;
+	else
+		return true;
+}
+
 static void ch7017_dump_regs(struct intel_dvo_device *dvo)
 {
 	uint8_t val;
@@ -396,6 +408,7 @@
 	.mode_valid = ch7017_mode_valid,
 	.mode_set = ch7017_mode_set,
 	.dpms = ch7017_dpms,
+	.get_hw_state = ch7017_get_hw_state,
 	.dump_regs = ch7017_dump_regs,
 	.destroy = ch7017_destroy,
 };
diff --git a/drivers/gpu/drm/i915/dvo_ch7xxx.c b/drivers/gpu/drm/i915/dvo_ch7xxx.c
index 4a03660..38f3a6c 100644
--- a/drivers/gpu/drm/i915/dvo_ch7xxx.c
+++ b/drivers/gpu/drm/i915/dvo_ch7xxx.c
@@ -289,14 +289,26 @@
 }
 
 /* set the CH7xxx power state */
-static void ch7xxx_dpms(struct intel_dvo_device *dvo, int mode)
+static void ch7xxx_dpms(struct intel_dvo_device *dvo, bool enable)
 {
-	if (mode == DRM_MODE_DPMS_ON)
+	if (enable)
 		ch7xxx_writeb(dvo, CH7xxx_PM, CH7xxx_PM_DVIL | CH7xxx_PM_DVIP);
 	else
 		ch7xxx_writeb(dvo, CH7xxx_PM, CH7xxx_PM_FPD);
 }
 
+static bool ch7xxx_get_hw_state(struct intel_dvo_device *dvo)
+{
+	u8 val;
+
+	ch7xxx_readb(dvo, CH7xxx_PM, &val);
+
+	if (val & CH7xxx_PM_FPD)
+		return false;
+	else
+		return true;
+}
+
 static void ch7xxx_dump_regs(struct intel_dvo_device *dvo)
 {
 	int i;
@@ -326,6 +338,7 @@
 	.mode_valid = ch7xxx_mode_valid,
 	.mode_set = ch7xxx_mode_set,
 	.dpms = ch7xxx_dpms,
+	.get_hw_state = ch7xxx_get_hw_state,
 	.dump_regs = ch7xxx_dump_regs,
 	.destroy = ch7xxx_destroy,
 };
diff --git a/drivers/gpu/drm/i915/dvo_ivch.c b/drivers/gpu/drm/i915/dvo_ivch.c
index 04f2893..baaf65b 100644
--- a/drivers/gpu/drm/i915/dvo_ivch.c
+++ b/drivers/gpu/drm/i915/dvo_ivch.c
@@ -288,7 +288,7 @@
 }
 
 /** Sets the power state of the panel connected to the ivch */
-static void ivch_dpms(struct intel_dvo_device *dvo, int mode)
+static void ivch_dpms(struct intel_dvo_device *dvo, bool enable)
 {
 	int i;
 	uint16_t vr01, vr30, backlight;
@@ -297,13 +297,13 @@
 	if (!ivch_read(dvo, VR01, &vr01))
 		return;
 
-	if (mode == DRM_MODE_DPMS_ON)
+	if (enable)
 		backlight = 1;
 	else
 		backlight = 0;
 	ivch_write(dvo, VR80, backlight);
 
-	if (mode == DRM_MODE_DPMS_ON)
+	if (enable)
 		vr01 |= VR01_LCD_ENABLE | VR01_DVO_ENABLE;
 	else
 		vr01 &= ~(VR01_LCD_ENABLE | VR01_DVO_ENABLE);
@@ -315,7 +315,7 @@
 		if (!ivch_read(dvo, VR30, &vr30))
 			break;
 
-		if (((vr30 & VR30_PANEL_ON) != 0) == (mode == DRM_MODE_DPMS_ON))
+		if (((vr30 & VR30_PANEL_ON) != 0) == enable)
 			break;
 		udelay(1000);
 	}
@@ -323,6 +323,20 @@
 	udelay(16 * 1000);
 }
 
+static bool ivch_get_hw_state(struct intel_dvo_device *dvo)
+{
+	uint16_t vr01;
+
+	/* Set the new power state of the panel. */
+	if (!ivch_read(dvo, VR01, &vr01))
+		return false;
+
+	if (vr01 & VR01_LCD_ENABLE)
+		return true;
+	else
+		return false;
+}
+
 static void ivch_mode_set(struct intel_dvo_device *dvo,
 			  struct drm_display_mode *mode,
 			  struct drm_display_mode *adjusted_mode)
@@ -413,6 +427,7 @@
 struct intel_dvo_dev_ops ivch_ops = {
 	.init = ivch_init,
 	.dpms = ivch_dpms,
+	.get_hw_state = ivch_get_hw_state,
 	.mode_valid = ivch_mode_valid,
 	.mode_set = ivch_mode_set,
 	.detect = ivch_detect,
diff --git a/drivers/gpu/drm/i915/dvo_ns2501.c b/drivers/gpu/drm/i915/dvo_ns2501.c
new file mode 100644
index 0000000..c4a255b
--- /dev/null
+++ b/drivers/gpu/drm/i915/dvo_ns2501.c
@@ -0,0 +1,588 @@
+/*
+ *
+ * Copyright (c) 2012 Gilles Dartiguelongue, Thomas Richter
+ *
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#include "dvo.h"
+#include "i915_reg.h"
+#include "i915_drv.h"
+
+#define NS2501_VID 0x1305
+#define NS2501_DID 0x6726
+
+#define NS2501_VID_LO 0x00
+#define NS2501_VID_HI 0x01
+#define NS2501_DID_LO 0x02
+#define NS2501_DID_HI 0x03
+#define NS2501_REV 0x04
+#define NS2501_RSVD 0x05
+#define NS2501_FREQ_LO 0x06
+#define NS2501_FREQ_HI 0x07
+
+#define NS2501_REG8 0x08
+#define NS2501_8_VEN (1<<5)
+#define NS2501_8_HEN (1<<4)
+#define NS2501_8_DSEL (1<<3)
+#define NS2501_8_BPAS (1<<2)
+#define NS2501_8_RSVD (1<<1)
+#define NS2501_8_PD (1<<0)
+
+#define NS2501_REG9 0x09
+#define NS2501_9_VLOW (1<<7)
+#define NS2501_9_MSEL_MASK (0x7<<4)
+#define NS2501_9_TSEL (1<<3)
+#define NS2501_9_RSEN (1<<2)
+#define NS2501_9_RSVD (1<<1)
+#define NS2501_9_MDI (1<<0)
+
+#define NS2501_REGC 0x0c
+
+struct ns2501_priv {
+	/* I2CDevRec d; */
+	bool quiet;
+	int reg_8_shadow;
+	int reg_8_set;
+	/* Shadow registers for i915 */
+	int dvoc;
+	int pll_a;
+	int srcdim;
+	int fw_blc;
+};
+
+#define NSPTR(d) ((NS2501Ptr)(d->DriverPrivate.ptr))
+
+/*
+ * For reasons unclear to me, the ns2501 at least on the Fujitsu/Siemens
+ * laptops does not react on the i2c bus unless
+ * both the PLL is running and the display is configured in its native
+ * resolution.
+ * This function forces the DVO on, and stores the registers it touches.
+ * Afterwards, registers are restored to regular values.
+ *
+ * This is pretty much a hack, though it works.
+ * Without that, ns2501_readb and ns2501_writeb fail
+ * when switching the resolution.
+ */
+
+static void enable_dvo(struct intel_dvo_device *dvo)
+{
+	struct ns2501_priv *ns = (struct ns2501_priv *)(dvo->dev_priv);
+	struct i2c_adapter *adapter = dvo->i2c_bus;
+	struct intel_gmbus *bus = container_of(adapter,
+					       struct intel_gmbus,
+					       adapter);
+	struct drm_i915_private *dev_priv = bus->dev_priv;
+
+	DRM_DEBUG_KMS("%s: Trying to re-enable the DVO\n", __FUNCTION__);
+
+	ns->dvoc = I915_READ(DVO_C);
+	ns->pll_a = I915_READ(_DPLL_A);
+	ns->srcdim = I915_READ(DVOC_SRCDIM);
+	ns->fw_blc = I915_READ(FW_BLC);
+
+	I915_WRITE(DVOC, 0x10004084);
+	I915_WRITE(_DPLL_A, 0xd0820000);
+	I915_WRITE(DVOC_SRCDIM, 0x400300);	/* 1024x768 */
+	I915_WRITE(FW_BLC, 0x1080304);
+
+	I915_WRITE(DVOC, 0x90004084);
+}
+
+/*
+ * Restore the I915 registers modified by the above
+ * trigger function.
+ */
+static void restore_dvo(struct intel_dvo_device *dvo)
+{
+	struct i2c_adapter *adapter = dvo->i2c_bus;
+	struct intel_gmbus *bus = container_of(adapter,
+					       struct intel_gmbus,
+					       adapter);
+	struct drm_i915_private *dev_priv = bus->dev_priv;
+	struct ns2501_priv *ns = (struct ns2501_priv *)(dvo->dev_priv);
+
+	I915_WRITE(DVOC, ns->dvoc);
+	I915_WRITE(_DPLL_A, ns->pll_a);
+	I915_WRITE(DVOC_SRCDIM, ns->srcdim);
+	I915_WRITE(FW_BLC, ns->fw_blc);
+}
+
+/*
+ * Read a register from the ns2501.
+ * Returns true if successful, false otherwise.
+ * If it returns false, it might be wise to enable the
+ * DVO with the above function.
+ */
+static bool ns2501_readb(struct intel_dvo_device *dvo, int addr, uint8_t * ch)
+{
+	struct ns2501_priv *ns = dvo->dev_priv;
+	struct i2c_adapter *adapter = dvo->i2c_bus;
+	u8 out_buf[2];
+	u8 in_buf[2];
+
+	struct i2c_msg msgs[] = {
+		{
+		 .addr = dvo->slave_addr,
+		 .flags = 0,
+		 .len = 1,
+		 .buf = out_buf,
+		 },
+		{
+		 .addr = dvo->slave_addr,
+		 .flags = I2C_M_RD,
+		 .len = 1,
+		 .buf = in_buf,
+		 }
+	};
+
+	out_buf[0] = addr;
+	out_buf[1] = 0;
+
+	if (i2c_transfer(adapter, msgs, 2) == 2) {
+		*ch = in_buf[0];
+		return true;
+	}
+
+	if (!ns->quiet) {
+		DRM_DEBUG_KMS
+		    ("Unable to read register 0x%02x from %s:0x%02x.\n", addr,
+		     adapter->name, dvo->slave_addr);
+	}
+
+	return false;
+}
+
+/*
+ * Write a register to the ns2501.
+ * Returns true if successful, false otherwise.
+ * If it returns false, it might be wise to enable the
+ * DVO with the above function.
+ */
+static bool ns2501_writeb(struct intel_dvo_device *dvo, int addr, uint8_t ch)
+{
+	struct ns2501_priv *ns = dvo->dev_priv;
+	struct i2c_adapter *adapter = dvo->i2c_bus;
+	uint8_t out_buf[2];
+
+	struct i2c_msg msg = {
+		.addr = dvo->slave_addr,
+		.flags = 0,
+		.len = 2,
+		.buf = out_buf,
+	};
+
+	out_buf[0] = addr;
+	out_buf[1] = ch;
+
+	if (i2c_transfer(adapter, &msg, 1) == 1) {
+		return true;
+	}
+
+	if (!ns->quiet) {
+		DRM_DEBUG_KMS("Unable to write register 0x%02x to %s:%d\n",
+			      addr, adapter->name, dvo->slave_addr);
+	}
+
+	return false;
+}
+
+/* National Semiconductor 2501 driver for a chip on the i2c bus:
+ * scan for the chip on the bus.
+ * We hope the VBIOS initialized the PLL correctly so we can
+ * talk to it. If not, the chip will not be seen and not detected.
+ * Bummer!
+ */
+static bool ns2501_init(struct intel_dvo_device *dvo,
+			struct i2c_adapter *adapter)
+{
+	/* this will detect the NS2501 chip on the specified i2c bus */
+	struct ns2501_priv *ns;
+	unsigned char ch;
+
+	ns = kzalloc(sizeof(struct ns2501_priv), GFP_KERNEL);
+	if (ns == NULL)
+		return false;
+
+	dvo->i2c_bus = adapter;
+	dvo->dev_priv = ns;
+	ns->quiet = true;
+
+	if (!ns2501_readb(dvo, NS2501_VID_LO, &ch))
+		goto out;
+
+	if (ch != (NS2501_VID & 0xff)) {
+		DRM_DEBUG_KMS("ns2501 not detected got %d: from %s Slave %d.\n",
+			      ch, adapter->name, dvo->slave_addr);
+		goto out;
+	}
+
+	if (!ns2501_readb(dvo, NS2501_DID_LO, &ch))
+		goto out;
+
+	if (ch != (NS2501_DID & 0xff)) {
+		DRM_DEBUG_KMS("ns2501 not detected got %d: from %s Slave %d.\n",
+			      ch, adapter->name, dvo->slave_addr);
+		goto out;
+	}
+	ns->quiet = false;
+	ns->reg_8_set = 0;
+	ns->reg_8_shadow =
+	    NS2501_8_PD | NS2501_8_BPAS | NS2501_8_VEN | NS2501_8_HEN;
+
+	DRM_DEBUG_KMS("init ns2501 dvo controller successfully!\n");
+	return true;
+
+out:
+	kfree(ns);
+	return false;
+}
+
+static enum drm_connector_status ns2501_detect(struct intel_dvo_device *dvo)
+{
+	/*
+	 * This is a laptop display: it doesn't have hotplugging.
+	 * Even if it did, the detection bit of the 2501 is unreliable as
+	 * it only works for some display types.
+	 * It is even less reliable as the PLL must be running
+	 * to allow reading from the chip at all.
+	 */
+	return connector_status_connected;
+}
+
+static enum drm_mode_status ns2501_mode_valid(struct intel_dvo_device *dvo,
+					      struct drm_display_mode *mode)
+{
+	DRM_DEBUG_KMS
+	    ("%s: is mode valid (hdisplay=%d,htotal=%d,vdisplay=%d,vtotal=%d)\n",
+	     __FUNCTION__, mode->hdisplay, mode->htotal, mode->vdisplay,
+	     mode->vtotal);
+
+	/*
+	 * Currently, these are all the modes I have data for.
+	 * More might exist. It is unclear how to find the native
+	 * resolution of the panel here; otherwise we could always
+	 * accept a mode by disabling the scaler.
+	 */
+	if ((mode->hdisplay == 800 && mode->vdisplay == 600) ||
+	    (mode->hdisplay == 640 && mode->vdisplay == 480) ||
+	    (mode->hdisplay == 1024 && mode->vdisplay == 768)) {
+		return MODE_OK;
+	} else {
+		return MODE_ONE_SIZE;	/* Is this a reasonable error? */
+	}
+}
+
+static void ns2501_mode_set(struct intel_dvo_device *dvo,
+			    struct drm_display_mode *mode,
+			    struct drm_display_mode *adjusted_mode)
+{
+	bool ok;
+	bool restore = false;
+	struct ns2501_priv *ns = (struct ns2501_priv *)(dvo->dev_priv);
+
+	DRM_DEBUG_KMS
+	    ("%s: set mode (hdisplay=%d,htotal=%d,vdisplay=%d,vtotal=%d).\n",
+	     __FUNCTION__, mode->hdisplay, mode->htotal, mode->vdisplay,
+	     mode->vtotal);
+
+	/*
+	 * Where do I find the native resolution for which scaling is not required?
+	 *
+	 * First trigger the DVO on, as otherwise the chip does not appear
+	 * on the i2c bus.
+	 */
+	do {
+		ok = true;
+
+		if (mode->hdisplay == 800 && mode->vdisplay == 600) {
+			/* mode 277 */
+			ns->reg_8_shadow &= ~NS2501_8_BPAS;
+			DRM_DEBUG_KMS("%s: switching to 800x600\n",
+				      __FUNCTION__);
+
+			/*
+			 * No, I do not know where this data comes from.
+			 * It is just what the video bios left in the DVO, so
+			 * I'm just copying it over here.
+			 * This also means that I cannot support any modes
+			 * other than the ones supported by the bios.
+			 */
+			ok &= ns2501_writeb(dvo, 0x11, 0xc8);	// 0xc7 also works.
+			ok &= ns2501_writeb(dvo, 0x1b, 0x19);
+			ok &= ns2501_writeb(dvo, 0x1c, 0x62);	// VBIOS left 0x64 here, but 0x62 works nicer
+			ok &= ns2501_writeb(dvo, 0x1d, 0x02);
+
+			ok &= ns2501_writeb(dvo, 0x34, 0x03);
+			ok &= ns2501_writeb(dvo, 0x35, 0xff);
+
+			ok &= ns2501_writeb(dvo, 0x80, 0x27);
+			ok &= ns2501_writeb(dvo, 0x81, 0x03);
+			ok &= ns2501_writeb(dvo, 0x82, 0x41);
+			ok &= ns2501_writeb(dvo, 0x83, 0x05);
+
+			ok &= ns2501_writeb(dvo, 0x8d, 0x02);
+			ok &= ns2501_writeb(dvo, 0x8e, 0x04);
+			ok &= ns2501_writeb(dvo, 0x8f, 0x00);
+
+			ok &= ns2501_writeb(dvo, 0x90, 0xfe);	/* vertical. VBIOS left 0xff here, but 0xfe works better */
+			ok &= ns2501_writeb(dvo, 0x91, 0x07);
+			ok &= ns2501_writeb(dvo, 0x94, 0x00);
+			ok &= ns2501_writeb(dvo, 0x95, 0x00);
+
+			ok &= ns2501_writeb(dvo, 0x96, 0x00);
+
+			ok &= ns2501_writeb(dvo, 0x99, 0x00);
+			ok &= ns2501_writeb(dvo, 0x9a, 0x88);
+
+			ok &= ns2501_writeb(dvo, 0x9c, 0x23);	/* Looks like first and last line of the image. */
+			ok &= ns2501_writeb(dvo, 0x9d, 0x00);
+			ok &= ns2501_writeb(dvo, 0x9e, 0x25);
+			ok &= ns2501_writeb(dvo, 0x9f, 0x03);
+
+			ok &= ns2501_writeb(dvo, 0xa4, 0x80);
+
+			ok &= ns2501_writeb(dvo, 0xb6, 0x00);
+
+			ok &= ns2501_writeb(dvo, 0xb9, 0xc8);	/* horizontal? */
+			ok &= ns2501_writeb(dvo, 0xba, 0x00);	/* horizontal? */
+
+			ok &= ns2501_writeb(dvo, 0xc0, 0x05);	/* horizontal? */
+			ok &= ns2501_writeb(dvo, 0xc1, 0xd7);
+
+			ok &= ns2501_writeb(dvo, 0xc2, 0x00);
+			ok &= ns2501_writeb(dvo, 0xc3, 0xf8);
+
+			ok &= ns2501_writeb(dvo, 0xc4, 0x03);
+			ok &= ns2501_writeb(dvo, 0xc5, 0x1a);
+
+			ok &= ns2501_writeb(dvo, 0xc6, 0x00);
+			ok &= ns2501_writeb(dvo, 0xc7, 0x73);
+			ok &= ns2501_writeb(dvo, 0xc8, 0x02);
+
+		} else if (mode->hdisplay == 640 && mode->vdisplay == 480) {
+			/* mode 274 */
+			DRM_DEBUG_KMS("%s: switching to 640x480\n",
+				      __FUNCTION__);
+			/*
+			 * No, I do not know where this data comes from.
+			 * It is just what the video bios left in the DVO, so
+			 * I'm just copying it over here.
+			 * This also means that I cannot support any modes
+			 * other than the ones supported by the bios.
+			 */
+			ns->reg_8_shadow &= ~NS2501_8_BPAS;
+
+			ok &= ns2501_writeb(dvo, 0x11, 0xa0);
+			ok &= ns2501_writeb(dvo, 0x1b, 0x11);
+			ok &= ns2501_writeb(dvo, 0x1c, 0x54);
+			ok &= ns2501_writeb(dvo, 0x1d, 0x03);
+
+			ok &= ns2501_writeb(dvo, 0x34, 0x03);
+			ok &= ns2501_writeb(dvo, 0x35, 0xff);
+
+			ok &= ns2501_writeb(dvo, 0x80, 0xff);
+			ok &= ns2501_writeb(dvo, 0x81, 0x07);
+			ok &= ns2501_writeb(dvo, 0x82, 0x3d);
+			ok &= ns2501_writeb(dvo, 0x83, 0x05);
+
+			ok &= ns2501_writeb(dvo, 0x8d, 0x02);
+			ok &= ns2501_writeb(dvo, 0x8e, 0x10);
+			ok &= ns2501_writeb(dvo, 0x8f, 0x00);
+
+			ok &= ns2501_writeb(dvo, 0x90, 0xff);	/* vertical */
+			ok &= ns2501_writeb(dvo, 0x91, 0x07);
+			ok &= ns2501_writeb(dvo, 0x94, 0x00);
+			ok &= ns2501_writeb(dvo, 0x95, 0x00);
+
+			ok &= ns2501_writeb(dvo, 0x96, 0x05);
+
+			ok &= ns2501_writeb(dvo, 0x99, 0x00);
+			ok &= ns2501_writeb(dvo, 0x9a, 0x88);
+
+			ok &= ns2501_writeb(dvo, 0x9c, 0x24);
+			ok &= ns2501_writeb(dvo, 0x9d, 0x00);
+			ok &= ns2501_writeb(dvo, 0x9e, 0x25);
+			ok &= ns2501_writeb(dvo, 0x9f, 0x03);
+
+			ok &= ns2501_writeb(dvo, 0xa4, 0x84);
+
+			ok &= ns2501_writeb(dvo, 0xb6, 0x09);
+
+			ok &= ns2501_writeb(dvo, 0xb9, 0xa0);	/* horizontal? */
+			ok &= ns2501_writeb(dvo, 0xba, 0x00);	/* horizontal? */
+
+			ok &= ns2501_writeb(dvo, 0xc0, 0x05);	/* horizontal? */
+			ok &= ns2501_writeb(dvo, 0xc1, 0x90);
+
+			ok &= ns2501_writeb(dvo, 0xc2, 0x00);
+			ok &= ns2501_writeb(dvo, 0xc3, 0x0f);
+
+			ok &= ns2501_writeb(dvo, 0xc4, 0x03);
+			ok &= ns2501_writeb(dvo, 0xc5, 0x16);
+
+			ok &= ns2501_writeb(dvo, 0xc6, 0x00);
+			ok &= ns2501_writeb(dvo, 0xc7, 0x02);
+			ok &= ns2501_writeb(dvo, 0xc8, 0x02);
+
+		} else if (mode->hdisplay == 1024 && mode->vdisplay == 768) {
+			/* mode 280 */
+			DRM_DEBUG_KMS("%s: switching to 1024x768\n",
+				      __FUNCTION__);
+			/*
+			 * This might or might not work, actually. I'm silently
+			 * assuming here that the native panel resolution is
+			 * 1024x768. If not, this leaves the scaler disabled,
+			 * generating a picture that is likely not what is expected.
+			 *
+			 * The problem is that I do not know where to take the
+			 * panel dimensions from.
+			 *
+			 * Enable the bypass; scaling is not required.
+			 *
+			 * The scaler registers are irrelevant here.
+			 */
+			ns->reg_8_shadow |= NS2501_8_BPAS;
+			ok &= ns2501_writeb(dvo, 0x37, 0x44);
+		} else {
+			/*
+			 * Data not known. Bummer!
+			 * Hopefully, the code should not go here
+			 * as mode_OK delivered no other modes.
+			 */
+			ns->reg_8_shadow |= NS2501_8_BPAS;
+		}
+		ok &= ns2501_writeb(dvo, NS2501_REG8, ns->reg_8_shadow);
+
+		if (!ok) {
+			if (restore)
+				restore_dvo(dvo);
+			enable_dvo(dvo);
+			restore = true;
+		}
+	} while (!ok);
+	/*
+	 * Restore the old i915 registers before
+	 * forcing the ns2501 on.
+	 */
+	if (restore)
+		restore_dvo(dvo);
+}
+
+/* read back the current NS2501 power state */
+static bool ns2501_get_hw_state(struct intel_dvo_device *dvo)
+{
+	unsigned char ch;
+
+	if (!ns2501_readb(dvo, NS2501_REG8, &ch))
+		return false;
+
+	if (ch & NS2501_8_PD)
+		return true;
+	else
+		return false;
+}
+
+/* set the NS2501 power state */
+static void ns2501_dpms(struct intel_dvo_device *dvo, bool enable)
+{
+	bool ok;
+	bool restore = false;
+	struct ns2501_priv *ns = (struct ns2501_priv *)(dvo->dev_priv);
+	unsigned char ch;
+
+	DRM_DEBUG_KMS("%s: Trying set the dpms of the DVO to %i\n",
+		      __FUNCTION__, enable);
+
+	ch = ns->reg_8_shadow;
+
+	if (enable)
+		ch |= NS2501_8_PD;
+	else
+		ch &= ~NS2501_8_PD;
+
+	if (ns->reg_8_set == 0 || ns->reg_8_shadow != ch) {
+		ns->reg_8_set = 1;
+		ns->reg_8_shadow = ch;
+
+		do {
+			ok = true;
+			ok &= ns2501_writeb(dvo, NS2501_REG8, ch);
+			ok &=
+			    ns2501_writeb(dvo, 0x34,
+					  enable ? 0x03 : 0x00);
+			ok &=
+			    ns2501_writeb(dvo, 0x35,
+					  enable ? 0xff : 0x00);
+			if (!ok) {
+				if (restore)
+					restore_dvo(dvo);
+				enable_dvo(dvo);
+				restore = true;
+			}
+		} while (!ok);
+
+		if (restore)
+			restore_dvo(dvo);
+	}
+}
+
+static void ns2501_dump_regs(struct intel_dvo_device *dvo)
+{
+	uint8_t val;
+
+	ns2501_readb(dvo, NS2501_FREQ_LO, &val);
+	DRM_LOG_KMS("NS2501_FREQ_LO: 0x%02x\n", val);
+	ns2501_readb(dvo, NS2501_FREQ_HI, &val);
+	DRM_LOG_KMS("NS2501_FREQ_HI: 0x%02x\n", val);
+	ns2501_readb(dvo, NS2501_REG8, &val);
+	DRM_LOG_KMS("NS2501_REG8: 0x%02x\n", val);
+	ns2501_readb(dvo, NS2501_REG9, &val);
+	DRM_LOG_KMS("NS2501_REG9: 0x%02x\n", val);
+	ns2501_readb(dvo, NS2501_REGC, &val);
+	DRM_LOG_KMS("NS2501_REGC: 0x%02x\n", val);
+}
+
+static void ns2501_destroy(struct intel_dvo_device *dvo)
+{
+	struct ns2501_priv *ns = dvo->dev_priv;
+
+	if (ns) {
+		kfree(ns);
+		dvo->dev_priv = NULL;
+	}
+}
+
+struct intel_dvo_dev_ops ns2501_ops = {
+	.init = ns2501_init,
+	.detect = ns2501_detect,
+	.mode_valid = ns2501_mode_valid,
+	.mode_set = ns2501_mode_set,
+	.dpms = ns2501_dpms,
+	.get_hw_state = ns2501_get_hw_state,
+	.dump_regs = ns2501_dump_regs,
+	.destroy = ns2501_destroy,
+};
diff --git a/drivers/gpu/drm/i915/dvo_sil164.c b/drivers/gpu/drm/i915/dvo_sil164.c
index a0b13a6..4debd32 100644
--- a/drivers/gpu/drm/i915/dvo_sil164.c
+++ b/drivers/gpu/drm/i915/dvo_sil164.c
@@ -208,7 +208,7 @@
 }
 
 /* set the SIL164 power state */
-static void sil164_dpms(struct intel_dvo_device *dvo, int mode)
+static void sil164_dpms(struct intel_dvo_device *dvo, bool enable)
 {
 	int ret;
 	unsigned char ch;
@@ -217,7 +217,7 @@
 	if (ret == false)
 		return;
 
-	if (mode == DRM_MODE_DPMS_ON)
+	if (enable)
 		ch |= SIL164_8_PD;
 	else
 		ch &= ~SIL164_8_PD;
@@ -226,6 +226,21 @@
 	return;
 }
 
+static bool sil164_get_hw_state(struct intel_dvo_device *dvo)
+{
+	int ret;
+	unsigned char ch;
+
+	ret = sil164_readb(dvo, SIL164_REG8, &ch);
+	if (ret == false)
+		return false;
+
+	if (ch & SIL164_8_PD)
+		return true;
+	else
+		return false;
+}
+
 static void sil164_dump_regs(struct intel_dvo_device *dvo)
 {
 	uint8_t val;
@@ -258,6 +273,7 @@
 	.mode_valid = sil164_mode_valid,
 	.mode_set = sil164_mode_set,
 	.dpms = sil164_dpms,
+	.get_hw_state = sil164_get_hw_state,
 	.dump_regs = sil164_dump_regs,
 	.destroy = sil164_destroy,
 };
diff --git a/drivers/gpu/drm/i915/dvo_tfp410.c b/drivers/gpu/drm/i915/dvo_tfp410.c
index aa2cd3e..e17f1b0 100644
--- a/drivers/gpu/drm/i915/dvo_tfp410.c
+++ b/drivers/gpu/drm/i915/dvo_tfp410.c
@@ -234,14 +234,14 @@
 }
 
 /* set the tfp410 power state */
-static void tfp410_dpms(struct intel_dvo_device *dvo, int mode)
+static void tfp410_dpms(struct intel_dvo_device *dvo, bool enable)
 {
 	uint8_t ctl1;
 
 	if (!tfp410_readb(dvo, TFP410_CTL_1, &ctl1))
 		return;
 
-	if (mode == DRM_MODE_DPMS_ON)
+	if (enable)
 		ctl1 |= TFP410_CTL_1_PD;
 	else
 		ctl1 &= ~TFP410_CTL_1_PD;
@@ -249,6 +249,19 @@
 	tfp410_writeb(dvo, TFP410_CTL_1, ctl1);
 }
 
+static bool tfp410_get_hw_state(struct intel_dvo_device *dvo)
+{
+	uint8_t ctl1;
+
+	if (!tfp410_readb(dvo, TFP410_CTL_1, &ctl1))
+		return false;
+
+	if (ctl1 & TFP410_CTL_1_PD)
+		return true;
+	else
+		return false;
+}
+
 static void tfp410_dump_regs(struct intel_dvo_device *dvo)
 {
 	uint8_t val, val2;
@@ -299,6 +312,7 @@
 	.mode_valid = tfp410_mode_valid,
 	.mode_set = tfp410_mode_set,
 	.dpms = tfp410_dpms,
+	.get_hw_state = tfp410_get_hw_state,
 	.dump_regs = tfp410_dump_regs,
 	.destroy = tfp410_destroy,
 };
diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
index 359f6e8..7a4b3f3 100644
--- a/drivers/gpu/drm/i915/i915_debugfs.c
+++ b/drivers/gpu/drm/i915/i915_debugfs.c
@@ -44,7 +44,6 @@
 
 enum {
 	ACTIVE_LIST,
-	FLUSHING_LIST,
 	INACTIVE_LIST,
 	PINNED_LIST,
 };
@@ -62,28 +61,11 @@
 
 	seq_printf(m, "gen: %d\n", info->gen);
 	seq_printf(m, "pch: %d\n", INTEL_PCH_TYPE(dev));
-#define B(x) seq_printf(m, #x ": %s\n", yesno(info->x))
-	B(is_mobile);
-	B(is_i85x);
-	B(is_i915g);
-	B(is_i945gm);
-	B(is_g33);
-	B(need_gfx_hws);
-	B(is_g4x);
-	B(is_pineview);
-	B(is_broadwater);
-	B(is_crestline);
-	B(has_fbc);
-	B(has_pipe_cxsr);
-	B(has_hotplug);
-	B(cursor_needs_physical);
-	B(has_overlay);
-	B(overlay_needs_physical);
-	B(supports_tv);
-	B(has_bsd_ring);
-	B(has_blt_ring);
-	B(has_llc);
-#undef B
+#define DEV_INFO_FLAG(x) seq_printf(m, #x ": %s\n", yesno(info->x))
+#define DEV_INFO_SEP ;
+	DEV_INFO_FLAGS;
+#undef DEV_INFO_FLAG
+#undef DEV_INFO_SEP
 
 	return 0;
 }
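
DEV_INFO_FLAGS is an X-macro list added to i915_drv.h elsewhere in this
merge: every entry expands through whatever DEV_INFO_FLAG and
DEV_INFO_SEP mean at the point of use, so the same list prints one line
per flag here and one comma-separated string in i915_dump_device_info()
below. Schematically (the real list is much longer):

	#define DEV_INFO_FLAGS \
		DEV_INFO_FLAG(is_mobile) DEV_INFO_SEP \
		DEV_INFO_FLAG(has_fbc) DEV_INFO_SEP \
		DEV_INFO_FLAG(has_llc)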
@@ -121,20 +103,23 @@
 static void
 describe_obj(struct seq_file *m, struct drm_i915_gem_object *obj)
 {
-	seq_printf(m, "%p: %s%s %8zdKiB %04x %04x %d %d%s%s%s",
+	seq_printf(m, "%p: %s%s %8zdKiB %04x %04x %d %d %d%s%s%s",
 		   &obj->base,
 		   get_pin_flag(obj),
 		   get_tiling_flag(obj),
 		   obj->base.size / 1024,
 		   obj->base.read_domains,
 		   obj->base.write_domain,
-		   obj->last_rendering_seqno,
+		   obj->last_read_seqno,
+		   obj->last_write_seqno,
 		   obj->last_fenced_seqno,
 		   cache_level_str(obj->cache_level),
 		   obj->dirty ? " dirty" : "",
 		   obj->madv == I915_MADV_DONTNEED ? " purgeable" : "");
 	if (obj->base.name)
 		seq_printf(m, " (name: %d)", obj->base.name);
+	if (obj->pin_count)
+		seq_printf(m, " (pinned x %d)", obj->pin_count);
 	if (obj->fence_reg != I915_FENCE_REG_NONE)
 		seq_printf(m, " (fence: %d)", obj->fence_reg);
 	if (obj->gtt_space != NULL)
@@ -177,10 +162,6 @@
 		seq_printf(m, "Inactive:\n");
 		head = &dev_priv->mm.inactive_list;
 		break;
-	case FLUSHING_LIST:
-		seq_printf(m, "Flushing:\n");
-		head = &dev_priv->mm.flushing_list;
-		break;
 	default:
 		mutex_unlock(&dev->struct_mutex);
 		return -EINVAL;
@@ -218,8 +199,8 @@
 	struct drm_info_node *node = (struct drm_info_node *) m->private;
 	struct drm_device *dev = node->minor->dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
-	u32 count, mappable_count;
-	size_t size, mappable_size;
+	u32 count, mappable_count, purgeable_count;
+	size_t size, mappable_size, purgeable_size;
 	struct drm_i915_gem_object *obj;
 	int ret;
 
@@ -232,13 +213,12 @@
 		   dev_priv->mm.object_memory);
 
 	size = count = mappable_size = mappable_count = 0;
-	count_objects(&dev_priv->mm.gtt_list, gtt_list);
+	count_objects(&dev_priv->mm.bound_list, gtt_list);
 	seq_printf(m, "%u [%u] objects, %zu [%zu] bytes in gtt\n",
 		   count, mappable_count, size, mappable_size);
 
 	size = count = mappable_size = mappable_count = 0;
 	count_objects(&dev_priv->mm.active_list, mm_list);
-	count_objects(&dev_priv->mm.flushing_list, mm_list);
 	seq_printf(m, "  %u [%u] active objects, %zu [%zu] bytes\n",
 		   count, mappable_count, size, mappable_size);
 
@@ -247,8 +227,16 @@
 	seq_printf(m, "  %u [%u] inactive objects, %zu [%zu] bytes\n",
 		   count, mappable_count, size, mappable_size);
 
+	size = count = purgeable_size = purgeable_count = 0;
+	list_for_each_entry(obj, &dev_priv->mm.unbound_list, gtt_list) {
+		size += obj->base.size, ++count;
+		if (obj->madv == I915_MADV_DONTNEED)
+			purgeable_size += obj->base.size, ++purgeable_count;
+	}
+	seq_printf(m, "%u unbound objects, %zu bytes\n", count, size);
+
 	size = count = mappable_size = mappable_count = 0;
-	list_for_each_entry(obj, &dev_priv->mm.gtt_list, gtt_list) {
+	list_for_each_entry(obj, &dev_priv->mm.bound_list, gtt_list) {
 		if (obj->fault_mappable) {
 			size += obj->gtt_space->size;
 			++count;
@@ -257,7 +245,13 @@
 			mappable_size += obj->gtt_space->size;
 			++mappable_count;
 		}
+		if (obj->madv == I915_MADV_DONTNEED) {
+			purgeable_size += obj->base.size;
+			++purgeable_count;
+		}
 	}
+	seq_printf(m, "%u purgeable objects, %zu bytes\n",
+		   purgeable_count, purgeable_size);
 	seq_printf(m, "%u pinned mappable objects, %zu bytes\n",
 		   mappable_count, mappable_size);
 	seq_printf(m, "%u fault mappable objects, %zu bytes\n",
@@ -286,7 +280,7 @@
 		return ret;
 
 	total_obj_size = total_gtt_size = count = 0;
-	list_for_each_entry(obj, &dev_priv->mm.gtt_list, gtt_list) {
+	list_for_each_entry(obj, &dev_priv->mm.bound_list, gtt_list) {
 		if (list == PINNED_LIST && obj->pin_count == 0)
 			continue;
 
@@ -359,40 +353,22 @@
 	struct drm_info_node *node = (struct drm_info_node *) m->private;
 	struct drm_device *dev = node->minor->dev;
 	drm_i915_private_t *dev_priv = dev->dev_private;
+	struct intel_ring_buffer *ring;
 	struct drm_i915_gem_request *gem_request;
-	int ret, count;
+	int ret, count, i;
 
 	ret = mutex_lock_interruptible(&dev->struct_mutex);
 	if (ret)
 		return ret;
 
 	count = 0;
-	if (!list_empty(&dev_priv->ring[RCS].request_list)) {
-		seq_printf(m, "Render requests:\n");
+	for_each_ring(ring, dev_priv, i) {
+		if (list_empty(&ring->request_list))
+			continue;
+
+		seq_printf(m, "%s requests:\n", ring->name);
 		list_for_each_entry(gem_request,
-				    &dev_priv->ring[RCS].request_list,
-				    list) {
-			seq_printf(m, "    %d @ %d\n",
-				   gem_request->seqno,
-				   (int) (jiffies - gem_request->emitted_jiffies));
-		}
-		count++;
-	}
-	if (!list_empty(&dev_priv->ring[VCS].request_list)) {
-		seq_printf(m, "BSD requests:\n");
-		list_for_each_entry(gem_request,
-				    &dev_priv->ring[VCS].request_list,
-				    list) {
-			seq_printf(m, "    %d @ %d\n",
-				   gem_request->seqno,
-				   (int) (jiffies - gem_request->emitted_jiffies));
-		}
-		count++;
-	}
-	if (!list_empty(&dev_priv->ring[BCS].request_list)) {
-		seq_printf(m, "BLT requests:\n");
-		list_for_each_entry(gem_request,
-				    &dev_priv->ring[BCS].request_list,
+				    &ring->request_list,
 				    list) {
 			seq_printf(m, "    %d @ %d\n",
 				   gem_request->seqno,
@@ -413,7 +389,7 @@
 {
 	if (ring->get_seqno) {
 		seq_printf(m, "Current sequence (%s): %d\n",
-			   ring->name, ring->get_seqno(ring));
+			   ring->name, ring->get_seqno(ring, false));
 	}
 }
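
for_each_ring() replaces the open-coded RCS/VCS/BCS blocks and skips
rings the device doesn't actually have; its definition in i915_drv.h is
approximately:

	#define for_each_ring(ring__, dev_priv__, i__) \
		for ((i__) = 0; (i__) < I915_NUM_RINGS; (i__)++) \
			if (((ring__) = &(dev_priv__)->ring[(i__)]), \
			    intel_ring_initialized((ring__)))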
 
@@ -422,14 +398,15 @@
 	struct drm_info_node *node = (struct drm_info_node *) m->private;
 	struct drm_device *dev = node->minor->dev;
 	drm_i915_private_t *dev_priv = dev->dev_private;
+	struct intel_ring_buffer *ring;
 	int ret, i;
 
 	ret = mutex_lock_interruptible(&dev->struct_mutex);
 	if (ret)
 		return ret;
 
-	for (i = 0; i < I915_NUM_RINGS; i++)
-		i915_ring_seqno_info(m, &dev_priv->ring[i]);
+	for_each_ring(ring, dev_priv, i)
+		i915_ring_seqno_info(m, ring);
 
 	mutex_unlock(&dev->struct_mutex);
 
@@ -442,6 +419,7 @@
 	struct drm_info_node *node = (struct drm_info_node *) m->private;
 	struct drm_device *dev = node->minor->dev;
 	drm_i915_private_t *dev_priv = dev->dev_private;
+	struct intel_ring_buffer *ring;
 	int ret, i, pipe;
 
 	ret = mutex_lock_interruptible(&dev->struct_mutex);
@@ -519,13 +497,13 @@
 	}
 	seq_printf(m, "Interrupts received: %d\n",
 		   atomic_read(&dev_priv->irq_received));
-	for (i = 0; i < I915_NUM_RINGS; i++) {
+	for_each_ring(ring, dev_priv, i) {
 		if (IS_GEN6(dev) || IS_GEN7(dev)) {
-			seq_printf(m, "Graphics Interrupt mask (%s):	%08x\n",
-				   dev_priv->ring[i].name,
-				   I915_READ_IMR(&dev_priv->ring[i]));
+			seq_printf(m,
+				   "Graphics Interrupt mask (%s):	%08x\n",
+				   ring->name, I915_READ_IMR(ring));
 		}
-		i915_ring_seqno_info(m, &dev_priv->ring[i]);
+		i915_ring_seqno_info(m, ring);
 	}
 	mutex_unlock(&dev->struct_mutex);
 
@@ -548,7 +526,8 @@
 	for (i = 0; i < dev_priv->num_fence_regs; i++) {
 		struct drm_i915_gem_object *obj = dev_priv->fence_regs[i].obj;
 
-		seq_printf(m, "Fenced object[%2d] = ", i);
+		seq_printf(m, "Fence %d, pin count = %d, object = ",
+			   i, dev_priv->fence_regs[i].pin_count);
 		if (obj == NULL)
 			seq_printf(m, "unused");
 		else
@@ -630,12 +609,12 @@
 	seq_printf(m, "%s [%d]:\n", name, count);
 
 	while (count--) {
-		seq_printf(m, "  %08x %8u %04x %04x %08x%s%s%s%s%s%s%s",
+		seq_printf(m, "  %08x %8u %04x %04x %x %x%s%s%s%s%s%s%s",
 			   err->gtt_offset,
 			   err->size,
 			   err->read_domains,
 			   err->write_domain,
-			   err->seqno,
+			   err->rseqno, err->wseqno,
 			   pin_flag(err->pinned),
 			   tiling_flag(err->tiling),
 			   dirty_flag(err->dirty),
@@ -667,10 +646,9 @@
 	seq_printf(m, "  IPEIR: 0x%08x\n", error->ipeir[ring]);
 	seq_printf(m, "  IPEHR: 0x%08x\n", error->ipehr[ring]);
 	seq_printf(m, "  INSTDONE: 0x%08x\n", error->instdone[ring]);
-	if (ring == RCS && INTEL_INFO(dev)->gen >= 4) {
-		seq_printf(m, "  INSTDONE1: 0x%08x\n", error->instdone1);
+	if (ring == RCS && INTEL_INFO(dev)->gen >= 4)
 		seq_printf(m, "  BBADDR: 0x%08llx\n", error->bbaddr);
-	}
+
 	if (INTEL_INFO(dev)->gen >= 4)
 		seq_printf(m, "  INSTPS: 0x%08x\n", error->instps[ring]);
 	seq_printf(m, "  INSTPM: 0x%08x\n", error->instpm[ring]);
@@ -719,11 +697,17 @@
 	for (i = 0; i < dev_priv->num_fence_regs; i++)
 		seq_printf(m, "  fence[%d] = %08llx\n", i, error->fence[i]);
 
+	for (i = 0; i < ARRAY_SIZE(error->extra_instdone); i++)
+		seq_printf(m, "  INSTDONE_%d: 0x%08x\n", i, error->extra_instdone[i]);
+
 	if (INTEL_INFO(dev)->gen >= 6) {
 		seq_printf(m, "ERROR: 0x%08x\n", error->error);
 		seq_printf(m, "DONE_REG: 0x%08x\n", error->done_reg);
 	}
 
+	if (INTEL_INFO(dev)->gen == 7)
+		seq_printf(m, "ERR_INT: 0x%08x\n", error->err_int);
+
 	for_each_ring(ring, dev_priv, i)
 		i915_ring_error_state(m, dev, error, i);
 
@@ -799,10 +783,14 @@
 	struct seq_file *m = filp->private_data;
 	struct i915_error_state_file_priv *error_priv = m->private;
 	struct drm_device *dev = error_priv->dev;
+	int ret;
 
 	DRM_DEBUG_DRIVER("Resetting error state\n");
 
-	mutex_lock(&dev->struct_mutex);
+	ret = mutex_lock_interruptible(&dev->struct_mutex);
+	if (ret)
+		return ret;
+
 	i915_destroy_error_state(dev);
 	mutex_unlock(&dev->struct_mutex);
 
@@ -926,7 +914,7 @@
 		seq_printf(m, "Render p-state limit: %d\n",
 			   rp_state_limits & 0xff);
 		seq_printf(m, "CAGF: %dMHz\n", ((rpstat & GEN6_CAGF_MASK) >>
-						GEN6_CAGF_SHIFT) * 50);
+						GEN6_CAGF_SHIFT) * GT_FREQUENCY_MULTIPLIER);
 		seq_printf(m, "RP CUR UP EI: %dus\n", rpupei &
 			   GEN6_CURICONT_MASK);
 		seq_printf(m, "RP CUR UP: %dus\n", rpcurup &
@@ -942,15 +930,15 @@
 
 		max_freq = (rp_state_cap & 0xff0000) >> 16;
 		seq_printf(m, "Lowest (RPN) frequency: %dMHz\n",
-			   max_freq * 50);
+			   max_freq * GT_FREQUENCY_MULTIPLIER);
 
 		max_freq = (rp_state_cap & 0xff00) >> 8;
 		seq_printf(m, "Nominal (RP1) frequency: %dMHz\n",
-			   max_freq * 50);
+			   max_freq * GT_FREQUENCY_MULTIPLIER);
 
 		max_freq = rp_state_cap & 0xff;
 		seq_printf(m, "Max non-overclocked (RP0) frequency: %dMHz\n",
-			   max_freq * 50);
+			   max_freq * GT_FREQUENCY_MULTIPLIER);
 	} else {
 		seq_printf(m, "no P-state info available\n");
 	}
@@ -1292,7 +1280,8 @@
 
 	seq_printf(m, "GPU freq (MHz)\tEffective CPU freq (MHz)\n");
 
-	for (gpu_freq = dev_priv->min_delay; gpu_freq <= dev_priv->max_delay;
+	for (gpu_freq = dev_priv->rps.min_delay;
+	     gpu_freq <= dev_priv->rps.max_delay;
 	     gpu_freq++) {
 		I915_WRITE(GEN6_PCODE_DATA, gpu_freq);
 		I915_WRITE(GEN6_PCODE_MAILBOX, GEN6_PCODE_READY |
@@ -1303,7 +1292,7 @@
 			continue;
 		}
 		ia_freq = I915_READ(GEN6_PCODE_DATA);
-		seq_printf(m, "%d\t\t%d\n", gpu_freq * 50, ia_freq * 100);
+		seq_printf(m, "%d\t\t%d\n", gpu_freq * GT_FREQUENCY_MULTIPLIER, ia_freq * 100);
 	}
 
 	mutex_unlock(&dev->struct_mutex);
@@ -1472,8 +1461,12 @@
 	struct drm_info_node *node = (struct drm_info_node *) m->private;
 	struct drm_device *dev = node->minor->dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
+	int ret;
 
-	mutex_lock(&dev->struct_mutex);
+	ret = mutex_lock_interruptible(&dev->struct_mutex);
+	if (ret)
+		return ret;
+
 	seq_printf(m, "bit6 swizzle for X-tiling = %s\n",
 		   swizzle_string(dev_priv->mm.bit_6_swizzle_x));
 	seq_printf(m, "bit6 swizzle for Y-tiling = %s\n",
@@ -1520,9 +1513,7 @@
 	if (INTEL_INFO(dev)->gen == 6)
 		seq_printf(m, "GFX_MODE: 0x%08x\n", I915_READ(GFX_MODE));
 
-	for (i = 0; i < I915_NUM_RINGS; i++) {
-		ring = &dev_priv->ring[i];
-
+	for_each_ring(ring, dev_priv, i) {
 		seq_printf(m, "%s\n", ring->name);
 		if (INTEL_INFO(dev)->gen == 7)
 			seq_printf(m, "GFX_MODE: 0x%08x\n", I915_READ(RING_MODE_GEN7(ring)));
@@ -1674,7 +1665,7 @@
 	struct drm_device *dev = filp->private_data;
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	char buf[20];
-	int val = 0;
+	int val = 0, ret;
 
 	if (cnt > 0) {
 		if (cnt > sizeof(buf) - 1)
@@ -1689,7 +1680,10 @@
 
 	DRM_DEBUG_DRIVER("Stopping rings 0x%08x\n", val);
 
-	mutex_lock(&dev->struct_mutex);
+	ret = mutex_lock_interruptible(&dev->struct_mutex);
+	if (ret)
+		return ret;
+
 	dev_priv->stop_rings = val;
 	mutex_unlock(&dev->struct_mutex);
 
@@ -1713,10 +1707,18 @@
 	struct drm_device *dev = filp->private_data;
 	drm_i915_private_t *dev_priv = dev->dev_private;
 	char buf[80];
-	int len;
+	int len, ret;
+
+	if (!(IS_GEN6(dev) || IS_GEN7(dev)))
+		return -ENODEV;
+
+	ret = mutex_lock_interruptible(&dev->struct_mutex);
+	if (ret)
+		return ret;
 
 	len = snprintf(buf, sizeof(buf),
-		       "max freq: %d\n", dev_priv->max_delay * 50);
+		       "max freq: %d\n", dev_priv->rps.max_delay * GT_FREQUENCY_MULTIPLIER);
+	mutex_unlock(&dev->struct_mutex);
 
 	if (len > sizeof(buf))
 		len = sizeof(buf);
@@ -1733,7 +1735,10 @@
 	struct drm_device *dev = filp->private_data;
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	char buf[20];
-	int val = 1;
+	int val = 1, ret;
+
+	if (!(IS_GEN6(dev) || IS_GEN7(dev)))
+		return -ENODEV;
 
 	if (cnt > 0) {
 		if (cnt > sizeof(buf) - 1)
@@ -1748,12 +1753,17 @@
 
 	DRM_DEBUG_DRIVER("Manually setting max freq to %d\n", val);
 
+	ret = mutex_lock_interruptible(&dev->struct_mutex);
+	if (ret)
+		return ret;
+
 	/*
 	 * Turbo will still be enabled, but won't go above the set value.
 	 */
-	dev_priv->max_delay = val / 50;
+	dev_priv->rps.max_delay = val / GT_FREQUENCY_MULTIPLIER;
 
-	gen6_set_rps(dev, val / 50);
+	gen6_set_rps(dev, val / GT_FREQUENCY_MULTIPLIER);
+	mutex_unlock(&dev->struct_mutex);
 
 	return cnt;
 }
@@ -1773,10 +1783,18 @@
 	struct drm_device *dev = filp->private_data;
 	drm_i915_private_t *dev_priv = dev->dev_private;
 	char buf[80];
-	int len;
+	int len, ret;
+
+	if (!(IS_GEN6(dev) || IS_GEN7(dev)))
+		return -ENODEV;
+
+	ret = mutex_lock_interruptible(&dev->struct_mutex);
+	if (ret)
+		return ret;
 
 	len = snprintf(buf, sizeof(buf),
-		       "min freq: %d\n", dev_priv->min_delay * 50);
+		       "min freq: %d\n", dev_priv->rps.min_delay * GT_FREQUENCY_MULTIPLIER);
+	mutex_unlock(&dev->struct_mutex);
 
 	if (len > sizeof(buf))
 		len = sizeof(buf);
@@ -1791,7 +1809,10 @@
 	struct drm_device *dev = filp->private_data;
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	char buf[20];
-	int val = 1;
+	int val = 1, ret;
+
+	if (!(IS_GEN6(dev) || IS_GEN7(dev)))
+		return -ENODEV;
 
 	if (cnt > 0) {
 		if (cnt > sizeof(buf) - 1)
@@ -1806,12 +1827,17 @@
 
 	DRM_DEBUG_DRIVER("Manually setting min freq to %d\n", val);
 
+	ret = mutex_lock_interruptible(&dev->struct_mutex);
+	if (ret)
+		return ret;
+
 	/*
 	 * Turbo will still be enabled, but won't go below the set value.
 	 */
-	dev_priv->min_delay = val / 50;
+	dev_priv->rps.min_delay = val / GT_FREQUENCY_MULTIPLIER;
 
-	gen6_set_rps(dev, val / 50);
+	gen6_set_rps(dev, val / GT_FREQUENCY_MULTIPLIER);
+	mutex_unlock(&dev->struct_mutex);
 
 	return cnt;
 }
@@ -1834,9 +1860,15 @@
 	drm_i915_private_t *dev_priv = dev->dev_private;
 	char buf[80];
 	u32 snpcr;
-	int len;
+	int len, ret;
 
-	mutex_lock(&dev_priv->dev->struct_mutex);
+	if (!(IS_GEN6(dev) || IS_GEN7(dev)))
+		return -ENODEV;
+
+	ret = mutex_lock_interruptible(&dev->struct_mutex);
+	if (ret)
+		return ret;
+
 	snpcr = I915_READ(GEN6_MBCUNIT_SNPCR);
 	mutex_unlock(&dev_priv->dev->struct_mutex);
 
@@ -1862,6 +1894,9 @@
 	u32 snpcr;
 	int val = 1;
 
+	if (!(IS_GEN6(dev) || IS_GEN7(dev)))
+		return -ENODEV;
+
 	if (cnt > 0) {
 		if (cnt > sizeof(buf) - 1)
 			return -EINVAL;
@@ -1925,16 +1960,11 @@
 {
 	struct drm_device *dev = inode->i_private;
 	struct drm_i915_private *dev_priv = dev->dev_private;
-	int ret;
 
 	if (INTEL_INFO(dev)->gen < 6)
 		return 0;
 
-	ret = mutex_lock_interruptible(&dev->struct_mutex);
-	if (ret)
-		return ret;
 	gen6_gt_force_wake_get(dev_priv);
-	mutex_unlock(&dev->struct_mutex);
 
 	return 0;
 }
@@ -1947,16 +1977,7 @@
 	if (INTEL_INFO(dev)->gen < 6)
 		return 0;
 
-	/*
-	 * It's bad that we can potentially hang userspace if struct_mutex gets
-	 * forever stuck.  However, if we cannot acquire this lock it means that
-	 * almost certainly the driver has hung, is not unload-able. Therefore
-	 * hanging here is probably a minor inconvenience not to be seen my
-	 * almost every user.
-	 */
-	mutex_lock(&dev->struct_mutex);
 	gen6_gt_force_wake_put(dev_priv);
-	mutex_unlock(&dev->struct_mutex);
 
 	return 0;
 }
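
struct_mutex can go away here because the forcewake reference count is
serialized by its own spinlock; the get path looks roughly like this
(a sketch of gen6_gt_force_wake_get()):

	spin_lock_irqsave(&dev_priv->gt_lock, irqflags);
	if (dev_priv->forcewake_count++ == 0)
		__gen6_gt_force_wake_get(dev_priv);	/* pokes FORCEWAKE */
	spin_unlock_irqrestore(&dev_priv->gt_lock, irqflags);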
@@ -2006,7 +2027,6 @@
 	{"i915_gem_gtt", i915_gem_gtt_info, 0},
 	{"i915_gem_pinned", i915_gem_gtt_info, 0, (void *) PINNED_LIST},
 	{"i915_gem_active", i915_gem_object_list_info, 0, (void *) ACTIVE_LIST},
-	{"i915_gem_flushing", i915_gem_object_list_info, 0, (void *) FLUSHING_LIST},
 	{"i915_gem_inactive", i915_gem_object_list_info, 0, (void *) INACTIVE_LIST},
 	{"i915_gem_pageflip", i915_gem_pageflip_info, 0},
 	{"i915_gem_request", i915_gem_request_info, 0},
@@ -2067,6 +2087,7 @@
 				  &i915_cache_sharing_fops);
 	if (ret)
 		return ret;
+
 	ret = i915_debugfs_create(minor->debugfs_root, minor,
 				  "i915_ring_stop",
 				  &i915_ring_stop_fops);
diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c
index 914c0df..491394f 100644
--- a/drivers/gpu/drm/i915/i915_dma.c
+++ b/drivers/gpu/drm/i915/i915_dma.c
@@ -235,10 +235,10 @@
 		}
 	}
 
-	dev_priv->cpp = init->cpp;
-	dev_priv->back_offset = init->back_offset;
-	dev_priv->front_offset = init->front_offset;
-	dev_priv->current_page = 0;
+	dev_priv->dri1.cpp = init->cpp;
+	dev_priv->dri1.back_offset = init->back_offset;
+	dev_priv->dri1.front_offset = init->front_offset;
+	dev_priv->dri1.current_page = 0;
 	if (master_priv->sarea_priv)
 		master_priv->sarea_priv->pf_current_page = 0;
 
@@ -575,7 +575,7 @@
 
 	DRM_DEBUG_DRIVER("%s: page=%d pfCurrentPage=%d\n",
 			  __func__,
-			 dev_priv->current_page,
+			 dev_priv->dri1.current_page,
 			 master_priv->sarea_priv->pf_current_page);
 
 	i915_kernel_lost_context(dev);
@@ -589,12 +589,12 @@
 
 	OUT_RING(CMD_OP_DISPLAYBUFFER_INFO | ASYNC_FLIP);
 	OUT_RING(0);
-	if (dev_priv->current_page == 0) {
-		OUT_RING(dev_priv->back_offset);
-		dev_priv->current_page = 1;
+	if (dev_priv->dri1.current_page == 0) {
+		OUT_RING(dev_priv->dri1.back_offset);
+		dev_priv->dri1.current_page = 1;
 	} else {
-		OUT_RING(dev_priv->front_offset);
-		dev_priv->current_page = 0;
+		OUT_RING(dev_priv->dri1.front_offset);
+		dev_priv->dri1.current_page = 0;
 	}
 	OUT_RING(0);
 
@@ -613,7 +613,7 @@
 		ADVANCE_LP_RING();
 	}
 
-	master_priv->sarea_priv->pf_current_page = dev_priv->current_page;
+	master_priv->sarea_priv->pf_current_page = dev_priv->dri1.current_page;
 	return 0;
 }
 
@@ -1009,6 +1009,12 @@
 	case I915_PARAM_HAS_WAIT_TIMEOUT:
 		value = 1;
 		break;
+	case I915_PARAM_HAS_SEMAPHORES:
+		value = i915_semaphore_is_enabled(dev);
+		break;
+	case I915_PARAM_HAS_PRIME_VMAP_FLUSH:
+		value = 1;
+		break;
 	default:
 		DRM_DEBUG_DRIVER("Unknown parameter %d\n",
 				 param->param);
@@ -1425,6 +1431,21 @@
 	kfree(ap);
 }
 
+static void i915_dump_device_info(struct drm_i915_private *dev_priv)
+{
+	const struct intel_device_info *info = dev_priv->info;
+
+#define DEV_INFO_FLAG(name) info->name ? #name "," : ""
+#define DEV_INFO_SEP ,
+	DRM_DEBUG_DRIVER("i915 device info: gen=%i, pciid=0x%04x flags="
+			 "%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s",
+			 info->gen,
+			 dev_priv->dev->pdev->device,
+			 DEV_INFO_FLAGS);
+#undef DEV_INFO_FLAG
+#undef DEV_INFO_SEP
+}
+
 /**
  * i915_driver_load - setup chip and create an initial config
  * @dev: DRM device
@@ -1440,7 +1461,7 @@
 {
 	struct drm_i915_private *dev_priv;
 	struct intel_device_info *info;
-	int ret = 0, mmio_bar;
+	int ret = 0, mmio_bar, mmio_size;
 	uint32_t aperture_size;
 
 	info = (struct intel_device_info *) flags;
@@ -1449,7 +1470,6 @@
 	if (info->gen >= 6 && !drm_core_check_feature(dev, DRIVER_MODESET))
 		return -ENODEV;
 
-
 	/* i915 has 4 more counters */
 	dev->counters += 4;
 	dev->types[6] = _DRM_STAT_IRQ;
@@ -1465,6 +1485,8 @@
 	dev_priv->dev = dev;
 	dev_priv->info = info;
 
+	i915_dump_device_info(dev_priv);
+
 	if (i915_get_bridge_dev(dev)) {
 		ret = -EIO;
 		goto free_priv;
@@ -1504,7 +1526,19 @@
 		dma_set_coherent_mask(&dev->pdev->dev, DMA_BIT_MASK(32));
 
 	mmio_bar = IS_GEN2(dev) ? 1 : 0;
-	dev_priv->regs = pci_iomap(dev->pdev, mmio_bar, 0);
+	/* Before gen4, the registers and the GTT are behind different BARs.
+	 * However, from gen4 onwards, the registers and the GTT are shared
+	 * in the same BAR, so we want to restrict this ioremap from
+	 * clobbering the GTT, which we want to map with ioremap_wc
+	 * instead. Fortunately, the register BAR remains the same size
+	 * for all the earlier generations up to Ironlake.
+	 */
+	if (info->gen < 5)
+		mmio_size = 512*1024;
+	else
+		mmio_size = 2*1024*1024;
+
+	dev_priv->regs = pci_iomap(dev->pdev, mmio_bar, mmio_size);
 	if (!dev_priv->regs) {
 		DRM_ERROR("failed to map registers\n");
 		ret = -EIO;
@@ -1536,11 +1570,9 @@
 	 *
 	 * All tasks on the workqueue are expected to acquire the dev mutex
 	 * so there is no point in running more than one instance of the
-	 * workqueue at any time: max_active = 1 and NON_REENTRANT.
+	 * workqueue at any time.  Use an ordered one.
 	 */
-	dev_priv->wq = alloc_workqueue("i915",
-				       WQ_UNBOUND | WQ_NON_REENTRANT,
-				       1);
+	dev_priv->wq = alloc_ordered_workqueue("i915", 0);
 	if (dev_priv->wq == NULL) {
 		DRM_ERROR("Failed to create our workqueue.\n");
 		ret = -ENOMEM;
@@ -1586,7 +1618,7 @@
 
 	spin_lock_init(&dev_priv->irq_lock);
 	spin_lock_init(&dev_priv->error_lock);
-	spin_lock_init(&dev_priv->rps_lock);
+	spin_lock_init(&dev_priv->rps.lock);
 	spin_lock_init(&dev_priv->dpio_lock);
 
 	if (IS_IVYBRIDGE(dev) || IS_HASWELL(dev))
@@ -1836,6 +1868,8 @@
 	DRM_IOCTL_DEF_DRV(I915_GEM_PIN, i915_gem_pin_ioctl, DRM_AUTH|DRM_ROOT_ONLY|DRM_UNLOCKED),
 	DRM_IOCTL_DEF_DRV(I915_GEM_UNPIN, i915_gem_unpin_ioctl, DRM_AUTH|DRM_ROOT_ONLY|DRM_UNLOCKED),
 	DRM_IOCTL_DEF_DRV(I915_GEM_BUSY, i915_gem_busy_ioctl, DRM_AUTH|DRM_UNLOCKED),
+	DRM_IOCTL_DEF_DRV(I915_GEM_SET_CACHEING, i915_gem_set_cacheing_ioctl, DRM_UNLOCKED),
+	DRM_IOCTL_DEF_DRV(I915_GEM_GET_CACHEING, i915_gem_get_cacheing_ioctl, DRM_UNLOCKED),
 	DRM_IOCTL_DEF_DRV(I915_GEM_THROTTLE, i915_gem_throttle_ioctl, DRM_AUTH|DRM_UNLOCKED),
 	DRM_IOCTL_DEF_DRV(I915_GEM_ENTERVT, i915_gem_entervt_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY|DRM_UNLOCKED),
 	DRM_IOCTL_DEF_DRV(I915_GEM_LEAVEVT, i915_gem_leavevt_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY|DRM_UNLOCKED),
@@ -1858,6 +1892,7 @@
 	DRM_IOCTL_DEF_DRV(I915_GEM_WAIT, i915_gem_wait_ioctl, DRM_AUTH|DRM_UNLOCKED),
 	DRM_IOCTL_DEF_DRV(I915_GEM_CONTEXT_CREATE, i915_gem_context_create_ioctl, DRM_UNLOCKED),
 	DRM_IOCTL_DEF_DRV(I915_GEM_CONTEXT_DESTROY, i915_gem_context_destroy_ioctl, DRM_UNLOCKED),
+	DRM_IOCTL_DEF_DRV(I915_REG_READ, i915_reg_read_ioctl, DRM_UNLOCKED),
 };
 
 int i915_max_ioctl = DRM_ARRAY_SIZE(i915_ioctls);
diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
index a24ffbe..a7837e5 100644
--- a/drivers/gpu/drm/i915/i915_drv.c
+++ b/drivers/gpu/drm/i915/i915_drv.c
@@ -470,6 +470,9 @@
 				"GEM idle failed, resume might fail\n");
 			return error;
 		}
+
+		intel_modeset_disable(dev);
+
 		drm_irq_uninstall(dev);
 	}
 
@@ -543,13 +546,9 @@
 		mutex_unlock(&dev->struct_mutex);
 
 		intel_modeset_init_hw(dev);
+		intel_modeset_setup_hw_state(dev);
 		drm_mode_config_reset(dev);
 		drm_irq_install(dev);
-
-		/* Resume the modeset for every activated CRTC */
-		mutex_lock(&dev->mode_config.mutex);
-		drm_helper_resume_force_mode(dev);
-		mutex_unlock(&dev->mode_config.mutex);
 	}
 
 	intel_opregion_init(dev);
@@ -1060,7 +1059,7 @@
 	 * This should make it easier to transition modules over to the
 	 * new register block scheme, since we can do it incrementally.
 	 */
-	if (reg >= 0x180000)
+	if (reg >= VLV_DISPLAY_BASE)
 		return false;
 
 	if (reg >= RENDER_RING_BASE &&
@@ -1174,9 +1173,59 @@
 	if (unlikely(__fifo_ret)) { \
 		gen6_gt_check_fifodbg(dev_priv); \
 	} \
+	if (IS_HASWELL(dev_priv->dev) && (I915_READ_NOTRACE(GEN7_ERR_INT) & ERR_INT_MMIO_UNCLAIMED)) { \
+		DRM_ERROR("Unclaimed write to %x\n", reg); \
+		writel(ERR_INT_MMIO_UNCLAIMED, dev_priv->regs + GEN7_ERR_INT);	\
+	} \
 }
 __i915_write(8, b)
 __i915_write(16, w)
 __i915_write(32, l)
 __i915_write(64, q)
 #undef __i915_write
+
+static const struct register_whitelist {
+	uint64_t offset;
+	uint32_t size;
+	uint32_t gen_bitmask; /* supported gens, 0x10 for 4, 0x30 for 4 and 5, etc. */
+} whitelist[] = {
+	{ RING_TIMESTAMP(RENDER_RING_BASE), 8, 0xF0 },
+};
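
To make the gen_bitmask convention concrete: bit N stands for gen N,
so on a gen 6 part the loop below computes 1 << 6 = 0x40, and the 0xF0
mask on the RING_TIMESTAMP entry (bits 4-7 set) admits gens 4 through
7 only.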
+
+int i915_reg_read_ioctl(struct drm_device *dev,
+			void *data, struct drm_file *file)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_i915_reg_read *reg = data;
+	struct register_whitelist const *entry = whitelist;
+	int i;
+
+	for (i = 0; i < ARRAY_SIZE(whitelist); i++, entry++) {
+		if (entry->offset == reg->offset &&
+		    (1 << INTEL_INFO(dev)->gen & entry->gen_bitmask))
+			break;
+	}
+
+	if (i == ARRAY_SIZE(whitelist))
+		return -EINVAL;
+
+	switch (entry->size) {
+	case 8:
+		reg->val = I915_READ64(reg->offset);
+		break;
+	case 4:
+		reg->val = I915_READ(reg->offset);
+		break;
+	case 2:
+		reg->val = I915_READ16(reg->offset);
+		break;
+	case 1:
+		reg->val = I915_READ8(reg->offset);
+		break;
+	default:
+		WARN_ON(1);
+		return -EINVAL;
+	}
+
+	return 0;
+}
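
For context, a minimal userspace sketch of driving the new ioctl. This
assumes the matching DRM_IOCTL_I915_REG_READ and struct
drm_i915_reg_read uapi additions in i915_drm.h (not shown in this
hunk) plus libdrm's drmIoctl():

	#include <stdint.h>
	#include <xf86drm.h>
	#include <i915_drm.h>

	/* Read the render ring timestamp, the only whitelisted register. */
	static int read_render_timestamp(int fd, uint64_t *ts)
	{
		struct drm_i915_reg_read reg = {
			.offset = 0x2358, /* RING_TIMESTAMP(RENDER_RING_BASE) */
		};

		if (drmIoctl(fd, DRM_IOCTL_I915_REG_READ, &reg))
			return -1; /* errno is EINVAL for non-whitelisted registers */

		*ts = reg.val;
		return 0;
	}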
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 627fe35..af5ceb4 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -109,6 +109,7 @@
 
 #define WATCH_COHERENCY	0
 #define WATCH_LISTS	0
+#define WATCH_GTT	0
 
 #define I915_GEM_PHYS_CURSOR_0 1
 #define I915_GEM_PHYS_CURSOR_1 2
@@ -195,9 +196,10 @@
 	u32 cpu_ring_head[I915_NUM_RINGS];
 	u32 cpu_ring_tail[I915_NUM_RINGS];
 	u32 error; /* gen6+ */
+	u32 err_int; /* gen7 */
 	u32 instpm[I915_NUM_RINGS];
 	u32 instps[I915_NUM_RINGS];
-	u32 instdone1;
+	u32 extra_instdone[I915_NUM_INSTDONE_REG];
 	u32 seqno[I915_NUM_RINGS];
 	u64 bbaddr;
 	u32 fault_reg[I915_NUM_RINGS];
@@ -221,7 +223,7 @@
 	struct drm_i915_error_buffer {
 		u32 size;
 		u32 name;
-		u32 seqno;
+		u32 rseqno, wseqno;
 		u32 gtt_offset;
 		u32 read_domains;
 		u32 write_domain;
@@ -239,7 +241,6 @@
 };
 
 struct drm_i915_display_funcs {
-	void (*dpms)(struct drm_crtc *crtc, int mode);
 	bool (*fbc_enabled)(struct drm_device *dev);
 	void (*enable_fbc)(struct drm_crtc *crtc, unsigned long interval);
 	void (*disable_fbc)(struct drm_device *dev);
@@ -248,7 +249,6 @@
 	void (*update_wm)(struct drm_device *dev);
 	void (*update_sprite_wm)(struct drm_device *dev, int pipe,
 				 uint32_t sprite_width, int pixel_size);
-	void (*sanitize_pm)(struct drm_device *dev);
 	void (*update_linetime_wm)(struct drm_device *dev, int pipe,
 				 struct drm_display_mode *mode);
 	int (*crtc_mode_set)(struct drm_crtc *crtc,
@@ -256,6 +256,8 @@
 			     struct drm_display_mode *adjusted_mode,
 			     int x, int y,
 			     struct drm_framebuffer *old_fb);
+	void (*crtc_enable)(struct drm_crtc *crtc);
+	void (*crtc_disable)(struct drm_crtc *crtc);
 	void (*off)(struct drm_crtc *crtc);
 	void (*write_eld)(struct drm_connector *connector,
 			  struct drm_crtc *crtc);
@@ -279,6 +281,32 @@
 	void (*force_wake_put)(struct drm_i915_private *dev_priv);
 };
 
+#define DEV_INFO_FLAGS \
+	DEV_INFO_FLAG(is_mobile) DEV_INFO_SEP \
+	DEV_INFO_FLAG(is_i85x) DEV_INFO_SEP \
+	DEV_INFO_FLAG(is_i915g) DEV_INFO_SEP \
+	DEV_INFO_FLAG(is_i945gm) DEV_INFO_SEP \
+	DEV_INFO_FLAG(is_g33) DEV_INFO_SEP \
+	DEV_INFO_FLAG(need_gfx_hws) DEV_INFO_SEP \
+	DEV_INFO_FLAG(is_g4x) DEV_INFO_SEP \
+	DEV_INFO_FLAG(is_pineview) DEV_INFO_SEP \
+	DEV_INFO_FLAG(is_broadwater) DEV_INFO_SEP \
+	DEV_INFO_FLAG(is_crestline) DEV_INFO_SEP \
+	DEV_INFO_FLAG(is_ivybridge) DEV_INFO_SEP \
+	DEV_INFO_FLAG(is_valleyview) DEV_INFO_SEP \
+	DEV_INFO_FLAG(is_haswell) DEV_INFO_SEP \
+	DEV_INFO_FLAG(has_force_wake) DEV_INFO_SEP \
+	DEV_INFO_FLAG(has_fbc) DEV_INFO_SEP \
+	DEV_INFO_FLAG(has_pipe_cxsr) DEV_INFO_SEP \
+	DEV_INFO_FLAG(has_hotplug) DEV_INFO_SEP \
+	DEV_INFO_FLAG(cursor_needs_physical) DEV_INFO_SEP \
+	DEV_INFO_FLAG(has_overlay) DEV_INFO_SEP \
+	DEV_INFO_FLAG(overlay_needs_physical) DEV_INFO_SEP \
+	DEV_INFO_FLAG(supports_tv) DEV_INFO_SEP \
+	DEV_INFO_FLAG(has_bsd_ring) DEV_INFO_SEP \
+	DEV_INFO_FLAG(has_blt_ring) DEV_INFO_SEP \
+	DEV_INFO_FLAG(has_llc)
+
 struct intel_device_info {
 	u8 gen;
 	u8 is_mobile:1;
@@ -402,12 +430,6 @@
 
 	struct resource mch_res;
 
-	unsigned int cpp;
-	int back_offset;
-	int front_offset;
-	int current_page;
-	int page_flipping;
-
 	atomic_t irq_received;
 
 	/* protects the irq masks */
@@ -425,7 +447,6 @@
 	u32 hotplug_supported_mask;
 	struct work_struct hotplug_work;
 
-	unsigned int sr01, adpa, ppcr, dvob, dvoc, lvds;
 	int num_pipe;
 	int num_pch_pll;
 
@@ -434,8 +455,7 @@
 	struct timer_list hangcheck_timer;
 	int hangcheck_count;
 	uint32_t last_acthd[I915_NUM_RINGS];
-	uint32_t last_instdone;
-	uint32_t last_instdone1;
+	uint32_t prev_instdone[I915_NUM_INSTDONE_REG];
 
 	unsigned int stop_rings;
 
@@ -666,7 +686,13 @@
 		struct drm_mm gtt_space;
 		/** List of all objects in gtt_space. Used to restore gtt
 		 * mappings on resume */
-		struct list_head gtt_list;
+		struct list_head bound_list;
+		/**
+		 * List of objects which are not bound to the GTT (thus
+		 * are idle and not used by the GPU) but still have
+		 * (presumably uncached) pages attached.
+		 */
+		struct list_head unbound_list;
 
 		/** Usable portion of the GTT for GEM */
 		unsigned long gtt_start;
@@ -696,17 +722,6 @@
 		struct list_head active_list;
 
 		/**
-		 * List of objects which are not in the ringbuffer but which
-		 * still have a write_domain which needs to be flushed before
-		 * unbinding.
-		 *
-		 * last_rendering_seqno is 0 while an object is in this list.
-		 *
-		 * A reference is held on the buffer while on this list.
-		 */
-		struct list_head flushing_list;
-
-		/**
 		 * LRU list of objects which are not in the ringbuffer and
 		 * are ready to unbind, but are still in the GTT.
 		 *
@@ -775,6 +790,12 @@
 	struct {
 		unsigned allow_batchbuffer : 1;
 		u32 __iomem *gfx_hws_cpu_addr;
+
+		unsigned int cpp;
+		int back_offset;
+		int front_offset;
+		int current_page;
+		int page_flipping;
 	} dri1;
 
 	/* Kernel Modesetting */
@@ -796,9 +817,6 @@
 	bool lvds_downclock_avail;
 	/* indicates the reduced downclock for LVDS*/
 	int lvds_downclock;
-	struct work_struct idle_work;
-	struct timer_list idle_timer;
-	bool busy;
 	u16 orig_clock;
 	int child_dev_num;
 	struct child_device_config *child_dev;
@@ -807,26 +825,41 @@
 
 	bool mchbar_need_disable;
 
-	struct work_struct rps_work;
-	spinlock_t rps_lock;
-	u32 pm_iir;
+	/* gen6+ rps state */
+	struct {
+		struct work_struct work;
+		u32 pm_iir;
+		/* lock - irqsave spinlock that protects the work_struct and
+		 * pm_iir. */
+		spinlock_t lock;
 
-	u8 cur_delay;
-	u8 min_delay;
-	u8 max_delay;
-	u8 fmax;
-	u8 fstart;
+		/* The variables below and all the rps hw state are protected
+		 * by dev->struct_mutex. */
+		u8 cur_delay;
+		u8 min_delay;
+		u8 max_delay;
+	} rps;
 
-	u64 last_count1;
-	unsigned long last_time1;
-	unsigned long chipset_power;
-	u64 last_count2;
-	struct timespec last_time2;
-	unsigned long gfx_power;
-	int c_m;
-	int r_t;
-	u8 corr;
-	spinlock_t *mchdev_lock;
+	/* ilk-only ips/rps state. Everything in here is protected by the global
+	 * mchdev_lock in intel_pm.c */
+	struct {
+		u8 cur_delay;
+		u8 min_delay;
+		u8 max_delay;
+		u8 fmax;
+		u8 fstart;
+
+		u64 last_count1;
+		unsigned long last_time1;
+		unsigned long chipset_power;
+		u64 last_count2;
+		struct timespec last_time2;
+		unsigned long gfx_power;
+		u8 corr;
+
+		int c_m;
+		int r_t;
+	} ips;
 
 	enum no_fbc_reason no_fbc_reason;
 
@@ -861,30 +894,48 @@
 };
 
 enum i915_cache_level {
-	I915_CACHE_NONE,
+	I915_CACHE_NONE = 0,
 	I915_CACHE_LLC,
-	I915_CACHE_LLC_MLC, /* gen6+ */
+	I915_CACHE_LLC_MLC, /* gen6+, in docs at least! */
+};
+
+struct drm_i915_gem_object_ops {
+	/* Interface between the GEM object and its backing storage.
+	 * get_pages() is called once, prior to binding the associated set
+	 * of pages into the GTT, and put_pages() is
+	 * called after we no longer need them. As we expect there to be
+	 * associated cost with migrating pages between the backing storage
+	 * and making them available for the GPU (e.g. clflush), we may hold
+	 * onto the pages after they are no longer referenced by the GPU
+	 * in case they may be used again shortly (for example migrating the
+	 * pages to a different memory domain within the GTT). put_pages()
+	 * will therefore most likely be called when the object itself is
+	 * being released or under memory pressure (where we attempt to
+	 * reap pages for the shrinker).
+	 */
+	int (*get_pages)(struct drm_i915_gem_object *);
+	void (*put_pages)(struct drm_i915_gem_object *);
 };
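
As a sketch of how this vtable is meant to be wired up, the default
shmem-backed implementation in i915_gem.c (elsewhere in this patch)
amounts to something like:

	static const struct drm_i915_gem_object_ops i915_gem_object_ops = {
		.get_pages = i915_gem_object_get_pages_gtt,
		.put_pages = i915_gem_object_put_pages_gtt,
	};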
 
 struct drm_i915_gem_object {
 	struct drm_gem_object base;
 
+	const struct drm_i915_gem_object_ops *ops;
+
 	/** Current space allocated to this object in the GTT, if any. */
 	struct drm_mm_node *gtt_space;
 	struct list_head gtt_list;
 
-	/** This object's place on the active/flushing/inactive lists */
+	/** This object's place on the active/inactive lists */
 	struct list_head ring_list;
 	struct list_head mm_list;
-	/** This object's place on GPU write list */
-	struct list_head gpu_write_list;
 	/** This object's place in the batchbuffer or on the eviction list */
 	struct list_head exec_list;
 
 	/**
-	 * This is set if the object is on the active or flushing lists
-	 * (has pending rendering), and is not set if it's on inactive (ready
-	 * to be unbound).
+	 * This is set if the object is on the active lists (has pending
+	 * rendering and so a non-zero seqno), and is not set if it is on
+	 * the inactive (ready to be unbound) list.
 	 */
 	unsigned int active:1;
 
@@ -895,12 +946,6 @@
 	unsigned int dirty:1;
 
 	/**
-	 * This is set if the object has been written to since the last
-	 * GPU flush.
-	 */
-	unsigned int pending_gpu_write:1;
-
-	/**
 	 * Fence register bits (if any) for this object.  Will be set
 	 * as needed when mapped into the GTT.
 	 * Protected by dev->struct_mutex.
@@ -961,17 +1006,12 @@
 
 	unsigned int has_aliasing_ppgtt_mapping:1;
 	unsigned int has_global_gtt_mapping:1;
+	unsigned int has_dma_mapping:1;
 
-	struct page **pages;
-
-	/**
-	 * DMAR support
-	 */
-	struct scatterlist *sg_list;
-	int num_sg;
+	struct sg_table *pages;
+	int pages_pin_count;
 
 	/* prime dma-buf support */
-	struct sg_table *sg_table;
 	void *dma_buf_vmapping;
 	int vmapping_count;
 
@@ -992,7 +1032,8 @@
 	struct intel_ring_buffer *ring;
 
 	/** Breadcrumb of last rendering to the buffer. */
-	uint32_t last_rendering_seqno;
+	uint32_t last_read_seqno;
+	uint32_t last_write_seqno;
 	/** Breadcrumb of last fenced GPU access to the buffer. */
 	uint32_t last_fenced_seqno;
 
@@ -1135,6 +1176,10 @@
 
 #define HAS_FORCE_WAKE(dev) (INTEL_INFO(dev)->has_force_wake)
 
+#define HAS_L3_GPU_CACHE(dev) (IS_IVYBRIDGE(dev) || IS_HASWELL(dev))
+
+#define GT_FREQUENCY_MULTIPLIER 50
+
 #include "i915_trace.h"
 
 /**
@@ -1256,6 +1301,10 @@
 			 struct drm_file *file_priv);
 int i915_gem_busy_ioctl(struct drm_device *dev, void *data,
 			struct drm_file *file_priv);
+int i915_gem_get_cacheing_ioctl(struct drm_device *dev, void *data,
+				struct drm_file *file);
+int i915_gem_set_cacheing_ioctl(struct drm_device *dev, void *data,
+				struct drm_file *file);
 int i915_gem_throttle_ioctl(struct drm_device *dev, void *data,
 			    struct drm_file *file_priv);
 int i915_gem_madvise_ioctl(struct drm_device *dev, void *data,
@@ -1274,24 +1323,42 @@
 			struct drm_file *file_priv);
 void i915_gem_load(struct drm_device *dev);
 int i915_gem_init_object(struct drm_gem_object *obj);
-int __must_check i915_gem_flush_ring(struct intel_ring_buffer *ring,
-				     uint32_t invalidate_domains,
-				     uint32_t flush_domains);
+void i915_gem_object_init(struct drm_i915_gem_object *obj,
+			 const struct drm_i915_gem_object_ops *ops);
 struct drm_i915_gem_object *i915_gem_alloc_object(struct drm_device *dev,
 						  size_t size);
 void i915_gem_free_object(struct drm_gem_object *obj);
 int __must_check i915_gem_object_pin(struct drm_i915_gem_object *obj,
 				     uint32_t alignment,
-				     bool map_and_fenceable);
+				     bool map_and_fenceable,
+				     bool nonblocking);
 void i915_gem_object_unpin(struct drm_i915_gem_object *obj);
 int __must_check i915_gem_object_unbind(struct drm_i915_gem_object *obj);
 void i915_gem_release_mmap(struct drm_i915_gem_object *obj);
 void i915_gem_lastclose(struct drm_device *dev);
 
-int i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj,
-				  gfp_t gfpmask);
+int __must_check i915_gem_object_get_pages(struct drm_i915_gem_object *obj);
+static inline struct page *i915_gem_object_get_page(struct drm_i915_gem_object *obj, int n)
+{
+	struct scatterlist *sg = obj->pages->sgl;
+	while (n >= SG_MAX_SINGLE_ALLOC) {
+		sg = sg_chain_ptr(sg + SG_MAX_SINGLE_ALLOC - 1);
+		n -= SG_MAX_SINGLE_ALLOC - 1;
+	}
+	return sg_page(sg+n);
+}
+static inline void i915_gem_object_pin_pages(struct drm_i915_gem_object *obj)
+{
+	BUG_ON(obj->pages == NULL);
+	obj->pages_pin_count++;
+}
+static inline void i915_gem_object_unpin_pages(struct drm_i915_gem_object *obj)
+{
+	BUG_ON(obj->pages_pin_count == 0);
+	obj->pages_pin_count--;
+}
+
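A hypothetical consumer of the three helpers above, showing the
intended pairing (populate, pin while dereferencing, unpin when done);
the caller is assumed to hold struct_mutex:

	static struct page *
	peek_object_page(struct drm_i915_gem_object *obj, int n)
	{
		struct page *page;

		if (i915_gem_object_get_pages(obj)) /* populate obj->pages */
			return NULL;

		i915_gem_object_pin_pages(obj); /* pages can't be reaped now */
		page = i915_gem_object_get_page(obj, n); /* walks chained sg tables */
		i915_gem_object_unpin_pages(obj); /* shrinker may reclaim again */

		return page;
	}
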
 int __must_check i915_mutex_lock_interruptible(struct drm_device *dev);
-int __must_check i915_gem_object_wait_rendering(struct drm_i915_gem_object *obj);
 int i915_gem_object_sync(struct drm_i915_gem_object *obj,
 			 struct intel_ring_buffer *to);
 void i915_gem_object_move_to_active(struct drm_i915_gem_object *obj,
@@ -1358,9 +1425,9 @@
 void i915_gem_cleanup_ringbuffer(struct drm_device *dev);
 int __must_check i915_gpu_idle(struct drm_device *dev);
 int __must_check i915_gem_idle(struct drm_device *dev);
-int __must_check i915_add_request(struct intel_ring_buffer *ring,
-				  struct drm_file *file,
-				  struct drm_i915_gem_request *request);
+int i915_add_request(struct intel_ring_buffer *ring,
+		     struct drm_file *file,
+		     struct drm_i915_gem_request *request);
 int __must_check i915_wait_seqno(struct intel_ring_buffer *ring,
 				 uint32_t seqno);
 int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf);
@@ -1429,8 +1496,11 @@
 
 /* i915_gem_evict.c */
 int __must_check i915_gem_evict_something(struct drm_device *dev, int min_size,
-					  unsigned alignment, bool mappable);
-int i915_gem_evict_everything(struct drm_device *dev, bool purgeable_only);
+					  unsigned alignment,
+					  unsigned cache_level,
+					  bool mappable,
+					  bool nonblock);
+int i915_gem_evict_everything(struct drm_device *dev);
 
 /* i915_gem_stolen.c */
 int i915_gem_init_stolen(struct drm_device *dev);
@@ -1519,6 +1589,7 @@
 extern void intel_modeset_gem_init(struct drm_device *dev);
 extern void intel_modeset_cleanup(struct drm_device *dev);
 extern int intel_modeset_vga_set_state(struct drm_device *dev, bool state);
+extern void intel_modeset_setup_hw_state(struct drm_device *dev);
 extern bool intel_fbc_enabled(struct drm_device *dev);
 extern void intel_disable_fbc(struct drm_device *dev);
 extern bool ironlake_set_drps(struct drm_device *dev, u8 val);
@@ -1529,6 +1600,8 @@
 extern int intel_enable_rc6(const struct drm_device *dev);
 
 extern bool i915_semaphore_is_enabled(struct drm_device *dev);
+int i915_reg_read_ioctl(struct drm_device *dev, void *data,
+			struct drm_file *file);
 
 /* overlay */
 #ifdef CONFIG_DEBUG_FS
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 274d25d..2f76905 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -37,12 +37,12 @@
 #include <linux/pci.h>
 #include <linux/dma-buf.h>
 
-static __must_check int i915_gem_object_flush_gpu_write_domain(struct drm_i915_gem_object *obj);
 static void i915_gem_object_flush_gtt_write_domain(struct drm_i915_gem_object *obj);
 static void i915_gem_object_flush_cpu_write_domain(struct drm_i915_gem_object *obj);
 static __must_check int i915_gem_object_bind_to_gtt(struct drm_i915_gem_object *obj,
 						    unsigned alignment,
-						    bool map_and_fenceable);
+						    bool map_and_fenceable,
+						    bool nonblocking);
 static int i915_gem_phys_pwrite(struct drm_device *dev,
 				struct drm_i915_gem_object *obj,
 				struct drm_i915_gem_pwrite *args,
@@ -56,6 +56,8 @@
 
 static int i915_gem_inactive_shrink(struct shrinker *shrinker,
 				    struct shrink_control *sc);
+static long i915_gem_purge(struct drm_i915_private *dev_priv, long target);
+static void i915_gem_shrink_all(struct drm_i915_private *dev_priv);
 static void i915_gem_object_truncate(struct drm_i915_gem_object *obj);
 
 static inline void i915_gem_object_fence_lost(struct drm_i915_gem_object *obj)
@@ -141,7 +143,7 @@
 static inline bool
 i915_gem_object_is_inactive(struct drm_i915_gem_object *obj)
 {
-	return !obj->active;
+	return obj->gtt_space && !obj->active;
 }
 
 int
@@ -180,7 +182,7 @@
 
 	pinned = 0;
 	mutex_lock(&dev->struct_mutex);
-	list_for_each_entry(obj, &dev_priv->mm.gtt_list, gtt_list)
+	list_for_each_entry(obj, &dev_priv->mm.bound_list, gtt_list)
 		if (obj->pin_count)
 			pinned += obj->gtt_space->size;
 	mutex_unlock(&dev->struct_mutex);
@@ -341,7 +343,7 @@
 				      page_length);
 	kunmap_atomic(vaddr);
 
-	return ret;
+	return ret ? -EFAULT : 0;
 }
 
 static void
@@ -392,7 +394,7 @@
 				     page_length);
 	kunmap(page);
 
-	return ret;
+	return ret ? -EFAULT : 0;
 }
 
 static int
@@ -401,7 +403,6 @@
 		     struct drm_i915_gem_pread *args,
 		     struct drm_file *file)
 {
-	struct address_space *mapping = obj->base.filp->f_path.dentry->d_inode->i_mapping;
 	char __user *user_data;
 	ssize_t remain;
 	loff_t offset;
@@ -410,7 +411,8 @@
 	int hit_slowpath = 0;
 	int prefaulted = 0;
 	int needs_clflush = 0;
-	int release_page;
+	struct scatterlist *sg;
+	int i;
 
 	user_data = (char __user *) (uintptr_t) args->data_ptr;
 	remain = args->size;
@@ -424,16 +426,30 @@
 		 * anyway again before the next pread happens. */
 		if (obj->cache_level == I915_CACHE_NONE)
 			needs_clflush = 1;
-		ret = i915_gem_object_set_to_gtt_domain(obj, false);
-		if (ret)
-			return ret;
+		if (obj->gtt_space) {
+			ret = i915_gem_object_set_to_gtt_domain(obj, false);
+			if (ret)
+				return ret;
+		}
 	}
 
+	ret = i915_gem_object_get_pages(obj);
+	if (ret)
+		return ret;
+
+	i915_gem_object_pin_pages(obj);
+
 	offset = args->offset;
 
-	while (remain > 0) {
+	for_each_sg(obj->pages->sgl, sg, obj->pages->nents, i) {
 		struct page *page;
 
+		if (i < offset >> PAGE_SHIFT)
+			continue;
+
+		if (remain <= 0)
+			break;
+
 		/* Operation in this page
 		 *
 		 * shmem_page_offset = offset within page in shmem file
@@ -444,18 +460,7 @@
 		if ((shmem_page_offset + page_length) > PAGE_SIZE)
 			page_length = PAGE_SIZE - shmem_page_offset;
 
-		if (obj->pages) {
-			page = obj->pages[offset >> PAGE_SHIFT];
-			release_page = 0;
-		} else {
-			page = shmem_read_mapping_page(mapping, offset >> PAGE_SHIFT);
-			if (IS_ERR(page)) {
-				ret = PTR_ERR(page);
-				goto out;
-			}
-			release_page = 1;
-		}
-
+		page = sg_page(sg);
 		page_do_bit17_swizzling = obj_do_bit17_swizzling &&
 			(page_to_phys(page) & (1 << 17)) != 0;
 
@@ -466,7 +471,6 @@
 			goto next_page;
 
 		hit_slowpath = 1;
-		page_cache_get(page);
 		mutex_unlock(&dev->struct_mutex);
 
 		if (!prefaulted) {
@@ -484,16 +488,12 @@
 				       needs_clflush);
 
 		mutex_lock(&dev->struct_mutex);
-		page_cache_release(page);
+
 next_page:
 		mark_page_accessed(page);
-		if (release_page)
-			page_cache_release(page);
 
-		if (ret) {
-			ret = -EFAULT;
+		if (ret)
 			goto out;
-		}
 
 		remain -= page_length;
 		user_data += page_length;
@@ -501,6 +501,8 @@
 	}
 
 out:
+	i915_gem_object_unpin_pages(obj);
+
 	if (hit_slowpath) {
 		/* Fixup: Kill any reinstated backing storage pages */
 		if (obj->madv == __I915_MADV_PURGED)
@@ -606,7 +608,7 @@
 	char __user *user_data;
 	int page_offset, page_length, ret;
 
-	ret = i915_gem_object_pin(obj, 0, true);
+	ret = i915_gem_object_pin(obj, 0, true, true);
 	if (ret)
 		goto out;
 
@@ -686,7 +688,7 @@
 				       page_length);
 	kunmap_atomic(vaddr);
 
-	return ret;
+	return ret ? -EFAULT : 0;
 }
 
 /* Only difference to the fast-path function is that this can handle bit17
@@ -720,7 +722,7 @@
 					     page_do_bit17_swizzling);
 	kunmap(page);
 
-	return ret;
+	return ret ? -EFAULT : 0;
 }
 
 static int
@@ -729,7 +731,6 @@
 		      struct drm_i915_gem_pwrite *args,
 		      struct drm_file *file)
 {
-	struct address_space *mapping = obj->base.filp->f_path.dentry->d_inode->i_mapping;
 	ssize_t remain;
 	loff_t offset;
 	char __user *user_data;
@@ -738,7 +739,8 @@
 	int hit_slowpath = 0;
 	int needs_clflush_after = 0;
 	int needs_clflush_before = 0;
-	int release_page;
+	int i;
+	struct scatterlist *sg;
 
 	user_data = (char __user *) (uintptr_t) args->data_ptr;
 	remain = args->size;
@@ -752,9 +754,11 @@
 		 * right away and we therefore have to clflush anyway. */
 		if (obj->cache_level == I915_CACHE_NONE)
 			needs_clflush_after = 1;
-		ret = i915_gem_object_set_to_gtt_domain(obj, true);
-		if (ret)
-			return ret;
+		if (obj->gtt_space) {
+			ret = i915_gem_object_set_to_gtt_domain(obj, true);
+			if (ret)
+				return ret;
+		}
 	}
 	/* Same trick applies for invalidate partially written cachelines before
 	 * writing.  */
@@ -762,13 +766,25 @@
 	    && obj->cache_level == I915_CACHE_NONE)
 		needs_clflush_before = 1;
 
+	ret = i915_gem_object_get_pages(obj);
+	if (ret)
+		return ret;
+
+	i915_gem_object_pin_pages(obj);
+
 	offset = args->offset;
 	obj->dirty = 1;
 
-	while (remain > 0) {
+	for_each_sg(obj->pages->sgl, sg, obj->pages->nents, i) {
 		struct page *page;
 		int partial_cacheline_write;
 
+		if (i < offset >> PAGE_SHIFT)
+			continue;
+
+		if (remain <= 0)
+			break;
+
 		/* Operation in this page
 		 *
 		 * shmem_page_offset = offset within page in shmem file
@@ -787,18 +803,7 @@
 			((shmem_page_offset | page_length)
 				& (boot_cpu_data.x86_clflush_size - 1));
 
-		if (obj->pages) {
-			page = obj->pages[offset >> PAGE_SHIFT];
-			release_page = 0;
-		} else {
-			page = shmem_read_mapping_page(mapping, offset >> PAGE_SHIFT);
-			if (IS_ERR(page)) {
-				ret = PTR_ERR(page);
-				goto out;
-			}
-			release_page = 1;
-		}
-
+		page = sg_page(sg);
 		page_do_bit17_swizzling = obj_do_bit17_swizzling &&
 			(page_to_phys(page) & (1 << 17)) != 0;
 
@@ -810,26 +815,20 @@
 			goto next_page;
 
 		hit_slowpath = 1;
-		page_cache_get(page);
 		mutex_unlock(&dev->struct_mutex);
-
 		ret = shmem_pwrite_slow(page, shmem_page_offset, page_length,
 					user_data, page_do_bit17_swizzling,
 					partial_cacheline_write,
 					needs_clflush_after);
 
 		mutex_lock(&dev->struct_mutex);
-		page_cache_release(page);
+
 next_page:
 		set_page_dirty(page);
 		mark_page_accessed(page);
-		if (release_page)
-			page_cache_release(page);
 
-		if (ret) {
-			ret = -EFAULT;
+		if (ret)
 			goto out;
-		}
 
 		remain -= page_length;
 		user_data += page_length;
@@ -837,6 +836,8 @@
 	}
 
 out:
+	i915_gem_object_unpin_pages(obj);
+
 	if (hit_slowpath) {
 		/* Fixup: Kill any reinstated backing storage pages */
 		if (obj->madv == __I915_MADV_PURGED)
@@ -920,10 +921,8 @@
 		goto out;
 	}
 
-	if (obj->gtt_space &&
-	    obj->cache_level == I915_CACHE_NONE &&
+	if (obj->cache_level == I915_CACHE_NONE &&
 	    obj->tiling_mode == I915_TILING_NONE &&
-	    obj->map_and_fenceable &&
 	    obj->base.write_domain != I915_GEM_DOMAIN_CPU) {
 		ret = i915_gem_gtt_pwrite_fast(dev, obj, args, file);
 		/* Note that the gtt paths might fail with non-page-backed user
@@ -931,7 +930,7 @@
 		 * textures). Fallback to the shmem path in that case. */
 	}
 
-	if (ret == -EFAULT)
+	if (ret == -EFAULT || ret == -ENOSPC)
 		ret = i915_gem_shmem_pwrite(dev, obj, args, file);
 
 out:
@@ -941,6 +940,240 @@
 	return ret;
 }
 
+int
+i915_gem_check_wedge(struct drm_i915_private *dev_priv,
+		     bool interruptible)
+{
+	if (atomic_read(&dev_priv->mm.wedged)) {
+		struct completion *x = &dev_priv->error_completion;
+		bool recovery_complete;
+		unsigned long flags;
+
+		/* Give the error handler a chance to run. */
+		spin_lock_irqsave(&x->wait.lock, flags);
+		recovery_complete = x->done > 0;
+		spin_unlock_irqrestore(&x->wait.lock, flags);
+
+		/* Non-interruptible callers can't handle -EAGAIN, hence return
+		 * -EIO unconditionally for these. */
+		if (!interruptible)
+			return -EIO;
+
+		/* Recovery complete, but still wedged means reset failure. */
+		if (recovery_complete)
+			return -EIO;
+
+		return -EAGAIN;
+	}
+
+	return 0;
+}
+
+/*
+ * Compare seqno against outstanding lazy request. Emit a request if they are
+ * equal.
+ */
+static int
+i915_gem_check_olr(struct intel_ring_buffer *ring, u32 seqno)
+{
+	int ret;
+
+	BUG_ON(!mutex_is_locked(&ring->dev->struct_mutex));
+
+	ret = 0;
+	if (seqno == ring->outstanding_lazy_request)
+		ret = i915_add_request(ring, NULL, NULL);
+
+	return ret;
+}
+
+/**
+ * __wait_seqno - wait until execution of seqno has finished
+ * @ring: the ring expected to report seqno
+ * @seqno: the sequence number we are waiting on
+ * @interruptible: do an interruptible wait (normally yes)
+ * @timeout: in - how long to wait (NULL forever); out - how much time remaining
+ *
+ * Returns 0 if the seqno was found within the allotted time. Else returns the
+ * errno with remaining time filled in timeout argument.
+ */
+static int __wait_seqno(struct intel_ring_buffer *ring, u32 seqno,
+			bool interruptible, struct timespec *timeout)
+{
+	drm_i915_private_t *dev_priv = ring->dev->dev_private;
+	struct timespec before, now, wait_time={1,0};
+	unsigned long timeout_jiffies;
+	long end;
+	bool wait_forever = true;
+	int ret;
+
+	if (i915_seqno_passed(ring->get_seqno(ring, true), seqno))
+		return 0;
+
+	trace_i915_gem_request_wait_begin(ring, seqno);
+
+	if (timeout != NULL) {
+		wait_time = *timeout;
+		wait_forever = false;
+	}
+
+	timeout_jiffies = timespec_to_jiffies(&wait_time);
+
+	if (WARN_ON(!ring->irq_get(ring)))
+		return -ENODEV;
+
+	/* Record current time in case interrupted by signal, or wedged */
+	getrawmonotonic(&before);
+
+#define EXIT_COND \
+	(i915_seqno_passed(ring->get_seqno(ring, false), seqno) || \
+	atomic_read(&dev_priv->mm.wedged))
+	do {
+		if (interruptible)
+			end = wait_event_interruptible_timeout(ring->irq_queue,
+							       EXIT_COND,
+							       timeout_jiffies);
+		else
+			end = wait_event_timeout(ring->irq_queue, EXIT_COND,
+						 timeout_jiffies);
+
+		ret = i915_gem_check_wedge(dev_priv, interruptible);
+		if (ret)
+			end = ret;
+	} while (end == 0 && wait_forever);
+
+	getrawmonotonic(&now);
+
+	ring->irq_put(ring);
+	trace_i915_gem_request_wait_end(ring, seqno);
+#undef EXIT_COND
+
+	if (timeout) {
+		struct timespec sleep_time = timespec_sub(now, before);
+		*timeout = timespec_sub(*timeout, sleep_time);
+	}
+
+	switch (end) {
+	case -EIO:
+	case -EAGAIN: /* Wedged */
+	case -ERESTARTSYS: /* Signal */
+		return (int)end;
+	case 0: /* Timeout */
+		if (timeout)
+			set_normalized_timespec(timeout, 0, 0);
+		return -ETIME;
+	default: /* Completed */
+		WARN_ON(end < 0); /* We're not aware of other errors */
+		return 0;
+	}
+}
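
Note the in/out contract on @timeout; a hypothetical caller threading
a time budget through the wait looks like:

	struct timespec budget = { 1, 0 }; /* allow at most one second */
	int ret = __wait_seqno(ring, seqno, true, &budget);
	/* ret == 0: budget holds the unused remainder;
	 * ret == -ETIME: budget was exhausted and reset to {0, 0}. */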
+
+/**
+ * Waits for a sequence number to be signaled, and cleans up the
+ * request and object lists appropriately for that event.
+ */
+int
+i915_wait_seqno(struct intel_ring_buffer *ring, uint32_t seqno)
+{
+	struct drm_device *dev = ring->dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	bool interruptible = dev_priv->mm.interruptible;
+	int ret;
+
+	BUG_ON(!mutex_is_locked(&dev->struct_mutex));
+	BUG_ON(seqno == 0);
+
+	ret = i915_gem_check_wedge(dev_priv, interruptible);
+	if (ret)
+		return ret;
+
+	ret = i915_gem_check_olr(ring, seqno);
+	if (ret)
+		return ret;
+
+	return __wait_seqno(ring, seqno, interruptible, NULL);
+}
+
+/**
+ * Ensures that all rendering to the object has completed and the object is
+ * safe to unbind from the GTT or access from the CPU.
+ */
+static __must_check int
+i915_gem_object_wait_rendering(struct drm_i915_gem_object *obj,
+			       bool readonly)
+{
+	struct intel_ring_buffer *ring = obj->ring;
+	u32 seqno;
+	int ret;
+
+	seqno = readonly ? obj->last_write_seqno : obj->last_read_seqno;
+	if (seqno == 0)
+		return 0;
+
+	ret = i915_wait_seqno(ring, seqno);
+	if (ret)
+		return ret;
+
+	i915_gem_retire_requests_ring(ring);
+
+	/* Manually manage the write flush as we may not have
+	 * retired the buffer yet.
+	 */
+	if (obj->last_write_seqno &&
+	    i915_seqno_passed(seqno, obj->last_write_seqno)) {
+		obj->last_write_seqno = 0;
+		obj->base.write_domain &= ~I915_GEM_GPU_DOMAINS;
+	}
+
+	return 0;
+}
+
+/* A nonblocking variant of the above wait. This is a highly dangerous routine
+ * as the object state may change during this call.
+ */
+static __must_check int
+i915_gem_object_wait_rendering__nonblocking(struct drm_i915_gem_object *obj,
+					    bool readonly)
+{
+	struct drm_device *dev = obj->base.dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct intel_ring_buffer *ring = obj->ring;
+	u32 seqno;
+	int ret;
+
+	BUG_ON(!mutex_is_locked(&dev->struct_mutex));
+	BUG_ON(!dev_priv->mm.interruptible);
+
+	seqno = readonly ? obj->last_write_seqno : obj->last_read_seqno;
+	if (seqno == 0)
+		return 0;
+
+	ret = i915_gem_check_wedge(dev_priv, true);
+	if (ret)
+		return ret;
+
+	ret = i915_gem_check_olr(ring, seqno);
+	if (ret)
+		return ret;
+
+	mutex_unlock(&dev->struct_mutex);
+	ret = __wait_seqno(ring, seqno, true, NULL);
+	mutex_lock(&dev->struct_mutex);
+
+	i915_gem_retire_requests_ring(ring);
+
+	/* Manually manage the write flush as we may not have
+	 * retired the buffer yet.
+	 */
+	if (obj->last_write_seqno &&
+	    i915_seqno_passed(seqno, obj->last_write_seqno)) {
+		obj->last_write_seqno = 0;
+		obj->base.write_domain &= ~I915_GEM_GPU_DOMAINS;
+	}
+
+	return ret;
+}
+
 /**
  * Called when user space prepares to use an object with the CPU, either
  * through the mmap ioctl's mapping or a GTT mapping.
@@ -978,6 +1211,14 @@
 		goto unlock;
 	}
 
+	/* Try to flush the object off the GPU without holding the lock.
+	 * We will repeat the flush holding the lock in the normal manner
+	 * to catch cases where we are gazumped.
+	 */
+	ret = i915_gem_object_wait_rendering__nonblocking(obj, !write_domain);
+	if (ret)
+		goto unref;
+
 	if (read_domains & I915_GEM_DOMAIN_GTT) {
 		ret = i915_gem_object_set_to_gtt_domain(obj, write_domain != 0);
 
@@ -991,6 +1232,7 @@
 		ret = i915_gem_object_set_to_cpu_domain(obj, write_domain != 0);
 	}
 
+unref:
 	drm_gem_object_unreference(&obj->base);
 unlock:
 	mutex_unlock(&dev->struct_mutex);
@@ -1110,7 +1352,7 @@
 			goto unlock;
 	}
 	if (!obj->gtt_space) {
-		ret = i915_gem_object_bind_to_gtt(obj, 0, true);
+		ret = i915_gem_object_bind_to_gtt(obj, 0, true, false);
 		if (ret)
 			goto unlock;
 
@@ -1271,6 +1513,42 @@
 	return i915_gem_get_gtt_size(dev, size, tiling_mode);
 }
 
+static int i915_gem_object_create_mmap_offset(struct drm_i915_gem_object *obj)
+{
+	struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
+	int ret;
+
+	if (obj->base.map_list.map)
+		return 0;
+
+	ret = drm_gem_create_mmap_offset(&obj->base);
+	if (ret != -ENOSPC)
+		return ret;
+
+	/* Badly fragmented mmap space? The only way we can recover
+	 * space is by destroying unwanted objects. We can't randomly release
+	 * mmap_offsets as userspace expects them to be persistent for the
+	 * lifetime of the objects. The closest we can get is to release the
+	 * offsets on purgeable objects by truncating them and marking them
+	 * purged, which prevents userspace from ever using those objects again.
+	 */
+	i915_gem_purge(dev_priv, obj->base.size >> PAGE_SHIFT);
+	ret = drm_gem_create_mmap_offset(&obj->base);
+	if (ret != -ENOSPC)
+		return ret;
+
+	i915_gem_shrink_all(dev_priv);
+	return drm_gem_create_mmap_offset(&obj->base);
+}
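
Note the escalation ladder here: a plain drm_gem_create_mmap_offset()
first, then purge up to this object's worth of purgeable pages and
retry, and only then shrink everything for the final attempt. The page
allocator below uses the same pattern.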
+
+static void i915_gem_object_free_mmap_offset(struct drm_i915_gem_object *obj)
+{
+	if (!obj->base.map_list.map)
+		return;
+
+	drm_gem_free_mmap_offset(&obj->base);
+}
+
 int
 i915_gem_mmap_gtt(struct drm_file *file,
 		  struct drm_device *dev,
@@ -1302,11 +1580,9 @@
 		goto out;
 	}
 
-	if (!obj->base.map_list.map) {
-		ret = drm_gem_create_mmap_offset(&obj->base);
-		if (ret)
-			goto out;
-	}
+	ret = i915_gem_object_create_mmap_offset(obj);
+	if (ret)
+		goto out;
 
 	*offset = (u64)obj->base.map_list.hash.key << PAGE_SHIFT;
 
@@ -1341,83 +1617,245 @@
 	return i915_gem_mmap_gtt(file, dev, args->handle, &args->offset);
 }
 
-int
-i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj,
-			      gfp_t gfpmask)
+/* Immediately discard the backing storage */
+static void
+i915_gem_object_truncate(struct drm_i915_gem_object *obj)
 {
-	int page_count, i;
-	struct address_space *mapping;
 	struct inode *inode;
-	struct page *page;
 
-	if (obj->pages || obj->sg_table)
-		return 0;
+	i915_gem_object_free_mmap_offset(obj);
 
-	/* Get the list of pages out of our struct file.  They'll be pinned
-	 * at this point until we release them.
+	if (obj->base.filp == NULL)
+		return;
+
+	/* Our goal here is to return as much of the memory as
+	 * possible back to the system, as we are called from OOM.
+	 * To do this we must instruct the shmfs to drop all of its
+	 * backing pages, *now*.
 	 */
-	page_count = obj->base.size / PAGE_SIZE;
-	BUG_ON(obj->pages != NULL);
-	obj->pages = drm_malloc_ab(page_count, sizeof(struct page *));
-	if (obj->pages == NULL)
-		return -ENOMEM;
-
 	inode = obj->base.filp->f_path.dentry->d_inode;
-	mapping = inode->i_mapping;
-	gfpmask |= mapping_gfp_mask(mapping);
+	shmem_truncate_range(inode, 0, (loff_t)-1);
 
-	for (i = 0; i < page_count; i++) {
-		page = shmem_read_mapping_page_gfp(mapping, i, gfpmask);
-		if (IS_ERR(page))
-			goto err_pages;
+	obj->madv = __I915_MADV_PURGED;
+}
 
-		obj->pages[i] = page;
-	}
-
-	if (i915_gem_object_needs_bit17_swizzle(obj))
-		i915_gem_object_do_bit_17_swizzle(obj);
-
-	return 0;
-
-err_pages:
-	while (i--)
-		page_cache_release(obj->pages[i]);
-
-	drm_free_large(obj->pages);
-	obj->pages = NULL;
-	return PTR_ERR(page);
+static inline int
+i915_gem_object_is_purgeable(struct drm_i915_gem_object *obj)
+{
+	return obj->madv == I915_MADV_DONTNEED;
 }
 
 static void
 i915_gem_object_put_pages_gtt(struct drm_i915_gem_object *obj)
 {
 	int page_count = obj->base.size / PAGE_SIZE;
-	int i;
-
-	if (!obj->pages)
-		return;
+	struct scatterlist *sg;
+	int ret, i;
 
 	BUG_ON(obj->madv == __I915_MADV_PURGED);
 
+	ret = i915_gem_object_set_to_cpu_domain(obj, true);
+	if (ret) {
+		/* In the event of a disaster, abandon all caches and
+		 * hope for the best.
+		 */
+		WARN_ON(ret != -EIO);
+		i915_gem_clflush_object(obj);
+		obj->base.read_domains = obj->base.write_domain = I915_GEM_DOMAIN_CPU;
+	}
+
 	if (i915_gem_object_needs_bit17_swizzle(obj))
 		i915_gem_object_save_bit_17_swizzle(obj);
 
 	if (obj->madv == I915_MADV_DONTNEED)
 		obj->dirty = 0;
 
-	for (i = 0; i < page_count; i++) {
+	for_each_sg(obj->pages->sgl, sg, page_count, i) {
+		struct page *page = sg_page(sg);
+
 		if (obj->dirty)
-			set_page_dirty(obj->pages[i]);
+			set_page_dirty(page);
 
 		if (obj->madv == I915_MADV_WILLNEED)
-			mark_page_accessed(obj->pages[i]);
+			mark_page_accessed(page);
 
-		page_cache_release(obj->pages[i]);
+		page_cache_release(page);
 	}
 	obj->dirty = 0;
 
-	drm_free_large(obj->pages);
+	sg_free_table(obj->pages);
+	kfree(obj->pages);
+}
+
+static int
+i915_gem_object_put_pages(struct drm_i915_gem_object *obj)
+{
+	const struct drm_i915_gem_object_ops *ops = obj->ops;
+
+	if (obj->pages == NULL)
+		return 0;
+
+	BUG_ON(obj->gtt_space);
+
+	if (obj->pages_pin_count)
+		return -EBUSY;
+
+	ops->put_pages(obj);
 	obj->pages = NULL;
+
+	list_del(&obj->gtt_list);
+	if (i915_gem_object_is_purgeable(obj))
+		i915_gem_object_truncate(obj);
+
+	return 0;
+}
+
+static long
+i915_gem_purge(struct drm_i915_private *dev_priv, long target)
+{
+	struct drm_i915_gem_object *obj, *next;
+	long count = 0;
+
+	list_for_each_entry_safe(obj, next,
+				 &dev_priv->mm.unbound_list,
+				 gtt_list) {
+		if (i915_gem_object_is_purgeable(obj) &&
+		    i915_gem_object_put_pages(obj) == 0) {
+			count += obj->base.size >> PAGE_SHIFT;
+			if (count >= target)
+				return count;
+		}
+	}
+
+	list_for_each_entry_safe(obj, next,
+				 &dev_priv->mm.inactive_list,
+				 mm_list) {
+		if (i915_gem_object_is_purgeable(obj) &&
+		    i915_gem_object_unbind(obj) == 0 &&
+		    i915_gem_object_put_pages(obj) == 0) {
+			count += obj->base.size >> PAGE_SHIFT;
+			if (count >= target)
+				return count;
+		}
+	}
+
+	return count;
+}
+
+static void
+i915_gem_shrink_all(struct drm_i915_private *dev_priv)
+{
+	struct drm_i915_gem_object *obj, *next;
+
+	i915_gem_evict_everything(dev_priv->dev);
+
+	list_for_each_entry_safe(obj, next, &dev_priv->mm.unbound_list, gtt_list)
+		i915_gem_object_put_pages(obj);
+}
+
+static int
+i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj)
+{
+	struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
+	int page_count, i;
+	struct address_space *mapping;
+	struct sg_table *st;
+	struct scatterlist *sg;
+	struct page *page;
+	gfp_t gfp;
+
+	/* Assert that the object is not currently in any GPU domain. As it
+	 * wasn't in the GTT, there shouldn't be any way it could have been in
+	 * a GPU cache
+	 */
+	BUG_ON(obj->base.read_domains & I915_GEM_GPU_DOMAINS);
+	BUG_ON(obj->base.write_domain & I915_GEM_GPU_DOMAINS);
+
+	st = kmalloc(sizeof(*st), GFP_KERNEL);
+	if (st == NULL)
+		return -ENOMEM;
+
+	page_count = obj->base.size / PAGE_SIZE;
+	if (sg_alloc_table(st, page_count, GFP_KERNEL)) {
+		sg_free_table(st);
+		kfree(st);
+		return -ENOMEM;
+	}
+
+	/* Get the list of pages out of our struct file.  They'll be pinned
+	 * at this point until we release them.
+	 *
+	 * Fail silently without starting the shrinker
+	 */
+	mapping = obj->base.filp->f_path.dentry->d_inode->i_mapping;
+	gfp = mapping_gfp_mask(mapping);
+	gfp |= __GFP_NORETRY | __GFP_NOWARN;
+	gfp &= ~(__GFP_IO | __GFP_WAIT);
+	for_each_sg(st->sgl, sg, page_count, i) {
+		page = shmem_read_mapping_page_gfp(mapping, i, gfp);
+		if (IS_ERR(page)) {
+			i915_gem_purge(dev_priv, page_count);
+			page = shmem_read_mapping_page_gfp(mapping, i, gfp);
+		}
+		if (IS_ERR(page)) {
+			/* We've tried hard to allocate the memory by reaping
+			 * our own buffer, now let the real VM do its job and
+			 * go down in flames if truly OOM.
+			 */
+			gfp &= ~(__GFP_NORETRY | __GFP_NOWARN);
+			gfp |= __GFP_IO | __GFP_WAIT;
+
+			i915_gem_shrink_all(dev_priv);
+			page = shmem_read_mapping_page_gfp(mapping, i, gfp);
+			if (IS_ERR(page))
+				goto err_pages;
+
+			gfp |= __GFP_NORETRY | __GFP_NOWARN;
+			gfp &= ~(__GFP_IO | __GFP_WAIT);
+		}
+
+		sg_set_page(sg, page, PAGE_SIZE, 0);
+	}
+
+	if (i915_gem_object_needs_bit17_swizzle(obj))
+		i915_gem_object_do_bit_17_swizzle(obj);
+
+	obj->pages = st;
+	return 0;
+
+err_pages:
+	for_each_sg(st->sgl, sg, i, page_count)
+		page_cache_release(sg_page(sg));
+	sg_free_table(st);
+	kfree(st);
+	return PTR_ERR(page);
+}
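
The gfp juggling above is deliberate: the first pass sets
__GFP_NORETRY | __GFP_NOWARN and masks out __GFP_IO | __GFP_WAIT so
shmem fails fast instead of entering direct reclaim; on failure we
purge our own purgeable objects and retry, and only after shrinking
everything do we re-enable blocking reclaim for one last do-or-die
allocation before unwinding through err_pages.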
+
+/* Ensure that the associated pages are gathered from the backing storage
+ * and pinned into our object. i915_gem_object_get_pages() may be called
+ * multiple times before they are released by a single call to
+ * i915_gem_object_put_pages() - once the pages are no longer referenced
+ * either as a result of memory pressure (reaping pages under the shrinker)
+ * or as the object is itself released.
+ */
+int
+i915_gem_object_get_pages(struct drm_i915_gem_object *obj)
+{
+	struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
+	const struct drm_i915_gem_object_ops *ops = obj->ops;
+	int ret;
+
+	if (obj->pages)
+		return 0;
+
+	BUG_ON(obj->pages_pin_count);
+
+	ret = ops->get_pages(obj);
+	if (ret)
+		return ret;
+
+	list_add_tail(&obj->gtt_list, &dev_priv->mm.unbound_list);
+	return 0;
 }
 
 void
@@ -1441,7 +1879,7 @@
 	list_move_tail(&obj->mm_list, &dev_priv->mm.active_list);
 	list_move_tail(&obj->ring_list, &ring->active_list);
 
-	obj->last_rendering_seqno = seqno;
+	obj->last_read_seqno = seqno;
 
 	if (obj->fenced_gpu_access) {
 		obj->last_fenced_seqno = seqno;
@@ -1458,97 +1896,35 @@
 }
 
 static void
-i915_gem_object_move_off_active(struct drm_i915_gem_object *obj)
-{
-	list_del_init(&obj->ring_list);
-	obj->last_rendering_seqno = 0;
-	obj->last_fenced_seqno = 0;
-}
-
-static void
-i915_gem_object_move_to_flushing(struct drm_i915_gem_object *obj)
-{
-	struct drm_device *dev = obj->base.dev;
-	drm_i915_private_t *dev_priv = dev->dev_private;
-
-	BUG_ON(!obj->active);
-	list_move_tail(&obj->mm_list, &dev_priv->mm.flushing_list);
-
-	i915_gem_object_move_off_active(obj);
-}
-
-static void
 i915_gem_object_move_to_inactive(struct drm_i915_gem_object *obj)
 {
 	struct drm_device *dev = obj->base.dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
 
+	BUG_ON(obj->base.write_domain & ~I915_GEM_GPU_DOMAINS);
+	BUG_ON(!obj->active);
+
+	if (obj->pin_count) /* are we a framebuffer? */
+		intel_mark_fb_idle(obj);
+
 	list_move_tail(&obj->mm_list, &dev_priv->mm.inactive_list);
 
-	BUG_ON(!list_empty(&obj->gpu_write_list));
-	BUG_ON(!obj->active);
+	list_del_init(&obj->ring_list);
 	obj->ring = NULL;
 
-	i915_gem_object_move_off_active(obj);
+	obj->last_read_seqno = 0;
+	obj->last_write_seqno = 0;
+	obj->base.write_domain = 0;
+
+	obj->last_fenced_seqno = 0;
 	obj->fenced_gpu_access = false;
 
 	obj->active = 0;
-	obj->pending_gpu_write = false;
 	drm_gem_object_unreference(&obj->base);
 
 	WARN_ON(i915_verify_lists(dev));
 }
 
-/* Immediately discard the backing storage */
-static void
-i915_gem_object_truncate(struct drm_i915_gem_object *obj)
-{
-	struct inode *inode;
-
-	/* Our goal here is to return as much of the memory as
-	 * is possible back to the system as we are called from OOM.
-	 * To do this we must instruct the shmfs to drop all of its
-	 * backing pages, *now*.
-	 */
-	inode = obj->base.filp->f_path.dentry->d_inode;
-	shmem_truncate_range(inode, 0, (loff_t)-1);
-
-	if (obj->base.map_list.map)
-		drm_gem_free_mmap_offset(&obj->base);
-
-	obj->madv = __I915_MADV_PURGED;
-}
-
-static inline int
-i915_gem_object_is_purgeable(struct drm_i915_gem_object *obj)
-{
-	return obj->madv == I915_MADV_DONTNEED;
-}
-
-static void
-i915_gem_process_flushing_list(struct intel_ring_buffer *ring,
-			       uint32_t flush_domains)
-{
-	struct drm_i915_gem_object *obj, *next;
-
-	list_for_each_entry_safe(obj, next,
-				 &ring->gpu_write_list,
-				 gpu_write_list) {
-		if (obj->base.write_domain & flush_domains) {
-			uint32_t old_write_domain = obj->base.write_domain;
-
-			obj->base.write_domain = 0;
-			list_del_init(&obj->gpu_write_list);
-			i915_gem_object_move_to_active(obj, ring,
-						       i915_gem_next_request_seqno(ring));
-
-			trace_i915_gem_object_change_domain(obj,
-							    obj->base.read_domains,
-							    old_write_domain);
-		}
-	}
-}
-
 static u32
 i915_gem_get_seqno(struct drm_device *dev)
 {
@@ -1589,15 +1965,16 @@
 	 * is that the flush _must_ happen before the next request, no matter
 	 * what.
 	 */
-	if (ring->gpu_caches_dirty) {
-		ret = i915_gem_flush_ring(ring, 0, I915_GEM_GPU_DOMAINS);
-		if (ret)
-			return ret;
+	ret = intel_ring_flush_all_caches(ring);
+	if (ret)
+		return ret;
 
-		ring->gpu_caches_dirty = false;
+	if (request == NULL) {
+		request = kmalloc(sizeof(*request), GFP_KERNEL);
+		if (request == NULL)
+			return -ENOMEM;
 	}
 
-	BUG_ON(request == NULL);
 	seqno = i915_gem_next_request_seqno(ring);
 
 	/* Record the position of the start of the request so that
@@ -1608,8 +1985,10 @@
 	request_ring_position = intel_ring_get_tail(ring);
 
 	ret = ring->add_request(ring, &seqno);
-	if (ret)
-	    return ret;
+	if (ret) {
+		kfree(request);
+		return ret;
+	}
 
 	trace_i915_gem_request_add(ring, seqno);
 
@@ -1619,6 +1998,7 @@
 	request->emitted_jiffies = jiffies;
 	was_empty = list_empty(&ring->request_list);
 	list_add_tail(&request->list, &ring->request_list);
+	request->file_priv = NULL;
 
 	if (file) {
 		struct drm_i915_file_private *file_priv = file->driver_priv;
@@ -1638,13 +2018,13 @@
 				  jiffies +
 				  msecs_to_jiffies(DRM_I915_HANGCHECK_PERIOD));
 		}
-		if (was_empty)
+		if (was_empty) {
 			queue_delayed_work(dev_priv->wq,
 					   &dev_priv->mm.retire_work, HZ);
+			intel_mark_busy(dev_priv->dev);
+		}
 	}
 
-	WARN_ON(!list_empty(&ring->gpu_write_list));
-
 	return 0;
 }
 
@@ -1686,8 +2066,6 @@
 				       struct drm_i915_gem_object,
 				       ring_list);
 
-		obj->base.write_domain = 0;
-		list_del_init(&obj->gpu_write_list);
 		i915_gem_object_move_to_inactive(obj);
 	}
 }
@@ -1723,20 +2101,6 @@
 	for_each_ring(ring, dev_priv, i)
 		i915_gem_reset_ring_lists(dev_priv, ring);
 
-	/* Remove anything from the flushing lists. The GPU cache is likely
-	 * to be lost on reset along with the data, so simply move the
-	 * lost bo to the inactive list.
-	 */
-	while (!list_empty(&dev_priv->mm.flushing_list)) {
-		obj = list_first_entry(&dev_priv->mm.flushing_list,
-				      struct drm_i915_gem_object,
-				      mm_list);
-
-		obj->base.write_domain = 0;
-		list_del_init(&obj->gpu_write_list);
-		i915_gem_object_move_to_inactive(obj);
-	}
-
 	/* Move everything out of the GPU domains to ensure we do any
 	 * necessary invalidation upon reuse.
 	 */
@@ -1765,7 +2129,7 @@
 
 	WARN_ON(i915_verify_lists(ring->dev));
 
-	seqno = ring->get_seqno(ring);
+	seqno = ring->get_seqno(ring, true);
 
 	for (i = 0; i < ARRAY_SIZE(ring->sync_seqno); i++)
 		if (seqno >= ring->sync_seqno[i])
@@ -1804,13 +2168,10 @@
 				      struct drm_i915_gem_object,
 				      ring_list);
 
-		if (!i915_seqno_passed(seqno, obj->last_rendering_seqno))
+		if (!i915_seqno_passed(seqno, obj->last_read_seqno))
 			break;
 
-		if (obj->base.write_domain != 0)
-			i915_gem_object_move_to_flushing(obj);
-		else
-			i915_gem_object_move_to_inactive(obj);
+		i915_gem_object_move_to_inactive(obj);
 	}
 
 	if (unlikely(ring->trace_irq_seqno &&
@@ -1859,216 +2220,20 @@
 	 */
 	idle = true;
 	for_each_ring(ring, dev_priv, i) {
-		if (ring->gpu_caches_dirty) {
-			struct drm_i915_gem_request *request;
-
-			request = kzalloc(sizeof(*request), GFP_KERNEL);
-			if (request == NULL ||
-			    i915_add_request(ring, NULL, request))
-			    kfree(request);
-		}
+		if (ring->gpu_caches_dirty)
+			i915_add_request(ring, NULL, NULL);
 
 		idle &= list_empty(&ring->request_list);
 	}
 
 	if (!dev_priv->mm.suspended && !idle)
 		queue_delayed_work(dev_priv->wq, &dev_priv->mm.retire_work, HZ);
+	if (idle)
+		intel_mark_idle(dev);
 
 	mutex_unlock(&dev->struct_mutex);
 }
 
-int
-i915_gem_check_wedge(struct drm_i915_private *dev_priv,
-		     bool interruptible)
-{
-	if (atomic_read(&dev_priv->mm.wedged)) {
-		struct completion *x = &dev_priv->error_completion;
-		bool recovery_complete;
-		unsigned long flags;
-
-		/* Give the error handler a chance to run. */
-		spin_lock_irqsave(&x->wait.lock, flags);
-		recovery_complete = x->done > 0;
-		spin_unlock_irqrestore(&x->wait.lock, flags);
-
-		/* Non-interruptible callers can't handle -EAGAIN, hence return
-		 * -EIO unconditionally for these. */
-		if (!interruptible)
-			return -EIO;
-
-		/* Recovery complete, but still wedged means reset failure. */
-		if (recovery_complete)
-			return -EIO;
-
-		return -EAGAIN;
-	}
-
-	return 0;
-}
-
-/*
- * Compare seqno against outstanding lazy request. Emit a request if they are
- * equal.
- */
-static int
-i915_gem_check_olr(struct intel_ring_buffer *ring, u32 seqno)
-{
-	int ret = 0;
-
-	BUG_ON(!mutex_is_locked(&ring->dev->struct_mutex));
-
-	if (seqno == ring->outstanding_lazy_request) {
-		struct drm_i915_gem_request *request;
-
-		request = kzalloc(sizeof(*request), GFP_KERNEL);
-		if (request == NULL)
-			return -ENOMEM;
-
-		ret = i915_add_request(ring, NULL, request);
-		if (ret) {
-			kfree(request);
-			return ret;
-		}
-
-		BUG_ON(seqno != request->seqno);
-	}
-
-	return ret;
-}
-
-/**
- * __wait_seqno - wait until execution of seqno has finished
- * @ring: the ring expected to report seqno
- * @seqno: duh!
- * @interruptible: do an interruptible wait (normally yes)
- * @timeout: in - how long to wait (NULL forever); out - how much time remaining
- *
- * Returns 0 if the seqno was found within the alloted time. Else returns the
- * errno with remaining time filled in timeout argument.
- */
-static int __wait_seqno(struct intel_ring_buffer *ring, u32 seqno,
-			bool interruptible, struct timespec *timeout)
-{
-	drm_i915_private_t *dev_priv = ring->dev->dev_private;
-	struct timespec before, now, wait_time={1,0};
-	unsigned long timeout_jiffies;
-	long end;
-	bool wait_forever = true;
-	int ret;
-
-	if (i915_seqno_passed(ring->get_seqno(ring), seqno))
-		return 0;
-
-	trace_i915_gem_request_wait_begin(ring, seqno);
-
-	if (timeout != NULL) {
-		wait_time = *timeout;
-		wait_forever = false;
-	}
-
-	timeout_jiffies = timespec_to_jiffies(&wait_time);
-
-	if (WARN_ON(!ring->irq_get(ring)))
-		return -ENODEV;
-
-	/* Record current time in case interrupted by signal, or wedged * */
-	getrawmonotonic(&before);
-
-#define EXIT_COND \
-	(i915_seqno_passed(ring->get_seqno(ring), seqno) || \
-	atomic_read(&dev_priv->mm.wedged))
-	do {
-		if (interruptible)
-			end = wait_event_interruptible_timeout(ring->irq_queue,
-							       EXIT_COND,
-							       timeout_jiffies);
-		else
-			end = wait_event_timeout(ring->irq_queue, EXIT_COND,
-						 timeout_jiffies);
-
-		ret = i915_gem_check_wedge(dev_priv, interruptible);
-		if (ret)
-			end = ret;
-	} while (end == 0 && wait_forever);
-
-	getrawmonotonic(&now);
-
-	ring->irq_put(ring);
-	trace_i915_gem_request_wait_end(ring, seqno);
-#undef EXIT_COND
-
-	if (timeout) {
-		struct timespec sleep_time = timespec_sub(now, before);
-		*timeout = timespec_sub(*timeout, sleep_time);
-	}
-
-	switch (end) {
-	case -EIO:
-	case -EAGAIN: /* Wedged */
-	case -ERESTARTSYS: /* Signal */
-		return (int)end;
-	case 0: /* Timeout */
-		if (timeout)
-			set_normalized_timespec(timeout, 0, 0);
-		return -ETIME;
-	default: /* Completed */
-		WARN_ON(end < 0); /* We're not aware of other errors */
-		return 0;
-	}
-}
-
-/**
- * Waits for a sequence number to be signaled, and cleans up the
- * request and object lists appropriately for that event.
- */
-int
-i915_wait_seqno(struct intel_ring_buffer *ring, uint32_t seqno)
-{
-	drm_i915_private_t *dev_priv = ring->dev->dev_private;
-	int ret = 0;
-
-	BUG_ON(seqno == 0);
-
-	ret = i915_gem_check_wedge(dev_priv, dev_priv->mm.interruptible);
-	if (ret)
-		return ret;
-
-	ret = i915_gem_check_olr(ring, seqno);
-	if (ret)
-		return ret;
-
-	ret = __wait_seqno(ring, seqno, dev_priv->mm.interruptible, NULL);
-
-	return ret;
-}
-
-/**
- * Ensures that all rendering to the object has completed and the object is
- * safe to unbind from the GTT or access from the CPU.
- */
-int
-i915_gem_object_wait_rendering(struct drm_i915_gem_object *obj)
-{
-	int ret;
-
-	/* This function only exists to support waiting for existing rendering,
-	 * not for emitting required flushes.
-	 */
-	BUG_ON((obj->base.write_domain & I915_GEM_GPU_DOMAINS) != 0);
-
-	/* If there is rendering queued on the buffer being evicted, wait for
-	 * it.
-	 */
-	if (obj->active) {
-		ret = i915_wait_seqno(obj->ring, obj->last_rendering_seqno);
-		if (ret)
-			return ret;
-		i915_gem_retire_requests_ring(obj->ring);
-	}
-
-	return 0;
-}
-
 /**
  * Ensures that an object will eventually get non-busy by flushing any required
  * write domains, emitting any outstanding lazy request and retiring and
@@ -2080,14 +2245,10 @@
 	int ret;
 
 	if (obj->active) {
-		ret = i915_gem_object_flush_gpu_write_domain(obj);
+		ret = i915_gem_check_olr(obj->ring, obj->last_read_seqno);
 		if (ret)
 			return ret;
 
-		ret = i915_gem_check_olr(obj->ring,
-					 obj->last_rendering_seqno);
-		if (ret)
-			return ret;
 		i915_gem_retire_requests_ring(obj->ring);
 	}
 
@@ -2147,7 +2308,7 @@
 		goto out;
 
 	if (obj->active) {
-		seqno = obj->last_rendering_seqno;
+		seqno = obj->last_read_seqno;
 		ring = obj->ring;
 	}
 
@@ -2202,11 +2363,11 @@
 		return 0;
 
 	if (to == NULL || !i915_semaphore_is_enabled(obj->base.dev))
-		return i915_gem_object_wait_rendering(obj);
+		return i915_gem_object_wait_rendering(obj, false);
 
 	idx = intel_ring_sync_index(from, to);
 
-	seqno = obj->last_rendering_seqno;
+	seqno = obj->last_read_seqno;
 	if (seqno <= from->sync_seqno[idx])
 		return 0;
 
@@ -2260,6 +2421,8 @@
 	if (obj->pin_count)
 		return -EBUSY;
 
+	BUG_ON(obj->pages == NULL);
+
 	ret = i915_gem_object_finish_gpu(obj);
 	if (ret)
 		return ret;
@@ -2270,22 +2433,6 @@
 
 	i915_gem_object_finish_gtt(obj);
 
-	/* Move the object to the CPU domain to ensure that
-	 * any possible CPU writes while it's not in the GTT
-	 * are flushed when we go to remap it.
-	 */
-	if (ret == 0)
-		ret = i915_gem_object_set_to_cpu_domain(obj, 1);
-	if (ret == -ERESTARTSYS)
-		return ret;
-	if (ret) {
-		/* In the event of a disaster, abandon all caches and
-		 * hope for the best.
-		 */
-		i915_gem_clflush_object(obj);
-		obj->base.read_domains = obj->base.write_domain = I915_GEM_DOMAIN_CPU;
-	}
-
 	/* release the fence reg _after_ flushing */
 	ret = i915_gem_object_put_fence(obj);
 	if (ret)
@@ -2301,10 +2448,8 @@
 	}
 	i915_gem_gtt_finish_object(obj);
 
-	i915_gem_object_put_pages_gtt(obj);
-
-	list_del_init(&obj->gtt_list);
-	list_del_init(&obj->mm_list);
+	list_del(&obj->mm_list);
+	list_move_tail(&obj->gtt_list, &dev_priv->mm.unbound_list);
 	/* Avoid an unnecessary call to unbind on rebind. */
 	obj->map_and_fenceable = true;
 
@@ -2312,48 +2457,14 @@
 	obj->gtt_space = NULL;
 	obj->gtt_offset = 0;
 
-	if (i915_gem_object_is_purgeable(obj))
-		i915_gem_object_truncate(obj);
-
-	return ret;
-}
-
-int
-i915_gem_flush_ring(struct intel_ring_buffer *ring,
-		    uint32_t invalidate_domains,
-		    uint32_t flush_domains)
-{
-	int ret;
-
-	if (((invalidate_domains | flush_domains) & I915_GEM_GPU_DOMAINS) == 0)
-		return 0;
-
-	trace_i915_gem_ring_flush(ring, invalidate_domains, flush_domains);
-
-	ret = ring->flush(ring, invalidate_domains, flush_domains);
-	if (ret)
-		return ret;
-
-	if (flush_domains & I915_GEM_GPU_DOMAINS)
-		i915_gem_process_flushing_list(ring, flush_domains);
-
 	return 0;
 }
 
 static int i915_ring_idle(struct intel_ring_buffer *ring)
 {
-	int ret;
-
-	if (list_empty(&ring->gpu_write_list) && list_empty(&ring->active_list))
+	if (list_empty(&ring->active_list))
 		return 0;
 
-	if (!list_empty(&ring->gpu_write_list)) {
-		ret = i915_gem_flush_ring(ring,
-				    I915_GEM_GPU_DOMAINS, I915_GEM_GPU_DOMAINS);
-		if (ret)
-			return ret;
-	}
-
 	return i915_wait_seqno(ring, i915_gem_next_request_seqno(ring));
 }
 
@@ -2372,10 +2483,6 @@
 		ret = i915_ring_idle(ring);
 		if (ret)
 			return ret;
-
-		/* Is the device fubar? */
-		if (WARN_ON(!list_empty(&ring->gpu_write_list)))
-			return -EBUSY;
 	}
 
 	return 0;
@@ -2548,21 +2655,8 @@
 static int
 i915_gem_object_flush_fence(struct drm_i915_gem_object *obj)
 {
-	int ret;
-
-	if (obj->fenced_gpu_access) {
-		if (obj->base.write_domain & I915_GEM_GPU_DOMAINS) {
-			ret = i915_gem_flush_ring(obj->ring,
-						  0, obj->base.write_domain);
-			if (ret)
-				return ret;
-		}
-
-		obj->fenced_gpu_access = false;
-	}
-
 	if (obj->last_fenced_seqno) {
-		ret = i915_wait_seqno(obj->ring, obj->last_fenced_seqno);
+		int ret = i915_wait_seqno(obj->ring, obj->last_fenced_seqno);
 		if (ret)
 			return ret;
 
@@ -2575,6 +2669,7 @@
 	if (obj->base.read_domains & I915_GEM_DOMAIN_GTT)
 		mb();
 
+	obj->fenced_gpu_access = false;
 	return 0;
 }
 
@@ -2694,18 +2789,88 @@
 	return 0;
 }
 
+static bool i915_gem_valid_gtt_space(struct drm_device *dev,
+				     struct drm_mm_node *gtt_space,
+				     unsigned long cache_level)
+{
+	struct drm_mm_node *other;
+
+	/* On non-LLC machines we have to be careful when putting differing
+	 * types of snoopable memory together to avoid the prefetcher
+	 * crossing memory domains and dying.
+	 */
+	if (HAS_LLC(dev))
+		return true;
+
+	if (gtt_space == NULL)
+		return true;
+
+	if (list_empty(&gtt_space->node_list))
+		return true;
+
+	other = list_entry(gtt_space->node_list.prev, struct drm_mm_node, node_list);
+	if (other->allocated && !other->hole_follows && other->color != cache_level)
+		return false;
+
+	other = list_entry(gtt_space->node_list.next, struct drm_mm_node, node_list);
+	if (other->allocated && !gtt_space->hole_follows && other->color != cache_level)
+		return false;
+
+	return true;
+}
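
The rule i915_gem_valid_gtt_space() encodes is easy to model outside the
kernel: on non-LLC parts, two adjacent allocations must either share a
cache level or be separated by a hole. A toy model of the neighbour
check, with invented node fields:

    #include <stdbool.h>
    #include <stdio.h>

    struct node {
            bool allocated;
            bool hole_follows;      /* free space after this node? */
            int color;              /* cache level */
    };

    static bool valid_neighbours(const struct node *prev,
                                 const struct node *self,
                                 const struct node *next)
    {
            if (prev && prev->allocated && !prev->hole_follows &&
                prev->color != self->color)
                    return false;
            if (next && next->allocated && !self->hole_follows &&
                next->color != self->color)
                    return false;
            return true;
    }

    int main(void)
    {
            struct node a = { true, false, 0 };     /* uncached, abuts b */
            struct node b = { true, true, 1 };      /* snooped, hole after */

            printf("b ok next to a? %d\n", valid_neighbours(&a, &b, NULL));
            return 0;
    }
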
+
+static void i915_gem_verify_gtt(struct drm_device *dev)
+{
+#if WATCH_GTT
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_i915_gem_object *obj;
+	int err = 0;
+
+	list_for_each_entry(obj, &dev_priv->mm.gtt_list, gtt_list) {
+		if (obj->gtt_space == NULL) {
+			printk(KERN_ERR "object found on GTT list with no space reserved\n");
+			err++;
+			continue;
+		}
+
+		if (obj->cache_level != obj->gtt_space->color) {
+			printk(KERN_ERR "object reserved space [%08lx, %08lx] with wrong color, cache_level=%x, color=%lx\n",
+			       obj->gtt_space->start,
+			       obj->gtt_space->start + obj->gtt_space->size,
+			       obj->cache_level,
+			       obj->gtt_space->color);
+			err++;
+			continue;
+		}
+
+		if (!i915_gem_valid_gtt_space(dev,
+					      obj->gtt_space,
+					      obj->cache_level)) {
+			printk(KERN_ERR "invalid GTT space found at [%08lx, %08lx] - color=%x\n",
+			       obj->gtt_space->start,
+			       obj->gtt_space->start + obj->gtt_space->size,
+			       obj->cache_level);
+			err++;
+			continue;
+		}
+	}
+
+	WARN_ON(err);
+#endif
+}
+
 /**
  * Finds free space in the GTT aperture and binds the object there.
  */
 static int
 i915_gem_object_bind_to_gtt(struct drm_i915_gem_object *obj,
 			    unsigned alignment,
-			    bool map_and_fenceable)
+			    bool map_and_fenceable,
+			    bool nonblocking)
 {
 	struct drm_device *dev = obj->base.dev;
 	drm_i915_private_t *dev_priv = dev->dev_private;
 	struct drm_mm_node *free_space;
-	gfp_t gfpmask = __GFP_NORETRY | __GFP_NOWARN;
 	u32 size, fence_size, fence_alignment, unfenced_alignment;
 	bool mappable, fenceable;
 	int ret;
@@ -2745,89 +2910,67 @@
 		return -E2BIG;
 	}
 
+	ret = i915_gem_object_get_pages(obj);
+	if (ret)
+		return ret;
+
  search_free:
 	if (map_and_fenceable)
 		free_space =
-			drm_mm_search_free_in_range(&dev_priv->mm.gtt_space,
-						    size, alignment,
-						    0, dev_priv->mm.gtt_mappable_end,
-						    0);
+			drm_mm_search_free_in_range_color(&dev_priv->mm.gtt_space,
+							  size, alignment, obj->cache_level,
+							  0, dev_priv->mm.gtt_mappable_end,
+							  false);
 	else
-		free_space = drm_mm_search_free(&dev_priv->mm.gtt_space,
-						size, alignment, 0);
+		free_space = drm_mm_search_free_color(&dev_priv->mm.gtt_space,
+						      size, alignment, obj->cache_level,
+						      false);
 
 	if (free_space != NULL) {
 		if (map_and_fenceable)
 			obj->gtt_space =
 				drm_mm_get_block_range_generic(free_space,
-							       size, alignment, 0,
+							       size, alignment, obj->cache_level,
 							       0, dev_priv->mm.gtt_mappable_end,
-							       0);
+							       false);
 		else
 			obj->gtt_space =
-				drm_mm_get_block(free_space, size, alignment);
+				drm_mm_get_block_generic(free_space,
+							 size, alignment, obj->cache_level,
+							 false);
 	}
 	if (obj->gtt_space == NULL) {
-		/* If the gtt is empty and we're still having trouble
-		 * fitting our object in, we're out of memory.
-		 */
 		ret = i915_gem_evict_something(dev, size, alignment,
-					       map_and_fenceable);
+					       obj->cache_level,
+					       map_and_fenceable,
+					       nonblocking);
 		if (ret)
 			return ret;
 
 		goto search_free;
 	}
-
-	ret = i915_gem_object_get_pages_gtt(obj, gfpmask);
-	if (ret) {
+	if (WARN_ON(!i915_gem_valid_gtt_space(dev,
+					      obj->gtt_space,
+					      obj->cache_level))) {
 		drm_mm_put_block(obj->gtt_space);
 		obj->gtt_space = NULL;
-
-		if (ret == -ENOMEM) {
-			/* first try to reclaim some memory by clearing the GTT */
-			ret = i915_gem_evict_everything(dev, false);
-			if (ret) {
-				/* now try to shrink everyone else */
-				if (gfpmask) {
-					gfpmask = 0;
-					goto search_free;
-				}
-
-				return -ENOMEM;
-			}
-
-			goto search_free;
-		}
-
-		return ret;
+		return -EINVAL;
 	}
 
 	ret = i915_gem_gtt_prepare_object(obj);
 	if (ret) {
-		i915_gem_object_put_pages_gtt(obj);
 		drm_mm_put_block(obj->gtt_space);
 		obj->gtt_space = NULL;
-
-		if (i915_gem_evict_everything(dev, false))
-			return ret;
-
-		goto search_free;
+		return ret;
 	}
 
 	if (!dev_priv->mm.aliasing_ppgtt)
 		i915_gem_gtt_bind_object(obj, obj->cache_level);
 
-	list_add_tail(&obj->gtt_list, &dev_priv->mm.gtt_list);
+	list_move_tail(&obj->gtt_list, &dev_priv->mm.bound_list);
 	list_add_tail(&obj->mm_list, &dev_priv->mm.inactive_list);
 
-	/* Assert that the object is not currently in any GPU domain. As it
-	 * wasn't in the GTT, there shouldn't be any way it could have been in
-	 * a GPU cache
-	 */
-	BUG_ON(obj->base.read_domains & I915_GEM_GPU_DOMAINS);
-	BUG_ON(obj->base.write_domain & I915_GEM_GPU_DOMAINS);
-
 	obj->gtt_offset = obj->gtt_space->start;
 
 	fenceable =
@@ -2840,6 +2983,7 @@
 	obj->map_and_fenceable = mappable && fenceable;
 
 	trace_i915_gem_object_bind(obj, map_and_fenceable);
+	i915_gem_verify_gtt(dev);
 	return 0;
 }
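
With page allocation hoisted out of the loop, the bind path above boils
down to search, evict on failure, retry. A compressed illustration of
that control flow (evict_something() here is a toy stand-in that always
frees one slot):

    #include <stdio.h>

    static int free_slots;

    static int search_free(void)
    {
            return free_slots > 0 ? free_slots-- : -1;
    }

    static int evict_something(void)
    {
            free_slots++;   /* pretend eviction always reclaims space */
            return 0;
    }

    static int bind_sketch(void)
    {
            for (;;) {
                    if (search_free() >= 0)
                            return 0;       /* bound */
                    if (evict_something())
                            return -1;      /* -ENOSPC in the real driver */
            }
    }

    int main(void)
    {
            printf("bind: %d\n", bind_sketch());
            return 0;
    }
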
 
@@ -2866,18 +3010,7 @@
 
 	trace_i915_gem_object_clflush(obj);
 
-	drm_clflush_pages(obj->pages, obj->base.size / PAGE_SIZE);
-}
-
-/** Flushes any GPU write domain for the object if it's dirty. */
-static int
-i915_gem_object_flush_gpu_write_domain(struct drm_i915_gem_object *obj)
-{
-	if ((obj->base.write_domain & I915_GEM_GPU_DOMAINS) == 0)
-		return 0;
-
-	/* Queue the GPU write cache flushing we need. */
-	return i915_gem_flush_ring(obj->ring, 0, obj->base.write_domain);
+	drm_clflush_sg(obj->pages);
 }
 
 /** Flushes the GTT write domain for the object if it's dirty. */
@@ -2946,16 +3079,10 @@
 	if (obj->base.write_domain == I915_GEM_DOMAIN_GTT)
 		return 0;
 
-	ret = i915_gem_object_flush_gpu_write_domain(obj);
+	ret = i915_gem_object_wait_rendering(obj, !write);
 	if (ret)
 		return ret;
 
-	if (obj->pending_gpu_write || write) {
-		ret = i915_gem_object_wait_rendering(obj);
-		if (ret)
-			return ret;
-	}
-
 	i915_gem_object_flush_cpu_write_domain(obj);
 
 	old_write_domain = obj->base.write_domain;
@@ -2998,6 +3125,12 @@
 		return -EBUSY;
 	}
 
+	if (!i915_gem_valid_gtt_space(dev, obj->gtt_space, cache_level)) {
+		ret = i915_gem_object_unbind(obj);
+		if (ret)
+			return ret;
+	}
+
 	if (obj->gtt_space) {
 		ret = i915_gem_object_finish_gpu(obj);
 		if (ret)
@@ -3009,7 +3142,7 @@
 		 * registers with snooped memory, so relinquish any fences
 		 * currently pointing to our region in the aperture.
 		 */
-		if (INTEL_INFO(obj->base.dev)->gen < 6) {
+		if (INTEL_INFO(dev)->gen < 6) {
 			ret = i915_gem_object_put_fence(obj);
 			if (ret)
 				return ret;
@@ -3020,6 +3153,8 @@
 		if (obj->has_aliasing_ppgtt_mapping)
 			i915_ppgtt_bind_object(dev_priv->mm.aliasing_ppgtt,
 					       obj, cache_level);
+
+		obj->gtt_space->color = cache_level;
 	}
 
 	if (cache_level == I915_CACHE_NONE) {
@@ -3046,9 +3181,72 @@
 	}
 
 	obj->cache_level = cache_level;
+	i915_gem_verify_gtt(dev);
 	return 0;
 }
 
+int i915_gem_get_cacheing_ioctl(struct drm_device *dev, void *data,
+				struct drm_file *file)
+{
+	struct drm_i915_gem_cacheing *args = data;
+	struct drm_i915_gem_object *obj;
+	int ret;
+
+	ret = i915_mutex_lock_interruptible(dev);
+	if (ret)
+		return ret;
+
+	obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
+	if (&obj->base == NULL) {
+		ret = -ENOENT;
+		goto unlock;
+	}
+
+	args->cacheing = obj->cache_level != I915_CACHE_NONE;
+
+	drm_gem_object_unreference(&obj->base);
+unlock:
+	mutex_unlock(&dev->struct_mutex);
+	return ret;
+}
+
+int i915_gem_set_cacheing_ioctl(struct drm_device *dev, void *data,
+				struct drm_file *file)
+{
+	struct drm_i915_gem_cacheing *args = data;
+	struct drm_i915_gem_object *obj;
+	enum i915_cache_level level;
+	int ret;
+
+	ret = i915_mutex_lock_interruptible(dev);
+	if (ret)
+		return ret;
+
+	switch (args->cacheing) {
+	case I915_CACHEING_NONE:
+		level = I915_CACHE_NONE;
+		break;
+	case I915_CACHEING_CACHED:
+		level = I915_CACHE_LLC;
+		break;
+	default:
+		ret = -EINVAL;
+		goto unlock;
+	}
+
+	obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
+	if (&obj->base == NULL) {
+		ret = -ENOENT;
+		goto unlock;
+	}
+
+	ret = i915_gem_object_set_cache_level(obj, level);
+
+	drm_gem_object_unreference(&obj->base);
+unlock:
+	mutex_unlock(&dev->struct_mutex);
+	return ret;
+}
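
For context, this is roughly how userspace would drive the new pair of
ioctls; the struct layout mirrors the patch, but the request macro is a
placeholder, since the uapi header is not part of this hunk:

    #include <fcntl.h>
    #include <stdint.h>
    #include <stdio.h>
    #include <sys/ioctl.h>

    /* mirrors drm_i915_gem_cacheing from the patch (uapi layout assumed) */
    struct drm_i915_gem_cacheing {
            uint32_t handle;
            uint32_t cacheing;      /* 0 = uncached, 1 = LLC cached */
    };

    static int set_cacheing(int fd, unsigned long request,
                            uint32_t handle, uint32_t level)
    {
            struct drm_i915_gem_cacheing arg = {
                    .handle = handle,
                    .cacheing = level,
            };

            return ioctl(fd, request, &arg);
    }

    int main(void)
    {
            int fd = open("/dev/dri/card0", O_RDWR);

            /* the real request macro comes from i915_drm.h; 0 is a placeholder */
            if (fd >= 0)
                    set_cacheing(fd, 0, 1, 1);
            return 0;
    }
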
+
 /*
  * Prepare buffer for display plane (scanout, cursors, etc).
  * Can be called from an uninterruptible phase (modesetting) and allows
@@ -3062,10 +3260,6 @@
 	u32 old_read_domains, old_write_domain;
 	int ret;
 
-	ret = i915_gem_object_flush_gpu_write_domain(obj);
-	if (ret)
-		return ret;
-
 	if (pipelined != obj->ring) {
 		ret = i915_gem_object_sync(obj, pipelined);
 		if (ret)
@@ -3089,7 +3283,7 @@
 	 * (e.g. libkms for the bootup splash), we have to ensure that we
 	 * always use map_and_fenceable for all scanout buffers.
 	 */
-	ret = i915_gem_object_pin(obj, alignment, true);
+	ret = i915_gem_object_pin(obj, alignment, true, false);
 	if (ret)
 		return ret;
 
@@ -3101,7 +3295,7 @@
 	/* It should now be out of any other write domains, and we can update
 	 * the domain values for our changes.
 	 */
-	BUG_ON((obj->base.write_domain & ~I915_GEM_DOMAIN_GTT) != 0);
+	obj->base.write_domain = 0;
 	obj->base.read_domains |= I915_GEM_DOMAIN_GTT;
 
 	trace_i915_gem_object_change_domain(obj,
@@ -3119,13 +3313,7 @@
 	if ((obj->base.read_domains & I915_GEM_GPU_DOMAINS) == 0)
 		return 0;
 
-	if (obj->base.write_domain & I915_GEM_GPU_DOMAINS) {
-		ret = i915_gem_flush_ring(obj->ring, 0, obj->base.write_domain);
-		if (ret)
-			return ret;
-	}
-
-	ret = i915_gem_object_wait_rendering(obj);
+	ret = i915_gem_object_wait_rendering(obj, false);
 	if (ret)
 		return ret;
 
@@ -3149,16 +3337,10 @@
 	if (obj->base.write_domain == I915_GEM_DOMAIN_CPU)
 		return 0;
 
-	ret = i915_gem_object_flush_gpu_write_domain(obj);
+	ret = i915_gem_object_wait_rendering(obj, !write);
 	if (ret)
 		return ret;
 
-	if (write || obj->pending_gpu_write) {
-		ret = i915_gem_object_wait_rendering(obj);
-		if (ret)
-			return ret;
-	}
-
 	i915_gem_object_flush_gtt_write_domain(obj);
 
 	old_write_domain = obj->base.write_domain;
@@ -3238,7 +3420,8 @@
 int
 i915_gem_object_pin(struct drm_i915_gem_object *obj,
 		    uint32_t alignment,
-		    bool map_and_fenceable)
+		    bool map_and_fenceable,
+		    bool nonblocking)
 {
 	int ret;
 
@@ -3263,7 +3446,8 @@
 
 	if (obj->gtt_space == NULL) {
 		ret = i915_gem_object_bind_to_gtt(obj, alignment,
-						  map_and_fenceable);
+						  map_and_fenceable,
+						  nonblocking);
 		if (ret)
 			return ret;
 	}
@@ -3321,7 +3505,7 @@
 	obj->user_pin_count++;
 	obj->pin_filp = file;
 	if (obj->user_pin_count == 1) {
-		ret = i915_gem_object_pin(obj, args->alignment, true);
+		ret = i915_gem_object_pin(obj, args->alignment, true, false);
 		if (ret)
 			goto out;
 	}
@@ -3401,6 +3585,10 @@
 	ret = i915_gem_object_flush_active(obj);
 
 	args->busy = obj->active;
+	if (obj->ring) {
+		BUILD_BUG_ON(I915_NUM_RINGS > 16);
+		args->busy |= intel_ring_flag(obj->ring) << 16;
+	}
 
 	drm_gem_object_unreference(&obj->base);
 unlock:
@@ -3449,9 +3637,8 @@
 	if (obj->madv != __I915_MADV_PURGED)
 		obj->madv = args->madv;
 
-	/* if the object is no longer bound, discard its backing storage */
-	if (i915_gem_object_is_purgeable(obj) &&
-	    obj->gtt_space == NULL)
+	/* if the object is no longer attached, discard its backing storage */
+	if (i915_gem_object_is_purgeable(obj) && obj->pages == NULL)
 		i915_gem_object_truncate(obj);
 
 	args->retained = obj->madv != __I915_MADV_PURGED;
@@ -3463,10 +3650,32 @@
 	return ret;
 }
 
+void i915_gem_object_init(struct drm_i915_gem_object *obj,
+			  const struct drm_i915_gem_object_ops *ops)
+{
+	INIT_LIST_HEAD(&obj->mm_list);
+	INIT_LIST_HEAD(&obj->gtt_list);
+	INIT_LIST_HEAD(&obj->ring_list);
+	INIT_LIST_HEAD(&obj->exec_list);
+
+	obj->ops = ops;
+
+	obj->fence_reg = I915_FENCE_REG_NONE;
+	obj->madv = I915_MADV_WILLNEED;
+	/* Avoid an unnecessary call to unbind on the first bind. */
+	obj->map_and_fenceable = true;
+
+	i915_gem_info_add_obj(obj->base.dev->dev_private, obj->base.size);
+}
+
+static const struct drm_i915_gem_object_ops i915_gem_object_ops = {
+	.get_pages = i915_gem_object_get_pages_gtt,
+	.put_pages = i915_gem_object_put_pages_gtt,
+};
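
The new ops table is the load-bearing abstraction here: shmem-backed
and (below, in i915_gem_dmabuf.c) dma-buf-backed objects now share
every caller of get_pages/put_pages. A userspace model of that
dispatch:

    #include <stdio.h>

    struct object;

    struct object_ops {
            int (*get_pages)(struct object *);
            void (*put_pages)(struct object *);
    };

    struct object {
            const struct object_ops *ops;
            void *pages;
    };

    static int get_pages_shmem(struct object *o)
    {
            o->pages = "shmem";
            return 0;
    }

    static void put_pages_shmem(struct object *o)
    {
            o->pages = NULL;
    }

    static const struct object_ops shmem_ops = {
            .get_pages = get_pages_shmem,
            .put_pages = put_pages_shmem,
    };

    int main(void)
    {
            struct object o = { .ops = &shmem_ops };

            o.ops->get_pages(&o);   /* caller never cares which backend */
            printf("pages from %s backend\n", (char *)o.pages);
            o.ops->put_pages(&o);
            return 0;
    }
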
+
 struct drm_i915_gem_object *i915_gem_alloc_object(struct drm_device *dev,
 						  size_t size)
 {
-	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct drm_i915_gem_object *obj;
 	struct address_space *mapping;
 	u32 mask;
@@ -3490,7 +3699,7 @@
 	mapping = obj->base.filp->f_path.dentry->d_inode->i_mapping;
 	mapping_set_gfp_mask(mapping, mask);
 
-	i915_gem_info_add_obj(dev_priv, size);
+	i915_gem_object_init(obj, &i915_gem_object_ops);
 
 	obj->base.write_domain = I915_GEM_DOMAIN_CPU;
 	obj->base.read_domains = I915_GEM_DOMAIN_CPU;
@@ -3512,17 +3721,6 @@
 	} else
 		obj->cache_level = I915_CACHE_NONE;
 
-	obj->base.driver_private = NULL;
-	obj->fence_reg = I915_FENCE_REG_NONE;
-	INIT_LIST_HEAD(&obj->mm_list);
-	INIT_LIST_HEAD(&obj->gtt_list);
-	INIT_LIST_HEAD(&obj->ring_list);
-	INIT_LIST_HEAD(&obj->exec_list);
-	INIT_LIST_HEAD(&obj->gpu_write_list);
-	obj->madv = I915_MADV_WILLNEED;
-	/* Avoid an unnecessary call to unbind on the first bind. */
-	obj->map_and_fenceable = true;
-
 	return obj;
 }
 
@@ -3541,9 +3739,6 @@
 
 	trace_i915_gem_object_destroy(obj);
 
-	if (gem_obj->import_attach)
-		drm_prime_gem_destroy(gem_obj, obj->sg_table);
-
 	if (obj->phys_obj)
 		i915_gem_detach_phys_object(dev, obj);
 
@@ -3559,8 +3754,14 @@
 		dev_priv->mm.interruptible = was_interruptible;
 	}
 
-	if (obj->base.map_list.map)
-		drm_gem_free_mmap_offset(&obj->base);
+	obj->pages_pin_count = 0;
+	i915_gem_object_put_pages(obj);
+	i915_gem_object_free_mmap_offset(obj);
+
+	BUG_ON(obj->pages);
+
+	if (obj->base.import_attach)
+		drm_prime_gem_destroy(&obj->base, NULL);
 
 	drm_gem_object_release(&obj->base);
 	i915_gem_info_remove_obj(dev_priv, obj->base.size);
@@ -3591,7 +3792,7 @@
 
 	/* Under UMS, be paranoid and evict. */
 	if (!drm_core_check_feature(dev, DRIVER_MODESET))
-		i915_gem_evict_everything(dev, false);
+		i915_gem_evict_everything(dev);
 
 	i915_gem_reset_fences(dev);
 
@@ -3892,7 +4093,6 @@
 	}
 
 	BUG_ON(!list_empty(&dev_priv->mm.active_list));
-	BUG_ON(!list_empty(&dev_priv->mm.flushing_list));
 	BUG_ON(!list_empty(&dev_priv->mm.inactive_list));
 	mutex_unlock(&dev->struct_mutex);
 
@@ -3940,7 +4140,6 @@
 {
 	INIT_LIST_HEAD(&ring->active_list);
 	INIT_LIST_HEAD(&ring->request_list);
-	INIT_LIST_HEAD(&ring->gpu_write_list);
 }
 
 void
@@ -3950,10 +4149,10 @@
 	drm_i915_private_t *dev_priv = dev->dev_private;
 
 	INIT_LIST_HEAD(&dev_priv->mm.active_list);
-	INIT_LIST_HEAD(&dev_priv->mm.flushing_list);
 	INIT_LIST_HEAD(&dev_priv->mm.inactive_list);
+	INIT_LIST_HEAD(&dev_priv->mm.unbound_list);
+	INIT_LIST_HEAD(&dev_priv->mm.bound_list);
 	INIT_LIST_HEAD(&dev_priv->mm.fence_list);
-	INIT_LIST_HEAD(&dev_priv->mm.gtt_list);
 	for (i = 0; i < I915_NUM_RINGS; i++)
 		init_ring_lists(&dev_priv->ring[i]);
 	for (i = 0; i < I915_MAX_NUM_FENCES; i++)
@@ -4198,18 +4397,6 @@
 }
 
 static int
-i915_gpu_is_active(struct drm_device *dev)
-{
-	drm_i915_private_t *dev_priv = dev->dev_private;
-	int lists_empty;
-
-	lists_empty = list_empty(&dev_priv->mm.flushing_list) &&
-		      list_empty(&dev_priv->mm.active_list);
-
-	return !lists_empty;
-}
-
-static int
 i915_gem_inactive_shrink(struct shrinker *shrinker, struct shrink_control *sc)
 {
 	struct drm_i915_private *dev_priv =
@@ -4217,60 +4404,27 @@
 			     struct drm_i915_private,
 			     mm.inactive_shrinker);
 	struct drm_device *dev = dev_priv->dev;
-	struct drm_i915_gem_object *obj, *next;
+	struct drm_i915_gem_object *obj;
 	int nr_to_scan = sc->nr_to_scan;
 	int cnt;
 
 	if (!mutex_trylock(&dev->struct_mutex))
 		return 0;
 
-	/* "fast-path" to count number of available objects */
-	if (nr_to_scan == 0) {
-		cnt = 0;
-		list_for_each_entry(obj,
-				    &dev_priv->mm.inactive_list,
-				    mm_list)
-			cnt++;
-		mutex_unlock(&dev->struct_mutex);
-		return cnt / 100 * sysctl_vfs_cache_pressure;
+	if (nr_to_scan) {
+		nr_to_scan -= i915_gem_purge(dev_priv, nr_to_scan);
+		if (nr_to_scan > 0)
+			i915_gem_shrink_all(dev_priv);
 	}
 
-rescan:
-	/* first scan for clean buffers */
-	i915_gem_retire_requests(dev);
-
-	list_for_each_entry_safe(obj, next,
-				 &dev_priv->mm.inactive_list,
-				 mm_list) {
-		if (i915_gem_object_is_purgeable(obj)) {
-			if (i915_gem_object_unbind(obj) == 0 &&
-			    --nr_to_scan == 0)
-				break;
-		}
-	}
-
-	/* second pass, evict/count anything still on the inactive list */
 	cnt = 0;
-	list_for_each_entry_safe(obj, next,
-				 &dev_priv->mm.inactive_list,
-				 mm_list) {
-		if (nr_to_scan &&
-		    i915_gem_object_unbind(obj) == 0)
-			nr_to_scan--;
-		else
-			cnt++;
-	}
+	list_for_each_entry(obj, &dev_priv->mm.unbound_list, gtt_list)
+		if (obj->pages_pin_count == 0)
+			cnt += obj->base.size >> PAGE_SHIFT;
+	list_for_each_entry(obj, &dev_priv->mm.bound_list, gtt_list)
+		if (obj->pin_count == 0 && obj->pages_pin_count == 0)
+			cnt += obj->base.size >> PAGE_SHIFT;
 
-	if (nr_to_scan && i915_gpu_is_active(dev)) {
-		/*
-		 * We are desperate for pages, so as a last resort, wait
-		 * for the GPU to finish and discard whatever we can.
-		 * This dramatically reduces the number of
-		 * OOM-killer events whilst running the GPU aggressively.
-		 */
-		if (i915_gpu_idle(dev) == 0)
-			goto rescan;
-	}
 	mutex_unlock(&dev->struct_mutex);
-	return cnt / 100 * sysctl_vfs_cache_pressure;
+	return cnt;
 }
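
Note the unit change in the shrinker: it now reports reclaimable pages
directly instead of an object count scaled by sysctl_vfs_cache_pressure.
A toy version of the new tally, with invented object fields:

    #include <stdio.h>

    struct obj {
            unsigned long size;
            int pin_count, pages_pin_count;
    };

    static unsigned long count_pages(const struct obj *v, int n)
    {
            unsigned long cnt = 0;

            for (int i = 0; i < n; i++)
                    if (v[i].pin_count == 0 && v[i].pages_pin_count == 0)
                            cnt += v[i].size >> 12; /* PAGE_SHIFT */
            return cnt;
    }

    int main(void)
    {
            struct obj objs[] = {
                    { 1 << 20, 0, 0 },      /* 1 MiB, reclaimable */
                    { 1 << 20, 1, 0 },      /* 1 MiB, pinned: skipped */
            };

            printf("reclaimable pages: %lu\n", count_pages(objs, 2));
            return 0;
    }
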
diff --git a/drivers/gpu/drm/i915/i915_gem_context.c b/drivers/gpu/drm/i915/i915_gem_context.c
index a9d58d7..4aa7ecf 100644
--- a/drivers/gpu/drm/i915/i915_gem_context.c
+++ b/drivers/gpu/drm/i915/i915_gem_context.c
@@ -97,8 +97,7 @@
 
 static struct i915_hw_context *
 i915_gem_context_get(struct drm_i915_file_private *file_priv, u32 id);
-static int do_switch(struct drm_i915_gem_object *from_obj,
-		     struct i915_hw_context *to, u32 seqno);
+static int do_switch(struct i915_hw_context *to);
 
 static int get_context_size(struct drm_device *dev)
 {
@@ -113,7 +112,10 @@
 		break;
 	case 7:
 		reg = I915_READ(GEN7_CXT_SIZE);
-		ret = GEN7_CXT_TOTAL_SIZE(reg) * 64;
+		if (IS_HASWELL(dev))
+			ret = HSW_CXT_TOTAL_SIZE(reg) * 64;
+		else
+			ret = GEN7_CXT_TOTAL_SIZE(reg) * 64;
 		break;
 	default:
 		BUG();
@@ -219,20 +221,21 @@
 	 * default context.
 	 */
 	dev_priv->ring[RCS].default_context = ctx;
-	ret = i915_gem_object_pin(ctx->obj, CONTEXT_ALIGN, false);
-	if (ret) {
-		do_destroy(ctx);
-		return ret;
-	}
+	ret = i915_gem_object_pin(ctx->obj, CONTEXT_ALIGN, false, false);
+	if (ret)
+		goto err_destroy;
 
-	ret = do_switch(NULL, ctx, 0);
-	if (ret) {
-		i915_gem_object_unpin(ctx->obj);
-		do_destroy(ctx);
-	} else {
-		DRM_DEBUG_DRIVER("Default HW context loaded\n");
-	}
+	ret = do_switch(ctx);
+	if (ret)
+		goto err_unpin;
 
+	DRM_DEBUG_DRIVER("Default HW context loaded\n");
+	return 0;
+
+err_unpin:
+	i915_gem_object_unpin(ctx->obj);
+err_destroy:
+	do_destroy(ctx);
 	return ret;
 }
 
@@ -359,18 +362,19 @@
 	return ret;
 }
 
-static int do_switch(struct drm_i915_gem_object *from_obj,
-		     struct i915_hw_context *to,
-		     u32 seqno)
+static int do_switch(struct i915_hw_context *to)
 {
-	struct intel_ring_buffer *ring = NULL;
+	struct intel_ring_buffer *ring = to->ring;
+	struct drm_i915_gem_object *from_obj = ring->last_context_obj;
 	u32 hw_flags = 0;
 	int ret;
 
-	BUG_ON(to == NULL);
 	BUG_ON(from_obj != NULL && from_obj->pin_count == 0);
 
-	ret = i915_gem_object_pin(to->obj, CONTEXT_ALIGN, false);
+	if (from_obj == to->obj)
+		return 0;
+
+	ret = i915_gem_object_pin(to->obj, CONTEXT_ALIGN, false, false);
 	if (ret)
 		return ret;
 
@@ -393,7 +397,6 @@
 	else if (WARN_ON_ONCE(from_obj == to->obj)) /* not yet expected */
 		hw_flags |= MI_FORCE_RESTORE;
 
-	ring = to->ring;
 	ret = mi_set_context(ring, to, hw_flags);
 	if (ret) {
 		i915_gem_object_unpin(to->obj);
@@ -407,6 +410,7 @@
 	 * MI_SET_CONTEXT instead of when the next seqno has completed.
 	 */
 	if (from_obj != NULL) {
+		u32 seqno = i915_gem_next_request_seqno(ring);
 		from_obj->base.read_domains = I915_GEM_DOMAIN_INSTRUCTION;
 		i915_gem_object_move_to_active(from_obj, ring, seqno);
 		/* As long as MI_SET_CONTEXT is serializing, ie. it flushes the
@@ -417,7 +421,7 @@
 		 * swapped, but there is no way to do that yet.
 		 */
 		from_obj->dirty = 1;
-		BUG_ON(from_obj->ring != to->ring);
+		BUG_ON(from_obj->ring != ring);
 		i915_gem_object_unpin(from_obj);
 
 		drm_gem_object_unreference(&from_obj->base);
@@ -448,9 +452,7 @@
 			int to_id)
 {
 	struct drm_i915_private *dev_priv = ring->dev->dev_private;
-	struct drm_i915_file_private *file_priv = NULL;
 	struct i915_hw_context *to;
-	struct drm_i915_gem_object *from_obj = ring->last_context_obj;
 
 	if (dev_priv->hw_contexts_disabled)
 		return 0;
@@ -458,21 +460,18 @@
 	if (ring != &dev_priv->ring[RCS])
 		return 0;
 
-	if (file)
-		file_priv = file->driver_priv;
-
 	if (to_id == DEFAULT_CONTEXT_ID) {
 		to = ring->default_context;
 	} else {
-		to = i915_gem_context_get(file_priv, to_id);
+		if (file == NULL)
+			return -EINVAL;
+
+		to = i915_gem_context_get(file->driver_priv, to_id);
 		if (to == NULL)
 			return -ENOENT;
 	}
 
-	if (from_obj == to->obj)
-		return 0;
-
-	return do_switch(from_obj, to, i915_gem_next_request_seqno(to->ring));
+	return do_switch(to);
 }
 
 int i915_gem_context_create_ioctl(struct drm_device *dev, void *data,
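
The do_switch() rework is a pure parameter-folding refactor: the callee
now derives the outgoing context from the ring itself and short-circuits
a switch to the already-current context. Stripped of driver detail, the
shape is:

    #include <stdio.h>

    struct ring { int last_ctx; };
    struct ctx { struct ring *ring; int id; };

    /* after: callee pulls 'from' out of the ring, no-ops on a re-switch */
    static int do_switch(struct ctx *to)
    {
            struct ring *ring = to->ring;

            if (ring->last_ctx == to->id)
                    return 0;       /* already current, nothing to do */

            ring->last_ctx = to->id;
            return 1;
    }

    int main(void)
    {
            struct ring r = { .last_ctx = 0 };
            struct ctx c = { .ring = &r, .id = 7 };

            printf("switched: %d, again: %d\n", do_switch(&c), do_switch(&c));
            return 0;
    }
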
diff --git a/drivers/gpu/drm/i915/i915_gem_dmabuf.c b/drivers/gpu/drm/i915/i915_gem_dmabuf.c
index aa308e1..ca3497e 100644
--- a/drivers/gpu/drm/i915/i915_gem_dmabuf.c
+++ b/drivers/gpu/drm/i915/i915_gem_dmabuf.c
@@ -28,35 +28,62 @@
 #include <linux/dma-buf.h>
 
 static struct sg_table *i915_gem_map_dma_buf(struct dma_buf_attachment *attachment,
-				      enum dma_data_direction dir)
+					     enum dma_data_direction dir)
 {
 	struct drm_i915_gem_object *obj = attachment->dmabuf->priv;
-	struct drm_device *dev = obj->base.dev;
-	int npages = obj->base.size / PAGE_SIZE;
-	struct sg_table *sg = NULL;
-	int ret;
-	int nents;
+	struct sg_table *st;
+	struct scatterlist *src, *dst;
+	int ret, i;
 
-	ret = i915_mutex_lock_interruptible(dev);
+	ret = i915_mutex_lock_interruptible(obj->base.dev);
 	if (ret)
 		return ERR_PTR(ret);
 
-	if (!obj->pages) {
-		ret = i915_gem_object_get_pages_gtt(obj, __GFP_NORETRY | __GFP_NOWARN);
-		if (ret)
-			goto out;
+	ret = i915_gem_object_get_pages(obj);
+	if (ret) {
+		st = ERR_PTR(ret);
+		goto out;
 	}
 
-	/* link the pages into an SG then map the sg */
-	sg = drm_prime_pages_to_sg(obj->pages, npages);
-	nents = dma_map_sg(attachment->dev, sg->sgl, sg->nents, dir);
+	/* Copy sg so that we make an independent mapping */
+	st = kmalloc(sizeof(struct sg_table), GFP_KERNEL);
+	if (st == NULL) {
+		st = ERR_PTR(-ENOMEM);
+		goto out;
+	}
+
+	ret = sg_alloc_table(st, obj->pages->nents, GFP_KERNEL);
+	if (ret) {
+		kfree(st);
+		st = ERR_PTR(ret);
+		goto out;
+	}
+
+	src = obj->pages->sgl;
+	dst = st->sgl;
+	for (i = 0; i < obj->pages->nents; i++) {
+		sg_set_page(dst, sg_page(src), PAGE_SIZE, 0);
+		dst = sg_next(dst);
+		src = sg_next(src);
+	}
+
+	if (!dma_map_sg(attachment->dev, st->sgl, st->nents, dir)) {
+		sg_free_table(st);
+		kfree(st);
+		st = ERR_PTR(-ENOMEM);
+		goto out;
+	}
+
+	i915_gem_object_pin_pages(obj);
+
 out:
-	mutex_unlock(&dev->struct_mutex);
-	return sg;
+	mutex_unlock(&obj->base.dev->struct_mutex);
+	return st;
 }
 
 static void i915_gem_unmap_dma_buf(struct dma_buf_attachment *attachment,
-			    struct sg_table *sg, enum dma_data_direction dir)
+				   struct sg_table *sg,
+				   enum dma_data_direction dir)
 {
 	dma_unmap_sg(attachment->dev, sg->sgl, sg->nents, dir);
 	sg_free_table(sg);
@@ -78,7 +105,9 @@
 {
 	struct drm_i915_gem_object *obj = dma_buf->priv;
 	struct drm_device *dev = obj->base.dev;
-	int ret;
+	struct scatterlist *sg;
+	struct page **pages;
+	int ret, i;
 
 	ret = i915_mutex_lock_interruptible(dev);
 	if (ret)
@@ -89,24 +118,34 @@
 		goto out_unlock;
 	}
 
-	if (!obj->pages) {
-		ret = i915_gem_object_get_pages_gtt(obj, __GFP_NORETRY | __GFP_NOWARN);
-		if (ret) {
-			mutex_unlock(&dev->struct_mutex);
-			return ERR_PTR(ret);
-		}
-	}
+	ret = i915_gem_object_get_pages(obj);
+	if (ret)
+		goto error;
 
-	obj->dma_buf_vmapping = vmap(obj->pages, obj->base.size / PAGE_SIZE, 0, PAGE_KERNEL);
-	if (!obj->dma_buf_vmapping) {
-		DRM_ERROR("failed to vmap object\n");
-		goto out_unlock;
-	}
+	ret = -ENOMEM;
+
+	pages = drm_malloc_ab(obj->pages->nents, sizeof(struct page *));
+	if (pages == NULL)
+		goto error;
+
+	for_each_sg(obj->pages->sgl, sg, obj->pages->nents, i)
+		pages[i] = sg_page(sg);
+
+	obj->dma_buf_vmapping = vmap(pages, obj->pages->nents, 0, PAGE_KERNEL);
+	drm_free_large(pages);
+
+	if (!obj->dma_buf_vmapping)
+		goto error;
 
 	obj->vmapping_count = 1;
+	i915_gem_object_pin_pages(obj);
 out_unlock:
 	mutex_unlock(&dev->struct_mutex);
 	return obj->dma_buf_vmapping;
+
+error:
+	mutex_unlock(&dev->struct_mutex);
+	return ERR_PTR(ret);
 }
 
 static void i915_gem_dmabuf_vunmap(struct dma_buf *dma_buf, void *vaddr)
@@ -119,10 +158,11 @@
 	if (ret)
 		return;
 
-	--obj->vmapping_count;
-	if (obj->vmapping_count == 0) {
+	if (--obj->vmapping_count == 0) {
 		vunmap(obj->dma_buf_vmapping);
 		obj->dma_buf_vmapping = NULL;
+
+		i915_gem_object_unpin_pages(obj);
 	}
 	mutex_unlock(&dev->struct_mutex);
 }
@@ -151,6 +191,22 @@
 	return -EINVAL;
 }
 
+static int i915_gem_begin_cpu_access(struct dma_buf *dma_buf, size_t start, size_t length, enum dma_data_direction direction)
+{
+	struct drm_i915_gem_object *obj = dma_buf->priv;
+	struct drm_device *dev = obj->base.dev;
+	int ret;
+	bool write = (direction == DMA_BIDIRECTIONAL || direction == DMA_TO_DEVICE);
+
+	ret = i915_mutex_lock_interruptible(dev);
+	if (ret)
+		return ret;
+
+	ret = i915_gem_object_set_to_cpu_domain(obj, write);
+	mutex_unlock(&dev->struct_mutex);
+	return ret;
+}
+
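
A sketch of how an importer would pair with the new hook, using the
dma-buf CPU-access API as it stood in this era (start/length arguments);
the helper itself is hypothetical:

    #include <linux/dma-buf.h>

    /* hypothetical importer helper: make an imported buffer coherent for
     * a CPU read, then signal completion; kmap/vmap access elided */
    static int importer_cpu_read(struct dma_buf *buf, size_t len)
    {
            int ret;

            ret = dma_buf_begin_cpu_access(buf, 0, len, DMA_FROM_DEVICE);
            if (ret)
                    return ret;

            /* ... CPU reads through dma_buf_kmap() or a vmap here ... */

            dma_buf_end_cpu_access(buf, 0, len, DMA_FROM_DEVICE);
            return 0;
    }
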
 static const struct dma_buf_ops i915_dmabuf_ops =  {
 	.map_dma_buf = i915_gem_map_dma_buf,
 	.unmap_dma_buf = i915_gem_unmap_dma_buf,
@@ -162,25 +218,47 @@
 	.mmap = i915_gem_dmabuf_mmap,
 	.vmap = i915_gem_dmabuf_vmap,
 	.vunmap = i915_gem_dmabuf_vunmap,
+	.begin_cpu_access = i915_gem_begin_cpu_access,
 };
 
 struct dma_buf *i915_gem_prime_export(struct drm_device *dev,
-				struct drm_gem_object *gem_obj, int flags)
+				      struct drm_gem_object *gem_obj, int flags)
 {
 	struct drm_i915_gem_object *obj = to_intel_bo(gem_obj);
 
-	return dma_buf_export(obj, &i915_dmabuf_ops,
-						  obj->base.size, 0600);
+	return dma_buf_export(obj, &i915_dmabuf_ops, obj->base.size, 0600);
 }
 
+static int i915_gem_object_get_pages_dmabuf(struct drm_i915_gem_object *obj)
+{
+	struct sg_table *sg;
+
+	sg = dma_buf_map_attachment(obj->base.import_attach, DMA_BIDIRECTIONAL);
+	if (IS_ERR(sg))
+		return PTR_ERR(sg);
+
+	obj->pages = sg;
+	obj->has_dma_mapping = true;
+	return 0;
+}
+
+static void i915_gem_object_put_pages_dmabuf(struct drm_i915_gem_object *obj)
+{
+	dma_buf_unmap_attachment(obj->base.import_attach,
+				 obj->pages, DMA_BIDIRECTIONAL);
+	obj->has_dma_mapping = false;
+}
+
+static const struct drm_i915_gem_object_ops i915_gem_object_dmabuf_ops = {
+	.get_pages = i915_gem_object_get_pages_dmabuf,
+	.put_pages = i915_gem_object_put_pages_dmabuf,
+};
+
 struct drm_gem_object *i915_gem_prime_import(struct drm_device *dev,
-				struct dma_buf *dma_buf)
+					     struct dma_buf *dma_buf)
 {
 	struct dma_buf_attachment *attach;
-	struct sg_table *sg;
 	struct drm_i915_gem_object *obj;
-	int npages;
-	int size;
 	int ret;
 
 	/* is this one of our own objects? */
@@ -198,34 +276,24 @@
 	if (IS_ERR(attach))
 		return ERR_CAST(attach);
 
-	sg = dma_buf_map_attachment(attach, DMA_BIDIRECTIONAL);
-	if (IS_ERR(sg)) {
-		ret = PTR_ERR(sg);
-		goto fail_detach;
-	}
-
-	size = dma_buf->size;
-	npages = size / PAGE_SIZE;
 
 	obj = kzalloc(sizeof(*obj), GFP_KERNEL);
 	if (obj == NULL) {
 		ret = -ENOMEM;
-		goto fail_unmap;
+		goto fail_detach;
 	}
 
-	ret = drm_gem_private_object_init(dev, &obj->base, size);
+	ret = drm_gem_private_object_init(dev, &obj->base, dma_buf->size);
 	if (ret) {
 		kfree(obj);
-		goto fail_unmap;
+		goto fail_detach;
 	}
 
-	obj->sg_table = sg;
+	i915_gem_object_init(obj, &i915_gem_object_dmabuf_ops);
 	obj->base.import_attach = attach;
 
 	return &obj->base;
 
-fail_unmap:
-	dma_buf_unmap_attachment(attach, sg, DMA_BIDIRECTIONAL);
 fail_detach:
 	dma_buf_detach(dma_buf, attach);
 	return ERR_PTR(ret);
diff --git a/drivers/gpu/drm/i915/i915_gem_evict.c b/drivers/gpu/drm/i915/i915_gem_evict.c
index eba0308..a2d8acd 100644
--- a/drivers/gpu/drm/i915/i915_gem_evict.c
+++ b/drivers/gpu/drm/i915/i915_gem_evict.c
@@ -44,7 +44,8 @@
 
 int
 i915_gem_evict_something(struct drm_device *dev, int min_size,
-			 unsigned alignment, bool mappable)
+			 unsigned alignment, unsigned cache_level,
+			 bool mappable, bool nonblocking)
 {
 	drm_i915_private_t *dev_priv = dev->dev_private;
 	struct list_head eviction_list, unwind_list;
@@ -79,11 +80,11 @@
 	INIT_LIST_HEAD(&unwind_list);
 	if (mappable)
 		drm_mm_init_scan_with_range(&dev_priv->mm.gtt_space,
-					    min_size, alignment, 0,
+					    min_size, alignment, cache_level,
 					    0, dev_priv->mm.gtt_mappable_end);
 	else
 		drm_mm_init_scan(&dev_priv->mm.gtt_space,
-				 min_size, alignment, 0);
+				 min_size, alignment, cache_level);
 
 	/* First see if there is a large enough contiguous idle region... */
 	list_for_each_entry(obj, &dev_priv->mm.inactive_list, mm_list) {
@@ -91,29 +92,16 @@
 			goto found;
 	}
 
+	if (nonblocking)
+		goto none;
+
 	/* Now merge in the soon-to-be-expired objects... */
 	list_for_each_entry(obj, &dev_priv->mm.active_list, mm_list) {
-		/* Does the object require an outstanding flush? */
-		if (obj->base.write_domain)
-			continue;
-
 		if (mark_free(obj, &unwind_list))
 			goto found;
 	}
 
-	/* Finally add anything with a pending flush (in order of retirement) */
-	list_for_each_entry(obj, &dev_priv->mm.flushing_list, mm_list) {
-		if (mark_free(obj, &unwind_list))
-			goto found;
-	}
-	list_for_each_entry(obj, &dev_priv->mm.active_list, mm_list) {
-		if (!obj->base.write_domain)
-			continue;
-
-		if (mark_free(obj, &unwind_list))
-			goto found;
-	}
-
+none:
 	/* Nothing found, clean up and bail out! */
 	while (!list_empty(&unwind_list)) {
 		obj = list_first_entry(&unwind_list,
@@ -164,7 +152,7 @@
 }
 
 int
-i915_gem_evict_everything(struct drm_device *dev, bool purgeable_only)
+i915_gem_evict_everything(struct drm_device *dev)
 {
 	drm_i915_private_t *dev_priv = dev->dev_private;
 	struct drm_i915_gem_object *obj, *next;
@@ -172,12 +160,11 @@
 	int ret;
 
 	lists_empty = (list_empty(&dev_priv->mm.inactive_list) &&
-		       list_empty(&dev_priv->mm.flushing_list) &&
 		       list_empty(&dev_priv->mm.active_list));
 	if (lists_empty)
 		return -ENOSPC;
 
-	trace_i915_gem_evict_everything(dev, purgeable_only);
+	trace_i915_gem_evict_everything(dev);
 
 	/* The gpu_idle will flush everything in the write domain to the
 	 * active list. Then we must move everything off the active list
@@ -189,16 +176,11 @@
 
 	i915_gem_retire_requests(dev);
 
-	BUG_ON(!list_empty(&dev_priv->mm.flushing_list));
-
 	/* Having flushed everything, unbind() should never raise an error */
 	list_for_each_entry_safe(obj, next,
-				 &dev_priv->mm.inactive_list, mm_list) {
-		if (!purgeable_only || obj->madv != I915_MADV_WILLNEED) {
-			if (obj->pin_count == 0)
-				WARN_ON(i915_gem_object_unbind(obj));
-		}
-	}
+				 &dev_priv->mm.inactive_list, mm_list)
+		if (obj->pin_count == 0)
+			WARN_ON(i915_gem_object_unbind(obj));
 
 	return 0;
 }
diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
index ff2819e..6a2f3e5 100644
--- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
@@ -34,180 +34,6 @@
 #include "intel_drv.h"
 #include <linux/dma_remapping.h>
 
-struct change_domains {
-	uint32_t invalidate_domains;
-	uint32_t flush_domains;
-	uint32_t flush_rings;
-	uint32_t flips;
-};
-
-/*
- * Set the next domain for the specified object. This
- * may not actually perform the necessary flushing/invaliding though,
- * as that may want to be batched with other set_domain operations
- *
- * This is (we hope) the only really tricky part of gem. The goal
- * is fairly simple -- track which caches hold bits of the object
- * and make sure they remain coherent. A few concrete examples may
- * help to explain how it works. For shorthand, we use the notation
- * (read_domains, write_domain), e.g. (CPU, CPU) to indicate
- * a pair of read and write domain masks.
- *
- * Case 1: the batch buffer
- *
- *	1. Allocated
- *	2. Written by CPU
- *	3. Mapped to GTT
- *	4. Read by GPU
- *	5. Unmapped from GTT
- *	6. Freed
- *
- *	Let's take these a step at a time
- *
- *	1. Allocated
- *		Pages allocated from the kernel may still have
- *		cache contents, so we set them to (CPU, CPU) always.
- *	2. Written by CPU (using pwrite)
- *		The pwrite function calls set_domain (CPU, CPU) and
- *		this function does nothing (as nothing changes)
- *	3. Mapped by GTT
- *		This function asserts that the object is not
- *		currently in any GPU-based read or write domains
- *	4. Read by GPU
- *		i915_gem_execbuffer calls set_domain (COMMAND, 0).
- *		As write_domain is zero, this function adds in the
- *		current read domains (CPU+COMMAND, 0).
- *		flush_domains is set to CPU.
- *		invalidate_domains is set to COMMAND
- *		clflush is run to get data out of the CPU caches
- *		then i915_dev_set_domain calls i915_gem_flush to
- *		emit an MI_FLUSH and drm_agp_chipset_flush
- *	5. Unmapped from GTT
- *		i915_gem_object_unbind calls set_domain (CPU, CPU)
- *		flush_domains and invalidate_domains end up both zero
- *		so no flushing/invalidating happens
- *	6. Freed
- *		yay, done
- *
- * Case 2: The shared render buffer
- *
- *	1. Allocated
- *	2. Mapped to GTT
- *	3. Read/written by GPU
- *	4. set_domain to (CPU,CPU)
- *	5. Read/written by CPU
- *	6. Read/written by GPU
- *
- *	1. Allocated
- *		Same as last example, (CPU, CPU)
- *	2. Mapped to GTT
- *		Nothing changes (assertions find that it is not in the GPU)
- *	3. Read/written by GPU
- *		execbuffer calls set_domain (RENDER, RENDER)
- *		flush_domains gets CPU
- *		invalidate_domains gets GPU
- *		clflush (obj)
- *		MI_FLUSH and drm_agp_chipset_flush
- *	4. set_domain (CPU, CPU)
- *		flush_domains gets GPU
- *		invalidate_domains gets CPU
- *		wait_rendering (obj) to make sure all drawing is complete.
- *		This will include an MI_FLUSH to get the data from GPU
- *		to memory
- *		clflush (obj) to invalidate the CPU cache
- *		Another MI_FLUSH in i915_gem_flush (eliminate this somehow?)
- *	5. Read/written by CPU
- *		cache lines are loaded and dirtied
- *	6. Read/written by GPU
- *		Same as last GPU access
- *
- * Case 3: The constant buffer
- *
- *	1. Allocated
- *	2. Written by CPU
- *	3. Read by GPU
- *	4. Updated (written) by CPU again
- *	5. Read by GPU
- *
- *	1. Allocated
- *		(CPU, CPU)
- *	2. Written by CPU
- *		(CPU, CPU)
- *	3. Read by GPU
- *		(CPU+RENDER, 0)
- *		flush_domains = CPU
- *		invalidate_domains = RENDER
- *		clflush (obj)
- *		MI_FLUSH
- *		drm_agp_chipset_flush
- *	4. Updated (written) by CPU again
- *		(CPU, CPU)
- *		flush_domains = 0 (no previous write domain)
- *		invalidate_domains = 0 (no new read domains)
- *	5. Read by GPU
- *		(CPU+RENDER, 0)
- *		flush_domains = CPU
- *		invalidate_domains = RENDER
- *		clflush (obj)
- *		MI_FLUSH
- *		drm_agp_chipset_flush
- */
-static void
-i915_gem_object_set_to_gpu_domain(struct drm_i915_gem_object *obj,
-				  struct intel_ring_buffer *ring,
-				  struct change_domains *cd)
-{
-	uint32_t invalidate_domains = 0, flush_domains = 0;
-
-	/*
-	 * If the object isn't moving to a new write domain,
-	 * let the object stay in multiple read domains
-	 */
-	if (obj->base.pending_write_domain == 0)
-		obj->base.pending_read_domains |= obj->base.read_domains;
-
-	/*
-	 * Flush the current write domain if
-	 * the new read domains don't match. Invalidate
-	 * any read domains which differ from the old
-	 * write domain
-	 */
-	if (obj->base.write_domain &&
-	    (((obj->base.write_domain != obj->base.pending_read_domains ||
-	       obj->ring != ring)) ||
-	     (obj->fenced_gpu_access && !obj->pending_fenced_gpu_access))) {
-		flush_domains |= obj->base.write_domain;
-		invalidate_domains |=
-			obj->base.pending_read_domains & ~obj->base.write_domain;
-	}
-	/*
-	 * Invalidate any read caches which may have
-	 * stale data. That is, any new read domains.
-	 */
-	invalidate_domains |= obj->base.pending_read_domains & ~obj->base.read_domains;
-	if ((flush_domains | invalidate_domains) & I915_GEM_DOMAIN_CPU)
-		i915_gem_clflush_object(obj);
-
-	if (obj->base.pending_write_domain)
-		cd->flips |= atomic_read(&obj->pending_flip);
-
-	/* The actual obj->write_domain will be updated with
-	 * pending_write_domain after we emit the accumulated flush for all
-	 * of our domain changes in execbuffers (which clears objects'
-	 * write_domains).  So if we have a current write domain that we
-	 * aren't changing, set pending_write_domain to that.
-	 */
-	if (flush_domains == 0 && obj->base.pending_write_domain == 0)
-		obj->base.pending_write_domain = obj->base.write_domain;
-
-	cd->invalidate_domains |= invalidate_domains;
-	cd->flush_domains |= flush_domains;
-	if (flush_domains & I915_GEM_GPU_DOMAINS)
-		cd->flush_rings |= intel_ring_flag(obj->ring);
-	if (invalidate_domains & I915_GEM_GPU_DOMAINS)
-		cd->flush_rings |= intel_ring_flag(ring);
-}
-
 struct eb_objects {
 	int and;
 	struct hlist_head buckets[0];
@@ -218,6 +44,7 @@
 {
 	struct eb_objects *eb;
 	int count = PAGE_SIZE / sizeof(struct hlist_head) / 2;
+	BUILD_BUG_ON(!is_power_of_2(PAGE_SIZE / sizeof(struct hlist_head)));
 	while (count > size)
 		count >>= 1;
 	eb = kzalloc(count*sizeof(struct hlist_head) +
@@ -269,6 +96,7 @@
 static inline int use_cpu_reloc(struct drm_i915_gem_object *obj)
 {
 	return (obj->base.write_domain == I915_GEM_DOMAIN_CPU ||
+		!obj->map_and_fenceable ||
 		obj->cache_level != I915_CACHE_NONE);
 }
 
@@ -383,7 +211,8 @@
 		if (ret)
 			return ret;
 
-		vaddr = kmap_atomic(obj->pages[reloc->offset >> PAGE_SHIFT]);
+		vaddr = kmap_atomic(i915_gem_object_get_page(obj,
+							     reloc->offset >> PAGE_SHIFT));
 		*(uint32_t *)(vaddr + page_offset) = reloc->delta;
 		kunmap_atomic(vaddr);
 	} else {
@@ -504,7 +333,8 @@
 	return ret;
 }
 
-#define  __EXEC_OBJECT_HAS_FENCE (1<<31)
+#define  __EXEC_OBJECT_HAS_PIN (1<<31)
+#define  __EXEC_OBJECT_HAS_FENCE (1<<30)
 
 static int
 need_reloc_mappable(struct drm_i915_gem_object *obj)
@@ -514,9 +344,10 @@
 }
 
 static int
-pin_and_fence_object(struct drm_i915_gem_object *obj,
-		     struct intel_ring_buffer *ring)
+i915_gem_execbuffer_reserve_object(struct drm_i915_gem_object *obj,
+				   struct intel_ring_buffer *ring)
 {
+	struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
 	struct drm_i915_gem_exec_object2 *entry = obj->exec_entry;
 	bool has_fenced_gpu_access = INTEL_INFO(ring->dev)->gen < 4;
 	bool need_fence, need_mappable;
@@ -528,15 +359,17 @@
 		obj->tiling_mode != I915_TILING_NONE;
 	need_mappable = need_fence || need_reloc_mappable(obj);
 
-	ret = i915_gem_object_pin(obj, entry->alignment, need_mappable);
+	ret = i915_gem_object_pin(obj, entry->alignment, need_mappable, false);
 	if (ret)
 		return ret;
 
+	entry->flags |= __EXEC_OBJECT_HAS_PIN;
+
 	if (has_fenced_gpu_access) {
 		if (entry->flags & EXEC_OBJECT_NEEDS_FENCE) {
 			ret = i915_gem_object_get_fence(obj);
 			if (ret)
-				goto err_unpin;
+				return ret;
 
 			if (i915_gem_object_pin_fence(obj))
 				entry->flags |= __EXEC_OBJECT_HAS_FENCE;
@@ -545,12 +378,35 @@
 		}
 	}
 
+	/* Ensure ppgtt mapping exists if needed */
+	if (dev_priv->mm.aliasing_ppgtt && !obj->has_aliasing_ppgtt_mapping) {
+		i915_ppgtt_bind_object(dev_priv->mm.aliasing_ppgtt,
+				       obj, obj->cache_level);
+
+		obj->has_aliasing_ppgtt_mapping = 1;
+	}
+
 	entry->offset = obj->gtt_offset;
 	return 0;
+}
 
-err_unpin:
-	i915_gem_object_unpin(obj);
-	return ret;
+static void
+i915_gem_execbuffer_unreserve_object(struct drm_i915_gem_object *obj)
+{
+	struct drm_i915_gem_exec_object2 *entry;
+
+	if (!obj->gtt_space)
+		return;
+
+	entry = obj->exec_entry;
+
+	if (entry->flags & __EXEC_OBJECT_HAS_FENCE)
+		i915_gem_object_unpin_fence(obj);
+
+	if (entry->flags & __EXEC_OBJECT_HAS_PIN)
+		i915_gem_object_unpin(obj);
+
+	entry->flags &= ~(__EXEC_OBJECT_HAS_FENCE | __EXEC_OBJECT_HAS_PIN);
 }
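
The two HAS_* flags make teardown idempotent: unreserve undoes exactly
what reserve managed to do before failing, and nothing else. The pattern
in miniature (flag values copied from the patch, the rest invented):

    #include <stdio.h>

    #define HAS_PIN   (1u << 31)
    #define HAS_FENCE (1u << 30)

    struct entry { unsigned flags; };

    static int reserve(struct entry *e, int want_fence)
    {
            e->flags |= HAS_PIN;            /* pin always happens first */
            if (want_fence)
                    e->flags |= HAS_FENCE;
            return 0;
    }

    static void unreserve(struct entry *e)
    {
            if (e->flags & HAS_FENCE)
                    puts("unpin fence");
            if (e->flags & HAS_PIN)
                    puts("unpin object");
            e->flags &= ~(HAS_PIN | HAS_FENCE);
    }

    int main(void)
    {
            struct entry e = { 0 };

            reserve(&e, 1);
            unreserve(&e);  /* safe even if reserve failed partway */
            return 0;
    }
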
 
 static int
@@ -558,11 +414,10 @@
 			    struct drm_file *file,
 			    struct list_head *objects)
 {
-	drm_i915_private_t *dev_priv = ring->dev->dev_private;
 	struct drm_i915_gem_object *obj;
-	int ret, retry;
-	bool has_fenced_gpu_access = INTEL_INFO(ring->dev)->gen < 4;
 	struct list_head ordered_objects;
+	bool has_fenced_gpu_access = INTEL_INFO(ring->dev)->gen < 4;
+	int retry;
 
 	INIT_LIST_HEAD(&ordered_objects);
 	while (!list_empty(objects)) {
@@ -587,6 +442,7 @@
 
 		obj->base.pending_read_domains = 0;
 		obj->base.pending_write_domain = 0;
+		obj->pending_fenced_gpu_access = false;
 	}
 	list_splice(&ordered_objects, objects);
 
@@ -599,12 +455,12 @@
 	 * 2.  Bind new objects.
 	 * 3.  Decrement pin count.
 	 *
-	 * This avoid unnecessary unbinding of later objects in order to makr
+	 * This avoids unnecessary unbinding of later objects in order to make
 	 * room for the earlier objects *unless* we need to defragment.
 	 */
 	retry = 0;
 	do {
-		ret = 0;
+		int ret = 0;
 
 		/* Unbind any ill-fitting objects or pin. */
 		list_for_each_entry(obj, objects, exec_list) {
@@ -624,7 +480,7 @@
 			    (need_mappable && !obj->map_and_fenceable))
 				ret = i915_gem_object_unbind(obj);
 			else
-				ret = pin_and_fence_object(obj, ring);
+				ret = i915_gem_execbuffer_reserve_object(obj, ring);
 			if (ret)
 				goto err;
 		}
@@ -634,77 +490,22 @@
 			if (obj->gtt_space)
 				continue;
 
-			ret = pin_and_fence_object(obj, ring);
-			if (ret) {
-				int ret_ignore;
-
-				/* This can potentially raise a harmless
-				 * -EINVAL if we failed to bind in the above
-				 * call. It cannot raise -EINTR since we know
-				 * that the bo is freshly bound and so will
-				 * not need to be flushed or waited upon.
-				 */
-				ret_ignore = i915_gem_object_unbind(obj);
-				(void)ret_ignore;
-				WARN_ON(obj->gtt_space);
-				break;
-			}
+			ret = i915_gem_execbuffer_reserve_object(obj, ring);
+			if (ret)
+				goto err;
 		}
 
-		/* Decrement pin count for bound objects */
-		list_for_each_entry(obj, objects, exec_list) {
-			struct drm_i915_gem_exec_object2 *entry;
+err:		/* Decrement pin count for bound objects */
+		list_for_each_entry(obj, objects, exec_list)
+			i915_gem_execbuffer_unreserve_object(obj);
 
-			if (!obj->gtt_space)
-				continue;
-
-			entry = obj->exec_entry;
-			if (entry->flags & __EXEC_OBJECT_HAS_FENCE) {
-				i915_gem_object_unpin_fence(obj);
-				entry->flags &= ~__EXEC_OBJECT_HAS_FENCE;
-			}
-
-			i915_gem_object_unpin(obj);
-
-			/* ... and ensure ppgtt mapping exist if needed. */
-			if (dev_priv->mm.aliasing_ppgtt && !obj->has_aliasing_ppgtt_mapping) {
-				i915_ppgtt_bind_object(dev_priv->mm.aliasing_ppgtt,
-						       obj, obj->cache_level);
-
-				obj->has_aliasing_ppgtt_mapping = 1;
-			}
-		}
-
-		if (ret != -ENOSPC || retry > 1)
+		if (ret != -ENOSPC || retry++)
 			return ret;
 
-		/* First attempt, just clear anything that is purgeable.
-		 * Second attempt, clear the entire GTT.
-		 */
-		ret = i915_gem_evict_everything(ring->dev, retry == 0);
+		ret = i915_gem_evict_everything(ring->dev);
 		if (ret)
 			return ret;
-
-		retry++;
 	} while (1);
-
-err:
-	list_for_each_entry_continue_reverse(obj, objects, exec_list) {
-		struct drm_i915_gem_exec_object2 *entry;
-
-		if (!obj->gtt_space)
-			continue;
-
-		entry = obj->exec_entry;
-		if (entry->flags & __EXEC_OBJECT_HAS_FENCE) {
-			i915_gem_object_unpin_fence(obj);
-			entry->flags &= ~__EXEC_OBJECT_HAS_FENCE;
-		}
-
-		i915_gem_object_unpin(obj);
-	}
-
-	return ret;
 }
 
 static int
@@ -810,18 +611,6 @@
 	return ret;
 }
 
-static void
-i915_gem_execbuffer_flush(struct drm_device *dev,
-			  uint32_t invalidate_domains,
-			  uint32_t flush_domains)
-{
-	if (flush_domains & I915_GEM_DOMAIN_CPU)
-		intel_gtt_chipset_flush();
-
-	if (flush_domains & I915_GEM_DOMAIN_GTT)
-		wmb();
-}
-
 static int
 i915_gem_execbuffer_wait_for_flips(struct intel_ring_buffer *ring, u32 flips)
 {
@@ -854,48 +643,45 @@
 	return 0;
 }
 
-
 static int
 i915_gem_execbuffer_move_to_gpu(struct intel_ring_buffer *ring,
 				struct list_head *objects)
 {
 	struct drm_i915_gem_object *obj;
-	struct change_domains cd;
+	uint32_t flush_domains = 0;
+	uint32_t flips = 0;
 	int ret;
 
-	memset(&cd, 0, sizeof(cd));
-	list_for_each_entry(obj, objects, exec_list)
-		i915_gem_object_set_to_gpu_domain(obj, ring, &cd);
-
-	if (cd.invalidate_domains | cd.flush_domains) {
-		i915_gem_execbuffer_flush(ring->dev,
-					  cd.invalidate_domains,
-					  cd.flush_domains);
-	}
-
-	if (cd.flips) {
-		ret = i915_gem_execbuffer_wait_for_flips(ring, cd.flips);
-		if (ret)
-			return ret;
-	}
-
 	list_for_each_entry(obj, objects, exec_list) {
 		ret = i915_gem_object_sync(obj, ring);
 		if (ret)
 			return ret;
+
+		if (obj->base.write_domain & I915_GEM_DOMAIN_CPU)
+			i915_gem_clflush_object(obj);
+
+		if (obj->base.pending_write_domain)
+			flips |= atomic_read(&obj->pending_flip);
+
+		flush_domains |= obj->base.write_domain;
 	}
 
+	if (flips) {
+		ret = i915_gem_execbuffer_wait_for_flips(ring, flips);
+		if (ret)
+			return ret;
+	}
+
+	if (flush_domains & I915_GEM_DOMAIN_CPU)
+		intel_gtt_chipset_flush();
+
+	if (flush_domains & I915_GEM_DOMAIN_GTT)
+		wmb();
+
 	/* Unconditionally invalidate gpu caches and ensure that we do flush
 	 * any residual writes from the previous batch.
 	 */
-	ret = i915_gem_flush_ring(ring,
-				  I915_GEM_GPU_DOMAINS,
-				  ring->gpu_caches_dirty ? I915_GEM_GPU_DOMAINS : 0);
-	if (ret)
-		return ret;
-
-	ring->gpu_caches_dirty = false;
-	return 0;
+	return intel_ring_invalidate_all_caches(ring);
 }
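
The rewritten move_to_gpu() gathers write domains across every object
and flushes once at the end, instead of flushing per object. Reduced to
its essentials:

    #include <stdio.h>

    #define DOMAIN_CPU (1u << 0)
    #define DOMAIN_GTT (1u << 1)

    int main(void)
    {
            unsigned domains[] = { DOMAIN_CPU, 0, DOMAIN_GTT };
            unsigned flush = 0;

            for (int i = 0; i < 3; i++)
                    flush |= domains[i];    /* gather, don't flush per object */

            if (flush & DOMAIN_CPU)
                    puts("chipset flush");  /* intel_gtt_chipset_flush() */
            if (flush & DOMAIN_GTT)
                    puts("wmb");            /* write memory barrier */
            return 0;
    }
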
 
 static bool
@@ -943,9 +729,8 @@
 	struct drm_i915_gem_object *obj;
 
 	list_for_each_entry(obj, objects, exec_list) {
-		  u32 old_read = obj->base.read_domains;
-		  u32 old_write = obj->base.write_domain;
-
+		u32 old_read = obj->base.read_domains;
+		u32 old_write = obj->base.write_domain;
 
 		obj->base.read_domains = obj->base.pending_read_domains;
 		obj->base.write_domain = obj->base.pending_write_domain;
@@ -954,17 +739,13 @@
 		i915_gem_object_move_to_active(obj, ring, seqno);
 		if (obj->base.write_domain) {
 			obj->dirty = 1;
-			obj->pending_gpu_write = true;
-			list_move_tail(&obj->gpu_write_list,
-				       &ring->gpu_write_list);
+			obj->last_write_seqno = seqno;
 			if (obj->pin_count) /* check for potential scanout */
-				intel_mark_busy(ring->dev, obj);
+				intel_mark_fb_busy(obj);
 		}
 
 		trace_i915_gem_object_change_domain(obj, old_read, old_write);
 	}
-
-	intel_mark_busy(ring->dev, NULL);
 }
 
 static void
@@ -972,16 +753,11 @@
 				    struct drm_file *file,
 				    struct intel_ring_buffer *ring)
 {
-	struct drm_i915_gem_request *request;
-
 	/* Unconditionally force add_request to emit a full flush. */
 	ring->gpu_caches_dirty = true;
 
 	/* Add a breadcrumb for the completion of the batch buffer */
-	request = kzalloc(sizeof(*request), GFP_KERNEL);
-	if (request == NULL || i915_add_request(ring, file, request)) {
-		kfree(request);
-	}
+	(void)i915_add_request(ring, file, NULL);
 }
 
 static int
@@ -1327,8 +1103,7 @@
 		return -ENOMEM;
 	}
 	ret = copy_from_user(exec_list,
-			     (struct drm_i915_relocation_entry __user *)
-			     (uintptr_t) args->buffers_ptr,
+			     (void __user *)(uintptr_t)args->buffers_ptr,
 			     sizeof(*exec_list) * args->buffer_count);
 	if (ret != 0) {
 		DRM_DEBUG("copy %d exec entries failed %d\n",
@@ -1367,8 +1142,7 @@
 		for (i = 0; i < args->buffer_count; i++)
 			exec_list[i].offset = exec2_list[i].offset;
 		/* ... and back out to userspace */
-		ret = copy_to_user((struct drm_i915_relocation_entry __user *)
-				   (uintptr_t) args->buffers_ptr,
+		ret = copy_to_user((void __user *)(uintptr_t)args->buffers_ptr,
 				   exec_list,
 				   sizeof(*exec_list) * args->buffer_count);
 		if (ret) {
@@ -1422,8 +1196,7 @@
 	ret = i915_gem_do_execbuffer(dev, data, file, args, exec2_list);
 	if (!ret) {
 		/* Copy the new buffer offsets back to the user's exec list. */
-		ret = copy_to_user((struct drm_i915_relocation_entry __user *)
-				   (uintptr_t) args->buffers_ptr,
+		ret = copy_to_user((void __user *)(uintptr_t)args->buffers_ptr,
 				   exec2_list,
 				   sizeof(*exec2_list) * args->buffer_count);
 		if (ret) {
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c
index 60815b8..47e427e 100644
--- a/drivers/gpu/drm/i915/i915_gem_gtt.c
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.c
@@ -167,8 +167,7 @@
 }
 
 static void i915_ppgtt_insert_sg_entries(struct i915_hw_ppgtt *ppgtt,
-					 struct scatterlist *sg_list,
-					 unsigned sg_len,
+					 const struct sg_table *pages,
 					 unsigned first_entry,
 					 uint32_t pte_flags)
 {
@@ -180,12 +179,12 @@
 	struct scatterlist *sg;
 
 	/* init sg walking */
-	sg = sg_list;
+	sg = pages->sgl;
 	i = 0;
 	segment_len = sg_dma_len(sg) >> PAGE_SHIFT;
 	m = 0;
 
-	while (i < sg_len) {
+	while (i < pages->nents) {
 		pt_vaddr = kmap_atomic(ppgtt->pt_pages[act_pd]);
 
 		for (j = first_pte; j < I915_PPGTT_PT_ENTRIES; j++) {
@@ -194,13 +193,11 @@
 			pt_vaddr[j] = pte | pte_flags;
 
 			/* grab the next page */
-			m++;
-			if (m == segment_len) {
-				sg = sg_next(sg);
-				i++;
-				if (i == sg_len)
+			if (++m == segment_len) {
+				if (++i == pages->nents)
 					break;
 
+				sg = sg_next(sg);
 				segment_len = sg_dma_len(sg) >> PAGE_SHIFT;
 				m = 0;
 			}
@@ -213,44 +210,10 @@
 	}
 }
 
-static void i915_ppgtt_insert_pages(struct i915_hw_ppgtt *ppgtt,
-				    unsigned first_entry, unsigned num_entries,
-				    struct page **pages, uint32_t pte_flags)
-{
-	uint32_t *pt_vaddr, pte;
-	unsigned act_pd = first_entry / I915_PPGTT_PT_ENTRIES;
-	unsigned first_pte = first_entry % I915_PPGTT_PT_ENTRIES;
-	unsigned last_pte, i;
-	dma_addr_t page_addr;
-
-	while (num_entries) {
-		last_pte = first_pte + num_entries;
-		last_pte = min_t(unsigned, last_pte, I915_PPGTT_PT_ENTRIES);
-
-		pt_vaddr = kmap_atomic(ppgtt->pt_pages[act_pd]);
-
-		for (i = first_pte; i < last_pte; i++) {
-			page_addr = page_to_phys(*pages);
-			pte = GEN6_PTE_ADDR_ENCODE(page_addr);
-			pt_vaddr[i] = pte | pte_flags;
-
-			pages++;
-		}
-
-		kunmap_atomic(pt_vaddr);
-
-		num_entries -= last_pte - first_pte;
-		first_pte = 0;
-		act_pd++;
-	}
-}
-
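
With everything funnelled through an sg_table, the PTE walk above could
also be phrased with the stock for_each_sg() iterator; a sketch under
that assumption (write_pte() is an invented stand-in for the PTE
encode/store):

    #include <linux/scatterlist.h>

    static void insert_sg_sketch(struct sg_table *pages,
                                 void (*write_pte)(dma_addr_t))
    {
            struct scatterlist *sg;
            unsigned int i, n;

            for_each_sg(pages->sgl, sg, pages->nents, i) {
                    dma_addr_t addr = sg_dma_address(sg);

                    /* one PTE per page within this segment */
                    for (n = 0; n < sg_dma_len(sg) >> PAGE_SHIFT; n++)
                            write_pte(addr + (n << PAGE_SHIFT));
            }
    }
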
 void i915_ppgtt_bind_object(struct i915_hw_ppgtt *ppgtt,
 			    struct drm_i915_gem_object *obj,
 			    enum i915_cache_level cache_level)
 {
-	struct drm_device *dev = obj->base.dev;
-	struct drm_i915_private *dev_priv = dev->dev_private;
 	uint32_t pte_flags = GEN6_PTE_VALID;
 
 	switch (cache_level) {
@@ -261,7 +224,7 @@
 		pte_flags |= GEN6_PTE_CACHE_LLC;
 		break;
 	case I915_CACHE_NONE:
-		if (IS_HASWELL(dev))
+		if (IS_HASWELL(obj->base.dev))
 			pte_flags |= HSW_PTE_UNCACHED;
 		else
 			pte_flags |= GEN6_PTE_UNCACHED;
@@ -270,26 +233,10 @@
 		BUG();
 	}
 
-	if (obj->sg_table) {
-		i915_ppgtt_insert_sg_entries(ppgtt,
-					     obj->sg_table->sgl,
-					     obj->sg_table->nents,
-					     obj->gtt_space->start >> PAGE_SHIFT,
-					     pte_flags);
-	} else if (dev_priv->mm.gtt->needs_dmar) {
-		BUG_ON(!obj->sg_list);
-
-		i915_ppgtt_insert_sg_entries(ppgtt,
-					     obj->sg_list,
-					     obj->num_sg,
-					     obj->gtt_space->start >> PAGE_SHIFT,
-					     pte_flags);
-	} else
-		i915_ppgtt_insert_pages(ppgtt,
-					obj->gtt_space->start >> PAGE_SHIFT,
-					obj->base.size >> PAGE_SHIFT,
-					obj->pages,
-					pte_flags);
+	i915_ppgtt_insert_sg_entries(ppgtt,
+				     obj->pages,
+				     obj->gtt_space->start >> PAGE_SHIFT,
+				     pte_flags);
 }
 
 void i915_ppgtt_unbind_object(struct i915_hw_ppgtt *ppgtt,
@@ -351,7 +298,7 @@
 	intel_gtt_clear_range(dev_priv->mm.gtt_start / PAGE_SIZE,
 			      (dev_priv->mm.gtt_end - dev_priv->mm.gtt_start) / PAGE_SIZE);
 
-	list_for_each_entry(obj, &dev_priv->mm.gtt_list, gtt_list) {
+	list_for_each_entry(obj, &dev_priv->mm.bound_list, gtt_list) {
 		i915_gem_clflush_object(obj);
 		i915_gem_gtt_bind_object(obj, obj->cache_level);
 	}
@@ -361,44 +308,26 @@
 
 int i915_gem_gtt_prepare_object(struct drm_i915_gem_object *obj)
 {
-	struct drm_device *dev = obj->base.dev;
-	struct drm_i915_private *dev_priv = dev->dev_private;
-
-	/* don't map imported dma buf objects */
-	if (dev_priv->mm.gtt->needs_dmar && !obj->sg_table)
-		return intel_gtt_map_memory(obj->pages,
-					    obj->base.size >> PAGE_SHIFT,
-					    &obj->sg_list,
-					    &obj->num_sg);
-	else
+	if (obj->has_dma_mapping)
 		return 0;
+
+	if (!dma_map_sg(&obj->base.dev->pdev->dev,
+			obj->pages->sgl, obj->pages->nents,
+			PCI_DMA_BIDIRECTIONAL))
+		return -ENOSPC;
+
+	return 0;
 }
 
 void i915_gem_gtt_bind_object(struct drm_i915_gem_object *obj,
 			      enum i915_cache_level cache_level)
 {
 	struct drm_device *dev = obj->base.dev;
-	struct drm_i915_private *dev_priv = dev->dev_private;
 	unsigned int agp_type = cache_level_to_agp_type(dev, cache_level);
 
-	if (obj->sg_table) {
-		intel_gtt_insert_sg_entries(obj->sg_table->sgl,
-					    obj->sg_table->nents,
-					    obj->gtt_space->start >> PAGE_SHIFT,
-					    agp_type);
-	} else if (dev_priv->mm.gtt->needs_dmar) {
-		BUG_ON(!obj->sg_list);
-
-		intel_gtt_insert_sg_entries(obj->sg_list,
-					    obj->num_sg,
-					    obj->gtt_space->start >> PAGE_SHIFT,
-					    agp_type);
-	} else
-		intel_gtt_insert_pages(obj->gtt_space->start >> PAGE_SHIFT,
-				       obj->base.size >> PAGE_SHIFT,
-				       obj->pages,
-				       agp_type);
-
+	intel_gtt_insert_sg_entries(obj->pages,
+				    obj->gtt_space->start >> PAGE_SHIFT,
+				    agp_type);
 	obj->has_global_gtt_mapping = 1;
 }
 
@@ -418,14 +347,31 @@
 
 	interruptible = do_idling(dev_priv);
 
-	if (obj->sg_list) {
-		intel_gtt_unmap_memory(obj->sg_list, obj->num_sg);
-		obj->sg_list = NULL;
-	}
+	if (!obj->has_dma_mapping)
+		dma_unmap_sg(&dev->pdev->dev,
+			     obj->pages->sgl, obj->pages->nents,
+			     PCI_DMA_BIDIRECTIONAL);
 
 	undo_idling(dev_priv, interruptible);
 }
 
+static void i915_gtt_color_adjust(struct drm_mm_node *node,
+				  unsigned long color,
+				  unsigned long *start,
+				  unsigned long *end)
+{
+	if (node->color != color)
+		*start += 4096;
+
+	if (!list_empty(&node->node_list)) {
+		node = list_entry(node->node_list.next,
+				  struct drm_mm_node,
+				  node_list);
+		if (node->allocated && node->color != color)
+			*end -= 4096;
+	}
+}
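
i915_gtt_color_adjust() above implements guard pages via drm_mm's colouring
hooks: a node whose cache "color" differs from its neighbour may not share a
page boundary with it, so candidate holes shrink by 4096 bytes on each
mismatched side. A hypothetical illustration of what the callback buys the
allocator (this is not the actual drm_mm internals):

	static bool hole_fits_after_coloring(struct drm_mm_node *prev,
					     unsigned long color,
					     unsigned long hole_start,
					     unsigned long hole_end,
					     unsigned long size)
	{
		/* trims hole_start/hole_end where neighbours differ in color */
		i915_gtt_color_adjust(prev, color, &hole_start, &hole_end);
		return hole_end >= hole_start + size;
	}
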
+
 void i915_gem_init_global_gtt(struct drm_device *dev,
 			      unsigned long start,
 			      unsigned long mappable_end,
@@ -435,6 +381,8 @@
 
 	/* Subtract the guard page ... */
 	drm_mm_init(&dev_priv->mm.gtt_space, start, end - start - PAGE_SIZE);
+	if (!HAS_LLC(dev))
+		dev_priv->mm.gtt_space.color_adjust = i915_gtt_color_adjust;
 
 	dev_priv->mm.gtt_start = start;
 	dev_priv->mm.gtt_mappable_end = mappable_end;
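
With the sg_table conversion, prepare/finish pair up through the generic DMA
API instead of the intel-gtt helpers; objects that arrive pre-mapped (dma-buf
imports set has_dma_mapping) skip both halves. A minimal sketch of the
contract, assuming a populated sg_table:

	#include <linux/dma-mapping.h>
	#include <linux/pci.h>
	#include <linux/scatterlist.h>

	static int example_map(struct device *dev, struct sg_table *st)
	{
		/* returns 0 on failure, e.g. when IOMMU space is exhausted */
		if (!dma_map_sg(dev, st->sgl, st->nents, PCI_DMA_BIDIRECTIONAL))
			return -ENOSPC;
		return 0;
	}

	static void example_unmap(struct device *dev, struct sg_table *st)
	{
		dma_unmap_sg(dev, st->sgl, st->nents, PCI_DMA_BIDIRECTIONAL);
	}
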
diff --git a/drivers/gpu/drm/i915/i915_gem_tiling.c b/drivers/gpu/drm/i915/i915_gem_tiling.c
index b964df5..8093ecd 100644
--- a/drivers/gpu/drm/i915/i915_gem_tiling.c
+++ b/drivers/gpu/drm/i915/i915_gem_tiling.c
@@ -470,18 +470,20 @@
 void
 i915_gem_object_do_bit_17_swizzle(struct drm_i915_gem_object *obj)
 {
+	struct scatterlist *sg;
 	int page_count = obj->base.size >> PAGE_SHIFT;
 	int i;
 
 	if (obj->bit_17 == NULL)
 		return;
 
-	for (i = 0; i < page_count; i++) {
-		char new_bit_17 = page_to_phys(obj->pages[i]) >> 17;
+	for_each_sg(obj->pages->sgl, sg, page_count, i) {
+		struct page *page = sg_page(sg);
+		char new_bit_17 = page_to_phys(page) >> 17;
 		if ((new_bit_17 & 0x1) !=
 		    (test_bit(i, obj->bit_17) != 0)) {
-			i915_gem_swizzle_page(obj->pages[i]);
-			set_page_dirty(obj->pages[i]);
+			i915_gem_swizzle_page(page);
+			set_page_dirty(page);
 		}
 	}
 }
@@ -489,6 +491,7 @@
 void
 i915_gem_object_save_bit_17_swizzle(struct drm_i915_gem_object *obj)
 {
+	struct scatterlist *sg;
 	int page_count = obj->base.size >> PAGE_SHIFT;
 	int i;
 
@@ -502,8 +505,9 @@
 		}
 	}
 
-	for (i = 0; i < page_count; i++) {
-		if (page_to_phys(obj->pages[i]) & (1 << 17))
+	for_each_sg(obj->pages->sgl, sg, page_count, i) {
+		struct page *page = sg_page(sg);
+		if (page_to_phys(page) & (1 << 17))
 			__set_bit(i, obj->bit_17);
 		else
 			__clear_bit(i, obj->bit_17);
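
Both swizzle loops above now walk the object's scatterlist with for_each_sg()
instead of indexing a struct page array. A minimal sketch of the pattern,
assuming (as the loops do) one page per scatterlist entry:

	#include <linux/scatterlist.h>

	static int count_bit17_pages(struct sg_table *st, int page_count)
	{
		struct scatterlist *sg;
		int i, n = 0;

		for_each_sg(st->sgl, sg, page_count, i) {
			struct page *page = sg_page(sg);

			if (page_to_phys(page) & (1 << 17))
				n++;	/* physical address has bit 17 set */
		}
		return n;
	}
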
diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
index 5249640..d7f0066 100644
--- a/drivers/gpu/drm/i915/i915_irq.c
+++ b/drivers/gpu/drm/i915/i915_irq.c
@@ -296,11 +296,21 @@
 	drm_helper_hpd_irq_event(dev);
 }
 
-static void i915_handle_rps_change(struct drm_device *dev)
+/* defined in intel_pm.c */
+extern spinlock_t mchdev_lock;
+
+static void ironlake_handle_rps_change(struct drm_device *dev)
 {
 	drm_i915_private_t *dev_priv = dev->dev_private;
 	u32 busy_up, busy_down, max_avg, min_avg;
-	u8 new_delay = dev_priv->cur_delay;
+	u8 new_delay;
+	unsigned long flags;
+
+	spin_lock_irqsave(&mchdev_lock, flags);
+
+	I915_WRITE16(MEMINTRSTS, I915_READ(MEMINTRSTS));
+
+	new_delay = dev_priv->ips.cur_delay;
 
 	I915_WRITE16(MEMINTRSTS, MEMINT_EVAL_CHG);
 	busy_up = I915_READ(RCPREVBSYTUPAVG);
@@ -310,19 +320,21 @@
 
 	/* Handle RCS change request from hw */
 	if (busy_up > max_avg) {
-		if (dev_priv->cur_delay != dev_priv->max_delay)
-			new_delay = dev_priv->cur_delay - 1;
-		if (new_delay < dev_priv->max_delay)
-			new_delay = dev_priv->max_delay;
+		if (dev_priv->ips.cur_delay != dev_priv->ips.max_delay)
+			new_delay = dev_priv->ips.cur_delay - 1;
+		if (new_delay < dev_priv->ips.max_delay)
+			new_delay = dev_priv->ips.max_delay;
 	} else if (busy_down < min_avg) {
-		if (dev_priv->cur_delay != dev_priv->min_delay)
-			new_delay = dev_priv->cur_delay + 1;
-		if (new_delay > dev_priv->min_delay)
-			new_delay = dev_priv->min_delay;
+		if (dev_priv->ips.cur_delay != dev_priv->ips.min_delay)
+			new_delay = dev_priv->ips.cur_delay + 1;
+		if (new_delay > dev_priv->ips.min_delay)
+			new_delay = dev_priv->ips.min_delay;
 	}
 
 	if (ironlake_set_drps(dev, new_delay))
-		dev_priv->cur_delay = new_delay;
+		dev_priv->ips.cur_delay = new_delay;
+
+	spin_unlock_irqrestore(&mchdev_lock, flags);
 
 	return;
 }
@@ -335,7 +347,7 @@
 	if (ring->obj == NULL)
 		return;
 
-	trace_i915_gem_request_complete(ring, ring->get_seqno(ring));
+	trace_i915_gem_request_complete(ring, ring->get_seqno(ring, false));
 
 	wake_up_all(&ring->irq_queue);
 	if (i915_enable_hangcheck) {
@@ -349,16 +361,16 @@
 static void gen6_pm_rps_work(struct work_struct *work)
 {
 	drm_i915_private_t *dev_priv = container_of(work, drm_i915_private_t,
-						    rps_work);
+						    rps.work);
 	u32 pm_iir, pm_imr;
 	u8 new_delay;
 
-	spin_lock_irq(&dev_priv->rps_lock);
-	pm_iir = dev_priv->pm_iir;
-	dev_priv->pm_iir = 0;
+	spin_lock_irq(&dev_priv->rps.lock);
+	pm_iir = dev_priv->rps.pm_iir;
+	dev_priv->rps.pm_iir = 0;
 	pm_imr = I915_READ(GEN6_PMIMR);
 	I915_WRITE(GEN6_PMIMR, 0);
-	spin_unlock_irq(&dev_priv->rps_lock);
+	spin_unlock_irq(&dev_priv->rps.lock);
 
 	if ((pm_iir & GEN6_PM_DEFERRED_EVENTS) == 0)
 		return;
@@ -366,11 +378,17 @@
 	mutex_lock(&dev_priv->dev->struct_mutex);
 
 	if (pm_iir & GEN6_PM_RP_UP_THRESHOLD)
-		new_delay = dev_priv->cur_delay + 1;
+		new_delay = dev_priv->rps.cur_delay + 1;
 	else
-		new_delay = dev_priv->cur_delay - 1;
+		new_delay = dev_priv->rps.cur_delay - 1;
 
-	gen6_set_rps(dev_priv->dev, new_delay);
+	/* sysfs frequency interfaces may have snuck in while servicing the
+	 * interrupt
+	 */
+	if (!(new_delay > dev_priv->rps.max_delay ||
+	      new_delay < dev_priv->rps.min_delay)) {
+		gen6_set_rps(dev_priv->dev, new_delay);
+	}
 
 	mutex_unlock(&dev_priv->dev->struct_mutex);
 }
@@ -444,7 +462,7 @@
 	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
 	unsigned long flags;
 
-	if (!IS_IVYBRIDGE(dev))
+	if (!HAS_L3_GPU_CACHE(dev))
 		return;
 
 	spin_lock_irqsave(&dev_priv->irq_lock, flags);
@@ -488,19 +506,19 @@
 	 * IIR bits should never already be set because IMR should
 	 * prevent an interrupt from being shown in IIR. The warning
 	 * displays a case where we've unsafely cleared
-	 * dev_priv->pm_iir. Although missing an interrupt of the same
+	 * dev_priv->rps.pm_iir. Although missing an interrupt of the same
 	 * type is not a problem, it displays a problem in the logic.
 	 *
-	 * The mask bit in IMR is cleared by rps_work.
+	 * The mask bit in IMR is cleared by dev_priv->rps.work.
 	 */
 
-	spin_lock_irqsave(&dev_priv->rps_lock, flags);
-	dev_priv->pm_iir |= pm_iir;
-	I915_WRITE(GEN6_PMIMR, dev_priv->pm_iir);
+	spin_lock_irqsave(&dev_priv->rps.lock, flags);
+	dev_priv->rps.pm_iir |= pm_iir;
+	I915_WRITE(GEN6_PMIMR, dev_priv->rps.pm_iir);
 	POSTING_READ(GEN6_PMIMR);
-	spin_unlock_irqrestore(&dev_priv->rps_lock, flags);
+	spin_unlock_irqrestore(&dev_priv->rps.lock, flags);
 
-	queue_work(dev_priv->wq, &dev_priv->rps_work);
+	queue_work(dev_priv->wq, &dev_priv->rps.work);
 }
 
 static irqreturn_t valleyview_irq_handler(DRM_IRQ_ARGS)
@@ -793,10 +811,8 @@
 			ibx_irq_handler(dev, pch_iir);
 	}
 
-	if (de_iir & DE_PCU_EVENT) {
-		I915_WRITE16(MEMINTRSTS, I915_READ(MEMINTRSTS));
-		i915_handle_rps_change(dev);
-	}
+	if (IS_GEN5(dev) && (de_iir & DE_PCU_EVENT))
+		ironlake_handle_rps_change(dev);
 
 	if (IS_GEN6(dev) && pm_iir & GEN6_PM_DEFERRED_EVENTS)
 		gen6_queue_rps_work(dev_priv, pm_iir);
@@ -843,26 +859,55 @@
 	}
 }
 
+/* NB: note the memset; gens that fill fewer slots rely on the rest being zero */
+static void i915_get_extra_instdone(struct drm_device *dev,
+				    uint32_t *instdone)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	memset(instdone, 0, sizeof(*instdone) * I915_NUM_INSTDONE_REG);
+
+	switch (INTEL_INFO(dev)->gen) {
+	case 2:
+	case 3:
+		instdone[0] = I915_READ(INSTDONE);
+		break;
+	case 4:
+	case 5:
+	case 6:
+		instdone[0] = I915_READ(INSTDONE_I965);
+		instdone[1] = I915_READ(INSTDONE1);
+		break;
+	default:
+		WARN_ONCE(1, "Unsupported platform\n");
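+		/* fall through - unknown newer gens report the gen7 registers */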
+	case 7:
+		instdone[0] = I915_READ(GEN7_INSTDONE_1);
+		instdone[1] = I915_READ(GEN7_SC_INSTDONE);
+		instdone[2] = I915_READ(GEN7_SAMPLER_INSTDONE);
+		instdone[3] = I915_READ(GEN7_ROW_INSTDONE);
+		break;
+	}
+}
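
Callers always pass a full I915_NUM_INSTDONE_REG array; the memset guarantees
that slots the current gen does not fill read back as zero, so consumers can
print or compare the whole array unconditionally, e.g.:

	uint32_t instdone[I915_NUM_INSTDONE_REG];
	int i;

	i915_get_extra_instdone(dev, instdone);
	for (i = 0; i < I915_NUM_INSTDONE_REG; i++)
		pr_err("  INSTDONE_%d: 0x%08x\n", i, instdone[i]);
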
+
 #ifdef CONFIG_DEBUG_FS
 static struct drm_i915_error_object *
 i915_error_object_create(struct drm_i915_private *dev_priv,
 			 struct drm_i915_gem_object *src)
 {
 	struct drm_i915_error_object *dst;
-	int page, page_count;
+	int i, count;
 	u32 reloc_offset;
 
 	if (src == NULL || src->pages == NULL)
 		return NULL;
 
-	page_count = src->base.size / PAGE_SIZE;
+	count = src->base.size / PAGE_SIZE;
 
-	dst = kmalloc(sizeof(*dst) + page_count * sizeof(u32 *), GFP_ATOMIC);
+	dst = kmalloc(sizeof(*dst) + count * sizeof(u32 *), GFP_ATOMIC);
 	if (dst == NULL)
 		return NULL;
 
 	reloc_offset = src->gtt_offset;
-	for (page = 0; page < page_count; page++) {
+	for (i = 0; i < count; i++) {
 		unsigned long flags;
 		void *d;
 
@@ -885,30 +930,33 @@
 			memcpy_fromio(d, s, PAGE_SIZE);
 			io_mapping_unmap_atomic(s);
 		} else {
+			struct page *page;
 			void *s;
 
-			drm_clflush_pages(&src->pages[page], 1);
+			page = i915_gem_object_get_page(src, i);
 
-			s = kmap_atomic(src->pages[page]);
+			drm_clflush_pages(&page, 1);
+
+			s = kmap_atomic(page);
 			memcpy(d, s, PAGE_SIZE);
 			kunmap_atomic(s);
 
-			drm_clflush_pages(&src->pages[page], 1);
+			drm_clflush_pages(&page, 1);
 		}
 		local_irq_restore(flags);
 
-		dst->pages[page] = d;
+		dst->pages[i] = d;
 
 		reloc_offset += PAGE_SIZE;
 	}
-	dst->page_count = page_count;
+	dst->page_count = count;
 	dst->gtt_offset = src->gtt_offset;
 
 	return dst;
 
 unwind:
-	while (page--)
-		kfree(dst->pages[page]);
+	while (i--)
+		kfree(dst->pages[i]);
 	kfree(dst);
 	return NULL;
 }
@@ -949,7 +997,8 @@
 {
 	err->size = obj->base.size;
 	err->name = obj->base.name;
-	err->seqno = obj->last_rendering_seqno;
+	err->rseqno = obj->last_read_seqno;
+	err->wseqno = obj->last_write_seqno;
 	err->gtt_offset = obj->gtt_offset;
 	err->read_domains = obj->base.read_domains;
 	err->write_domain = obj->base.write_domain;
@@ -1039,12 +1088,12 @@
 	if (!ring->get_seqno)
 		return NULL;
 
-	seqno = ring->get_seqno(ring);
+	seqno = ring->get_seqno(ring, false);
 	list_for_each_entry(obj, &dev_priv->mm.active_list, mm_list) {
 		if (obj->ring != ring)
 			continue;
 
-		if (i915_seqno_passed(seqno, obj->last_rendering_seqno))
+		if (i915_seqno_passed(seqno, obj->last_read_seqno))
 			continue;
 
 		if ((obj->base.read_domains & I915_GEM_DOMAIN_COMMAND) == 0)
@@ -1080,10 +1129,8 @@
 		error->ipehr[ring->id] = I915_READ(RING_IPEHR(ring->mmio_base));
 		error->instdone[ring->id] = I915_READ(RING_INSTDONE(ring->mmio_base));
 		error->instps[ring->id] = I915_READ(RING_INSTPS(ring->mmio_base));
-		if (ring->id == RCS) {
-			error->instdone1 = I915_READ(INSTDONE1);
+		if (ring->id == RCS)
 			error->bbaddr = I915_READ64(BB_ADDR);
-		}
 	} else {
 		error->faddr[ring->id] = I915_READ(DMA_FADD_I8XX);
 		error->ipeir[ring->id] = I915_READ(IPEIR);
@@ -1093,7 +1140,7 @@
 
 	error->waiting[ring->id] = waitqueue_active(&ring->irq_queue);
 	error->instpm[ring->id] = I915_READ(RING_INSTPM(ring->mmio_base));
-	error->seqno[ring->id] = ring->get_seqno(ring);
+	error->seqno[ring->id] = ring->get_seqno(ring, false);
 	error->acthd[ring->id] = intel_ring_get_active_head(ring);
 	error->head[ring->id] = I915_READ_HEAD(ring);
 	error->tail[ring->id] = I915_READ_TAIL(ring);
@@ -1199,6 +1246,11 @@
 		error->done_reg = I915_READ(DONE_REG);
 	}
 
+	if (INTEL_INFO(dev)->gen == 7)
+		error->err_int = I915_READ(GEN7_ERR_INT);
+
+	i915_get_extra_instdone(dev, error->extra_instdone);
+
 	i915_gem_record_fences(dev, error);
 	i915_gem_record_rings(dev, error);
 
@@ -1210,7 +1262,7 @@
 	list_for_each_entry(obj, &dev_priv->mm.active_list, mm_list)
 		i++;
 	error->active_bo_count = i;
-	list_for_each_entry(obj, &dev_priv->mm.gtt_list, gtt_list)
+	list_for_each_entry(obj, &dev_priv->mm.bound_list, gtt_list)
 		if (obj->pin_count)
 			i++;
 	error->pinned_bo_count = i - error->active_bo_count;
@@ -1235,7 +1287,7 @@
 		error->pinned_bo_count =
 			capture_pinned_bo(error->pinned_bo,
 					  error->pinned_bo_count,
-					  &dev_priv->mm.gtt_list);
+					  &dev_priv->mm.bound_list);
 
 	do_gettimeofday(&error->time);
 
@@ -1274,24 +1326,26 @@
 static void i915_report_and_clear_eir(struct drm_device *dev)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
+	uint32_t instdone[I915_NUM_INSTDONE_REG];
 	u32 eir = I915_READ(EIR);
-	int pipe;
+	int pipe, i;
 
 	if (!eir)
 		return;
 
 	pr_err("render error detected, EIR: 0x%08x\n", eir);
 
+	i915_get_extra_instdone(dev, instdone);
+
 	if (IS_G4X(dev)) {
 		if (eir & (GM45_ERROR_MEM_PRIV | GM45_ERROR_CP_PRIV)) {
 			u32 ipeir = I915_READ(IPEIR_I965);
 
 			pr_err("  IPEIR: 0x%08x\n", I915_READ(IPEIR_I965));
 			pr_err("  IPEHR: 0x%08x\n", I915_READ(IPEHR_I965));
-			pr_err("  INSTDONE: 0x%08x\n",
-			       I915_READ(INSTDONE_I965));
+			for (i = 0; i < ARRAY_SIZE(instdone); i++)
+				pr_err("  INSTDONE_%d: 0x%08x\n", i, instdone[i]);
 			pr_err("  INSTPS: 0x%08x\n", I915_READ(INSTPS));
-			pr_err("  INSTDONE1: 0x%08x\n", I915_READ(INSTDONE1));
 			pr_err("  ACTHD: 0x%08x\n", I915_READ(ACTHD_I965));
 			I915_WRITE(IPEIR_I965, ipeir);
 			POSTING_READ(IPEIR_I965);
@@ -1325,12 +1379,13 @@
 	if (eir & I915_ERROR_INSTRUCTION) {
 		pr_err("instruction error\n");
 		pr_err("  INSTPM: 0x%08x\n", I915_READ(INSTPM));
+		for (i = 0; i < ARRAY_SIZE(instdone); i++)
+			pr_err("  INSTDONE_%d: 0x%08x\n", i, instdone[i]);
 		if (INTEL_INFO(dev)->gen < 4) {
 			u32 ipeir = I915_READ(IPEIR);
 
 			pr_err("  IPEIR: 0x%08x\n", I915_READ(IPEIR));
 			pr_err("  IPEHR: 0x%08x\n", I915_READ(IPEHR));
-			pr_err("  INSTDONE: 0x%08x\n", I915_READ(INSTDONE));
 			pr_err("  ACTHD: 0x%08x\n", I915_READ(ACTHD));
 			I915_WRITE(IPEIR, ipeir);
 			POSTING_READ(IPEIR);
@@ -1339,10 +1394,7 @@
 
 			pr_err("  IPEIR: 0x%08x\n", I915_READ(IPEIR_I965));
 			pr_err("  IPEHR: 0x%08x\n", I915_READ(IPEHR_I965));
-			pr_err("  INSTDONE: 0x%08x\n",
-			       I915_READ(INSTDONE_I965));
 			pr_err("  INSTPS: 0x%08x\n", I915_READ(INSTPS));
-			pr_err("  INSTDONE1: 0x%08x\n", I915_READ(INSTDONE1));
 			pr_err("  ACTHD: 0x%08x\n", I915_READ(ACTHD_I965));
 			I915_WRITE(IPEIR_I965, ipeir);
 			POSTING_READ(IPEIR_I965);
@@ -1590,7 +1642,8 @@
 static bool i915_hangcheck_ring_idle(struct intel_ring_buffer *ring, bool *err)
 {
 	if (list_empty(&ring->request_list) ||
-	    i915_seqno_passed(ring->get_seqno(ring), ring_last_seqno(ring))) {
+	    i915_seqno_passed(ring->get_seqno(ring, false),
+			      ring_last_seqno(ring))) {
 		/* Issue a wake-up to catch stuck h/w. */
 		if (waitqueue_active(&ring->irq_queue)) {
 			DRM_ERROR("Hangcheck timer elapsed... %s idle\n",
@@ -1656,7 +1709,7 @@
 {
 	struct drm_device *dev = (struct drm_device *)data;
 	drm_i915_private_t *dev_priv = dev->dev_private;
-	uint32_t acthd[I915_NUM_RINGS], instdone, instdone1;
+	uint32_t acthd[I915_NUM_RINGS], instdone[I915_NUM_INSTDONE_REG];
 	struct intel_ring_buffer *ring;
 	bool err = false, idle;
 	int i;
@@ -1684,25 +1737,16 @@
 		return;
 	}
 
-	if (INTEL_INFO(dev)->gen < 4) {
-		instdone = I915_READ(INSTDONE);
-		instdone1 = 0;
-	} else {
-		instdone = I915_READ(INSTDONE_I965);
-		instdone1 = I915_READ(INSTDONE1);
-	}
-
+	i915_get_extra_instdone(dev, instdone);
 	if (memcmp(dev_priv->last_acthd, acthd, sizeof(acthd)) == 0 &&
-	    dev_priv->last_instdone == instdone &&
-	    dev_priv->last_instdone1 == instdone1) {
+	    memcmp(dev_priv->prev_instdone, instdone, sizeof(instdone)) == 0) {
 		if (i915_hangcheck_hung(dev))
 			return;
 	} else {
 		dev_priv->hangcheck_count = 0;
 
 		memcpy(dev_priv->last_acthd, acthd, sizeof(acthd));
-		dev_priv->last_instdone = instdone;
-		dev_priv->last_instdone1 = instdone1;
+		memcpy(dev_priv->prev_instdone, instdone, sizeof(instdone));
 	}
 
 repeat:
@@ -2647,7 +2691,7 @@
 
 	INIT_WORK(&dev_priv->hotplug_work, i915_hotplug_work_func);
 	INIT_WORK(&dev_priv->error_work, i915_error_work_func);
-	INIT_WORK(&dev_priv->rps_work, gen6_pm_rps_work);
+	INIT_WORK(&dev_priv->rps.work, gen6_pm_rps_work);
 	INIT_WORK(&dev_priv->parity_error_work, ivybridge_parity_work);
 
 	dev->driver->get_vblank_counter = i915_get_vblank_counter;
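
gen6_queue_rps_work() and gen6_pm_rps_work() above are the two halves of a
common irq-to-workqueue handoff: the hard irq accumulates event bits and masks
them under a spinlock, then the work item snapshots-and-clears under the same
lock before unmasking. A generic sketch of the pattern with hypothetical names
(the real code uses dev_priv->rps.lock and dev_priv->rps.work):

	#include <linux/spinlock.h>
	#include <linux/workqueue.h>

	struct deferred_events {
		spinlock_t lock;
		u32 pending;
		struct work_struct work;
	};

	static void irq_half(struct deferred_events *de, u32 events)
	{
		unsigned long flags;

		spin_lock_irqsave(&de->lock, flags);
		de->pending |= events;		/* accumulate across irqs */
		/* ... mask the events in the hardware IMR here ... */
		spin_unlock_irqrestore(&de->lock, flags);

		schedule_work(&de->work);
	}

	static void work_half(struct work_struct *work)
	{
		struct deferred_events *de =
			container_of(work, struct deferred_events, work);
		u32 events;

		spin_lock_irq(&de->lock);
		events = de->pending;		/* snapshot ... */
		de->pending = 0;		/* ... and clear before unmask */
		/* ... unmask in the hardware IMR here ... */
		spin_unlock_irq(&de->lock);

		/* process 'events' in sleepable context */
	}
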
diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
index 28725ce..a828e90 100644
--- a/drivers/gpu/drm/i915/i915_reg.h
+++ b/drivers/gpu/drm/i915/i915_reg.h
@@ -450,6 +450,7 @@
 #define RING_ACTHD(base)	((base)+0x74)
 #define RING_NOPID(base)	((base)+0x94)
 #define RING_IMR(base)		((base)+0xa8)
+#define RING_TIMESTAMP(base)	((base)+0x358)
 #define   TAIL_ADDR		0x001FFFF8
 #define   HEAD_WRAP_COUNT	0xFFE00000
 #define   HEAD_WRAP_ONE		0x00200000
@@ -478,6 +479,11 @@
 #define IPEIR_I965	0x02064
 #define IPEHR_I965	0x02068
 #define INSTDONE_I965	0x0206c
+#define GEN7_INSTDONE_1		0x0206c
+#define GEN7_SC_INSTDONE	0x07100
+#define GEN7_SAMPLER_INSTDONE	0x0e160
+#define GEN7_ROW_INSTDONE	0x0e164
+#define I915_NUM_INSTDONE_REG	4
 #define RING_IPEIR(base)	((base)+0x64)
 #define RING_IPEHR(base)	((base)+0x68)
 #define RING_INSTDONE(base)	((base)+0x6c)
@@ -500,6 +506,8 @@
 #define DMA_FADD_I8XX	0x020d0
 
 #define ERROR_GEN6	0x040a0
+#define GEN7_ERR_INT	0x44040
+#define   ERR_INT_MMIO_UNCLAIMED (1<<13)
 
 /* GM45+ chicken bits -- debug workaround bits that may be required
  * for various sorts of correct behavior.  The top 16 bits of each are
@@ -529,6 +537,8 @@
 #define   GFX_PSMI_GRANULARITY		(1<<10)
 #define   GFX_PPGTT_ENABLE		(1<<9)
 
+#define VLV_DISPLAY_BASE 0x180000
+
 #define SCPD0		0x0209c /* 915+ only */
 #define IER		0x020a0
 #define IIR		0x020a4
@@ -1496,6 +1506,14 @@
 					 GEN7_CXT_EXTENDED_SIZE(ctx_reg) + \
 					 GEN7_CXT_GT1_SIZE(ctx_reg) + \
 					 GEN7_CXT_VFSTATE_SIZE(ctx_reg))
+#define HSW_CXT_POWER_SIZE(ctx_reg)	((ctx_reg >> 26) & 0x3f)
+#define HSW_CXT_RING_SIZE(ctx_reg)	((ctx_reg >> 23) & 0x7)
+#define HSW_CXT_RENDER_SIZE(ctx_reg)	((ctx_reg >> 15) & 0xff)
+#define HSW_CXT_TOTAL_SIZE(ctx_reg)	(HSW_CXT_POWER_SIZE(ctx_reg) + \
+					 HSW_CXT_RING_SIZE(ctx_reg) + \
+					 HSW_CXT_RENDER_SIZE(ctx_reg) + \
+					 GEN7_CXT_VFSTATE_SIZE(ctx_reg))
+
 
 /*
  * Overlay regs
@@ -1549,12 +1567,35 @@
 
 /* VGA port control */
 #define ADPA			0x61100
+#define PCH_ADPA                0xe1100
+#define VLV_ADPA		(VLV_DISPLAY_BASE + ADPA)
+
 #define   ADPA_DAC_ENABLE	(1<<31)
 #define   ADPA_DAC_DISABLE	0
 #define   ADPA_PIPE_SELECT_MASK	(1<<30)
 #define   ADPA_PIPE_A_SELECT	0
 #define   ADPA_PIPE_B_SELECT	(1<<30)
 #define   ADPA_PIPE_SELECT(pipe) ((pipe) << 30)
+/* CPT uses bits 29:30 for pch transcoder select */
+#define   ADPA_CRT_HOTPLUG_MASK  0x03ff0000 /* bits 25:16 */
+#define   ADPA_CRT_HOTPLUG_MONITOR_NONE  (0<<24)
+#define   ADPA_CRT_HOTPLUG_MONITOR_MASK  (3<<24)
+#define   ADPA_CRT_HOTPLUG_MONITOR_COLOR (3<<24)
+#define   ADPA_CRT_HOTPLUG_MONITOR_MONO  (2<<24)
+#define   ADPA_CRT_HOTPLUG_ENABLE        (1<<23)
+#define   ADPA_CRT_HOTPLUG_PERIOD_64     (0<<22)
+#define   ADPA_CRT_HOTPLUG_PERIOD_128    (1<<22)
+#define   ADPA_CRT_HOTPLUG_WARMUP_5MS    (0<<21)
+#define   ADPA_CRT_HOTPLUG_WARMUP_10MS   (1<<21)
+#define   ADPA_CRT_HOTPLUG_SAMPLE_2S     (0<<20)
+#define   ADPA_CRT_HOTPLUG_SAMPLE_4S     (1<<20)
+#define   ADPA_CRT_HOTPLUG_VOLTAGE_40    (0<<18)
+#define   ADPA_CRT_HOTPLUG_VOLTAGE_50    (1<<18)
+#define   ADPA_CRT_HOTPLUG_VOLTAGE_60    (2<<18)
+#define   ADPA_CRT_HOTPLUG_VOLTAGE_70    (3<<18)
+#define   ADPA_CRT_HOTPLUG_VOLREF_325MV  (0<<17)
+#define   ADPA_CRT_HOTPLUG_VOLREF_475MV  (1<<17)
+#define   ADPA_CRT_HOTPLUG_FORCE_TRIGGER (1<<16)
 #define   ADPA_USE_VGA_HVPOLARITY (1<<15)
 #define   ADPA_SETS_HVPOLARITY	0
 #define   ADPA_VSYNC_CNTL_DISABLE (1<<11)
@@ -3889,31 +3930,6 @@
 #define FDI_PLL_CTL_1           0xfe000
 #define FDI_PLL_CTL_2           0xfe004
 
-/* CRT */
-#define PCH_ADPA                0xe1100
-#define  ADPA_TRANS_SELECT_MASK (1<<30)
-#define  ADPA_TRANS_A_SELECT    0
-#define  ADPA_TRANS_B_SELECT    (1<<30)
-#define  ADPA_CRT_HOTPLUG_MASK  0x03ff0000 /* bit 25-16 */
-#define  ADPA_CRT_HOTPLUG_MONITOR_NONE  (0<<24)
-#define  ADPA_CRT_HOTPLUG_MONITOR_MASK  (3<<24)
-#define  ADPA_CRT_HOTPLUG_MONITOR_COLOR (3<<24)
-#define  ADPA_CRT_HOTPLUG_MONITOR_MONO  (2<<24)
-#define  ADPA_CRT_HOTPLUG_ENABLE        (1<<23)
-#define  ADPA_CRT_HOTPLUG_PERIOD_64     (0<<22)
-#define  ADPA_CRT_HOTPLUG_PERIOD_128    (1<<22)
-#define  ADPA_CRT_HOTPLUG_WARMUP_5MS    (0<<21)
-#define  ADPA_CRT_HOTPLUG_WARMUP_10MS   (1<<21)
-#define  ADPA_CRT_HOTPLUG_SAMPLE_2S     (0<<20)
-#define  ADPA_CRT_HOTPLUG_SAMPLE_4S     (1<<20)
-#define  ADPA_CRT_HOTPLUG_VOLTAGE_40    (0<<18)
-#define  ADPA_CRT_HOTPLUG_VOLTAGE_50    (1<<18)
-#define  ADPA_CRT_HOTPLUG_VOLTAGE_60    (2<<18)
-#define  ADPA_CRT_HOTPLUG_VOLTAGE_70    (3<<18)
-#define  ADPA_CRT_HOTPLUG_VOLREF_325MV  (0<<17)
-#define  ADPA_CRT_HOTPLUG_VOLREF_475MV  (1<<17)
-#define  ADPA_CRT_HOTPLUG_FORCE_TRIGGER (1<<16)
-
 /* or SDVOB */
 #define HDMIB   0xe1140
 #define  PORT_ENABLE    (1 << 31)
@@ -4021,6 +4037,8 @@
 #define  PORT_TRANS_C_SEL_CPT	(2<<29)
 #define  PORT_TRANS_SEL_MASK	(3<<29)
 #define  PORT_TRANS_SEL_CPT(pipe)	((pipe) << 29)
+#define  PORT_TO_PIPE(val)	(((val) & (1<<30)) >> 30)
+#define  PORT_TO_PIPE_CPT(val)	(((val) & PORT_TRANS_SEL_MASK) >> 29)
 
 #define TRANS_DP_CTL_A		0xe0300
 #define TRANS_DP_CTL_B		0xe1300
@@ -4239,7 +4257,15 @@
 #define G4X_HDMIW_HDMIEDID		0x6210C
 
 #define IBX_HDMIW_HDMIEDID_A		0xE2050
+#define IBX_HDMIW_HDMIEDID_B		0xE2150
+#define IBX_HDMIW_HDMIEDID(pipe) _PIPE(pipe, \
+					IBX_HDMIW_HDMIEDID_A, \
+					IBX_HDMIW_HDMIEDID_B)
 #define IBX_AUD_CNTL_ST_A		0xE20B4
+#define IBX_AUD_CNTL_ST_B		0xE21B4
+#define IBX_AUD_CNTL_ST(pipe) _PIPE(pipe, \
+					IBX_AUD_CNTL_ST_A, \
+					IBX_AUD_CNTL_ST_B)
 #define IBX_ELD_BUFFER_SIZE		(0x1f << 10)
 #define IBX_ELD_ADDRESS			(0x1f << 5)
 #define IBX_ELD_ACK			(1 << 4)
@@ -4248,7 +4274,15 @@
 #define IBX_CP_READYB			(1 << 1)
 
 #define CPT_HDMIW_HDMIEDID_A		0xE5050
+#define CPT_HDMIW_HDMIEDID_B		0xE5150
+#define CPT_HDMIW_HDMIEDID(pipe) _PIPE(pipe, \
+					CPT_HDMIW_HDMIEDID_A, \
+					CPT_HDMIW_HDMIEDID_B)
 #define CPT_AUD_CNTL_ST_A		0xE50B4
+#define CPT_AUD_CNTL_ST_B		0xE51B4
+#define CPT_AUD_CNTL_ST(pipe) _PIPE(pipe, \
+					CPT_AUD_CNTL_ST_A, \
+					CPT_AUD_CNTL_ST_B)
 #define CPT_AUD_CNTRL_ST2		0xE50C0
 
 /* These are the 4 32-bit write offset registers for each stream
@@ -4258,7 +4292,15 @@
 #define GEN7_SO_WRITE_OFFSET(n)		(0x5280 + (n) * 4)
 
 #define IBX_AUD_CONFIG_A			0xe2000
+#define IBX_AUD_CONFIG_B			0xe2100
+#define IBX_AUD_CFG(pipe) _PIPE(pipe, \
+					IBX_AUD_CONFIG_A, \
+					IBX_AUD_CONFIG_B)
 #define CPT_AUD_CONFIG_A			0xe5000
+#define CPT_AUD_CONFIG_B			0xe5100
+#define CPT_AUD_CFG(pipe) _PIPE(pipe, \
+					CPT_AUD_CONFIG_A, \
+					CPT_AUD_CONFIG_B)
 #define   AUD_CONFIG_N_VALUE_INDEX		(1 << 29)
 #define   AUD_CONFIG_N_PROG_ENABLE		(1 << 28)
 #define   AUD_CONFIG_UPPER_N_SHIFT		20
@@ -4269,195 +4311,233 @@
 #define   AUD_CONFIG_PIXEL_CLOCK_HDMI		(0xf << 16)
 #define   AUD_CONFIG_DISABLE_NCTS		(1 << 3)
 
+/* HSW Audio */
+#define   HSW_AUD_CONFIG_A		0x65000 /* Audio Configuration Transcoder A */
+#define   HSW_AUD_CONFIG_B		0x65100 /* Audio Configuration Transcoder B */
+#define   HSW_AUD_CFG(pipe) _PIPE(pipe, \
+					HSW_AUD_CONFIG_A, \
+					HSW_AUD_CONFIG_B)
+
+#define   HSW_AUD_MISC_CTRL_A		0x65010 /* Audio Misc Control Convert 1 */
+#define   HSW_AUD_MISC_CTRL_B		0x65110 /* Audio Misc Control Convert 2 */
+#define   HSW_AUD_MISC_CTRL(pipe) _PIPE(pipe, \
+					HSW_AUD_MISC_CTRL_A, \
+					HSW_AUD_MISC_CTRL_B)
+
+#define   HSW_AUD_DIP_ELD_CTRL_ST_A	0x650b4 /* Audio DIP and ELD Control State Transcoder A */
+#define   HSW_AUD_DIP_ELD_CTRL_ST_B	0x651b4 /* Audio DIP and ELD Control State Transcoder B */
+#define   HSW_AUD_DIP_ELD_CTRL(pipe) _PIPE(pipe, \
+					HSW_AUD_DIP_ELD_CTRL_ST_A, \
+					HSW_AUD_DIP_ELD_CTRL_ST_B)
+
+/* Audio Digital Converter */
+#define   HSW_AUD_DIG_CNVT_1		0x65080 /* Audio Converter 1 */
+#define   HSW_AUD_DIG_CNVT_2		0x65180 /* Audio Converter 2 */
+#define   AUD_DIG_CNVT(pipe) _PIPE(pipe, \
+					HSW_AUD_DIG_CNVT_1, \
+					HSW_AUD_DIG_CNVT_2)
+#define   DIP_PORT_SEL_MASK		0x3
+
+#define   HSW_AUD_EDID_DATA_A		0x65050
+#define   HSW_AUD_EDID_DATA_B		0x65150
+#define   HSW_AUD_EDID_DATA(pipe) _PIPE(pipe, \
+					HSW_AUD_EDID_DATA_A, \
+					HSW_AUD_EDID_DATA_B)
+
+#define   HSW_AUD_PIPE_CONV_CFG		0x6507c /* Audio pipe and converter configs */
+#define   HSW_AUD_PIN_ELD_CP_VLD	0x650c0 /* Audio ELD and CP Ready Status */
+#define   AUDIO_INACTIVE_C		(1<<11)
+#define   AUDIO_INACTIVE_B		(1<<7)
+#define   AUDIO_INACTIVE_A		(1<<3)
+#define   AUDIO_OUTPUT_ENABLE_A		(1<<2)
+#define   AUDIO_OUTPUT_ENABLE_B		(1<<6)
+#define   AUDIO_OUTPUT_ENABLE_C		(1<<10)
+#define   AUDIO_ELD_VALID_A		(1<<0)
+#define   AUDIO_ELD_VALID_B		(1<<4)
+#define   AUDIO_ELD_VALID_C		(1<<8)
+#define   AUDIO_CP_READY_A		(1<<1)
+#define   AUDIO_CP_READY_B		(1<<5)
+#define   AUDIO_CP_READY_C		(1<<9)
+
 /* HSW Power Wells */
-#define HSW_PWR_WELL_CTL1		0x45400		/* BIOS */
-#define HSW_PWR_WELL_CTL2		0x45404		/* Driver */
-#define HSW_PWR_WELL_CTL3		0x45408		/* KVMR */
-#define HSW_PWR_WELL_CTL4		0x4540C		/* Debug */
-#define   HSW_PWR_WELL_ENABLE				(1<<31)
-#define   HSW_PWR_WELL_STATE				(1<<30)
-#define HSW_PWR_WELL_CTL5		0x45410
+#define HSW_PWR_WELL_CTL1			0x45400 /* BIOS */
+#define HSW_PWR_WELL_CTL2			0x45404 /* Driver */
+#define HSW_PWR_WELL_CTL3			0x45408 /* KVMR */
+#define HSW_PWR_WELL_CTL4			0x4540C /* Debug */
+#define   HSW_PWR_WELL_ENABLE			(1<<31)
+#define   HSW_PWR_WELL_STATE			(1<<30)
+#define HSW_PWR_WELL_CTL5			0x45410
 #define   HSW_PWR_WELL_ENABLE_SINGLE_STEP	(1<<31)
 #define   HSW_PWR_WELL_PWR_GATE_OVERRIDE	(1<<20)
-#define   HSW_PWR_WELL_FORCE_ON				(1<<19)
-#define HSW_PWR_WELL_CTL6		0x45414
+#define   HSW_PWR_WELL_FORCE_ON			(1<<19)
+#define HSW_PWR_WELL_CTL6			0x45414
 
 /* Per-pipe DDI Function Control */
-#define PIPE_DDI_FUNC_CTL_A			0x60400
-#define PIPE_DDI_FUNC_CTL_B			0x61400
-#define PIPE_DDI_FUNC_CTL_C			0x62400
+#define PIPE_DDI_FUNC_CTL_A		0x60400
+#define PIPE_DDI_FUNC_CTL_B		0x61400
+#define PIPE_DDI_FUNC_CTL_C		0x62400
 #define PIPE_DDI_FUNC_CTL_EDP		0x6F400
-#define DDI_FUNC_CTL(pipe) _PIPE(pipe, \
-					PIPE_DDI_FUNC_CTL_A, \
-					PIPE_DDI_FUNC_CTL_B)
+#define DDI_FUNC_CTL(pipe) _PIPE(pipe, PIPE_DDI_FUNC_CTL_A, \
+				       PIPE_DDI_FUNC_CTL_B)
 #define  PIPE_DDI_FUNC_ENABLE		(1<<31)
 /* Those bits are ignored by pipe EDP since it can only connect to DDI A */
-#define  PIPE_DDI_PORT_MASK			(7<<28)
-#define  PIPE_DDI_SELECT_PORT(x)		((x)<<28)
-#define  PIPE_DDI_MODE_SELECT_HDMI		(0<<24)
-#define  PIPE_DDI_MODE_SELECT_DVI		(1<<24)
+#define  PIPE_DDI_PORT_MASK		(7<<28)
+#define  PIPE_DDI_SELECT_PORT(x)	((x)<<28)
+#define  PIPE_DDI_MODE_SELECT_MASK	(7<<24)
+#define  PIPE_DDI_MODE_SELECT_HDMI	(0<<24)
+#define  PIPE_DDI_MODE_SELECT_DVI	(1<<24)
 #define  PIPE_DDI_MODE_SELECT_DP_SST	(2<<24)
 #define  PIPE_DDI_MODE_SELECT_DP_MST	(3<<24)
-#define  PIPE_DDI_MODE_SELECT_FDI		(4<<24)
-#define  PIPE_DDI_BPC_8					(0<<20)
-#define  PIPE_DDI_BPC_10				(1<<20)
-#define  PIPE_DDI_BPC_6					(2<<20)
-#define  PIPE_DDI_BPC_12				(3<<20)
-#define  PIPE_DDI_BFI_ENABLE			(1<<4)
-#define  PIPE_DDI_PORT_WIDTH_X1			(0<<1)
-#define  PIPE_DDI_PORT_WIDTH_X2			(1<<1)
-#define  PIPE_DDI_PORT_WIDTH_X4			(3<<1)
+#define  PIPE_DDI_MODE_SELECT_FDI	(4<<24)
+#define  PIPE_DDI_BPC_MASK		(7<<20)
+#define  PIPE_DDI_BPC_8			(0<<20)
+#define  PIPE_DDI_BPC_10		(1<<20)
+#define  PIPE_DDI_BPC_6			(2<<20)
+#define  PIPE_DDI_BPC_12		(3<<20)
+#define  PIPE_DDI_PVSYNC		(1<<17)
+#define  PIPE_DDI_PHSYNC		(1<<16)
+#define  PIPE_DDI_BFI_ENABLE		(1<<4)
+#define  PIPE_DDI_PORT_WIDTH_X1		(0<<1)
+#define  PIPE_DDI_PORT_WIDTH_X2		(1<<1)
+#define  PIPE_DDI_PORT_WIDTH_X4		(3<<1)
 
 /* DisplayPort Transport Control */
 #define DP_TP_CTL_A			0x64040
 #define DP_TP_CTL_B			0x64140
-#define DP_TP_CTL(port) _PORT(port, \
-					DP_TP_CTL_A, \
-					DP_TP_CTL_B)
-#define  DP_TP_CTL_ENABLE		(1<<31)
-#define  DP_TP_CTL_MODE_SST	(0<<27)
-#define  DP_TP_CTL_MODE_MST	(1<<27)
+#define DP_TP_CTL(port) _PORT(port, DP_TP_CTL_A, DP_TP_CTL_B)
+#define  DP_TP_CTL_ENABLE			(1<<31)
+#define  DP_TP_CTL_MODE_SST			(0<<27)
+#define  DP_TP_CTL_MODE_MST			(1<<27)
 #define  DP_TP_CTL_ENHANCED_FRAME_ENABLE	(1<<18)
-#define  DP_TP_CTL_FDI_AUTOTRAIN	(1<<15)
+#define  DP_TP_CTL_FDI_AUTOTRAIN		(1<<15)
 #define  DP_TP_CTL_LINK_TRAIN_MASK		(7<<8)
 #define  DP_TP_CTL_LINK_TRAIN_PAT1		(0<<8)
 #define  DP_TP_CTL_LINK_TRAIN_PAT2		(1<<8)
-#define  DP_TP_CTL_LINK_TRAIN_NORMAL	(3<<8)
+#define  DP_TP_CTL_LINK_TRAIN_NORMAL		(3<<8)
 
 /* DisplayPort Transport Status */
 #define DP_TP_STATUS_A			0x64044
 #define DP_TP_STATUS_B			0x64144
-#define DP_TP_STATUS(port) _PORT(port, \
-					DP_TP_STATUS_A, \
-					DP_TP_STATUS_B)
+#define DP_TP_STATUS(port) _PORT(port, DP_TP_STATUS_A, DP_TP_STATUS_B)
 #define  DP_TP_STATUS_AUTOTRAIN_DONE	(1<<12)
 
 /* DDI Buffer Control */
 #define DDI_BUF_CTL_A				0x64000
 #define DDI_BUF_CTL_B				0x64100
-#define DDI_BUF_CTL(port) _PORT(port, \
-					DDI_BUF_CTL_A, \
-					DDI_BUF_CTL_B)
-#define  DDI_BUF_CTL_ENABLE				(1<<31)
+#define DDI_BUF_CTL(port) _PORT(port, DDI_BUF_CTL_A, DDI_BUF_CTL_B)
+#define  DDI_BUF_CTL_ENABLE			(1<<31)
 #define  DDI_BUF_EMP_400MV_0DB_HSW		(0<<24)   /* Sel0 */
-#define  DDI_BUF_EMP_400MV_3_5DB_HSW	(1<<24)   /* Sel1 */
+#define  DDI_BUF_EMP_400MV_3_5DB_HSW		(1<<24)   /* Sel1 */
 #define  DDI_BUF_EMP_400MV_6DB_HSW		(2<<24)   /* Sel2 */
-#define  DDI_BUF_EMP_400MV_9_5DB_HSW	(3<<24)   /* Sel3 */
+#define  DDI_BUF_EMP_400MV_9_5DB_HSW		(3<<24)   /* Sel3 */
 #define  DDI_BUF_EMP_600MV_0DB_HSW		(4<<24)   /* Sel4 */
-#define  DDI_BUF_EMP_600MV_3_5DB_HSW	(5<<24)   /* Sel5 */
+#define  DDI_BUF_EMP_600MV_3_5DB_HSW		(5<<24)   /* Sel5 */
 #define  DDI_BUF_EMP_600MV_6DB_HSW		(6<<24)   /* Sel6 */
 #define  DDI_BUF_EMP_800MV_0DB_HSW		(7<<24)   /* Sel7 */
-#define  DDI_BUF_EMP_800MV_3_5DB_HSW	(8<<24)   /* Sel8 */
-#define  DDI_BUF_EMP_MASK				(0xf<<24)
-#define  DDI_BUF_IS_IDLE				(1<<7)
-#define  DDI_PORT_WIDTH_X1				(0<<1)
-#define  DDI_PORT_WIDTH_X2				(1<<1)
-#define  DDI_PORT_WIDTH_X4				(3<<1)
+#define  DDI_BUF_EMP_800MV_3_5DB_HSW		(8<<24)   /* Sel8 */
+#define  DDI_BUF_EMP_MASK			(0xf<<24)
+#define  DDI_BUF_IS_IDLE			(1<<7)
+#define  DDI_PORT_WIDTH_X1			(0<<1)
+#define  DDI_PORT_WIDTH_X2			(1<<1)
+#define  DDI_PORT_WIDTH_X4			(3<<1)
 #define  DDI_INIT_DISPLAY_DETECTED		(1<<0)
 
 /* DDI Buffer Translations */
 #define DDI_BUF_TRANS_A				0x64E00
 #define DDI_BUF_TRANS_B				0x64E60
-#define DDI_BUF_TRANS(port) _PORT(port, \
-					DDI_BUF_TRANS_A, \
-					DDI_BUF_TRANS_B)
+#define DDI_BUF_TRANS(port) _PORT(port, DDI_BUF_TRANS_A, DDI_BUF_TRANS_B)
 
 /* Sideband Interface (SBI) is programmed indirectly, via
  * SBI_ADDR, which contains the register offset; and SBI_DATA,
  * which contains the payload */
-#define SBI_ADDR				0xC6000
-#define SBI_DATA				0xC6004
+#define SBI_ADDR			0xC6000
+#define SBI_DATA			0xC6004
 #define SBI_CTL_STAT			0xC6008
 #define  SBI_CTL_OP_CRRD		(0x6<<8)
 #define  SBI_CTL_OP_CRWR		(0x7<<8)
 #define  SBI_RESPONSE_FAIL		(0x1<<1)
-#define  SBI_RESPONSE_SUCCESS	(0x0<<1)
-#define  SBI_BUSY				(0x1<<0)
-#define  SBI_READY				(0x0<<0)
+#define  SBI_RESPONSE_SUCCESS		(0x0<<1)
+#define  SBI_BUSY			(0x1<<0)
+#define  SBI_READY			(0x0<<0)
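
The comment above describes an indirect programming model; a hedged sketch of
a read using the driver's wait_for() poll helper (names illustrative, not the
actual accessor):

	static u32 example_sbi_read(struct drm_i915_private *dev_priv, u16 reg)
	{
		if (wait_for((I915_READ(SBI_CTL_STAT) & SBI_BUSY) == 0, 100))
			return 0;	/* interface stuck busy */

		I915_WRITE(SBI_ADDR, (u32)reg << 16);
		I915_WRITE(SBI_CTL_STAT, SBI_CTL_OP_CRRD | SBI_BUSY);

		if (wait_for((I915_READ(SBI_CTL_STAT) &
			      (SBI_BUSY | SBI_RESPONSE_FAIL)) == 0, 100))
			return 0;	/* timed out or response failed */

		return I915_READ(SBI_DATA);
	}
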
 
 /* SBI offsets */
-#define  SBI_SSCDIVINTPHASE6		0x0600
+#define  SBI_SSCDIVINTPHASE6			0x0600
 #define   SBI_SSCDIVINTPHASE_DIVSEL_MASK	((0x7f)<<1)
 #define   SBI_SSCDIVINTPHASE_DIVSEL(x)		((x)<<1)
 #define   SBI_SSCDIVINTPHASE_INCVAL_MASK	((0x7f)<<8)
 #define   SBI_SSCDIVINTPHASE_INCVAL(x)		((x)<<8)
-#define   SBI_SSCDIVINTPHASE_DIR(x)			((x)<<15)
+#define   SBI_SSCDIVINTPHASE_DIR(x)		((x)<<15)
 #define   SBI_SSCDIVINTPHASE_PROPAGATE		(1<<0)
-#define  SBI_SSCCTL					0x020c
+#define  SBI_SSCCTL				0x020c
 #define  SBI_SSCCTL6				0x060C
-#define   SBI_SSCCTL_DISABLE		(1<<0)
+#define   SBI_SSCCTL_DISABLE			(1<<0)
 #define  SBI_SSCAUXDIV6				0x0610
 #define   SBI_SSCAUXDIV_FINALDIV2SEL(x)		((x)<<4)
-#define  SBI_DBUFF0					0x2a00
+#define  SBI_DBUFF0				0x2a00
 
 /* LPT PIXCLK_GATE */
-#define PIXCLK_GATE				0xC6020
-#define  PIXCLK_GATE_UNGATE		1<<0
-#define  PIXCLK_GATE_GATE		0<<0
+#define PIXCLK_GATE			0xC6020
+#define  PIXCLK_GATE_UNGATE		(1<<0)
+#define  PIXCLK_GATE_GATE		(0<<0)
 
 /* SPLL */
-#define SPLL_CTL				0x46020
+#define SPLL_CTL			0x46020
 #define  SPLL_PLL_ENABLE		(1<<31)
 #define  SPLL_PLL_SCC			(1<<28)
 #define  SPLL_PLL_NON_SCC		(2<<28)
-#define  SPLL_PLL_FREQ_810MHz	(0<<26)
-#define  SPLL_PLL_FREQ_1350MHz	(1<<26)
+#define  SPLL_PLL_FREQ_810MHz		(0<<26)
+#define  SPLL_PLL_FREQ_1350MHz		(1<<26)
 
 /* WRPLL */
-#define WRPLL_CTL1				0x46040
-#define WRPLL_CTL2				0x46060
-#define  WRPLL_PLL_ENABLE				(1<<31)
-#define  WRPLL_PLL_SELECT_SSC			(0x01<<28)
-#define  WRPLL_PLL_SELECT_NON_SCC		(0x02<<28)
+#define WRPLL_CTL1			0x46040
+#define WRPLL_CTL2			0x46060
+#define  WRPLL_PLL_ENABLE		(1<<31)
+#define  WRPLL_PLL_SELECT_SSC		(0x01<<28)
+#define  WRPLL_PLL_SELECT_NON_SCC	(0x02<<28)
 #define  WRPLL_PLL_SELECT_LCPLL_2700	(0x03<<28)
 /* WRPLL divider programming */
-#define  WRPLL_DIVIDER_REFERENCE(x)		((x)<<0)
-#define  WRPLL_DIVIDER_POST(x)			((x)<<8)
-#define  WRPLL_DIVIDER_FEEDBACK(x)		((x)<<16)
+#define  WRPLL_DIVIDER_REFERENCE(x)	((x)<<0)
+#define  WRPLL_DIVIDER_POST(x)		((x)<<8)
+#define  WRPLL_DIVIDER_FEEDBACK(x)	((x)<<16)
 
 /* Port clock selection */
 #define PORT_CLK_SEL_A			0x46100
 #define PORT_CLK_SEL_B			0x46104
-#define PORT_CLK_SEL(port) _PORT(port, \
-					PORT_CLK_SEL_A, \
-					PORT_CLK_SEL_B)
+#define PORT_CLK_SEL(port) _PORT(port, PORT_CLK_SEL_A, PORT_CLK_SEL_B)
 #define  PORT_CLK_SEL_LCPLL_2700	(0<<29)
 #define  PORT_CLK_SEL_LCPLL_1350	(1<<29)
 #define  PORT_CLK_SEL_LCPLL_810		(2<<29)
-#define  PORT_CLK_SEL_SPLL			(3<<29)
+#define  PORT_CLK_SEL_SPLL		(3<<29)
 #define  PORT_CLK_SEL_WRPLL1		(4<<29)
 #define  PORT_CLK_SEL_WRPLL2		(5<<29)
 
 /* Pipe clock selection */
 #define PIPE_CLK_SEL_A			0x46140
 #define PIPE_CLK_SEL_B			0x46144
-#define PIPE_CLK_SEL(pipe) _PIPE(pipe, \
-					PIPE_CLK_SEL_A, \
-					PIPE_CLK_SEL_B)
+#define PIPE_CLK_SEL(pipe) _PIPE(pipe, PIPE_CLK_SEL_A, PIPE_CLK_SEL_B)
 /* For each pipe, we need to select the corresponding port clock */
-#define  PIPE_CLK_SEL_DISABLED	(0x0<<29)
-#define  PIPE_CLK_SEL_PORT(x)	((x+1)<<29)
+#define  PIPE_CLK_SEL_DISABLED		(0x0<<29)
+#define  PIPE_CLK_SEL_PORT(x)		(((x)+1)<<29)
 
 /* LCPLL Control */
-#define LCPLL_CTL				0x130040
+#define LCPLL_CTL			0x130040
 #define  LCPLL_PLL_DISABLE		(1<<31)
 #define  LCPLL_PLL_LOCK			(1<<30)
-#define  LCPLL_CD_CLOCK_DISABLE	(1<<25)
+#define  LCPLL_CD_CLOCK_DISABLE		(1<<25)
 #define  LCPLL_CD2X_CLOCK_DISABLE	(1<<23)
 
 /* Pipe WM_LINETIME - watermark line time */
 #define PIPE_WM_LINETIME_A		0x45270
 #define PIPE_WM_LINETIME_B		0x45274
-#define PIPE_WM_LINETIME(pipe) _PIPE(pipe, \
-					PIPE_WM_LINETIME_A, \
-					PIPE_WM_LINETIME_B)
-#define   PIPE_WM_LINETIME_MASK		(0x1ff)
-#define   PIPE_WM_LINETIME_TIME(x)			((x))
+#define PIPE_WM_LINETIME(pipe) _PIPE(pipe, PIPE_WM_LINETIME_A, \
+					   PIPE_WM_LINETIME_B)
+#define   PIPE_WM_LINETIME_MASK			(0x1ff)
+#define   PIPE_WM_LINETIME_TIME(x)		((x))
 #define   PIPE_WM_LINETIME_IPS_LINETIME_MASK	(0x1ff<<16)
-#define   PIPE_WM_LINETIME_IPS_LINETIME(x)		((x)<<16)
+#define   PIPE_WM_LINETIME_IPS_LINETIME(x)	((x)<<16)
 
 /* SFUSE_STRAP */
-#define SFUSE_STRAP				0xc2014
+#define SFUSE_STRAP			0xc2014
 #define  SFUSE_STRAP_DDIB_DETECTED	(1<<2)
 #define  SFUSE_STRAP_DDIC_DETECTED	(1<<1)
 #define  SFUSE_STRAP_DDID_DETECTED	(1<<0)
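
Nearly every two-instance register added above gets a _PIPE() or _PORT()
wrapper. Both expand to plain base-plus-stride arithmetic (as defined near the
top of i915_reg.h), which is why only the A and B addresses need spelling out:

	#define _PIPE(pipe, a, b) ((a) + (pipe)*((b)-(a)))
	#define _PORT(port, a, b) ((a) + (port)*((b)-(a)))

	/* e.g. CPT_AUD_CFG(0) == 0xe5000, CPT_AUD_CFG(1) == 0xe5100 */
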
diff --git a/drivers/gpu/drm/i915/i915_sysfs.c b/drivers/gpu/drm/i915/i915_sysfs.c
index 7631807..903eebd 100644
--- a/drivers/gpu/drm/i915/i915_sysfs.c
+++ b/drivers/gpu/drm/i915/i915_sysfs.c
@@ -46,32 +46,32 @@
 }
 
 static ssize_t
-show_rc6_mask(struct device *dev, struct device_attribute *attr, char *buf)
+show_rc6_mask(struct device *kdev, struct device_attribute *attr, char *buf)
 {
-	struct drm_minor *dminor = container_of(dev, struct drm_minor, kdev);
+	struct drm_minor *dminor = container_of(kdev, struct drm_minor, kdev);
 	return snprintf(buf, PAGE_SIZE, "%x", intel_enable_rc6(dminor->dev));
 }
 
 static ssize_t
-show_rc6_ms(struct device *dev, struct device_attribute *attr, char *buf)
+show_rc6_ms(struct device *kdev, struct device_attribute *attr, char *buf)
 {
-	struct drm_minor *dminor = container_of(dev, struct drm_minor, kdev);
+	struct drm_minor *dminor = container_of(kdev, struct drm_minor, kdev);
 	u32 rc6_residency = calc_residency(dminor->dev, GEN6_GT_GFX_RC6);
 	return snprintf(buf, PAGE_SIZE, "%u", rc6_residency);
 }
 
 static ssize_t
-show_rc6p_ms(struct device *dev, struct device_attribute *attr, char *buf)
+show_rc6p_ms(struct device *kdev, struct device_attribute *attr, char *buf)
 {
-	struct drm_minor *dminor = container_of(dev, struct drm_minor, kdev);
+	struct drm_minor *dminor = container_of(kdev, struct drm_minor, kdev);
 	u32 rc6p_residency = calc_residency(dminor->dev, GEN6_GT_GFX_RC6p);
 	return snprintf(buf, PAGE_SIZE, "%u", rc6p_residency);
 }
 
 static ssize_t
-show_rc6pp_ms(struct device *dev, struct device_attribute *attr, char *buf)
+show_rc6pp_ms(struct device *kdev, struct device_attribute *attr, char *buf)
 {
-	struct drm_minor *dminor = container_of(dev, struct drm_minor, kdev);
+	struct drm_minor *dminor = container_of(kdev, struct drm_minor, kdev);
 	u32 rc6pp_residency = calc_residency(dminor->dev, GEN6_GT_GFX_RC6pp);
 	return snprintf(buf, PAGE_SIZE, "%u", rc6pp_residency);
 }
@@ -93,6 +93,7 @@
 	.name = power_group_name,
 	.attrs =  rc6_attrs
 };
+#endif
 
 static int l3_access_valid(struct drm_device *dev, loff_t offset)
 {
@@ -202,37 +203,214 @@
 	.mmap = NULL
 };
 
+static ssize_t gt_cur_freq_mhz_show(struct device *kdev,
+				    struct device_attribute *attr, char *buf)
+{
+	struct drm_minor *minor = container_of(kdev, struct drm_minor, kdev);
+	struct drm_device *dev = minor->dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	int ret;
+
+	ret = i915_mutex_lock_interruptible(dev);
+	if (ret)
+		return ret;
+
+	ret = dev_priv->rps.cur_delay * GT_FREQUENCY_MULTIPLIER;
+	mutex_unlock(&dev->struct_mutex);
+
+	return snprintf(buf, PAGE_SIZE, "%d", ret);
+}
+
+static ssize_t gt_max_freq_mhz_show(struct device *kdev, struct device_attribute *attr, char *buf)
+{
+	struct drm_minor *minor = container_of(kdev, struct drm_minor, kdev);
+	struct drm_device *dev = minor->dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	int ret;
+
+	ret = i915_mutex_lock_interruptible(dev);
+	if (ret)
+		return ret;
+
+	ret = dev_priv->rps.max_delay * GT_FREQUENCY_MULTIPLIER;
+	mutex_unlock(&dev->struct_mutex);
+
+	return snprintf(buf, PAGE_SIZE, "%d", ret);
+}
+
+static ssize_t gt_max_freq_mhz_store(struct device *kdev,
+				     struct device_attribute *attr,
+				     const char *buf, size_t count)
+{
+	struct drm_minor *minor = container_of(kdev, struct drm_minor, kdev);
+	struct drm_device *dev = minor->dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	u32 val, rp_state_cap, hw_max, hw_min;
+	ssize_t ret;
+
+	ret = kstrtou32(buf, 0, &val);
+	if (ret)
+		return ret;
+
+	val /= GT_FREQUENCY_MULTIPLIER;
+
+	ret = mutex_lock_interruptible(&dev->struct_mutex);
+	if (ret)
+		return ret;
+
+	rp_state_cap = I915_READ(GEN6_RP_STATE_CAP);
+	hw_max = (rp_state_cap & 0xff);
+	hw_min = ((rp_state_cap & 0xff0000) >> 16);
+
+	if (val < hw_min || val > hw_max || val < dev_priv->rps.min_delay) {
+		mutex_unlock(&dev->struct_mutex);
+		return -EINVAL;
+	}
+
+	if (dev_priv->rps.cur_delay > val)
+		gen6_set_rps(dev_priv->dev, val);
+
+	dev_priv->rps.max_delay = val;
+
+	mutex_unlock(&dev->struct_mutex);
+
+	return count;
+}
+
+static ssize_t gt_min_freq_mhz_show(struct device *kdev, struct device_attribute *attr, char *buf)
+{
+	struct drm_minor *minor = container_of(kdev, struct drm_minor, kdev);
+	struct drm_device *dev = minor->dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	int ret;
+
+	ret = i915_mutex_lock_interruptible(dev);
+	if (ret)
+		return ret;
+
+	ret = dev_priv->rps.min_delay * GT_FREQUENCY_MULTIPLIER;
+	mutex_unlock(&dev->struct_mutex);
+
+	return snprintf(buf, PAGE_SIZE, "%d", ret);
+}
+
+static ssize_t gt_min_freq_mhz_store(struct device *kdev,
+				     struct device_attribute *attr,
+				     const char *buf, size_t count)
+{
+	struct drm_minor *minor = container_of(kdev, struct drm_minor, kdev);
+	struct drm_device *dev = minor->dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	u32 val, rp_state_cap, hw_max, hw_min;
+	ssize_t ret;
+
+	ret = kstrtou32(buf, 0, &val);
+	if (ret)
+		return ret;
+
+	val /= GT_FREQUENCY_MULTIPLIER;
+
+	ret = mutex_lock_interruptible(&dev->struct_mutex);
+	if (ret)
+		return ret;
+
+	rp_state_cap = I915_READ(GEN6_RP_STATE_CAP);
+	hw_max = (rp_state_cap & 0xff);
+	hw_min = ((rp_state_cap & 0xff0000) >> 16);
+
+	if (val < hw_min || val > hw_max || val > dev_priv->rps.max_delay) {
+		mutex_unlock(&dev->struct_mutex);
+		return -EINVAL;
+	}
+
+	if (dev_priv->rps.cur_delay < val)
+		gen6_set_rps(dev_priv->dev, val);
+
+	dev_priv->rps.min_delay = val;
+
+	mutex_unlock(&dev->struct_mutex);
+
+	return count;
+}
+
+static DEVICE_ATTR(gt_cur_freq_mhz, S_IRUGO, gt_cur_freq_mhz_show, NULL);
+static DEVICE_ATTR(gt_max_freq_mhz, S_IRUGO | S_IWUSR, gt_max_freq_mhz_show, gt_max_freq_mhz_store);
+static DEVICE_ATTR(gt_min_freq_mhz, S_IRUGO | S_IWUSR, gt_min_freq_mhz_show, gt_min_freq_mhz_store);
+
+static ssize_t gt_rp_mhz_show(struct device *kdev, struct device_attribute *attr, char *buf);
+static DEVICE_ATTR(gt_RP0_freq_mhz, S_IRUGO, gt_rp_mhz_show, NULL);
+static DEVICE_ATTR(gt_RP1_freq_mhz, S_IRUGO, gt_rp_mhz_show, NULL);
+static DEVICE_ATTR(gt_RPn_freq_mhz, S_IRUGO, gt_rp_mhz_show, NULL);
+
+/* For now we have a static number of RP states */
+static ssize_t gt_rp_mhz_show(struct device *kdev, struct device_attribute *attr, char *buf)
+{
+	struct drm_minor *minor = container_of(kdev, struct drm_minor, kdev);
+	struct drm_device *dev = minor->dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	u32 val, rp_state_cap;
+	ssize_t ret;
+
+	ret = mutex_lock_interruptible(&dev->struct_mutex);
+	if (ret)
+		return ret;
+	rp_state_cap = I915_READ(GEN6_RP_STATE_CAP);
+	mutex_unlock(&dev->struct_mutex);
+
+	if (attr == &dev_attr_gt_RP0_freq_mhz) {
+		val = ((rp_state_cap & 0x0000ff) >> 0) * GT_FREQUENCY_MULTIPLIER;
+	} else if (attr == &dev_attr_gt_RP1_freq_mhz) {
+		val = ((rp_state_cap & 0x00ff00) >> 8) * GT_FREQUENCY_MULTIPLIER;
+	} else if (attr == &dev_attr_gt_RPn_freq_mhz) {
+		val = ((rp_state_cap & 0xff0000) >> 16) * GT_FREQUENCY_MULTIPLIER;
+	} else {
+		BUG();
+	}
+	return snprintf(buf, PAGE_SIZE, "%d", val);
+}
+
+static const struct attribute *gen6_attrs[] = {
+	&dev_attr_gt_cur_freq_mhz.attr,
+	&dev_attr_gt_max_freq_mhz.attr,
+	&dev_attr_gt_min_freq_mhz.attr,
+	&dev_attr_gt_RP0_freq_mhz.attr,
+	&dev_attr_gt_RP1_freq_mhz.attr,
+	&dev_attr_gt_RPn_freq_mhz.attr,
+	NULL,
+};
+
 void i915_setup_sysfs(struct drm_device *dev)
 {
 	int ret;
 
+#ifdef CONFIG_PM
 	if (INTEL_INFO(dev)->gen >= 6) {
 		ret = sysfs_merge_group(&dev->primary->kdev.kobj,
 					&rc6_attr_group);
 		if (ret)
 			DRM_ERROR("RC6 residency sysfs setup failed\n");
 	}
-
-	if (IS_IVYBRIDGE(dev)) {
+#endif
+	if (HAS_L3_GPU_CACHE(dev)) {
 		ret = device_create_bin_file(&dev->primary->kdev, &dpf_attrs);
 		if (ret)
 			DRM_ERROR("l3 parity sysfs setup failed\n");
 	}
+
+	if (INTEL_INFO(dev)->gen >= 6) {
+		ret = sysfs_create_files(&dev->primary->kdev.kobj, gen6_attrs);
+		if (ret)
+			DRM_ERROR("gen6 sysfs setup failed\n");
+	}
 }
 
 void i915_teardown_sysfs(struct drm_device *dev)
 {
+	sysfs_remove_files(&dev->primary->kdev.kobj, gen6_attrs);
 	device_remove_bin_file(&dev->primary->kdev,  &dpf_attrs);
+#ifdef CONFIG_PM
 	sysfs_unmerge_group(&dev->primary->kdev.kobj, &rc6_attr_group);
+#endif
 }
-#else
-void i915_setup_sysfs(struct drm_device *dev)
-{
-	return;
-}
-
-void i915_teardown_sysfs(struct drm_device *dev)
-{
-	return;
-}
-#endif /* CONFIG_PM */
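
The gt_*_freq_mhz files follow the stock device-attribute recipe: show()
returns the byte count written into buf, store() returns the bytes consumed
(or a negative errno, which sysfs propagates to the writer). A minimal sketch
with a hypothetical attribute, not part of this patch:

	#include <linux/device.h>

	static ssize_t example_show(struct device *kdev,
				    struct device_attribute *attr, char *buf)
	{
		return snprintf(buf, PAGE_SIZE, "%d\n", 42);
	}
	static DEVICE_ATTR(example, S_IRUGO, example_show, NULL);

	/* register: device_create_file(kdev, &dev_attr_example);
	 * remove:   device_remove_file(kdev, &dev_attr_example); */
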
diff --git a/drivers/gpu/drm/i915/i915_trace.h b/drivers/gpu/drm/i915/i915_trace.h
index fe90b3a..8134421 100644
--- a/drivers/gpu/drm/i915/i915_trace.h
+++ b/drivers/gpu/drm/i915/i915_trace.h
@@ -214,22 +214,18 @@
 );
 
 TRACE_EVENT(i915_gem_evict_everything,
-	    TP_PROTO(struct drm_device *dev, bool purgeable),
-	    TP_ARGS(dev, purgeable),
+	    TP_PROTO(struct drm_device *dev),
+	    TP_ARGS(dev),
 
 	    TP_STRUCT__entry(
 			     __field(u32, dev)
-			     __field(bool, purgeable)
 			    ),
 
 	    TP_fast_assign(
 			   __entry->dev = dev->primary->index;
-			   __entry->purgeable = purgeable;
 			  ),
 
-	    TP_printk("dev=%d%s",
-		      __entry->dev,
-		      __entry->purgeable ? ", purgeable only" : "")
+	    TP_printk("dev=%d", __entry->dev)
 );
 
 TRACE_EVENT(i915_gem_ring_dispatch,
@@ -434,6 +430,21 @@
 		(u32)(__entry->val >> 32))
 );
 
+TRACE_EVENT(intel_gpu_freq_change,
+	    TP_PROTO(u32 freq),
+	    TP_ARGS(freq),
+
+	    TP_STRUCT__entry(
+			     __field(u32, freq)
+			     ),
+
+	    TP_fast_assign(
+			   __entry->freq = freq;
+			   ),
+
+	    TP_printk("new_freq=%u", __entry->freq)
+);
+
 #endif /* _I915_TRACE_H_ */
 
 /* This part must be outside protection */
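
TRACE_EVENT(intel_gpu_freq_change, ...) generates a
trace_intel_gpu_freq_change() call site for the driver; the frequency-setting
paths are expected to emit it after the hardware write, along the lines of
(illustrative; gen6_set_rps() is the natural caller):

	trace_intel_gpu_freq_change(new_delay * GT_FREQUENCY_MULTIPLIER);
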
diff --git a/drivers/gpu/drm/i915/intel_crt.c b/drivers/gpu/drm/i915/intel_crt.c
index 23bdc8c..c42b980 100644
--- a/drivers/gpu/drm/i915/intel_crt.c
+++ b/drivers/gpu/drm/i915/intel_crt.c
@@ -47,6 +47,7 @@
 struct intel_crt {
 	struct intel_encoder base;
 	bool force_hotplug_required;
+	u32 adpa_reg;
 };
 
 static struct intel_crt *intel_attached_crt(struct drm_connector *connector)
@@ -55,41 +56,67 @@
 			    struct intel_crt, base);
 }
 
-static void pch_crt_dpms(struct drm_encoder *encoder, int mode)
+static struct intel_crt *intel_encoder_to_crt(struct intel_encoder *encoder)
 {
-	struct drm_device *dev = encoder->dev;
-	struct drm_i915_private *dev_priv = dev->dev_private;
-	u32 temp;
-
-	temp = I915_READ(PCH_ADPA);
-	temp &= ~ADPA_DAC_ENABLE;
-
-	switch (mode) {
-	case DRM_MODE_DPMS_ON:
-		temp |= ADPA_DAC_ENABLE;
-		break;
-	case DRM_MODE_DPMS_STANDBY:
-	case DRM_MODE_DPMS_SUSPEND:
-	case DRM_MODE_DPMS_OFF:
-		/* Just leave port enable cleared */
-		break;
-	}
-
-	I915_WRITE(PCH_ADPA, temp);
+	return container_of(encoder, struct intel_crt, base);
 }
 
-static void gmch_crt_dpms(struct drm_encoder *encoder, int mode)
+static bool intel_crt_get_hw_state(struct intel_encoder *encoder,
+				   enum pipe *pipe)
 {
-	struct drm_device *dev = encoder->dev;
+	struct drm_device *dev = encoder->base.dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct intel_crt *crt = intel_encoder_to_crt(encoder);
+	u32 tmp;
+
+	tmp = I915_READ(crt->adpa_reg);
+
+	if (!(tmp & ADPA_DAC_ENABLE))
+		return false;
+
+	if (HAS_PCH_CPT(dev))
+		*pipe = PORT_TO_PIPE_CPT(tmp);
+	else
+		*pipe = PORT_TO_PIPE(tmp);
+
+	return true;
+}
+
+static void intel_disable_crt(struct intel_encoder *encoder)
+{
+	struct drm_i915_private *dev_priv = encoder->base.dev->dev_private;
+	struct intel_crt *crt = intel_encoder_to_crt(encoder);
 	u32 temp;
 
-	temp = I915_READ(ADPA);
+	temp = I915_READ(crt->adpa_reg);
 	temp &= ~(ADPA_HSYNC_CNTL_DISABLE | ADPA_VSYNC_CNTL_DISABLE);
 	temp &= ~ADPA_DAC_ENABLE;
+	I915_WRITE(crt->adpa_reg, temp);
+}
 
-	if (IS_VALLEYVIEW(dev) && mode != DRM_MODE_DPMS_ON)
-		mode = DRM_MODE_DPMS_OFF;
+static void intel_enable_crt(struct intel_encoder *encoder)
+{
+	struct drm_i915_private *dev_priv = encoder->base.dev->dev_private;
+	struct intel_crt *crt = intel_encoder_to_crt(encoder);
+	u32 temp;
+
+	temp = I915_READ(crt->adpa_reg);
+	temp |= ADPA_DAC_ENABLE;
+	I915_WRITE(crt->adpa_reg, temp);
+}
+
+/* Note: The caller is required to filter out dpms modes not supported by the
+ * platform. */
+static void intel_crt_set_dpms(struct intel_encoder *encoder, int mode)
+{
+	struct drm_device *dev = encoder->base.dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct intel_crt *crt = intel_encoder_to_crt(encoder);
+	u32 temp;
+
+	temp = I915_READ(crt->adpa_reg);
+	temp &= ~(ADPA_HSYNC_CNTL_DISABLE | ADPA_VSYNC_CNTL_DISABLE);
+	temp &= ~ADPA_DAC_ENABLE;
 
 	switch (mode) {
 	case DRM_MODE_DPMS_ON:
@@ -106,7 +133,51 @@
 		break;
 	}
 
-	I915_WRITE(ADPA, temp);
+	I915_WRITE(crt->adpa_reg, temp);
+}
+
+static void intel_crt_dpms(struct drm_connector *connector, int mode)
+{
+	struct drm_device *dev = connector->dev;
+	struct intel_encoder *encoder = intel_attached_encoder(connector);
+	struct drm_crtc *crtc;
+	int old_dpms;
+
+	/* PCH platforms and VLV only support on/off. */
+	if (INTEL_INFO(dev)->gen >= 5 && mode != DRM_MODE_DPMS_ON)
+		mode = DRM_MODE_DPMS_OFF;
+
+	if (mode == connector->dpms)
+		return;
+
+	old_dpms = connector->dpms;
+	connector->dpms = mode;
+
+	/* Only need to change hw state when actually enabled */
+	crtc = encoder->base.crtc;
+	if (!crtc) {
+		encoder->connectors_active = false;
+		return;
+	}
+
+	/* We need the pipe to run for anything but OFF. */
+	if (mode == DRM_MODE_DPMS_OFF)
+		encoder->connectors_active = false;
+	else
+		encoder->connectors_active = true;
+
+	if (mode < old_dpms) {
+		/* From off to on, enable the pipe first. */
+		intel_crtc_update_dpms(crtc);
+
+		intel_crt_set_dpms(encoder, mode);
+	} else {
+		intel_crt_set_dpms(encoder, mode);
+
+		intel_crtc_update_dpms(crtc);
+	}
+
+	intel_modeset_check_state(connector->dev);
 }
 
 static int intel_crt_mode_valid(struct drm_connector *connector,
@@ -145,19 +216,15 @@
 
 	struct drm_device *dev = encoder->dev;
 	struct drm_crtc *crtc = encoder->crtc;
+	struct intel_crt *crt =
+		intel_encoder_to_crt(to_intel_encoder(encoder));
 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	int dpll_md_reg;
 	u32 adpa, dpll_md;
-	u32 adpa_reg;
 
 	dpll_md_reg = DPLL_MD(intel_crtc->pipe);
 
-	if (HAS_PCH_SPLIT(dev))
-		adpa_reg = PCH_ADPA;
-	else
-		adpa_reg = ADPA;
-
 	/*
 	 * Disable separate mode multiplier used when cloning SDVO to CRT
 	 * XXX this needs to be adjusted when we really are cloning
@@ -185,7 +252,7 @@
 	if (!HAS_PCH_SPLIT(dev))
 		I915_WRITE(BCLRPAT(intel_crtc->pipe), 0);
 
-	I915_WRITE(adpa_reg, adpa);
+	I915_WRITE(crt->adpa_reg, adpa);
 }
 
 static bool intel_ironlake_crt_detect_hotplug(struct drm_connector *connector)
@@ -545,14 +612,12 @@
 		return connector->status;
 
 	/* for pre-945g platforms use load detect */
-	if (intel_get_load_detect_pipe(&crt->base, connector, NULL,
-				       &tmp)) {
+	if (intel_get_load_detect_pipe(connector, NULL, &tmp)) {
 		if (intel_crt_detect_ddc(connector))
 			status = connector_status_connected;
 		else
 			status = intel_crt_load_detect(crt);
-		intel_release_load_detect_pipe(&crt->base, connector,
-					       &tmp);
+		intel_release_load_detect_pipe(connector, &tmp);
 	} else
 		status = connector_status_unknown;
 
@@ -603,25 +668,15 @@
  * Routines for controlling stuff on the analog port
  */
 
-static const struct drm_encoder_helper_funcs pch_encoder_funcs = {
+static const struct drm_encoder_helper_funcs crt_encoder_funcs = {
 	.mode_fixup = intel_crt_mode_fixup,
-	.prepare = intel_encoder_prepare,
-	.commit = intel_encoder_commit,
 	.mode_set = intel_crt_mode_set,
-	.dpms = pch_crt_dpms,
-};
-
-static const struct drm_encoder_helper_funcs gmch_encoder_funcs = {
-	.mode_fixup = intel_crt_mode_fixup,
-	.prepare = intel_encoder_prepare,
-	.commit = intel_encoder_commit,
-	.mode_set = intel_crt_mode_set,
-	.dpms = gmch_crt_dpms,
+	.disable = intel_encoder_noop,
 };
 
 static const struct drm_connector_funcs intel_crt_connector_funcs = {
 	.reset = intel_crt_reset,
-	.dpms = drm_helper_connector_dpms,
+	.dpms = intel_crt_dpms,
 	.detect = intel_crt_detect,
 	.fill_modes = drm_helper_probe_single_connector_modes,
 	.destroy = intel_crt_destroy,
@@ -662,7 +717,6 @@
 	struct intel_crt *crt;
 	struct intel_connector *intel_connector;
 	struct drm_i915_private *dev_priv = dev->dev_private;
-	const struct drm_encoder_helper_funcs *encoder_helper_funcs;
 
 	/* Skip machines without VGA that falsely report hotplug events */
 	if (dmi_check_system(intel_no_crt))
@@ -688,13 +742,11 @@
 	intel_connector_attach_encoder(intel_connector, &crt->base);
 
 	crt->base.type = INTEL_OUTPUT_ANALOG;
-	crt->base.clone_mask = (1 << INTEL_SDVO_NON_TV_CLONE_BIT |
-				1 << INTEL_ANALOG_CLONE_BIT |
-				1 << INTEL_SDVO_LVDS_CLONE_BIT);
+	crt->base.cloneable = true;
 	if (IS_HASWELL(dev))
 		crt->base.crtc_mask = (1 << 0);
 	else
-		crt->base.crtc_mask = (1 << 0) | (1 << 1);
+		crt->base.crtc_mask = (1 << 0) | (1 << 1) | (1 << 2);
 
 	if (IS_GEN2(dev))
 		connector->interlace_allowed = 0;
@@ -703,11 +755,18 @@
 	connector->doublescan_allowed = 0;
 
 	if (HAS_PCH_SPLIT(dev))
-		encoder_helper_funcs = &pch_encoder_funcs;
+		crt->adpa_reg = PCH_ADPA;
+	else if (IS_VALLEYVIEW(dev))
+		crt->adpa_reg = VLV_ADPA;
 	else
-		encoder_helper_funcs = &gmch_encoder_funcs;
+		crt->adpa_reg = ADPA;
 
-	drm_encoder_helper_add(&crt->base.base, encoder_helper_funcs);
+	crt->base.disable = intel_disable_crt;
+	crt->base.enable = intel_enable_crt;
+	crt->base.get_hw_state = intel_crt_get_hw_state;
+	intel_connector->get_hw_state = intel_connector_get_hw_state;
+
+	drm_encoder_helper_add(&crt->base.base, &crt_encoder_funcs);
 	drm_connector_helper_add(connector, &intel_crt_connector_helper_funcs);
 
 	drm_sysfs_connector_add(connector);
diff --git a/drivers/gpu/drm/i915/intel_ddi.c b/drivers/gpu/drm/i915/intel_ddi.c
index 933c748..bfe3754 100644
--- a/drivers/gpu/drm/i915/intel_ddi.c
+++ b/drivers/gpu/drm/i915/intel_ddi.c
@@ -250,7 +250,7 @@
 	case PORT_B:
 	case PORT_C:
 	case PORT_D:
-		intel_hdmi_init(dev, DDI_BUF_CTL(port));
+		intel_hdmi_init(dev, DDI_BUF_CTL(port), port);
 		break;
 	default:
 		DRM_DEBUG_DRIVER("No handlers defined for port %d, skipping DDI initialization\n",
@@ -267,7 +267,8 @@
 	u16 r2;		/* Reference divider */
 };
 
-/* Table of matching values for WRPLL clocks programming for each frequency */
+/* Table of matching values for WRPLL clock programming for each frequency.
+ * The code assumes this table is sorted. */
 static const struct wrpll_tmds_clock wrpll_tmds_clock_table[] = {
 	{19750,	38,	25,	18},
 	{20000,	48,	32,	18},
@@ -277,7 +278,6 @@
 	{23000,	36,	23,	15},
 	{23500,	40,	40,	23},
 	{23750,	26,	16,	14},
-	{23750,	26,	16,	14},
 	{24000,	36,	24,	15},
 	{25000,	36,	25,	15},
 	{25175,	26,	40,	33},
@@ -437,7 +437,6 @@
 	{108000,	8,	24,	15},
 	{108108,	8,	173,	108},
 	{109000,	6,	23,	19},
-	{109000,	6,	23,	19},
 	{110000,	6,	22,	18},
 	{110013,	6,	22,	18},
 	{110250,	8,	49,	30},
@@ -614,7 +613,6 @@
 	{218250,	4,	42,	26},
 	{218750,	4,	34,	21},
 	{219000,	4,	47,	29},
-	{219000,	4,	47,	29},
 	{220000,	4,	44,	27},
 	{220640,	4,	49,	30},
 	{220750,	4,	36,	22},
@@ -658,7 +656,7 @@
 	struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder);
 	int port = intel_hdmi->ddi_port;
 	int pipe = intel_crtc->pipe;
-	int p, n2, r2, valid=0;
+	int p, n2, r2;
 	u32 temp, i;
 
 	/* On Haswell, we need to enable the clocks and prepare DDI function to
@@ -666,26 +664,23 @@
 	 */
 	DRM_DEBUG_KMS("Preparing HDMI DDI mode for Haswell on port %c, pipe %c\n", port_name(port), pipe_name(pipe));
 
-	for (i=0; i < ARRAY_SIZE(wrpll_tmds_clock_table); i++) {
-		if (crtc->mode.clock == wrpll_tmds_clock_table[i].clock) {
-			p = wrpll_tmds_clock_table[i].p;
-			n2 = wrpll_tmds_clock_table[i].n2;
-			r2 = wrpll_tmds_clock_table[i].r2;
-
-			DRM_DEBUG_KMS("WR PLL clock: found settings for %dKHz refresh rate: p=%d, n2=%d, r2=%d\n",
-					crtc->mode.clock,
-					p, n2, r2);
-
-			valid = 1;
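+	/* The table is sorted; stop at the first entry with a clock at or above the mode clock. */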
+	for (i = 0; i < ARRAY_SIZE(wrpll_tmds_clock_table); i++)
+		if (crtc->mode.clock <= wrpll_tmds_clock_table[i].clock)
 			break;
-		}
-	}
 
-	if (!valid) {
-		DRM_ERROR("Unable to find WR PLL clock settings for %dKHz refresh rate\n",
-				crtc->mode.clock);
-		return;
-	}
+	if (i == ARRAY_SIZE(wrpll_tmds_clock_table))
+		i--;
+
+	p = wrpll_tmds_clock_table[i].p;
+	n2 = wrpll_tmds_clock_table[i].n2;
+	r2 = wrpll_tmds_clock_table[i].r2;
+
+	if (wrpll_tmds_clock_table[i].clock != crtc->mode.clock)
+		DRM_INFO("WR PLL: using settings for %dKHz on %dKHz mode\n",
+			 wrpll_tmds_clock_table[i].clock, crtc->mode.clock);
+
+	DRM_DEBUG_KMS("WR PLL: %dKHz refresh rate with p=%d, n2=%d r2=%d\n",
+		      crtc->mode.clock, p, n2, r2);
 
 	/* Enable LCPLL if disabled */
 	temp = I915_READ(LCPLL_CTL);
@@ -718,46 +713,107 @@
 		/* Proper support for digital audio needs a new logic and a new set
 		 * of registers, so we leave it for future patch bombing.
 		 */
-		DRM_DEBUG_DRIVER("HDMI audio on pipe %c not yet supported on DDI\n",
+		DRM_DEBUG_DRIVER("HDMI audio on pipe %c on DDI\n",
 				 pipe_name(intel_crtc->pipe));
+
+		/* write eld */
+		DRM_DEBUG_DRIVER("HDMI audio: write eld information\n");
+		intel_write_eld(encoder, adjusted_mode);
 	}
 
 	/* Enable PIPE_DDI_FUNC_CTL for the pipe to work in HDMI mode */
-	temp = I915_READ(DDI_FUNC_CTL(pipe));
-	temp &= ~PIPE_DDI_PORT_MASK;
-	temp &= ~PIPE_DDI_BPC_12;
-	temp |= PIPE_DDI_SELECT_PORT(port) |
-			PIPE_DDI_MODE_SELECT_HDMI |
-			((intel_crtc->bpp > 24) ?
-				PIPE_DDI_BPC_12 :
-				PIPE_DDI_BPC_8) |
-			PIPE_DDI_FUNC_ENABLE;
+	temp = PIPE_DDI_FUNC_ENABLE | PIPE_DDI_SELECT_PORT(port);
+
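+	/* intel_crtc->bpp counts total bits per pixel, so 18/24/30/36 select 6/8/10/12 bpc. */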
+	switch (intel_crtc->bpp) {
+	case 18:
+		temp |= PIPE_DDI_BPC_6;
+		break;
+	case 24:
+		temp |= PIPE_DDI_BPC_8;
+		break;
+	case 30:
+		temp |= PIPE_DDI_BPC_10;
+		break;
+	case 36:
+		temp |= PIPE_DDI_BPC_12;
+		break;
+	default:
+		WARN(1, "%d bpp unsupported by pipe DDI function\n",
+		     intel_crtc->bpp);
+	}
+
+	if (intel_hdmi->has_hdmi_sink)
+		temp |= PIPE_DDI_MODE_SELECT_HDMI;
+	else
+		temp |= PIPE_DDI_MODE_SELECT_DVI;
+
+	if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC)
+		temp |= PIPE_DDI_PVSYNC;
+	if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC)
+		temp |= PIPE_DDI_PHSYNC;
 
 	I915_WRITE(DDI_FUNC_CTL(pipe), temp);
 
 	intel_hdmi->set_infoframes(encoder, adjusted_mode);
 }
 
-void intel_ddi_dpms(struct drm_encoder *encoder, int mode)
+bool intel_ddi_get_hw_state(struct intel_encoder *encoder,
+			    enum pipe *pipe)
 {
-	struct drm_device *dev = encoder->dev;
+	struct drm_device *dev = encoder->base.dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
-	struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder);
+	struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(&encoder->base);
+	u32 tmp;
+	int i;
+
+	tmp = I915_READ(DDI_BUF_CTL(intel_hdmi->ddi_port));
+
+	if (!(tmp & DDI_BUF_CTL_ENABLE))
+		return false;
+
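+	/* The port is enabled; scan the pipes to find the one driving it. */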
+	for_each_pipe(i) {
+		tmp = I915_READ(DDI_FUNC_CTL(i));
+
+		if ((tmp & PIPE_DDI_PORT_MASK)
+		    == PIPE_DDI_SELECT_PORT(intel_hdmi->ddi_port)) {
+			*pipe = i;
+			return true;
+		}
+	}
+
+	DRM_DEBUG_KMS("No pipe for ddi port %i found\n", intel_hdmi->ddi_port);
+
+	return true;
+}
+
+void intel_enable_ddi(struct intel_encoder *encoder)
+{
+	struct drm_device *dev = encoder->base.dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(&encoder->base);
 	int port = intel_hdmi->ddi_port;
 	u32 temp;
 
 	temp = I915_READ(DDI_BUF_CTL(port));
-
-	if (mode != DRM_MODE_DPMS_ON) {
-		temp &= ~DDI_BUF_CTL_ENABLE;
-	} else {
-		temp |= DDI_BUF_CTL_ENABLE;
-	}
+	temp |= DDI_BUF_CTL_ENABLE;
 
 	/* Enable DDI_BUF_CTL. In HDMI/DVI mode, the port width,
 	 * and swing/emphasis values are ignored so nothing special needs
 	 * to be done besides enabling the port.
 	 */
-	I915_WRITE(DDI_BUF_CTL(port),
-			temp);
+	I915_WRITE(DDI_BUF_CTL(port), temp);
+}
+
+void intel_disable_ddi(struct intel_encoder *encoder)
+{
+	struct drm_device *dev = encoder->base.dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(&encoder->base);
+	int port = intel_hdmi->ddi_port;
+	u32 temp;
+
+	temp = I915_READ(DDI_BUF_CTL(port));
+	temp &= ~DDI_BUF_CTL_ENABLE;
+
+	I915_WRITE(DDI_BUF_CTL(port), temp);
 }
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index c040aee..6f5aafa 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -1006,7 +1006,7 @@
 		/* Wait for the Pipe State to go off */
 		if (wait_for((I915_READ(reg) & I965_PIPECONF_ACTIVE) == 0,
 			     100))
-			DRM_DEBUG_KMS("pipe_off wait timed out\n");
+			WARN(1, "pipe_off wait timed out\n");
 	} else {
 		u32 last_line, line_mask;
 		int reg = PIPEDSL(pipe);
@@ -1024,7 +1024,7 @@
 		} while (((I915_READ(reg) & line_mask) != last_line) &&
 			 time_after(timeout, jiffies));
 		if (time_after(jiffies, timeout))
-			DRM_DEBUG_KMS("pipe_off wait timed out\n");
+			WARN(1, "pipe_off wait timed out\n");
 	}
 }
 
@@ -1431,6 +1431,8 @@
  * protect mechanism may be enabled.
  *
  * Note!  This is for pre-ILK only.
+ *
+ * Unfortunately needed by dvo_ns2501 since the dvo depends on it running.
  */
 static void intel_enable_pll(struct drm_i915_private *dev_priv, enum pipe pipe)
 {
@@ -1860,59 +1862,6 @@
 	intel_wait_for_vblank(dev_priv->dev, pipe);
 }
 
-static void disable_pch_dp(struct drm_i915_private *dev_priv,
-			   enum pipe pipe, int reg, u32 port_sel)
-{
-	u32 val = I915_READ(reg);
-	if (dp_pipe_enabled(dev_priv, pipe, port_sel, val)) {
-		DRM_DEBUG_KMS("Disabling pch dp %x on pipe %d\n", reg, pipe);
-		I915_WRITE(reg, val & ~DP_PORT_EN);
-	}
-}
-
-static void disable_pch_hdmi(struct drm_i915_private *dev_priv,
-			     enum pipe pipe, int reg)
-{
-	u32 val = I915_READ(reg);
-	if (hdmi_pipe_enabled(dev_priv, pipe, val)) {
-		DRM_DEBUG_KMS("Disabling pch HDMI %x on pipe %d\n",
-			      reg, pipe);
-		I915_WRITE(reg, val & ~PORT_ENABLE);
-	}
-}
-
-/* Disable any ports connected to this transcoder */
-static void intel_disable_pch_ports(struct drm_i915_private *dev_priv,
-				    enum pipe pipe)
-{
-	u32 reg, val;
-
-	val = I915_READ(PCH_PP_CONTROL);
-	I915_WRITE(PCH_PP_CONTROL, val | PANEL_UNLOCK_REGS);
-
-	disable_pch_dp(dev_priv, pipe, PCH_DP_B, TRANS_DP_PORT_SEL_B);
-	disable_pch_dp(dev_priv, pipe, PCH_DP_C, TRANS_DP_PORT_SEL_C);
-	disable_pch_dp(dev_priv, pipe, PCH_DP_D, TRANS_DP_PORT_SEL_D);
-
-	reg = PCH_ADPA;
-	val = I915_READ(reg);
-	if (adpa_pipe_enabled(dev_priv, pipe, val))
-		I915_WRITE(reg, val & ~ADPA_DAC_ENABLE);
-
-	reg = PCH_LVDS;
-	val = I915_READ(reg);
-	if (lvds_pipe_enabled(dev_priv, pipe, val)) {
-		DRM_DEBUG_KMS("disable lvds on pipe %d val 0x%08x\n", pipe, val);
-		I915_WRITE(reg, val & ~LVDS_PORT_EN);
-		POSTING_READ(reg);
-		udelay(100);
-	}
-
-	disable_pch_hdmi(dev_priv, pipe, HDMIB);
-	disable_pch_hdmi(dev_priv, pipe, HDMIC);
-	disable_pch_hdmi(dev_priv, pipe, HDMID);
-}
-
 int
 intel_pin_and_fence_fb_obj(struct drm_device *dev,
 			   struct drm_i915_gem_object *obj,
@@ -2201,16 +2150,17 @@
 
 static int
 intel_pipe_set_base(struct drm_crtc *crtc, int x, int y,
-		    struct drm_framebuffer *old_fb)
+		    struct drm_framebuffer *fb)
 {
 	struct drm_device *dev = crtc->dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct drm_i915_master_private *master_priv;
 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+	struct drm_framebuffer *old_fb;
 	int ret;
 
 	/* no fb bound */
-	if (!crtc->fb) {
+	if (!fb) {
 		DRM_ERROR("No FB bound\n");
 		return 0;
 	}
@@ -2224,7 +2174,7 @@
 
 	mutex_lock(&dev->struct_mutex);
 	ret = intel_pin_and_fence_fb_obj(dev,
-					 to_intel_framebuffer(crtc->fb)->obj,
+					 to_intel_framebuffer(fb)->obj,
 					 NULL);
 	if (ret != 0) {
 		mutex_unlock(&dev->struct_mutex);
@@ -2232,17 +2182,22 @@
 		return ret;
 	}
 
-	if (old_fb)
-		intel_finish_fb(old_fb);
+	if (crtc->fb)
+		intel_finish_fb(crtc->fb);
 
-	ret = dev_priv->display.update_plane(crtc, crtc->fb, x, y);
+	ret = dev_priv->display.update_plane(crtc, fb, x, y);
 	if (ret) {
-		intel_unpin_fb_obj(to_intel_framebuffer(crtc->fb)->obj);
+		intel_unpin_fb_obj(to_intel_framebuffer(fb)->obj);
 		mutex_unlock(&dev->struct_mutex);
 		DRM_ERROR("failed to update base address\n");
 		return ret;
 	}
 
+	old_fb = crtc->fb;
+	crtc->fb = fb;
+	crtc->x = x;
+	crtc->y = y;
+
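+	/* With the new fb committed, wait a vblank before unpinning the old one. */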
 	if (old_fb) {
 		intel_wait_for_vblank(dev, intel_crtc->pipe);
 		intel_unpin_fb_obj(to_intel_framebuffer(old_fb)->obj);
@@ -2709,11 +2664,10 @@
 	DRM_DEBUG_KMS("FDI train done.\n");
 }
 
-static void ironlake_fdi_pll_enable(struct drm_crtc *crtc)
+static void ironlake_fdi_pll_enable(struct intel_crtc *intel_crtc)
 {
-	struct drm_device *dev = crtc->dev;
+	struct drm_device *dev = intel_crtc->base.dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
-	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
 	int pipe = intel_crtc->pipe;
 	u32 reg, temp;
 
@@ -2754,6 +2708,35 @@
 	}
 }
 
+static void ironlake_fdi_pll_disable(struct intel_crtc *intel_crtc)
+{
+	struct drm_device *dev = intel_crtc->base.dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	int pipe = intel_crtc->pipe;
+	u32 reg, temp;
+
+	/* Switch from PCDclk to Rawclk */
+	reg = FDI_RX_CTL(pipe);
+	temp = I915_READ(reg);
+	I915_WRITE(reg, temp & ~FDI_PCDCLK);
+
+	/* Disable CPU FDI TX PLL */
+	reg = FDI_TX_CTL(pipe);
+	temp = I915_READ(reg);
+	I915_WRITE(reg, temp & ~FDI_TX_PLL_ENABLE);
+
+	POSTING_READ(reg);
+	udelay(100);
+
+	reg = FDI_RX_CTL(pipe);
+	temp = I915_READ(reg);
+	I915_WRITE(reg, temp & ~FDI_RX_PLL_ENABLE);
+
+	/* Wait for the clocks to turn off. */
+	POSTING_READ(reg);
+	udelay(100);
+}
+
 static void cpt_phase_pointer_disable(struct drm_device *dev, int pipe)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
@@ -2838,13 +2821,13 @@
 static bool intel_crtc_driving_pch(struct drm_crtc *crtc)
 {
 	struct drm_device *dev = crtc->dev;
-	struct intel_encoder *encoder;
+	struct intel_encoder *intel_encoder;
 
 	/*
 	 * If there's a non-PCH eDP on this crtc, it must be DP_A, and that
 	 * must be driven by its own crtc; no sharing is possible.
 	 */
-	for_each_encoder_on_crtc(dev, crtc, encoder) {
+	for_each_encoder_on_crtc(dev, crtc, intel_encoder) {
 
 		/* On Haswell, LPT PCH handles the VGA connection via FDI, and Haswell
 		 * CPU handles all others */
@@ -2852,19 +2835,19 @@
 			/* It is still unclear how this will work on PPT, so throw up a warning */
 			WARN_ON(!HAS_PCH_LPT(dev));
 
-			if (encoder->type == DRM_MODE_ENCODER_DAC) {
+			if (intel_encoder->type == INTEL_OUTPUT_ANALOG) {
 				DRM_DEBUG_KMS("Haswell detected DAC encoder, assuming is PCH\n");
 				return true;
 			} else {
 				DRM_DEBUG_KMS("Haswell detected encoder %d, assuming is CPU\n",
-						encoder->type);
+					      intel_encoder->type);
 				return false;
 			}
 		}
 
-		switch (encoder->type) {
+		switch (intel_encoder->type) {
 		case INTEL_OUTPUT_EDP:
-			if (!intel_encoder_is_pch_edp(&encoder->base))
+			if (!intel_encoder_is_pch_edp(&intel_encoder->base))
 				return false;
 			continue;
 		}
@@ -3181,11 +3164,14 @@
 	struct drm_device *dev = crtc->dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+	struct intel_encoder *encoder;
 	int pipe = intel_crtc->pipe;
 	int plane = intel_crtc->plane;
 	u32 temp;
 	bool is_pch_port;
 
+	WARN_ON(!crtc->enabled);
+
 	if (intel_crtc->active)
 		return;
 
@@ -3200,10 +3186,16 @@
 
 	is_pch_port = intel_crtc_driving_pch(crtc);
 
-	if (is_pch_port)
-		ironlake_fdi_pll_enable(crtc);
-	else
-		ironlake_fdi_disable(crtc);
+	if (is_pch_port) {
+		ironlake_fdi_pll_enable(intel_crtc);
+	} else {
+		assert_fdi_tx_disabled(dev_priv, pipe);
+		assert_fdi_rx_disabled(dev_priv, pipe);
+	}
+
+	for_each_encoder_on_crtc(dev, crtc, encoder)
+		if (encoder->pre_enable)
+			encoder->pre_enable(encoder);
 
 	/* Enable panel fitting for LVDS */
 	if (dev_priv->pch_pf_size &&
@@ -3234,6 +3226,12 @@
 	mutex_unlock(&dev->struct_mutex);
 
 	intel_crtc_update_cursor(crtc, true);
+
+	for_each_encoder_on_crtc(dev, crtc, encoder)
+		encoder->enable(encoder);
+
+	if (HAS_PCH_CPT(dev))
+		intel_cpt_verify_modeset(dev, intel_crtc->pipe);
 }
 
 static void ironlake_crtc_disable(struct drm_crtc *crtc)
@@ -3241,13 +3239,18 @@
 	struct drm_device *dev = crtc->dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+	struct intel_encoder *encoder;
 	int pipe = intel_crtc->pipe;
 	int plane = intel_crtc->plane;
 	u32 reg, temp;
 
 	if (!intel_crtc->active)
 		return;
 
+	for_each_encoder_on_crtc(dev, crtc, encoder)
+		encoder->disable(encoder);
+
 	intel_crtc_wait_for_pending_flips(crtc);
 	drm_vblank_off(dev, pipe);
 	intel_crtc_update_cursor(crtc, false);
@@ -3263,14 +3266,11 @@
 	I915_WRITE(PF_CTL(pipe), 0);
 	I915_WRITE(PF_WIN_SZ(pipe), 0);
 
-	ironlake_fdi_disable(crtc);
+	for_each_encoder_on_crtc(dev, crtc, encoder)
+		if (encoder->post_disable)
+			encoder->post_disable(encoder);
 
-	/* This is a horrible layering violation; we should be doing this in
-	 * the connector/encoder ->prepare instead, but we don't always have
-	 * enough information there about the config to know whether it will
-	 * actually be necessary or just cause undesired flicker.
-	 */
-	intel_disable_pch_ports(dev_priv, pipe);
+	ironlake_fdi_disable(crtc);
 
 	intel_disable_transcoder(dev_priv, pipe);
 
@@ -3304,26 +3304,7 @@
 	/* disable PCH DPLL */
 	intel_disable_pch_pll(intel_crtc);
 
-	/* Switch from PCDclk to Rawclk */
-	reg = FDI_RX_CTL(pipe);
-	temp = I915_READ(reg);
-	I915_WRITE(reg, temp & ~FDI_PCDCLK);
-
-	/* Disable CPU FDI TX PLL */
-	reg = FDI_TX_CTL(pipe);
-	temp = I915_READ(reg);
-	I915_WRITE(reg, temp & ~FDI_TX_PLL_ENABLE);
-
-	POSTING_READ(reg);
-	udelay(100);
-
-	reg = FDI_RX_CTL(pipe);
-	temp = I915_READ(reg);
-	I915_WRITE(reg, temp & ~FDI_RX_PLL_ENABLE);
-
-	/* Wait for the clocks to turn off. */
-	POSTING_READ(reg);
-	udelay(100);
+	ironlake_fdi_pll_disable(intel_crtc);
 
 	intel_crtc->active = false;
 	intel_update_watermarks(dev);
@@ -3333,30 +3314,6 @@
 	mutex_unlock(&dev->struct_mutex);
 }
 
-static void ironlake_crtc_dpms(struct drm_crtc *crtc, int mode)
-{
-	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
-	int pipe = intel_crtc->pipe;
-	int plane = intel_crtc->plane;
-
-	/* XXX: When our outputs are all unaware of DPMS modes other than off
-	 * and on, we should map those modes to DRM_MODE_DPMS_OFF in the CRTC.
-	 */
-	switch (mode) {
-	case DRM_MODE_DPMS_ON:
-	case DRM_MODE_DPMS_STANDBY:
-	case DRM_MODE_DPMS_SUSPEND:
-		DRM_DEBUG_KMS("crtc %d/%d dpms on\n", pipe, plane);
-		ironlake_crtc_enable(crtc);
-		break;
-
-	case DRM_MODE_DPMS_OFF:
-		DRM_DEBUG_KMS("crtc %d/%d dpms off\n", pipe, plane);
-		ironlake_crtc_disable(crtc);
-		break;
-	}
-}
-
 static void ironlake_crtc_off(struct drm_crtc *crtc)
 {
 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
@@ -3386,9 +3343,12 @@
 	struct drm_device *dev = crtc->dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+	struct intel_encoder *encoder;
 	int pipe = intel_crtc->pipe;
 	int plane = intel_crtc->plane;
 
+	WARN_ON(!crtc->enabled);
+
 	if (intel_crtc->active)
 		return;
 
@@ -3405,6 +3365,9 @@
 	/* Give the overlay scaler a chance to enable if it's on this pipe */
 	intel_crtc_dpms_overlay(intel_crtc, true);
 	intel_crtc_update_cursor(crtc, true);
+
+	for_each_encoder_on_crtc(dev, crtc, encoder)
+		encoder->enable(encoder);
 }
 
 static void i9xx_crtc_disable(struct drm_crtc *crtc)
@@ -3412,12 +3375,17 @@
 	struct drm_device *dev = crtc->dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+	struct intel_encoder *encoder;
 	int pipe = intel_crtc->pipe;
 	int plane = intel_crtc->plane;
 
 	if (!intel_crtc->active)
 		return;
 
+	for_each_encoder_on_crtc(dev, crtc, encoder)
+		encoder->disable(encoder);
+
 	/* Give the overlay scaler a chance to disable if it's on this pipe */
 	intel_crtc_wait_for_pending_flips(crtc);
 	drm_vblank_off(dev, pipe);
@@ -3436,45 +3404,17 @@
 	intel_update_watermarks(dev);
 }
 
-static void i9xx_crtc_dpms(struct drm_crtc *crtc, int mode)
-{
-	/* XXX: When our outputs are all unaware of DPMS modes other than off
-	 * and on, we should map those modes to DRM_MODE_DPMS_OFF in the CRTC.
-	 */
-	switch (mode) {
-	case DRM_MODE_DPMS_ON:
-	case DRM_MODE_DPMS_STANDBY:
-	case DRM_MODE_DPMS_SUSPEND:
-		i9xx_crtc_enable(crtc);
-		break;
-	case DRM_MODE_DPMS_OFF:
-		i9xx_crtc_disable(crtc);
-		break;
-	}
-}
-
 static void i9xx_crtc_off(struct drm_crtc *crtc)
 {
 }
 
-/**
- * Sets the power management mode of the pipe and plane.
- */
-static void intel_crtc_dpms(struct drm_crtc *crtc, int mode)
+static void intel_crtc_update_sarea(struct drm_crtc *crtc,
+				    bool enabled)
 {
 	struct drm_device *dev = crtc->dev;
-	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct drm_i915_master_private *master_priv;
 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
 	int pipe = intel_crtc->pipe;
-	bool enabled;
-
-	if (intel_crtc->dpms_mode == mode)
-		return;
-
-	intel_crtc->dpms_mode = mode;
-
-	dev_priv->display.dpms(crtc, mode);
 
 	if (!dev->primary->master)
 		return;
@@ -3483,8 +3423,6 @@
 	if (!master_priv->sarea_priv)
 		return;
 
-	enabled = crtc->enabled && mode != DRM_MODE_DPMS_OFF;
-
 	switch (pipe) {
 	case 0:
 		master_priv->sarea_priv->pipeA_w = enabled ? crtc->mode.hdisplay : 0;
@@ -3500,13 +3438,42 @@
 	}
 }
 
-static void intel_crtc_disable(struct drm_crtc *crtc)
+/**
+ * Sets the power management mode of the pipe and plane.
+ */
+void intel_crtc_update_dpms(struct drm_crtc *crtc)
 {
-	struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private;
 	struct drm_device *dev = crtc->dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct intel_encoder *intel_encoder;
+	bool enable = false;
 
-	crtc_funcs->dpms(crtc, DRM_MODE_DPMS_OFF);
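+	/* The pipe needs to run iff any encoder on it still has active connectors. */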
+	for_each_encoder_on_crtc(dev, crtc, intel_encoder)
+		enable |= intel_encoder->connectors_active;
+
+	if (enable)
+		dev_priv->display.crtc_enable(crtc);
+	else
+		dev_priv->display.crtc_disable(crtc);
+
+	intel_crtc_update_sarea(crtc, enable);
+}
+
+static void intel_crtc_noop(struct drm_crtc *crtc)
+{
+}
+
+static void intel_crtc_disable(struct drm_crtc *crtc)
+{
+	struct drm_device *dev = crtc->dev;
+	struct drm_connector *connector;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+
+	/* crtc should still be enabled when we disable it. */
+	WARN_ON(!crtc->enabled);
+
+	dev_priv->display.crtc_disable(crtc);
+	intel_crtc_update_sarea(crtc, false);
 	dev_priv->display.off(crtc);
 
 	assert_plane_disabled(dev->dev_private, to_intel_crtc(crtc)->plane);
@@ -3516,55 +3483,34 @@
 		mutex_lock(&dev->struct_mutex);
 		intel_unpin_fb_obj(to_intel_framebuffer(crtc->fb)->obj);
 		mutex_unlock(&dev->struct_mutex);
+		crtc->fb = NULL;
+	}
+
+	/* Update computed state. */
+	list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
+		if (!connector->encoder || !connector->encoder->crtc)
+			continue;
+
+		if (connector->encoder->crtc != crtc)
+			continue;
+
+		connector->dpms = DRM_MODE_DPMS_OFF;
+		to_intel_encoder(connector->encoder)->connectors_active = false;
 	}
 }
 
-/* Prepare for a mode set.
- *
- * Note we could be a lot smarter here.  We need to figure out which outputs
- * will be enabled, which disabled (in short, how the config will changes)
- * and perform the minimum necessary steps to accomplish that, e.g. updating
- * watermarks, FBC configuration, making sure PLLs are programmed correctly,
- * panel fitting is in the proper state, etc.
- */
-static void i9xx_crtc_prepare(struct drm_crtc *crtc)
+void intel_modeset_disable(struct drm_device *dev)
 {
-	i9xx_crtc_disable(crtc);
+	struct drm_crtc *crtc;
+
+	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
+		if (crtc->enabled)
+			intel_crtc_disable(crtc);
+	}
 }
 
-static void i9xx_crtc_commit(struct drm_crtc *crtc)
+void intel_encoder_noop(struct drm_encoder *encoder)
 {
-	i9xx_crtc_enable(crtc);
-}
-
-static void ironlake_crtc_prepare(struct drm_crtc *crtc)
-{
-	ironlake_crtc_disable(crtc);
-}
-
-static void ironlake_crtc_commit(struct drm_crtc *crtc)
-{
-	ironlake_crtc_enable(crtc);
-}
-
-void intel_encoder_prepare(struct drm_encoder *encoder)
-{
-	struct drm_encoder_helper_funcs *encoder_funcs = encoder->helper_private;
-	/* lvds has its own version of prepare see intel_lvds_prepare */
-	encoder_funcs->dpms(encoder, DRM_MODE_DPMS_OFF);
-}
-
-void intel_encoder_commit(struct drm_encoder *encoder)
-{
-	struct drm_encoder_helper_funcs *encoder_funcs = encoder->helper_private;
-	struct drm_device *dev = encoder->dev;
-	struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc);
-
-	/* lvds has its own version of commit see intel_lvds_commit */
-	encoder_funcs->dpms(encoder, DRM_MODE_DPMS_ON);
-
-	if (HAS_PCH_CPT(dev))
-		intel_cpt_verify_modeset(dev, intel_crtc->pipe);
 }
 
 void intel_encoder_destroy(struct drm_encoder *encoder)
@@ -3575,6 +3521,92 @@
 	kfree(intel_encoder);
 }
 
+/* Simple dpms helper for encoders with just one connector, no cloning and only
+ * one kind of off state. It clamps all !ON modes to fully OFF and changes the
+ * state of the entire output pipe. */
+void intel_encoder_dpms(struct intel_encoder *encoder, int mode)
+{
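+	/* Track the connector state, then let the crtc recompute whether it must run. */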
+	if (mode == DRM_MODE_DPMS_ON) {
+		encoder->connectors_active = true;
+
+		intel_crtc_update_dpms(encoder->base.crtc);
+	} else {
+		encoder->connectors_active = false;
+
+		intel_crtc_update_dpms(encoder->base.crtc);
+	}
+}
+
+/* Cross check the actual hw state with our own modeset state tracking (and its
+ * internal consistency). */
+static void intel_connector_check_state(struct intel_connector *connector)
+{
+	if (connector->get_hw_state(connector)) {
+		struct intel_encoder *encoder = connector->encoder;
+		struct drm_crtc *crtc;
+		bool encoder_enabled;
+		enum pipe pipe;
+
+		DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n",
+			      connector->base.base.id,
+			      drm_get_connector_name(&connector->base));
+
+		WARN(connector->base.dpms == DRM_MODE_DPMS_OFF,
+		     "wrong connector dpms state\n");
+		WARN(connector->base.encoder != &encoder->base,
+		     "active connector not linked to encoder\n");
+		WARN(!encoder->connectors_active,
+		     "encoder->connectors_active not set\n");
+
+		encoder_enabled = encoder->get_hw_state(encoder, &pipe);
+		WARN(!encoder_enabled, "encoder not enabled\n");
+		if (WARN_ON(!encoder->base.crtc))
+			return;
+
+		crtc = encoder->base.crtc;
+
+		WARN(!crtc->enabled, "crtc not enabled\n");
+		WARN(!to_intel_crtc(crtc)->active, "crtc not active\n");
+		WARN(pipe != to_intel_crtc(crtc)->pipe,
+		     "encoder active on the wrong pipe\n");
+	}
+}
+
+/* Even simpler default implementation, if there's really no special case to
+ * consider. */
+void intel_connector_dpms(struct drm_connector *connector, int mode)
+{
+	struct intel_encoder *encoder = intel_attached_encoder(connector);
+
+	/* All the simple cases only support two dpms states. */
+	if (mode != DRM_MODE_DPMS_ON)
+		mode = DRM_MODE_DPMS_OFF;
+
+	if (mode == connector->dpms)
+		return;
+
+	connector->dpms = mode;
+
+	/* Only need to change hw state when actually enabled */
+	if (encoder->base.crtc)
+		intel_encoder_dpms(encoder, mode);
+	else
+		WARN_ON(encoder->connectors_active != false);
+
+	intel_modeset_check_state(connector->dev);
+}
+
+/* Simple connector->get_hw_state implementation for encoders that support only
+ * one connector and no cloning and hence the encoder state determines the state
+ * of the connector. */
+bool intel_connector_get_hw_state(struct intel_connector *connector)
+{
+	enum pipe pipe = 0;
+	struct intel_encoder *encoder = connector->encoder;
+
+	return encoder->get_hw_state(encoder, &pipe);
+}
+
 static bool intel_crtc_mode_fixup(struct drm_crtc *crtc,
 				  const struct drm_display_mode *mode,
 				  struct drm_display_mode *adjusted_mode)
@@ -3593,6 +3625,13 @@
 	if (!(adjusted_mode->private_flags & INTEL_MODE_CRTC_TIMINGS_SET))
 		drm_mode_set_crtcinfo(adjusted_mode, 0);
 
+	/* WaPruneModeWithIncorrectHsyncOffset: Cantiga+ cannot handle modes
+	 * with a hsync front porch of 0.
+	 */
+	if ((INTEL_INFO(dev)->gen > 4 || IS_G4X(dev)) &&
+		adjusted_mode->hsync_start == adjusted_mode->hdisplay)
+		return false;
+
 	return true;
 }
 
@@ -3728,6 +3767,7 @@
  * true if they don't match).
  */
 static bool intel_choose_pipe_bpp_dither(struct drm_crtc *crtc,
+					 struct drm_framebuffer *fb,
 					 unsigned int *pipe_bpp,
 					 struct drm_display_mode *mode)
 {
@@ -3797,7 +3837,7 @@
 	 * also stays within the max display bpc discovered above.
 	 */
 
-	switch (crtc->fb->depth) {
+	switch (fb->depth) {
 	case 8:
 		bpc = 8; /* since we go through a colormap */
 		break;
@@ -4216,7 +4256,7 @@
 			      struct drm_display_mode *mode,
 			      struct drm_display_mode *adjusted_mode,
 			      int x, int y,
-			      struct drm_framebuffer *old_fb)
+			      struct drm_framebuffer *fb)
 {
 	struct drm_device *dev = crtc->dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
@@ -4406,7 +4446,7 @@
 	I915_WRITE(DSPCNTR(plane), dspcntr);
 	POSTING_READ(DSPCNTR(plane));
 
-	ret = intel_pipe_set_base(crtc, x, y, old_fb);
+	ret = intel_pipe_set_base(crtc, x, y, fb);
 
 	intel_update_watermarks(dev);
 
@@ -4560,24 +4600,130 @@
 	return 120000;
 }
 
+static void ironlake_set_pipeconf(struct drm_crtc *crtc,
+				  struct drm_display_mode *adjusted_mode,
+				  bool dither)
+{
+	struct drm_i915_private *dev_priv = crtc->dev->dev_private;
+	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+	int pipe = intel_crtc->pipe;
+	uint32_t val;
+
+	val = I915_READ(PIPECONF(pipe));
+
+	val &= ~PIPE_BPC_MASK;
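+	/* intel_crtc->bpp is total bits per pixel; unknown values fall back to 8 bpc. */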
+	switch (intel_crtc->bpp) {
+	case 18:
+		val |= PIPE_6BPC;
+		break;
+	case 24:
+		val |= PIPE_8BPC;
+		break;
+	case 30:
+		val |= PIPE_10BPC;
+		break;
+	case 36:
+		val |= PIPE_12BPC;
+		break;
+	default:
+		val |= PIPE_8BPC;
+		break;
+	}
+
+	val &= ~(PIPECONF_DITHER_EN | PIPECONF_DITHER_TYPE_MASK);
+	if (dither)
+		val |= (PIPECONF_DITHER_EN | PIPECONF_DITHER_TYPE_SP);
+
+	val &= ~PIPECONF_INTERLACE_MASK;
+	if (adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE)
+		val |= PIPECONF_INTERLACED_ILK;
+	else
+		val |= PIPECONF_PROGRESSIVE;
+
+	I915_WRITE(PIPECONF(pipe), val);
+	POSTING_READ(PIPECONF(pipe));
+}
+
+static bool ironlake_compute_clocks(struct drm_crtc *crtc,
+				    struct drm_display_mode *adjusted_mode,
+				    intel_clock_t *clock,
+				    bool *has_reduced_clock,
+				    intel_clock_t *reduced_clock)
+{
+	struct drm_device *dev = crtc->dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct intel_encoder *intel_encoder;
+	int refclk;
+	const intel_limit_t *limit;
+	bool ret, is_sdvo = false, is_tv = false, is_lvds = false;
+
+	for_each_encoder_on_crtc(dev, crtc, intel_encoder) {
+		switch (intel_encoder->type) {
+		case INTEL_OUTPUT_LVDS:
+			is_lvds = true;
+			break;
+		case INTEL_OUTPUT_SDVO:
+		case INTEL_OUTPUT_HDMI:
+			is_sdvo = true;
+			if (intel_encoder->needs_tv_clock)
+				is_tv = true;
+			break;
+		case INTEL_OUTPUT_TVOUT:
+			is_tv = true;
+			break;
+		}
+	}
+
+	refclk = ironlake_get_refclk(crtc);
+
+	/*
+	 * Returns a set of divisors for the desired target clock with the given
+	 * refclk, or FALSE.  The returned values represent the clock equation:
+	 * refclk * (5 * (m1 + 2) + (m2 + 2)) / (n + 2) / p1 / p2.
+	 */
+	limit = intel_limit(crtc, refclk);
+	ret = limit->find_pll(limit, crtc, adjusted_mode->clock, refclk, NULL,
+			      clock);
+	if (!ret)
+		return false;
+
+	if (is_lvds && dev_priv->lvds_downclock_avail) {
+		/*
+		 * Ensure we match the reduced clock's P to the target clock.
+		 * If the clocks don't match, we can't switch the display clock
+		 * by using the FP0/FP1. In such case we will disable the LVDS
+		 * downclock feature.
+		 */
+		*has_reduced_clock = limit->find_pll(limit, crtc,
+						     dev_priv->lvds_downclock,
+						     refclk,
+						     clock,
+						     reduced_clock);
+	}
+
+	if (is_sdvo && is_tv)
+		i9xx_adjust_sdvo_tv_clock(adjusted_mode, clock);
+
+	return true;
+}
+
 static int ironlake_crtc_mode_set(struct drm_crtc *crtc,
 				  struct drm_display_mode *mode,
 				  struct drm_display_mode *adjusted_mode,
 				  int x, int y,
-				  struct drm_framebuffer *old_fb)
+				  struct drm_framebuffer *fb)
 {
 	struct drm_device *dev = crtc->dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
 	int pipe = intel_crtc->pipe;
 	int plane = intel_crtc->plane;
-	int refclk, num_connectors = 0;
+	int num_connectors = 0;
 	intel_clock_t clock, reduced_clock;
-	u32 dpll, fp = 0, fp2 = 0, dspcntr, pipeconf;
+	u32 dpll, fp = 0, fp2 = 0;
 	bool ok, has_reduced_clock = false, is_sdvo = false;
 	bool is_crt = false, is_lvds = false, is_tv = false, is_dp = false;
 	struct intel_encoder *encoder, *edp_encoder = NULL;
-	const intel_limit_t *limit;
 	int ret;
 	struct fdi_m_n m_n = {0};
 	u32 temp;
@@ -4619,16 +4765,8 @@
 		num_connectors++;
 	}
 
-	refclk = ironlake_get_refclk(crtc);
-
-	/*
-	 * Returns a set of divisors for the desired target clock with the given
-	 * refclk, or FALSE.  The returned values represent the clock equation:
-	 * reflck * (5 * (m1 + 2) + (m2 + 2)) / (n + 2) / p1 / p2.
-	 */
-	limit = intel_limit(crtc, refclk);
-	ok = limit->find_pll(limit, crtc, adjusted_mode->clock, refclk, NULL,
-			     &clock);
+	ok = ironlake_compute_clocks(crtc, adjusted_mode, &clock,
+				     &has_reduced_clock, &reduced_clock);
 	if (!ok) {
 		DRM_ERROR("Couldn't find PLL settings for mode!\n");
 		return -EINVAL;
@@ -4637,24 +4775,6 @@
 	/* Ensure that the cursor is valid for the new mode before changing... */
 	intel_crtc_update_cursor(crtc, true);
 
-	if (is_lvds && dev_priv->lvds_downclock_avail) {
-		/*
-		 * Ensure we match the reduced clock's P to the target clock.
-		 * If the clocks don't match, we can't switch the display clock
-		 * by using the FP0/FP1. In such case we will disable the LVDS
-		 * downclock feature.
-		*/
-		has_reduced_clock = limit->find_pll(limit, crtc,
-						    dev_priv->lvds_downclock,
-						    refclk,
-						    &clock,
-						    &reduced_clock);
-	}
-
-	if (is_sdvo && is_tv)
-		i9xx_adjust_sdvo_tv_clock(adjusted_mode, &clock);
-
-
 	/* FDI link */
 	pixel_multiplier = intel_mode_get_pixel_multiplier(adjusted_mode);
 	lane = 0;
@@ -4682,32 +4802,17 @@
 		target_clock = adjusted_mode->clock;
 
 	/* determine panel color depth */
-	temp = I915_READ(PIPECONF(pipe));
-	temp &= ~PIPE_BPC_MASK;
-	dither = intel_choose_pipe_bpp_dither(crtc, &pipe_bpp, mode);
-	switch (pipe_bpp) {
-	case 18:
-		temp |= PIPE_6BPC;
-		break;
-	case 24:
-		temp |= PIPE_8BPC;
-		break;
-	case 30:
-		temp |= PIPE_10BPC;
-		break;
-	case 36:
-		temp |= PIPE_12BPC;
-		break;
-	default:
-		WARN(1, "intel_choose_pipe_bpp returned invalid value %d\n",
-			pipe_bpp);
-		temp |= PIPE_8BPC;
-		pipe_bpp = 24;
-		break;
-	}
+	dither = intel_choose_pipe_bpp_dither(crtc, fb, &pipe_bpp, mode);
+	if (is_lvds && dev_priv->lvds_dither)
+		dither = true;
 
+	if (pipe_bpp != 18 && pipe_bpp != 24 && pipe_bpp != 30 &&
+	    pipe_bpp != 36) {
+		WARN(1, "intel_choose_pipe_bpp returned invalid value %d\n",
+		     pipe_bpp);
+		pipe_bpp = 24;
+	}
 	intel_crtc->bpp = pipe_bpp;
-	I915_WRITE(PIPECONF(pipe), temp);
 
 	if (!lane) {
 		/*
@@ -4791,12 +4896,6 @@
 	else
 		dpll |= PLL_REF_INPUT_DREFCLK;
 
-	/* setup pipeconf */
-	pipeconf = I915_READ(PIPECONF(pipe));
-
-	/* Set up the display plane register */
-	dspcntr = DISPPLANE_GAMMA_ENABLE;
-
 	DRM_DEBUG_KMS("Mode for pipe %d:\n", pipe);
 	drm_mode_debug_printmodeline(mode);
 
@@ -4856,12 +4955,6 @@
 		I915_WRITE(PCH_LVDS, temp);
 	}
 
-	pipeconf &= ~PIPECONF_DITHER_EN;
-	pipeconf &= ~PIPECONF_DITHER_TYPE_MASK;
-	if ((is_lvds && dev_priv->lvds_dither) || dither) {
-		pipeconf |= PIPECONF_DITHER_EN;
-		pipeconf |= PIPECONF_DITHER_TYPE_SP;
-	}
 	if (is_dp && !is_cpu_edp) {
 		intel_dp_set_m_n(crtc, mode, adjusted_mode);
 	} else {
@@ -4897,9 +4990,7 @@
 		}
 	}
 
-	pipeconf &= ~PIPECONF_INTERLACE_MASK;
 	if (adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE) {
-		pipeconf |= PIPECONF_INTERLACED_ILK;
 		/* the chip adds 2 halflines automatically */
 		adjusted_mode->crtc_vtotal -= 1;
 		adjusted_mode->crtc_vblank_end -= 1;
@@ -4907,7 +4998,6 @@
 			   adjusted_mode->crtc_hsync_start
 			   - adjusted_mode->crtc_htotal/2);
 	} else {
-		pipeconf |= PIPECONF_PROGRESSIVE;
 		I915_WRITE(VSYNCSHIFT(pipe), 0);
 	}
 
@@ -4945,15 +5035,15 @@
 	if (is_cpu_edp)
 		ironlake_set_pll_edp(crtc, adjusted_mode->clock);
 
-	I915_WRITE(PIPECONF(pipe), pipeconf);
-	POSTING_READ(PIPECONF(pipe));
+	ironlake_set_pipeconf(crtc, adjusted_mode, dither);
 
 	intel_wait_for_vblank(dev, pipe);
 
-	I915_WRITE(DSPCNTR(plane), dspcntr);
+	/* Set up the display plane register */
+	I915_WRITE(DSPCNTR(plane), DISPPLANE_GAMMA_ENABLE);
 	POSTING_READ(DSPCNTR(plane));
 
-	ret = intel_pipe_set_base(crtc, x, y, old_fb);
+	ret = intel_pipe_set_base(crtc, x, y, fb);
 
 	intel_update_watermarks(dev);
 
@@ -4966,7 +5056,7 @@
 			       struct drm_display_mode *mode,
 			       struct drm_display_mode *adjusted_mode,
 			       int x, int y,
-			       struct drm_framebuffer *old_fb)
+			       struct drm_framebuffer *fb)
 {
 	struct drm_device *dev = crtc->dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
@@ -4977,14 +5067,9 @@
 	drm_vblank_pre_modeset(dev, pipe);
 
 	ret = dev_priv->display.crtc_mode_set(crtc, mode, adjusted_mode,
-					      x, y, old_fb);
+					      x, y, fb);
 	drm_vblank_post_modeset(dev, pipe);
 
-	if (ret)
-		intel_crtc->dpms_mode = DRM_MODE_DPMS_OFF;
-	else
-		intel_crtc->dpms_mode = DRM_MODE_DPMS_ON;
-
 	return ret;
 }
 
@@ -5057,6 +5142,91 @@
 	I915_WRITE(G4X_AUD_CNTL_ST, i);
 }
 
+static void haswell_write_eld(struct drm_connector *connector,
+				     struct drm_crtc *crtc)
+{
+	struct drm_i915_private *dev_priv = connector->dev->dev_private;
+	uint8_t *eld = connector->eld;
+	struct drm_device *dev = crtc->dev;
+	uint32_t eldv;
+	uint32_t i;
+	int len;
+	int pipe = to_intel_crtc(crtc)->pipe;
+	int tmp;
+
+	int hdmiw_hdmiedid = HSW_AUD_EDID_DATA(pipe);
+	int aud_cntl_st = HSW_AUD_DIP_ELD_CTRL(pipe);
+	int aud_config = HSW_AUD_CFG(pipe);
+	int aud_cntrl_st2 = HSW_AUD_PIN_ELD_CP_VLD;
+
+	DRM_DEBUG_DRIVER("HDMI: Haswell Audio initialize....\n");
+
+	/* Audio output enable */
+	DRM_DEBUG_DRIVER("HDMI audio: enable codec\n");
+	tmp = I915_READ(aud_cntrl_st2);
+	tmp |= (AUDIO_OUTPUT_ENABLE_A << (pipe * 4));
+	I915_WRITE(aud_cntrl_st2, tmp);
+
+	/* Wait for 1 vertical blank */
+	intel_wait_for_vblank(dev, pipe);
+
+	/* Set ELD valid state */
+	tmp = I915_READ(aud_cntrl_st2);
+	DRM_DEBUG_DRIVER("HDMI audio: pin eld vld status=0x%8x\n", tmp);
+	tmp |= (AUDIO_ELD_VALID_A << (pipe * 4));
+	I915_WRITE(aud_cntrl_st2, tmp);
+	tmp = I915_READ(aud_cntrl_st2);
+	DRM_DEBUG_DRIVER("HDMI audio: eld vld status=0x%8x\n", tmp);
+
+	/* Enable HDMI mode */
+	tmp = I915_READ(aud_config);
+	DRM_DEBUG_DRIVER("HDMI audio: audio conf: 0x%8x\n", tmp);
+	/* clear N_programing_enable and N_value_index */
+	tmp &= ~(AUD_CONFIG_N_VALUE_INDEX | AUD_CONFIG_N_PROG_ENABLE);
+	I915_WRITE(aud_config, tmp);
+
+	DRM_DEBUG_DRIVER("ELD on pipe %c\n", pipe_name(pipe));
+
+	eldv = AUDIO_ELD_VALID_A << (pipe * 4);
+
+	if (intel_pipe_has_type(crtc, INTEL_OUTPUT_DISPLAYPORT)) {
+		DRM_DEBUG_DRIVER("ELD: DisplayPort detected\n");
+		eld[5] |= (1 << 2);	/* Conn_Type, 0x1 = DisplayPort */
+		I915_WRITE(aud_config, AUD_CONFIG_N_VALUE_INDEX); /* 0x1 = DP */
+	} else
+		I915_WRITE(aud_config, 0);
+
+	if (intel_eld_uptodate(connector,
+			       aud_cntrl_st2, eldv,
+			       aud_cntl_st, IBX_ELD_ADDRESS,
+			       hdmiw_hdmiedid))
+		return;
+
+	i = I915_READ(aud_cntrl_st2);
+	i &= ~eldv;
+	I915_WRITE(aud_cntrl_st2, i);
+
+	if (!eld[0])
+		return;
+
+	i = I915_READ(aud_cntl_st);
+	i &= ~IBX_ELD_ADDRESS;
+	I915_WRITE(aud_cntl_st, i);
+	i = (i >> 29) & DIP_PORT_SEL_MASK;		/* DIP_Port_Select, 0x1 = PortB */
+	DRM_DEBUG_DRIVER("port num:%d\n", i);
+
+	len = min_t(uint8_t, eld[2], 21);	/* 84 bytes of hw ELD buffer */
+	DRM_DEBUG_DRIVER("ELD size %d\n", len);
+	for (i = 0; i < len; i++)
+		I915_WRITE(hdmiw_hdmiedid, *((uint32_t *)eld + i));
+
+	i = I915_READ(aud_cntrl_st2);
+	i |= eldv;
+	I915_WRITE(aud_cntrl_st2, i);
+}
+
 static void ironlake_write_eld(struct drm_connector *connector,
 				     struct drm_crtc *crtc)
 {
@@ -5069,28 +5239,24 @@
 	int aud_config;
 	int aud_cntl_st;
 	int aud_cntrl_st2;
+	int pipe = to_intel_crtc(crtc)->pipe;
 
 	if (HAS_PCH_IBX(connector->dev)) {
-		hdmiw_hdmiedid = IBX_HDMIW_HDMIEDID_A;
-		aud_config = IBX_AUD_CONFIG_A;
-		aud_cntl_st = IBX_AUD_CNTL_ST_A;
+		hdmiw_hdmiedid = IBX_HDMIW_HDMIEDID(pipe);
+		aud_config = IBX_AUD_CFG(pipe);
+		aud_cntl_st = IBX_AUD_CNTL_ST(pipe);
 		aud_cntrl_st2 = IBX_AUD_CNTL_ST2;
 	} else {
-		hdmiw_hdmiedid = CPT_HDMIW_HDMIEDID_A;
-		aud_config = CPT_AUD_CONFIG_A;
-		aud_cntl_st = CPT_AUD_CNTL_ST_A;
+		hdmiw_hdmiedid = CPT_HDMIW_HDMIEDID(pipe);
+		aud_config = CPT_AUD_CFG(pipe);
+		aud_cntl_st = CPT_AUD_CNTL_ST(pipe);
 		aud_cntrl_st2 = CPT_AUD_CNTRL_ST2;
 	}
 
-	i = to_intel_crtc(crtc)->pipe;
-	hdmiw_hdmiedid += i * 0x100;
-	aud_cntl_st += i * 0x100;
-	aud_config += i * 0x100;
-
-	DRM_DEBUG_DRIVER("ELD on pipe %c\n", pipe_name(i));
+	DRM_DEBUG_DRIVER("ELD on pipe %c\n", pipe_name(pipe));
 
 	i = I915_READ(aud_cntl_st);
-	i = (i >> 29) & 0x3;		/* DIP_Port_Select, 0x1 = PortB */
+	i = (i >> 29) & DIP_PORT_SEL_MASK;		/* DIP_Port_Select, 0x1 = PortB */
 	if (!i) {
 		DRM_DEBUG_DRIVER("Audio directed to unknown port\n");
 		/* operate blindly on all ports */
@@ -5337,8 +5503,6 @@
 	uint32_t addr;
 	int ret;
 
-	DRM_DEBUG_KMS("\n");
-
 	/* if we want to turn off the cursor ignore width and height */
 	if (!handle) {
 		DRM_DEBUG_KMS("cursor off\n");
@@ -5584,17 +5748,18 @@
 	return fb;
 }
 
-bool intel_get_load_detect_pipe(struct intel_encoder *intel_encoder,
-				struct drm_connector *connector,
+bool intel_get_load_detect_pipe(struct drm_connector *connector,
 				struct drm_display_mode *mode,
 				struct intel_load_detect_pipe *old)
 {
 	struct intel_crtc *intel_crtc;
+	struct intel_encoder *intel_encoder =
+		intel_attached_encoder(connector);
 	struct drm_crtc *possible_crtc;
 	struct drm_encoder *encoder = &intel_encoder->base;
 	struct drm_crtc *crtc = NULL;
 	struct drm_device *dev = encoder->dev;
-	struct drm_framebuffer *old_fb;
+	struct drm_framebuffer *fb;
 	int i = -1;
 
 	DRM_DEBUG_KMS("[CONNECTOR:%d:%s], [ENCODER:%d:%s]\n",
@@ -5615,21 +5780,12 @@
 	if (encoder->crtc) {
 		crtc = encoder->crtc;
 
-		intel_crtc = to_intel_crtc(crtc);
-		old->dpms_mode = intel_crtc->dpms_mode;
+		old->dpms_mode = connector->dpms;
 		old->load_detect_temp = false;
 
 		/* Make sure the crtc and connector are running */
-		if (intel_crtc->dpms_mode != DRM_MODE_DPMS_ON) {
-			struct drm_encoder_helper_funcs *encoder_funcs;
-			struct drm_crtc_helper_funcs *crtc_funcs;
-
-			crtc_funcs = crtc->helper_private;
-			crtc_funcs->dpms(crtc, DRM_MODE_DPMS_ON);
-
-			encoder_funcs = encoder->helper_private;
-			encoder_funcs->dpms(encoder, DRM_MODE_DPMS_ON);
-		}
+		if (connector->dpms != DRM_MODE_DPMS_ON)
+			connector->funcs->dpms(connector, DRM_MODE_DPMS_ON);
 
 		return true;
 	}
@@ -5653,19 +5809,17 @@
 		return false;
 	}
 
-	encoder->crtc = crtc;
-	connector->encoder = encoder;
+	intel_encoder->new_crtc = to_intel_crtc(crtc);
+	to_intel_connector(connector)->new_encoder = intel_encoder;
 
 	intel_crtc = to_intel_crtc(crtc);
-	old->dpms_mode = intel_crtc->dpms_mode;
+	old->dpms_mode = connector->dpms;
 	old->load_detect_temp = true;
 	old->release_fb = NULL;
 
 	if (!mode)
 		mode = &load_detect_mode;
 
-	old_fb = crtc->fb;
-
 	/* We need a framebuffer large enough to accommodate all accesses
 	 * that the plane may generate whilst we perform load detection.
 	 * We can not rely on the fbcon either being present (we get called
@@ -5673,50 +5827,52 @@
 	 * not even exist) or that it is large enough to satisfy the
 	 * requested mode.
 	 */
-	crtc->fb = mode_fits_in_fbdev(dev, mode);
-	if (crtc->fb == NULL) {
+	fb = mode_fits_in_fbdev(dev, mode);
+	if (fb == NULL) {
 		DRM_DEBUG_KMS("creating tmp fb for load-detection\n");
-		crtc->fb = intel_framebuffer_create_for_mode(dev, mode, 24, 32);
-		old->release_fb = crtc->fb;
+		fb = intel_framebuffer_create_for_mode(dev, mode, 24, 32);
+		old->release_fb = fb;
 	} else
 		DRM_DEBUG_KMS("reusing fbdev for load-detection framebuffer\n");
-	if (IS_ERR(crtc->fb)) {
+	if (IS_ERR(fb)) {
 		DRM_DEBUG_KMS("failed to allocate framebuffer for load-detection\n");
-		crtc->fb = old_fb;
-		return false;
+		goto fail;
 	}
 
-	if (!drm_crtc_helper_set_mode(crtc, mode, 0, 0, old_fb)) {
+	if (!intel_set_mode(crtc, mode, 0, 0, fb)) {
 		DRM_DEBUG_KMS("failed to set mode on load-detect pipe\n");
 		if (old->release_fb)
 			old->release_fb->funcs->destroy(old->release_fb);
-		crtc->fb = old_fb;
-		return false;
+		goto fail;
 	}
 
 	/* let the connector get through one full cycle before testing */
 	intel_wait_for_vblank(dev, intel_crtc->pipe);
 
 	return true;
+fail:
+	connector->encoder = NULL;
+	encoder->crtc = NULL;
+	return false;
 }
 
-void intel_release_load_detect_pipe(struct intel_encoder *intel_encoder,
-				    struct drm_connector *connector,
+void intel_release_load_detect_pipe(struct drm_connector *connector,
 				    struct intel_load_detect_pipe *old)
 {
+	struct intel_encoder *intel_encoder =
+		intel_attached_encoder(connector);
 	struct drm_encoder *encoder = &intel_encoder->base;
-	struct drm_device *dev = encoder->dev;
-	struct drm_crtc *crtc = encoder->crtc;
-	struct drm_encoder_helper_funcs *encoder_funcs = encoder->helper_private;
-	struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private;
 
 	DRM_DEBUG_KMS("[CONNECTOR:%d:%s], [ENCODER:%d:%s]\n",
 		      connector->base.id, drm_get_connector_name(connector),
 		      encoder->base.id, drm_get_encoder_name(encoder));
 
 	if (old->load_detect_temp) {
-		connector->encoder = NULL;
-		drm_helper_disable_unused_functions(dev);
+		struct drm_crtc *crtc = encoder->crtc;
+
+		to_intel_connector(connector)->new_encoder = NULL;
+		intel_encoder->new_crtc = NULL;
+		intel_set_mode(crtc, NULL, 0, 0, NULL);
 
 		if (old->release_fb)
 			old->release_fb->funcs->destroy(old->release_fb);
@@ -5725,10 +5881,8 @@
 	}
 
 	/* Switch crtc and encoder back off if necessary */
-	if (old->dpms_mode != DRM_MODE_DPMS_ON) {
-		encoder_funcs->dpms(encoder, old->dpms_mode);
-		crtc_funcs->dpms(crtc, old->dpms_mode);
-	}
+	if (old->dpms_mode != DRM_MODE_DPMS_ON)
+		connector->funcs->dpms(connector, old->dpms_mode);
 }
 
 /* Returns the clock of the currently programmed mode of the given pipe. */
@@ -5850,46 +6004,6 @@
 	return mode;
 }
 
-#define GPU_IDLE_TIMEOUT 500 /* ms */
-
-/* When this timer fires, we've been idle for awhile */
-static void intel_gpu_idle_timer(unsigned long arg)
-{
-	struct drm_device *dev = (struct drm_device *)arg;
-	drm_i915_private_t *dev_priv = dev->dev_private;
-
-	if (!list_empty(&dev_priv->mm.active_list)) {
-		/* Still processing requests, so just re-arm the timer. */
-		mod_timer(&dev_priv->idle_timer, jiffies +
-			  msecs_to_jiffies(GPU_IDLE_TIMEOUT));
-		return;
-	}
-
-	dev_priv->busy = false;
-	queue_work(dev_priv->wq, &dev_priv->idle_work);
-}
-
-#define CRTC_IDLE_TIMEOUT 1000 /* ms */
-
-static void intel_crtc_idle_timer(unsigned long arg)
-{
-	struct intel_crtc *intel_crtc = (struct intel_crtc *)arg;
-	struct drm_crtc *crtc = &intel_crtc->base;
-	drm_i915_private_t *dev_priv = crtc->dev->dev_private;
-	struct intel_framebuffer *intel_fb;
-
-	intel_fb = to_intel_framebuffer(crtc->fb);
-	if (intel_fb && intel_fb->obj->active) {
-		/* The framebuffer is still being accessed by the GPU. */
-		mod_timer(&intel_crtc->idle_timer, jiffies +
-			  msecs_to_jiffies(CRTC_IDLE_TIMEOUT));
-		return;
-	}
-
-	intel_crtc->busy = false;
-	queue_work(dev_priv->wq, &dev_priv->idle_work);
-}
-
 static void intel_increase_pllclock(struct drm_crtc *crtc)
 {
 	struct drm_device *dev = crtc->dev;
@@ -5919,10 +6033,6 @@
 		if (dpll & DISPLAY_RATE_SELECT_FPA1)
 			DRM_DEBUG_DRIVER("failed to upclock LVDS!\n");
 	}
-
-	/* Schedule downclock */
-	mod_timer(&intel_crtc->idle_timer, jiffies +
-		  msecs_to_jiffies(CRTC_IDLE_TIMEOUT));
 }
 
 static void intel_decrease_pllclock(struct drm_crtc *crtc)
@@ -5961,89 +6071,46 @@
 
 }
 
-/**
- * intel_idle_update - adjust clocks for idleness
- * @work: work struct
- *
- * Either the GPU or display (or both) went idle.  Check the busy status
- * here and adjust the CRTC and GPU clocks as necessary.
- */
-static void intel_idle_update(struct work_struct *work)
+void intel_mark_busy(struct drm_device *dev)
 {
-	drm_i915_private_t *dev_priv = container_of(work, drm_i915_private_t,
-						    idle_work);
-	struct drm_device *dev = dev_priv->dev;
+	i915_update_gfx_val(dev->dev_private);
+}
+
+void intel_mark_idle(struct drm_device *dev)
+{
+}
+
+void intel_mark_fb_busy(struct drm_i915_gem_object *obj)
+{
+	struct drm_device *dev = obj->base.dev;
 	struct drm_crtc *crtc;
-	struct intel_crtc *intel_crtc;
 
 	if (!i915_powersave)
 		return;
 
-	mutex_lock(&dev->struct_mutex);
-
-	i915_update_gfx_val(dev_priv);
-
 	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
-		/* Skip inactive CRTCs */
 		if (!crtc->fb)
 			continue;
 
-		intel_crtc = to_intel_crtc(crtc);
-		if (!intel_crtc->busy)
-			intel_decrease_pllclock(crtc);
+		if (to_intel_framebuffer(crtc->fb)->obj == obj)
+			intel_increase_pllclock(crtc);
 	}
-
-
-	mutex_unlock(&dev->struct_mutex);
 }
 
-/**
- * intel_mark_busy - mark the GPU and possibly the display busy
- * @dev: drm device
- * @obj: object we're operating on
- *
- * Callers can use this function to indicate that the GPU is busy processing
- * commands.  If @obj matches one of the CRTC objects (i.e. it's a scanout
- * buffer), we'll also mark the display as busy, so we know to increase its
- * clock frequency.
- */
-void intel_mark_busy(struct drm_device *dev, struct drm_i915_gem_object *obj)
+void intel_mark_fb_idle(struct drm_i915_gem_object *obj)
 {
-	drm_i915_private_t *dev_priv = dev->dev_private;
-	struct drm_crtc *crtc = NULL;
-	struct intel_framebuffer *intel_fb;
-	struct intel_crtc *intel_crtc;
+	struct drm_device *dev = obj->base.dev;
+	struct drm_crtc *crtc;
 
-	if (!drm_core_check_feature(dev, DRIVER_MODESET))
-		return;
-
-	if (!dev_priv->busy) {
-		intel_sanitize_pm(dev);
-		dev_priv->busy = true;
-	} else
-		mod_timer(&dev_priv->idle_timer, jiffies +
-			  msecs_to_jiffies(GPU_IDLE_TIMEOUT));
-
-	if (obj == NULL)
+	if (!i915_powersave)
 		return;
 
 	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
 		if (!crtc->fb)
 			continue;
 
-		intel_crtc = to_intel_crtc(crtc);
-		intel_fb = to_intel_framebuffer(crtc->fb);
-		if (intel_fb->obj == obj) {
-			if (!intel_crtc->busy) {
-				/* Non-busy -> busy, upclock */
-				intel_increase_pllclock(crtc);
-				intel_crtc->busy = true;
-			} else {
-				/* Busy -> busy, put off timer */
-				mod_timer(&intel_crtc->idle_timer, jiffies +
-					  msecs_to_jiffies(CRTC_IDLE_TIMEOUT));
-			}
-		}
+		if (to_intel_framebuffer(crtc->fb)->obj == obj)
+			intel_decrease_pllclock(crtc);
 	}
 }
 
@@ -6394,7 +6461,7 @@
 	default:
 		WARN_ONCE(1, "unknown plane in flip command\n");
 		ret = -ENODEV;
-		goto err;
+		goto err_unpin;
 	}
 
 	ret = intel_ring_begin(ring, 4);
@@ -6502,7 +6569,7 @@
 		goto cleanup_pending;
 
 	intel_disable_fbc(dev);
-	intel_mark_busy(dev, obj);
+	intel_mark_fb_busy(obj);
 	mutex_unlock(&dev->struct_mutex);
 
 	trace_i915_flip_request(intel_crtc->plane, obj);
@@ -6527,81 +6594,807 @@
 	return ret;
 }
 
-static void intel_sanitize_modesetting(struct drm_device *dev,
-				       int pipe, int plane)
-{
-	struct drm_i915_private *dev_priv = dev->dev_private;
-	u32 reg, val;
-	int i;
-
-	/* Clear any frame start delays used for debugging left by the BIOS */
-	for_each_pipe(i) {
-		reg = PIPECONF(i);
-		I915_WRITE(reg, I915_READ(reg) & ~PIPECONF_FRAME_START_DELAY_MASK);
-	}
-
-	if (HAS_PCH_SPLIT(dev))
-		return;
-
-	/* Who knows what state these registers were left in by the BIOS or
-	 * grub?
-	 *
-	 * If we leave the registers in a conflicting state (e.g. with the
-	 * display plane reading from the other pipe than the one we intend
-	 * to use) then when we attempt to teardown the active mode, we will
-	 * not disable the pipes and planes in the correct order -- leaving
-	 * a plane reading from a disabled pipe and possibly leading to
-	 * undefined behaviour.
-	 */
-
-	reg = DSPCNTR(plane);
-	val = I915_READ(reg);
-
-	if ((val & DISPLAY_PLANE_ENABLE) == 0)
-		return;
-	if (!!(val & DISPPLANE_SEL_PIPE_MASK) == pipe)
-		return;
-
-	/* This display plane is active and attached to the other CPU pipe. */
-	pipe = !pipe;
-
-	/* Disable the plane and wait for it to stop reading from the pipe. */
-	intel_disable_plane(dev_priv, plane, pipe);
-	intel_disable_pipe(dev_priv, pipe);
-}
-
-static void intel_crtc_reset(struct drm_crtc *crtc)
-{
-	struct drm_device *dev = crtc->dev;
-	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
-
-	/* Reset flags back to the 'unknown' status so that they
-	 * will be correctly set on the initial modeset.
-	 */
-	intel_crtc->dpms_mode = -1;
-
-	/* We need to fix up any BIOS configuration that conflicts with
-	 * our expectations.
-	 */
-	intel_sanitize_modesetting(dev, intel_crtc->pipe, intel_crtc->plane);
-}
-
 static struct drm_crtc_helper_funcs intel_helper_funcs = {
-	.dpms = intel_crtc_dpms,
-	.mode_fixup = intel_crtc_mode_fixup,
-	.mode_set = intel_crtc_mode_set,
-	.mode_set_base = intel_pipe_set_base,
 	.mode_set_base_atomic = intel_pipe_set_base_atomic,
 	.load_lut = intel_crtc_load_lut,
-	.disable = intel_crtc_disable,
+	.disable = intel_crtc_noop,
 };
 
+bool intel_encoder_check_is_cloned(struct intel_encoder *encoder)
+{
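+	/* An encoder is cloned if any other encoder is staged on the same crtc. */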
+	struct intel_encoder *other_encoder;
+	struct drm_crtc *crtc = &encoder->new_crtc->base;
+
+	if (WARN_ON(!crtc))
+		return false;
+
+	list_for_each_entry(other_encoder,
+			    &crtc->dev->mode_config.encoder_list,
+			    base.head) {
+
+		if (&other_encoder->new_crtc->base != crtc ||
+		    encoder == other_encoder)
+			continue;
+		else
+			return true;
+	}
+
+	return false;
+}
+
+static bool intel_encoder_crtc_ok(struct drm_encoder *encoder,
+				  struct drm_crtc *crtc)
+{
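+	/* possible_crtcs is a bitmask indexed by the crtc's position in the mode_config list. */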
+	struct drm_device *dev;
+	struct drm_crtc *tmp;
+	int crtc_mask = 1;
+
+	WARN(!crtc, "checking null crtc?\n");
+
+	dev = crtc->dev;
+
+	list_for_each_entry(tmp, &dev->mode_config.crtc_list, head) {
+		if (tmp == crtc)
+			break;
+		crtc_mask <<= 1;
+	}
+
+	if (encoder->possible_crtcs & crtc_mask)
+		return true;
+	return false;
+}
+
+/**
+ * intel_modeset_update_staged_output_state
+ *
+ * Updates the staged output configuration state, e.g. after we've read out the
+ * current hw state.
+ */
+static void intel_modeset_update_staged_output_state(struct drm_device *dev)
+{
+	struct intel_encoder *encoder;
+	struct intel_connector *connector;
+
+	list_for_each_entry(connector, &dev->mode_config.connector_list,
+			    base.head) {
+		connector->new_encoder =
+			to_intel_encoder(connector->base.encoder);
+	}
+
+	list_for_each_entry(encoder, &dev->mode_config.encoder_list,
+			    base.head) {
+		encoder->new_crtc =
+			to_intel_crtc(encoder->base.crtc);
+	}
+}
+
+/**
+ * intel_modeset_commit_output_state
+ *
+ * This function copies the staged display pipe configuration to the real one.
+ */
+static void intel_modeset_commit_output_state(struct drm_device *dev)
+{
+	struct intel_encoder *encoder;
+	struct intel_connector *connector;
+
+	list_for_each_entry(connector, &dev->mode_config.connector_list,
+			    base.head) {
+		connector->base.encoder = &connector->new_encoder->base;
+	}
+
+	list_for_each_entry(encoder, &dev->mode_config.encoder_list,
+			    base.head) {
+		encoder->base.crtc = &encoder->new_crtc->base;
+	}
+}
+
+static struct drm_display_mode *
+intel_modeset_adjusted_mode(struct drm_crtc *crtc,
+			    struct drm_display_mode *mode)
+{
+	struct drm_device *dev = crtc->dev;
+	struct drm_display_mode *adjusted_mode;
+	struct drm_encoder_helper_funcs *encoder_funcs;
+	struct intel_encoder *encoder;
+
+	adjusted_mode = drm_mode_duplicate(dev, mode);
+	if (!adjusted_mode)
+		return ERR_PTR(-ENOMEM);
+
+	/* Pass our mode to the connectors and the CRTC to give them a chance to
+	 * adjust it according to limitations or connector properties, and also
+	 * a chance to reject the mode entirely.
+	 */
+	list_for_each_entry(encoder, &dev->mode_config.encoder_list,
+			    base.head) {
+
+		if (&encoder->new_crtc->base != crtc)
+			continue;
+		encoder_funcs = encoder->base.helper_private;
+		if (!(encoder_funcs->mode_fixup(&encoder->base, mode,
+						adjusted_mode))) {
+			DRM_DEBUG_KMS("Encoder fixup failed\n");
+			goto fail;
+		}
+	}
+
+	if (!(intel_crtc_mode_fixup(crtc, mode, adjusted_mode))) {
+		DRM_DEBUG_KMS("CRTC fixup failed\n");
+		goto fail;
+	}
+	DRM_DEBUG_KMS("[CRTC:%d]\n", crtc->base.id);
+
+	return adjusted_mode;
+fail:
+	drm_mode_destroy(dev, adjusted_mode);
+	return ERR_PTR(-EINVAL);
+}
+
+/* Computes which crtcs are affected and sets the relevant bits in the mask. For
+ * simplicity we use the crtc's pipe number (because it's easier to obtain). */
+static void
+intel_modeset_affected_pipes(struct drm_crtc *crtc, unsigned *modeset_pipes,
+			     unsigned *prepare_pipes, unsigned *disable_pipes)
+{
+	struct intel_crtc *intel_crtc;
+	struct drm_device *dev = crtc->dev;
+	struct intel_encoder *encoder;
+	struct intel_connector *connector;
+	struct drm_crtc *tmp_crtc;
+
+	*disable_pipes = *modeset_pipes = *prepare_pipes = 0;
+
+	/* Check which crtcs have changed outputs connected to them, these need
+	 * to be part of the prepare_pipes mask. We don't (yet) support global
+	 * modeset across multiple crtcs, so modeset_pipes will only have one
+	 * bit set at most. */
+	list_for_each_entry(connector, &dev->mode_config.connector_list,
+			    base.head) {
+		if (connector->base.encoder == &connector->new_encoder->base)
+			continue;
+
+		if (connector->base.encoder) {
+			tmp_crtc = connector->base.encoder->crtc;
+
+			*prepare_pipes |= 1 << to_intel_crtc(tmp_crtc)->pipe;
+		}
+
+		if (connector->new_encoder)
+			*prepare_pipes |=
+				1 << connector->new_encoder->new_crtc->pipe;
+	}
+
+	list_for_each_entry(encoder, &dev->mode_config.encoder_list,
+			    base.head) {
+		if (encoder->base.crtc == &encoder->new_crtc->base)
+			continue;
+
+		if (encoder->base.crtc) {
+			tmp_crtc = encoder->base.crtc;
+
+			*prepare_pipes |= 1 << to_intel_crtc(tmp_crtc)->pipe;
+		}
+
+		if (encoder->new_crtc)
+			*prepare_pipes |= 1 << encoder->new_crtc->pipe;
+	}
+
+	/* Check for any pipes that will be fully disabled ... */
+	list_for_each_entry(intel_crtc, &dev->mode_config.crtc_list,
+			    base.head) {
+		bool used = false;
+
+		/* Don't try to disable disabled crtcs. */
+		if (!intel_crtc->base.enabled)
+			continue;
+
+		list_for_each_entry(encoder, &dev->mode_config.encoder_list,
+				    base.head) {
+			if (encoder->new_crtc == intel_crtc)
+				used = true;
+		}
+
+		if (!used)
+			*disable_pipes |= 1 << intel_crtc->pipe;
+	}
+
+	/* set_mode is also used to update properties on live display pipes. */
+	intel_crtc = to_intel_crtc(crtc);
+	if (crtc->enabled)
+		*prepare_pipes |= 1 << intel_crtc->pipe;
+
+	/* We only support modesets on a single crtc, hence we need to do that
+	 * only for the passed-in crtc iff we change anything other than just
+	 * disabling crtcs.
+	 *
+	 * This is actually not true; to be fully compatible with the old crtc
+	 * helper we automatically disable _any_ output (i.e. it doesn't need to
+	 * be connected to the crtc we're modesetting on) if it's disconnected.
+	 * Which is a rather nutty api (since changing the output configuration
+	 * without userspace's explicit request can lead to confusion), but
+	 * alas. Hence we currently need to modeset on all pipes we prepare. */
+	if (*prepare_pipes)
+		*modeset_pipes = *prepare_pipes;
+
+	/* ... and mask these out. */
+	*modeset_pipes &= ~(*disable_pipes);
+	*prepare_pipes &= ~(*disable_pipes);
+}
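
The returned masks obey two invariants: disable_pipes is disjoint from the
other two masks, and modeset_pipes is a subset of prepare_pipes. A hedged
sketch of a caller asserting that contract (the WARN_ONs are illustrative
and not part of this patch):

    unsigned int modeset_pipes, prepare_pipes, disable_pipes;

    intel_modeset_affected_pipes(crtc, &modeset_pipes,
                                 &prepare_pipes, &disable_pipes);

    /* Disabled pipes were masked out of the other two masks ... */
    WARN_ON(modeset_pipes & disable_pipes);
    WARN_ON(prepare_pipes & disable_pipes);
    /* ... and a full modeset implies preparation. */
    WARN_ON(modeset_pipes & ~prepare_pipes);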
+
+static bool intel_crtc_in_use(struct drm_crtc *crtc)
+{
+	struct drm_encoder *encoder;
+	struct drm_device *dev = crtc->dev;
+
+	list_for_each_entry(encoder, &dev->mode_config.encoder_list, head)
+		if (encoder->crtc == crtc)
+			return true;
+
+	return false;
+}
+
+static void
+intel_modeset_update_state(struct drm_device *dev, unsigned prepare_pipes)
+{
+	struct intel_encoder *intel_encoder;
+	struct intel_crtc *intel_crtc;
+	struct drm_connector *connector;
+
+	list_for_each_entry(intel_encoder, &dev->mode_config.encoder_list,
+			    base.head) {
+		if (!intel_encoder->base.crtc)
+			continue;
+
+		intel_crtc = to_intel_crtc(intel_encoder->base.crtc);
+
+		if (prepare_pipes & (1 << intel_crtc->pipe))
+			intel_encoder->connectors_active = false;
+	}
+
+	intel_modeset_commit_output_state(dev);
+
+	/* Update computed state. */
+	list_for_each_entry(intel_crtc, &dev->mode_config.crtc_list,
+			    base.head) {
+		intel_crtc->base.enabled = intel_crtc_in_use(&intel_crtc->base);
+	}
+
+	list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
+		if (!connector->encoder || !connector->encoder->crtc)
+			continue;
+
+		intel_crtc = to_intel_crtc(connector->encoder->crtc);
+
+		if (prepare_pipes & (1 << intel_crtc->pipe)) {
+			struct drm_property *dpms_property =
+				dev->mode_config.dpms_property;
+
+			connector->dpms = DRM_MODE_DPMS_ON;
+			drm_connector_property_set_value(connector,
+							 dpms_property,
+							 DRM_MODE_DPMS_ON);
+
+			intel_encoder = to_intel_encoder(connector->encoder);
+			intel_encoder->connectors_active = true;
+		}
+	}
+}
+
+#define for_each_intel_crtc_masked(dev, mask, intel_crtc) \
+	list_for_each_entry((intel_crtc), \
+			    &(dev)->mode_config.crtc_list, \
+			    base.head) \
+		if (mask & (1 << (intel_crtc)->pipe))
+
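The macro is just a filtered list walk, so it composes with a statement or
block like any for loop; intel_set_mode() below uses it, for example, to
shut down every pipe marked in disable_pipes:

    for_each_intel_crtc_masked(dev, disable_pipes, intel_crtc)
            intel_crtc_disable(&intel_crtc->base);
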
+void
+intel_modeset_check_state(struct drm_device *dev)
+{
+	struct intel_crtc *crtc;
+	struct intel_encoder *encoder;
+	struct intel_connector *connector;
+
+	list_for_each_entry(connector, &dev->mode_config.connector_list,
+			    base.head) {
+		/* This also checks the encoder/connector hw state with the
+		 * ->get_hw_state callbacks. */
+		intel_connector_check_state(connector);
+
+		WARN(&connector->new_encoder->base != connector->base.encoder,
+		     "connector's staged encoder doesn't match current encoder\n");
+	}
+
+	list_for_each_entry(encoder, &dev->mode_config.encoder_list,
+			    base.head) {
+		bool enabled = false;
+		bool active = false;
+		enum pipe pipe, tracked_pipe;
+
+		DRM_DEBUG_KMS("[ENCODER:%d:%s]\n",
+			      encoder->base.base.id,
+			      drm_get_encoder_name(&encoder->base));
+
+		WARN(&encoder->new_crtc->base != encoder->base.crtc,
+		     "encoder's staged crtc doesn't match current crtc\n");
+		WARN(encoder->connectors_active && !encoder->base.crtc,
+		     "encoder's connectors_active set, but no crtc\n");
+
+		list_for_each_entry(connector, &dev->mode_config.connector_list,
+				    base.head) {
+			if (connector->base.encoder != &encoder->base)
+				continue;
+			enabled = true;
+			if (connector->base.dpms != DRM_MODE_DPMS_OFF)
+				active = true;
+		}
+		WARN(!!encoder->base.crtc != enabled,
+		     "encoder's enabled state mismatch "
+		     "(expected %i, found %i)\n",
+		     !!encoder->base.crtc, enabled);
+		WARN(active && !encoder->base.crtc,
+		     "active encoder with no crtc\n");
+
+		WARN(encoder->connectors_active != active,
+		     "encoder's computed active state doesn't match tracked active state "
+		     "(expected %i, found %i)\n", active, encoder->connectors_active);
+
+		active = encoder->get_hw_state(encoder, &pipe);
+		WARN(active != encoder->connectors_active,
+		     "encoder's hw state doesn't match sw tracking "
+		     "(expected %i, found %i)\n",
+		     encoder->connectors_active, active);
+
+		if (!encoder->base.crtc)
+			continue;
+
+		tracked_pipe = to_intel_crtc(encoder->base.crtc)->pipe;
+		WARN(active && pipe != tracked_pipe,
+		     "active encoder's pipe doesn't match "
+		     "(expected %i, found %i)\n",
+		     tracked_pipe, pipe);
+	}
+
+	list_for_each_entry(crtc, &dev->mode_config.crtc_list,
+			    base.head) {
+		bool enabled = false;
+		bool active = false;
+
+		DRM_DEBUG_KMS("[CRTC:%d]\n",
+			      crtc->base.base.id);
+
+		WARN(crtc->active && !crtc->base.enabled,
+		     "active crtc, but not enabled in sw tracking\n");
+
+		list_for_each_entry(encoder, &dev->mode_config.encoder_list,
+				    base.head) {
+			if (encoder->base.crtc != &crtc->base)
+				continue;
+			enabled = true;
+			if (encoder->connectors_active)
+				active = true;
+		}
+		WARN(active != crtc->active,
+		     "crtc's computed active state doesn't match tracked active state "
+		     "(expected %i, found %i)\n", active, crtc->active);
+		WARN(enabled != crtc->base.enabled,
+		     "crtc's computed enabled state doesn't match tracked enabled state "
+		     "(expected %i, found %i)\n", enabled, crtc->base.enabled);
+
+		assert_pipe(dev->dev_private, crtc->pipe, crtc->active);
+	}
+}
+
+bool intel_set_mode(struct drm_crtc *crtc,
+		    struct drm_display_mode *mode,
+		    int x, int y, struct drm_framebuffer *fb)
+{
+	struct drm_device *dev = crtc->dev;
+	drm_i915_private_t *dev_priv = dev->dev_private;
+	struct drm_display_mode *adjusted_mode, saved_mode, saved_hwmode;
+	struct drm_encoder_helper_funcs *encoder_funcs;
+	struct drm_encoder *encoder;
+	struct intel_crtc *intel_crtc;
+	unsigned disable_pipes, prepare_pipes, modeset_pipes;
+	bool ret = true;
+
+	intel_modeset_affected_pipes(crtc, &modeset_pipes,
+				     &prepare_pipes, &disable_pipes);
+
+	DRM_DEBUG_KMS("set mode pipe masks: modeset: %x, prepare: %x, disable: %x\n",
+		      modeset_pipes, prepare_pipes, disable_pipes);
+
+	for_each_intel_crtc_masked(dev, disable_pipes, intel_crtc)
+		intel_crtc_disable(&intel_crtc->base);
+
+	saved_hwmode = crtc->hwmode;
+	saved_mode = crtc->mode;
+
+	/* Hack: Because we don't (yet) support global modeset on multiple
+	 * crtcs, we don't keep track of the new mode for more than one crtc.
+	 * Hence simply check whether any bit is set in modeset_pipes in all the
+	 * pieces of code that are not yet converted to deal with multiple crtcs
+	 * changing their mode at the same time. */
+	adjusted_mode = NULL;
+	if (modeset_pipes) {
+		adjusted_mode = intel_modeset_adjusted_mode(crtc, mode);
+		if (IS_ERR(adjusted_mode))
+			return false;
+	}
+
+	for_each_intel_crtc_masked(dev, prepare_pipes, intel_crtc) {
+		if (intel_crtc->base.enabled)
+			dev_priv->display.crtc_disable(&intel_crtc->base);
+	}
+
+	/* crtc->mode is already used by the ->mode_set callbacks, hence we need
+	 * to set it here already despite that we pass it down the callchain.
+	 */
+	if (modeset_pipes)
+		crtc->mode = *mode;
+
+	/* Only after disabling all output pipelines that will be changed can we
+	 * update the output configuration. */
+	intel_modeset_update_state(dev, prepare_pipes);
+
+	/* Set up the DPLL and any encoders state that needs to adjust or depend
+	 * on the DPLL.
+	 */
+	for_each_intel_crtc_masked(dev, modeset_pipes, intel_crtc) {
+		ret = !intel_crtc_mode_set(&intel_crtc->base,
+					   mode, adjusted_mode,
+					   x, y, fb);
+		if (!ret)
+			goto done;
+
+		list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
+			if (encoder->crtc != &intel_crtc->base)
+				continue;
+
+			DRM_DEBUG_KMS("[ENCODER:%d:%s] set [MODE:%d:%s]\n",
+				encoder->base.id, drm_get_encoder_name(encoder),
+				mode->base.id, mode->name);
+			encoder_funcs = encoder->helper_private;
+			encoder_funcs->mode_set(encoder, mode, adjusted_mode);
+		}
+	}
+
+	/* Now enable the clocks, plane, pipe, and connectors that we set up. */
+	for_each_intel_crtc_masked(dev, prepare_pipes, intel_crtc)
+		dev_priv->display.crtc_enable(&intel_crtc->base);
+
+	if (modeset_pipes) {
+		/* Store real post-adjustment hardware mode. */
+		crtc->hwmode = *adjusted_mode;
+
+		/* Calculate and store various constants which
+		 * are later needed by vblank and swap-completion
+		 * timestamping. They are derived from true hwmode.
+		 */
+		drm_calc_timestamping_constants(crtc);
+	}
+
+	/* FIXME: add subpixel order */
+done:
+	drm_mode_destroy(dev, adjusted_mode);
+	if (!ret && crtc->enabled) {
+		crtc->hwmode = saved_hwmode;
+		crtc->mode = saved_mode;
+	} else {
+		intel_modeset_check_state(dev);
+	}
+
+	return ret;
+}
+
+#undef for_each_intel_crtc_masked
+
+static void intel_set_config_free(struct intel_set_config *config)
+{
+	if (!config)
+		return;
+
+	kfree(config->save_connector_encoders);
+	kfree(config->save_encoder_crtcs);
+	kfree(config);
+}
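
Because kfree(NULL) is a no-op and the function itself tolerates a NULL
config, every failure path can funnel through the same teardown label;
intel_crtc_set_config() below relies on exactly that:

    ret = -ENOMEM;
    config = kzalloc(sizeof(*config), GFP_KERNEL);
    if (!config)
            goto out_config;

    /* ... further setup; any failure jumps to out_config ... */

    out_config:
            intel_set_config_free(config); /* safe even when config is NULL */
            return ret;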
+
+static int intel_set_config_save_state(struct drm_device *dev,
+				       struct intel_set_config *config)
+{
+	struct drm_encoder *encoder;
+	struct drm_connector *connector;
+	int count;
+
+	config->save_encoder_crtcs =
+		kcalloc(dev->mode_config.num_encoder,
+			sizeof(struct drm_crtc *), GFP_KERNEL);
+	if (!config->save_encoder_crtcs)
+		return -ENOMEM;
+
+	config->save_connector_encoders =
+		kcalloc(dev->mode_config.num_connector,
+			sizeof(struct drm_encoder *), GFP_KERNEL);
+	if (!config->save_connector_encoders)
+		return -ENOMEM;
+
+	/* Copy data. Note that driver private data is not affected.
+	 * Should anything bad happen only the expected state is
+	 * restored, not the driver's personal bookkeeping.
+	 */
+	count = 0;
+	list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
+		config->save_encoder_crtcs[count++] = encoder->crtc;
+	}
+
+	count = 0;
+	list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
+		config->save_connector_encoders[count++] = connector->encoder;
+	}
+
+	return 0;
+}
+
+static void intel_set_config_restore_state(struct drm_device *dev,
+					   struct intel_set_config *config)
+{
+	struct intel_encoder *encoder;
+	struct intel_connector *connector;
+	int count;
+
+	count = 0;
+	list_for_each_entry(encoder, &dev->mode_config.encoder_list, base.head) {
+		encoder->new_crtc =
+			to_intel_crtc(config->save_encoder_crtcs[count++]);
+	}
+
+	count = 0;
+	list_for_each_entry(connector, &dev->mode_config.connector_list, base.head) {
+		connector->new_encoder =
+			to_intel_encoder(config->save_connector_encoders[count++]);
+	}
+}
+
+static void
+intel_set_config_compute_mode_changes(struct drm_mode_set *set,
+				      struct intel_set_config *config)
+{
+	/* We should be able to check here if the fb has the same properties
+	 * and then just flip_or_move it */
+	if (set->crtc->fb != set->fb) {
+		/* If we have no fb then treat it as a full mode set */
+		if (set->crtc->fb == NULL) {
+			DRM_DEBUG_KMS("crtc has no fb, full mode set\n");
+			config->mode_changed = true;
+		} else if (set->fb == NULL) {
+			config->mode_changed = true;
+		} else if (set->fb->depth != set->crtc->fb->depth) {
+			config->mode_changed = true;
+		} else if (set->fb->bits_per_pixel !=
+			   set->crtc->fb->bits_per_pixel) {
+			config->mode_changed = true;
+		} else {
+			config->fb_changed = true;
+		}
+	}
+
+	if (set->fb && (set->x != set->crtc->x || set->y != set->crtc->y))
+		config->fb_changed = true;
+
+	if (set->mode && !drm_mode_equal(set->mode, &set->crtc->mode)) {
+		DRM_DEBUG_KMS("modes are different, full mode set\n");
+		drm_mode_debug_printmodeline(&set->crtc->mode);
+		drm_mode_debug_printmodeline(set->mode);
+		config->mode_changed = true;
+	}
+}
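
The two flags then select between the heavyweight and lightweight paths,
mirroring how intel_crtc_set_config() consumes them further down:

    if (config->mode_changed) {
            /* Full modeset, including DPLL and encoder reprogramming. */
            if (!intel_set_mode(set->crtc, set->mode,
                                set->x, set->y, set->fb))
                    ret = -EINVAL;
    } else if (config->fb_changed) {
            /* Same mode, only the scanout base address changes. */
            ret = intel_pipe_set_base(set->crtc, set->x, set->y, set->fb);
    }
    /* else: nothing changed, the hardware is left untouched */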
+
+static int
+intel_modeset_stage_output_state(struct drm_device *dev,
+				 struct drm_mode_set *set,
+				 struct intel_set_config *config)
+{
+	struct drm_crtc *new_crtc;
+	struct intel_connector *connector;
+	struct intel_encoder *encoder;
+	int count, ro;
+
+	/* The upper layers ensure that we either disable a crtc or have a list
+	 * of connectors. For paranoia, double-check this. */
+	WARN_ON(!set->fb && (set->num_connectors != 0));
+	WARN_ON(set->fb && (set->num_connectors == 0));
+
+	count = 0;
+	list_for_each_entry(connector, &dev->mode_config.connector_list,
+			    base.head) {
+		/* Traverse the passed-in connector list and get encoders
+		 * for the connectors on it. */
+		for (ro = 0; ro < set->num_connectors; ro++) {
+			if (set->connectors[ro] == &connector->base) {
+				connector->new_encoder = connector->encoder;
+				break;
+			}
+		}
+
+		/* If we disable the crtc, disable all its connectors. Also, if
+		 * the connector is on the changing crtc but not on the new
+		 * connector list, disable it. */
+		if ((!set->fb || ro == set->num_connectors) &&
+		    connector->base.encoder &&
+		    connector->base.encoder->crtc == set->crtc) {
+			connector->new_encoder = NULL;
+
+			DRM_DEBUG_KMS("[CONNECTOR:%d:%s] to [NOCRTC]\n",
+				connector->base.base.id,
+				drm_get_connector_name(&connector->base));
+		}
+
+		if (&connector->new_encoder->base != connector->base.encoder) {
+			DRM_DEBUG_KMS("encoder changed, full mode switch\n");
+			config->mode_changed = true;
+		}
+
+		/* Disable all disconnected encoders. */
+		if (connector->base.status == connector_status_disconnected)
+			connector->new_encoder = NULL;
+	}
+	/* connector->new_encoder is now updated for all connectors. */
+
+	/* Update crtc of enabled connectors. */
+	count = 0;
+	list_for_each_entry(connector, &dev->mode_config.connector_list,
+			    base.head) {
+		if (!connector->new_encoder)
+			continue;
+
+		new_crtc = connector->new_encoder->base.crtc;
+
+		for (ro = 0; ro < set->num_connectors; ro++) {
+			if (set->connectors[ro] == &connector->base)
+				new_crtc = set->crtc;
+		}
+
+		/* Make sure the new CRTC will work with the encoder */
+		if (!intel_encoder_crtc_ok(&connector->new_encoder->base,
+					   new_crtc)) {
+			return -EINVAL;
+		}
+		connector->encoder->new_crtc = to_intel_crtc(new_crtc);
+
+		DRM_DEBUG_KMS("[CONNECTOR:%d:%s] to [CRTC:%d]\n",
+			connector->base.base.id,
+			drm_get_connector_name(&connector->base),
+			new_crtc->base.id);
+	}
+
+	/* Check for any encoders that need to be disabled. */
+	list_for_each_entry(encoder, &dev->mode_config.encoder_list,
+			    base.head) {
+		list_for_each_entry(connector,
+				    &dev->mode_config.connector_list,
+				    base.head) {
+			if (connector->new_encoder == encoder) {
+				WARN_ON(!connector->new_encoder->new_crtc);
+
+				goto next_encoder;
+			}
+		}
+		encoder->new_crtc = NULL;
+next_encoder:
+		/* Only now check for crtc changes so we don't miss encoders
+		 * that will be disabled. */
+		if (&encoder->new_crtc->base != encoder->base.crtc) {
+			DRM_DEBUG_KMS("crtc changed, full mode switch\n");
+			config->mode_changed = true;
+		}
+	}
+	/* Now we've also updated encoder->new_crtc for all encoders. */
+
+	return 0;
+}
+
+static int intel_crtc_set_config(struct drm_mode_set *set)
+{
+	struct drm_device *dev;
+	struct drm_mode_set save_set;
+	struct intel_set_config *config;
+	int ret;
+
+	BUG_ON(!set);
+	BUG_ON(!set->crtc);
+	BUG_ON(!set->crtc->helper_private);
+
+	if (!set->mode)
+		set->fb = NULL;
+
+	/* The fb helper likes to play gross jokes with ->mode_set_config.
+	 * Unfortunately the crtc helper doesn't do much at all for this case,
+	 * so we have to cope with this madness until the fb helper is fixed up. */
+	if (set->fb && set->num_connectors == 0)
+		return 0;
+
+	if (set->fb) {
+		DRM_DEBUG_KMS("[CRTC:%d] [FB:%d] #connectors=%d (x y) (%i %i)\n",
+				set->crtc->base.id, set->fb->base.id,
+				(int)set->num_connectors, set->x, set->y);
+	} else {
+		DRM_DEBUG_KMS("[CRTC:%d] [NOFB]\n", set->crtc->base.id);
+	}
+
+	dev = set->crtc->dev;
+
+	ret = -ENOMEM;
+	config = kzalloc(sizeof(*config), GFP_KERNEL);
+	if (!config)
+		goto out_config;
+
+	ret = intel_set_config_save_state(dev, config);
+	if (ret)
+		goto out_config;
+
+	save_set.crtc = set->crtc;
+	save_set.mode = &set->crtc->mode;
+	save_set.x = set->crtc->x;
+	save_set.y = set->crtc->y;
+	save_set.fb = set->crtc->fb;
+
+	/* Compute whether we need a full modeset, only an fb base update or no
+	 * change at all. In the future we might also check whether only the
+	 * mode changed, e.g. for LVDS where we only change the panel fitter in
+	 * such cases. */
+	intel_set_config_compute_mode_changes(set, config);
+
+	ret = intel_modeset_stage_output_state(dev, set, config);
+	if (ret)
+		goto fail;
+
+	if (config->mode_changed) {
+		if (set->mode) {
+			DRM_DEBUG_KMS("attempting to set mode from"
+					" userspace\n");
+			drm_mode_debug_printmodeline(set->mode);
+		}
+
+		if (!intel_set_mode(set->crtc, set->mode,
+				    set->x, set->y, set->fb)) {
+			DRM_ERROR("failed to set mode on [CRTC:%d]\n",
+				  set->crtc->base.id);
+			ret = -EINVAL;
+			goto fail;
+		}
+	} else if (config->fb_changed) {
+		ret = intel_pipe_set_base(set->crtc,
+					  set->x, set->y, set->fb);
+	}
+
+	intel_set_config_free(config);
+
+	return 0;
+
+fail:
+	intel_set_config_restore_state(dev, config);
+
+	/* Try to restore the config */
+	if (config->mode_changed &&
+	    !intel_set_mode(save_set.crtc, save_set.mode,
+			    save_set.x, save_set.y, save_set.fb))
+		DRM_ERROR("failed to restore config after modeset failure\n");
+
+out_config:
+	intel_set_config_free(config);
+	return ret;
+}
+
 static const struct drm_crtc_funcs intel_crtc_funcs = {
-	.reset = intel_crtc_reset,
 	.cursor_set = intel_crtc_cursor_set,
 	.cursor_move = intel_crtc_cursor_move,
 	.gamma_set = intel_crtc_gamma_set,
-	.set_config = drm_crtc_helper_set_config,
+	.set_config = intel_crtc_set_config,
 	.destroy = intel_crtc_destroy,
 	.page_flip = intel_crtc_page_flip,
 };
@@ -6655,24 +7448,9 @@
 	dev_priv->plane_to_crtc_mapping[intel_crtc->plane] = &intel_crtc->base;
 	dev_priv->pipe_to_crtc_mapping[intel_crtc->pipe] = &intel_crtc->base;
 
-	intel_crtc_reset(&intel_crtc->base);
-	intel_crtc->active = true; /* force the pipe off on setup_init_config */
 	intel_crtc->bpp = 24; /* default for pre-Ironlake */
 
-	if (HAS_PCH_SPLIT(dev)) {
-		intel_helper_funcs.prepare = ironlake_crtc_prepare;
-		intel_helper_funcs.commit = ironlake_crtc_commit;
-	} else {
-		intel_helper_funcs.prepare = i9xx_crtc_prepare;
-		intel_helper_funcs.commit = i9xx_crtc_commit;
-	}
-
 	drm_crtc_helper_add(&intel_crtc->base, &intel_helper_funcs);
-
-	intel_crtc->busy = false;
-
-	setup_timer(&intel_crtc->idle_timer, intel_crtc_idle_timer,
-		    (unsigned long)intel_crtc);
 }
 
 int intel_get_pipe_from_crtc_id(struct drm_device *dev, void *data,
@@ -6699,15 +7477,23 @@
 	return 0;
 }
 
-static int intel_encoder_clones(struct drm_device *dev, int type_mask)
+static int intel_encoder_clones(struct intel_encoder *encoder)
 {
-	struct intel_encoder *encoder;
+	struct drm_device *dev = encoder->base.dev;
+	struct intel_encoder *source_encoder;
 	int index_mask = 0;
 	int entry = 0;
 
-	list_for_each_entry(encoder, &dev->mode_config.encoder_list, base.head) {
-		if (type_mask & encoder->clone_mask)
+	list_for_each_entry(source_encoder,
+			    &dev->mode_config.encoder_list, base.head) {
+		if (encoder == source_encoder)
 			index_mask |= (1 << entry);
+
+		/* Intel hw has only one MUX where encoders could be cloned. */
+		if (encoder->cloneable && source_encoder->cloneable)
+			index_mask |= (1 << entry);
+
 		entry++;
 	}
 
@@ -6748,10 +7534,10 @@
 		dpd_is_edp = intel_dpd_is_edp(dev);
 
 		if (has_edp_a(dev))
-			intel_dp_init(dev, DP_A);
+			intel_dp_init(dev, DP_A, PORT_A);
 
 		if (dpd_is_edp && (I915_READ(PCH_DP_D) & DP_DETECTED))
-			intel_dp_init(dev, PCH_DP_D);
+			intel_dp_init(dev, PCH_DP_D, PORT_D);
 	}
 
 	intel_crt_init(dev);
@@ -6782,22 +7568,22 @@
 			/* PCH SDVOB multiplex with HDMIB */
 			found = intel_sdvo_init(dev, PCH_SDVOB, true);
 			if (!found)
-				intel_hdmi_init(dev, HDMIB);
+				intel_hdmi_init(dev, HDMIB, PORT_B);
 			if (!found && (I915_READ(PCH_DP_B) & DP_DETECTED))
-				intel_dp_init(dev, PCH_DP_B);
+				intel_dp_init(dev, PCH_DP_B, PORT_B);
 		}
 
 		if (I915_READ(HDMIC) & PORT_DETECTED)
-			intel_hdmi_init(dev, HDMIC);
+			intel_hdmi_init(dev, HDMIC, PORT_C);
 
 		if (!dpd_is_edp && I915_READ(HDMID) & PORT_DETECTED)
-			intel_hdmi_init(dev, HDMID);
+			intel_hdmi_init(dev, HDMID, PORT_D);
 
 		if (I915_READ(PCH_DP_C) & DP_DETECTED)
-			intel_dp_init(dev, PCH_DP_C);
+			intel_dp_init(dev, PCH_DP_C, PORT_C);
 
 		if (!dpd_is_edp && (I915_READ(PCH_DP_D) & DP_DETECTED))
-			intel_dp_init(dev, PCH_DP_D);
+			intel_dp_init(dev, PCH_DP_D, PORT_D);
 	} else if (IS_VALLEYVIEW(dev)) {
 		int found;
 
@@ -6805,17 +7591,17 @@
 			/* SDVOB multiplex with HDMIB */
 			found = intel_sdvo_init(dev, SDVOB, true);
 			if (!found)
-				intel_hdmi_init(dev, SDVOB);
+				intel_hdmi_init(dev, SDVOB, PORT_B);
 			if (!found && (I915_READ(DP_B) & DP_DETECTED))
-				intel_dp_init(dev, DP_B);
+				intel_dp_init(dev, DP_B, PORT_B);
 		}
 
 		if (I915_READ(SDVOC) & PORT_DETECTED)
-			intel_hdmi_init(dev, SDVOC);
+			intel_hdmi_init(dev, SDVOC, PORT_C);
 
 		/* Shares lanes with HDMI on SDVOC */
 		if (I915_READ(DP_C) & DP_DETECTED)
-			intel_dp_init(dev, DP_C);
+			intel_dp_init(dev, DP_C, PORT_C);
 	} else if (SUPPORTS_DIGITAL_OUTPUTS(dev)) {
 		bool found = false;
 
@@ -6824,12 +7610,12 @@
 			found = intel_sdvo_init(dev, SDVOB, true);
 			if (!found && SUPPORTS_INTEGRATED_HDMI(dev)) {
 				DRM_DEBUG_KMS("probing HDMI on SDVOB\n");
-				intel_hdmi_init(dev, SDVOB);
+				intel_hdmi_init(dev, SDVOB, PORT_B);
 			}
 
 			if (!found && SUPPORTS_INTEGRATED_DP(dev)) {
 				DRM_DEBUG_KMS("probing DP_B\n");
-				intel_dp_init(dev, DP_B);
+				intel_dp_init(dev, DP_B, PORT_B);
 			}
 		}
 
@@ -6844,18 +7630,18 @@
 
 			if (SUPPORTS_INTEGRATED_HDMI(dev)) {
 				DRM_DEBUG_KMS("probing HDMI on SDVOC\n");
-				intel_hdmi_init(dev, SDVOC);
+				intel_hdmi_init(dev, SDVOC, PORT_C);
 			}
 			if (SUPPORTS_INTEGRATED_DP(dev)) {
 				DRM_DEBUG_KMS("probing DP_C\n");
-				intel_dp_init(dev, DP_C);
+				intel_dp_init(dev, DP_C, PORT_C);
 			}
 		}
 
 		if (SUPPORTS_INTEGRATED_DP(dev) &&
 		    (I915_READ(DP_D) & DP_DETECTED)) {
 			DRM_DEBUG_KMS("probing DP_D\n");
-			intel_dp_init(dev, DP_D);
+			intel_dp_init(dev, DP_D, PORT_D);
 		}
 	} else if (IS_GEN2(dev))
 		intel_dvo_init(dev);
@@ -6866,12 +7652,9 @@
 	list_for_each_entry(encoder, &dev->mode_config.encoder_list, base.head) {
 		encoder->base.possible_crtcs = encoder->crtc_mask;
 		encoder->base.possible_clones =
-			intel_encoder_clones(dev, encoder->clone_mask);
+			intel_encoder_clones(encoder);
 	}
 
-	/* disable all the possible outputs/crtcs before entering KMS mode */
-	drm_helper_disable_unused_functions(dev);
-
 	if (HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev))
 		ironlake_init_pch_refclk(dev);
 }
@@ -6973,13 +7756,15 @@
 
 	/* We always want a DPMS function */
 	if (HAS_PCH_SPLIT(dev)) {
-		dev_priv->display.dpms = ironlake_crtc_dpms;
 		dev_priv->display.crtc_mode_set = ironlake_crtc_mode_set;
+		dev_priv->display.crtc_enable = ironlake_crtc_enable;
+		dev_priv->display.crtc_disable = ironlake_crtc_disable;
 		dev_priv->display.off = ironlake_crtc_off;
 		dev_priv->display.update_plane = ironlake_update_plane;
 	} else {
-		dev_priv->display.dpms = i9xx_crtc_dpms;
 		dev_priv->display.crtc_mode_set = i9xx_crtc_mode_set;
+		dev_priv->display.crtc_enable = i9xx_crtc_enable;
+		dev_priv->display.crtc_disable = i9xx_crtc_disable;
 		dev_priv->display.off = i9xx_crtc_off;
 		dev_priv->display.update_plane = i9xx_update_plane;
 	}
@@ -7023,7 +7808,7 @@
 			dev_priv->display.write_eld = ironlake_write_eld;
 		} else if (IS_HASWELL(dev)) {
 			dev_priv->display.fdi_link_train = hsw_fdi_link_train;
-			dev_priv->display.write_eld = ironlake_write_eld;
+			dev_priv->display.write_eld = haswell_write_eld;
 		} else
 			dev_priv->display.update_wm = NULL;
 	} else if (IS_G4X(dev)) {
@@ -7101,21 +7886,16 @@
 	/* HP Mini needs pipe A force quirk (LP: #322104) */
 	{ 0x27ae, 0x103c, 0x361a, quirk_pipea_force },
 
-	/* Thinkpad R31 needs pipe A force quirk */
-	{ 0x3577, 0x1014, 0x0505, quirk_pipea_force },
 	/* Toshiba Protege R-205, S-209 needs pipe A force quirk */
 	{ 0x2592, 0x1179, 0x0001, quirk_pipea_force },
 
-	/* ThinkPad X30 needs pipe A force quirk (LP: #304614) */
-	{ 0x3577,  0x1014, 0x0513, quirk_pipea_force },
-	/* ThinkPad X40 needs pipe A force quirk */
-
 	/* ThinkPad T60 needs pipe A force quirk (bug #16494) */
 	{ 0x2782, 0x17aa, 0x201a, quirk_pipea_force },
 
 	/* 855 & before need to leave pipe A & dpll A up */
 	{ 0x3582, PCI_ANY_ID, PCI_ANY_ID, quirk_pipea_force },
 	{ 0x2562, PCI_ANY_ID, PCI_ANY_ID, quirk_pipea_force },
+	{ 0x3577, PCI_ANY_ID, PCI_ANY_ID, quirk_pipea_force },
 
 	/* Lenovo U160 cannot use SSC on LVDS */
 	{ 0x0046, 0x17aa, 0x3920, quirk_ssc_force_disable },
@@ -7231,10 +8011,251 @@
 	/* Just disable it once at startup */
 	i915_disable_vga(dev);
 	intel_setup_outputs(dev);
+}
 
-	INIT_WORK(&dev_priv->idle_work, intel_idle_update);
-	setup_timer(&dev_priv->idle_timer, intel_gpu_idle_timer,
-		    (unsigned long)dev);
+static void
+intel_connector_break_all_links(struct intel_connector *connector)
+{
+	connector->base.dpms = DRM_MODE_DPMS_OFF;
+	connector->base.encoder = NULL;
+	connector->encoder->connectors_active = false;
+	connector->encoder->base.crtc = NULL;
+}
+
+static void intel_enable_pipe_a(struct drm_device *dev)
+{
+	struct intel_connector *connector;
+	struct drm_connector *crt = NULL;
+	struct intel_load_detect_pipe load_detect_temp;
+
+	/* We can't just switch on pipe A, we need to set things up with a
+	 * proper mode and output configuration. As a gross hack, enable pipe A
+	 * by enabling the load detect pipe once. */
+	list_for_each_entry(connector,
+			    &dev->mode_config.connector_list,
+			    base.head) {
+		if (connector->encoder->type == INTEL_OUTPUT_ANALOG) {
+			crt = &connector->base;
+			break;
+		}
+	}
+
+	if (!crt)
+		return;
+
+	if (intel_get_load_detect_pipe(crt, NULL, &load_detect_temp))
+		intel_release_load_detect_pipe(crt, &load_detect_temp);
+}
+
+static void intel_sanitize_crtc(struct intel_crtc *crtc)
+{
+	struct drm_device *dev = crtc->base.dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	u32 reg, val;
+
+	/* Clear any frame start delays used for debugging left by the BIOS */
+	reg = PIPECONF(crtc->pipe);
+	I915_WRITE(reg, I915_READ(reg) & ~PIPECONF_FRAME_START_DELAY_MASK);
+
+	/* We need to sanitize the plane -> pipe mapping first because this will
+	 * disable the crtc (and hence change the state) if it is wrong. */
+	if (!HAS_PCH_SPLIT(dev)) {
+		struct intel_connector *connector;
+		bool plane;
+
+		reg = DSPCNTR(crtc->plane);
+		val = I915_READ(reg);
+
+		if ((val & DISPLAY_PLANE_ENABLE) == 0 &&
+		    (!!(val & DISPPLANE_SEL_PIPE_MASK) == crtc->pipe))
+			goto ok;
+
+		DRM_DEBUG_KMS("[CRTC:%d] wrong plane connection detected!\n",
+			      crtc->base.base.id);
+
+		/* Pipe has the wrong plane attached and the plane is active.
+		 * Temporarily change the plane mapping and disable everything
+		 * ...  */
+		plane = crtc->plane;
+		crtc->plane = !plane;
+		dev_priv->display.crtc_disable(&crtc->base);
+		crtc->plane = plane;
+
+		/* ... and break all links. */
+		list_for_each_entry(connector, &dev->mode_config.connector_list,
+				    base.head) {
+			if (connector->encoder->base.crtc != &crtc->base)
+				continue;
+
+			intel_connector_break_all_links(connector);
+		}
+
+		WARN_ON(crtc->active);
+		crtc->base.enabled = false;
+	}
+ok:
+
+	if (dev_priv->quirks & QUIRK_PIPEA_FORCE &&
+	    crtc->pipe == PIPE_A && !crtc->active) {
+		/* BIOS forgot to enable pipe A, this mostly happens after
+		 * resume. Force-enable the pipe to fix this; the update_dpms
+		 * call below will restore the pipe to the right state, but
+		 * leave the required bits on. */
+		intel_enable_pipe_a(dev);
+	}
+
+	/* Adjust the state of the output pipe according to whether we
+	 * have active connectors/encoders. */
+	intel_crtc_update_dpms(&crtc->base);
+
+	if (crtc->active != crtc->base.enabled) {
+		struct intel_encoder *encoder;
+
+		/* This can happen either due to bugs in the get_hw_state
+		 * functions or because the pipe is force-enabled due to the
+		 * pipe A quirk. */
+		DRM_DEBUG_KMS("[CRTC:%d] hw state adjusted, was %s, now %s\n",
+			      crtc->base.base.id,
+			      crtc->base.enabled ? "enabled" : "disabled",
+			      crtc->active ? "enabled" : "disabled");
+
+		crtc->base.enabled = crtc->active;
+
+		/* Because we only establish the connector -> encoder ->
+		 * crtc links if something is active, this means the
+		 * crtc is now deactivated. Break the links. Connector
+		 * -> encoder links are only established when things are
+		 * actually up, hence no need to break them. */
+		WARN_ON(crtc->active);
+
+		for_each_encoder_on_crtc(dev, &crtc->base, encoder) {
+			WARN_ON(encoder->connectors_active);
+			encoder->base.crtc = NULL;
+		}
+	}
+}
+
+static void intel_sanitize_encoder(struct intel_encoder *encoder)
+{
+	struct intel_connector *connector;
+	struct drm_device *dev = encoder->base.dev;
+
+	/* We need to check both for a crtc link (meaning that the
+	 * encoder is active and trying to read from a pipe) and the
+	 * pipe itself being active. */
+	bool has_active_crtc = encoder->base.crtc &&
+		to_intel_crtc(encoder->base.crtc)->active;
+
+	if (encoder->connectors_active && !has_active_crtc) {
+		DRM_DEBUG_KMS("[ENCODER:%d:%s] has active connectors but no active pipe!\n",
+			      encoder->base.base.id,
+			      drm_get_encoder_name(&encoder->base));
+
+		/* Connector is active, but has no active pipe. This is
+		 * fallout from our resume register restoring. Disable
+		 * the encoder manually again. */
+		if (encoder->base.crtc) {
+			DRM_DEBUG_KMS("[ENCODER:%d:%s] manually disabled\n",
+				      encoder->base.base.id,
+				      drm_get_encoder_name(&encoder->base));
+			encoder->disable(encoder);
+		}
+
+		/* Inconsistent output/port/pipe state happens presumably due to
+		 * a bug in one of the get_hw_state functions. Or someplace else
+		 * in our code, like the register restore mess on resume. Clamp
+		 * things to off as a safer default. */
+		list_for_each_entry(connector,
+				    &dev->mode_config.connector_list,
+				    base.head) {
+			if (connector->encoder != encoder)
+				continue;
+
+			intel_connector_break_all_links(connector);
+		}
+	}
+	/* Enabled encoders without active connectors will be fixed in
+	 * the crtc fixup. */
+}
+
+/* Scans out the current hw modeset state, sanitizes it and maps it into the
+ * drm and i915 state tracking structures. */
+void intel_modeset_setup_hw_state(struct drm_device *dev)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	enum pipe pipe;
+	u32 tmp;
+	struct intel_crtc *crtc;
+	struct intel_encoder *encoder;
+	struct intel_connector *connector;
+
+	for_each_pipe(pipe) {
+		crtc = to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]);
+
+		tmp = I915_READ(PIPECONF(pipe));
+		if (tmp & PIPECONF_ENABLE)
+			crtc->active = true;
+		else
+			crtc->active = false;
+
+		crtc->base.enabled = crtc->active;
+
+		DRM_DEBUG_KMS("[CRTC:%d] hw state readout: %s\n",
+			      crtc->base.base.id,
+			      crtc->active ? "enabled" : "disabled");
+	}
+
+	list_for_each_entry(encoder, &dev->mode_config.encoder_list,
+			    base.head) {
+		pipe = 0;
+
+		if (encoder->get_hw_state(encoder, &pipe)) {
+			encoder->base.crtc =
+				dev_priv->pipe_to_crtc_mapping[pipe];
+		} else {
+			encoder->base.crtc = NULL;
+		}
+
+		encoder->connectors_active = false;
+		DRM_DEBUG_KMS("[ENCODER:%d:%s] hw state readout: %s, pipe=%i\n",
+			      encoder->base.base.id,
+			      drm_get_encoder_name(&encoder->base),
+			      encoder->base.crtc ? "enabled" : "disabled",
+			      pipe);
+	}
+
+	list_for_each_entry(connector, &dev->mode_config.connector_list,
+			    base.head) {
+		if (connector->get_hw_state(connector)) {
+			connector->base.dpms = DRM_MODE_DPMS_ON;
+			connector->encoder->connectors_active = true;
+			connector->base.encoder = &connector->encoder->base;
+		} else {
+			connector->base.dpms = DRM_MODE_DPMS_OFF;
+			connector->base.encoder = NULL;
+		}
+		DRM_DEBUG_KMS("[CONNECTOR:%d:%s] hw state readout: %s\n",
+			      connector->base.base.id,
+			      drm_get_connector_name(&connector->base),
+			      connector->base.encoder ? "enabled" : "disabled");
+	}
+
+	/* HW state is read out, now we need to sanitize this mess. */
+	list_for_each_entry(encoder, &dev->mode_config.encoder_list,
+			    base.head) {
+		intel_sanitize_encoder(encoder);
+	}
+
+	for_each_pipe(pipe) {
+		crtc = to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]);
+		intel_sanitize_crtc(crtc);
+	}
+
+	intel_modeset_update_staged_output_state(dev);
+
+	intel_modeset_check_state(dev);
 }
 
 void intel_modeset_gem_init(struct drm_device *dev)
@@ -7242,6 +8263,8 @@
 	intel_modeset_init_hw(dev);
 
 	intel_setup_overlay(dev);
+
+	intel_modeset_setup_hw_state(dev);
 }
 
 void intel_modeset_cleanup(struct drm_device *dev)
@@ -7280,19 +8303,11 @@
 	 * enqueue unpin/hotplug work. */
 	drm_irq_uninstall(dev);
 	cancel_work_sync(&dev_priv->hotplug_work);
-	cancel_work_sync(&dev_priv->rps_work);
+	cancel_work_sync(&dev_priv->rps.work);
 
 	/* flush any delayed tasks or pending work */
 	flush_scheduled_work();
 
-	/* Shut off idle work before the crtcs get freed. */
-	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
-		intel_crtc = to_intel_crtc(crtc);
-		del_timer_sync(&intel_crtc->idle_timer);
-	}
-	del_timer_sync(&dev_priv->idle_timer);
-	cancel_work_sync(&dev_priv->idle_work);
-
 	drm_mode_config_cleanup(dev);
 }
 
@@ -7338,7 +8353,7 @@
 		u32 position;
 		u32 base;
 		u32 size;
-	} cursor[2];
+	} cursor[I915_MAX_PIPES];
 
 	struct intel_pipe_error_state {
 		u32 conf;
@@ -7350,7 +8365,7 @@
 		u32 vtotal;
 		u32 vblank;
 		u32 vsync;
-	} pipe[2];
+	} pipe[I915_MAX_PIPES];
 
 	struct intel_plane_error_state {
 		u32 control;
@@ -7360,7 +8375,7 @@
 		u32 addr;
 		u32 surface;
 		u32 tile_offset;
-	} plane[2];
+	} plane[I915_MAX_PIPES];
 };
 
 struct intel_display_error_state *
@@ -7374,7 +8389,7 @@
 	if (error == NULL)
 		return NULL;
 
-	for (i = 0; i < 2; i++) {
+	for_each_pipe(i) {
 		error->cursor[i].control = I915_READ(CURCNTR(i));
 		error->cursor[i].position = I915_READ(CURPOS(i));
 		error->cursor[i].base = I915_READ(CURBASE(i));
@@ -7407,9 +8422,11 @@
 				struct drm_device *dev,
 				struct intel_display_error_state *error)
 {
+	drm_i915_private_t *dev_priv = dev->dev_private;
 	int i;
 
-	for (i = 0; i < 2; i++) {
+	seq_printf(m, "Num Pipes: %d\n", dev_priv->num_pipe);
+	for_each_pipe(i) {
 		seq_printf(m, "Pipe [%d]:\n", i);
 		seq_printf(m, "  CONF: %08x\n", error->pipe[i].conf);
 		seq_printf(m, "  SRC: %08x\n", error->pipe[i].source);
diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c
index ace757a..1474f84 100644
--- a/drivers/gpu/drm/i915/intel_dp.c
+++ b/drivers/gpu/drm/i915/intel_dp.c
@@ -36,42 +36,10 @@
 #include "intel_drv.h"
 #include "i915_drm.h"
 #include "i915_drv.h"
-#include "drm_dp_helper.h"
 
-#define DP_RECEIVER_CAP_SIZE	0xf
 #define DP_LINK_STATUS_SIZE	6
 #define DP_LINK_CHECK_TIMEOUT	(10 * 1000)
 
-#define DP_LINK_CONFIGURATION_SIZE	9
-
-struct intel_dp {
-	struct intel_encoder base;
-	uint32_t output_reg;
-	uint32_t DP;
-	uint8_t  link_configuration[DP_LINK_CONFIGURATION_SIZE];
-	bool has_audio;
-	enum hdmi_force_audio force_audio;
-	uint32_t color_range;
-	int dpms_mode;
-	uint8_t link_bw;
-	uint8_t lane_count;
-	uint8_t dpcd[DP_RECEIVER_CAP_SIZE];
-	struct i2c_adapter adapter;
-	struct i2c_algo_dp_aux_data algo;
-	bool is_pch_edp;
-	uint8_t	train_set[4];
-	int panel_power_up_delay;
-	int panel_power_down_delay;
-	int panel_power_cycle_delay;
-	int backlight_on_delay;
-	int backlight_off_delay;
-	struct drm_display_mode *panel_fixed_mode;  /* for eDP */
-	struct delayed_work panel_vdd_work;
-	bool want_panel_vdd;
-	struct edid *edid; /* cached EDID for eDP */
-	int edid_mode_count;
-};
-
 /**
  * is_edp - is the given port attached to an eDP panel (either CPU or PCH)
  * @intel_dp: DP struct
@@ -840,9 +808,6 @@
 	}
 }
 
-static void ironlake_edp_pll_on(struct drm_encoder *encoder);
-static void ironlake_edp_pll_off(struct drm_encoder *encoder);
-
 static void
 intel_dp_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode,
 		  struct drm_display_mode *adjusted_mode)
@@ -853,14 +818,6 @@
 	struct drm_crtc *crtc = intel_dp->base.base.crtc;
 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
 
-	/* Turn on the eDP PLL if needed */
-	if (is_edp(intel_dp)) {
-		if (!is_pch_edp(intel_dp))
-			ironlake_edp_pll_on(encoder);
-		else
-			ironlake_edp_pll_off(encoder);
-	}
-
 	/*
 	 * There are four kinds of DP registers:
 	 *
@@ -882,10 +839,8 @@
 	 * supposed to be read-only.
 	 */
 	intel_dp->DP = I915_READ(intel_dp->output_reg) & DP_DETECTED;
-	intel_dp->DP |=  DP_VOLTAGE_0_4 | DP_PRE_EMPHASIS_0;
 
 	/* Handle DP bits in common between all three register formats */
-
 	intel_dp->DP |= DP_VOLTAGE_0_4 | DP_PRE_EMPHASIS_0;
 
 	switch (intel_dp->lane_count) {
@@ -932,7 +887,6 @@
 		intel_dp->DP |= intel_crtc->pipe << 29;
 
 		/* don't miss out required setting for eDP */
-		intel_dp->DP |= DP_PLL_ENABLE;
 		if (adjusted_mode->clock < 200000)
 			intel_dp->DP |= DP_PLL_FREQ_160MHZ;
 		else
@@ -954,7 +908,6 @@
 
 		if (is_cpu_edp(intel_dp)) {
 			/* don't miss out required setting for eDP */
-			intel_dp->DP |= DP_PLL_ENABLE;
 			if (adjusted_mode->clock < 200000)
 				intel_dp->DP |= DP_PLL_FREQ_160MHZ;
 			else
@@ -1225,27 +1178,49 @@
 	msleep(intel_dp->backlight_off_delay);
 }
 
-static void ironlake_edp_pll_on(struct drm_encoder *encoder)
+static void ironlake_edp_pll_on(struct intel_dp *intel_dp)
 {
-	struct drm_device *dev = encoder->dev;
+	struct drm_device *dev = intel_dp->base.base.dev;
+	struct drm_crtc *crtc = intel_dp->base.base.crtc;
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	u32 dpa_ctl;
 
+	assert_pipe_disabled(dev_priv,
+			     to_intel_crtc(crtc)->pipe);
+
 	DRM_DEBUG_KMS("\n");
 	dpa_ctl = I915_READ(DP_A);
-	dpa_ctl |= DP_PLL_ENABLE;
-	I915_WRITE(DP_A, dpa_ctl);
+	WARN(dpa_ctl & DP_PLL_ENABLE, "dp pll on, should be off\n");
+	WARN(dpa_ctl & DP_PORT_EN, "dp port still on, should be off\n");
+
+	/* We don't adjust intel_dp->DP while tearing down the link, to
+	 * facilitate link retraining (e.g. after hotplug). Hence clear all
+	 * enable bits here to ensure that we don't enable too much. */
+	intel_dp->DP &= ~(DP_PORT_EN | DP_AUDIO_OUTPUT_ENABLE);
+	intel_dp->DP |= DP_PLL_ENABLE;
+	I915_WRITE(DP_A, intel_dp->DP);
 	POSTING_READ(DP_A);
 	udelay(200);
 }
 
-static void ironlake_edp_pll_off(struct drm_encoder *encoder)
+static void ironlake_edp_pll_off(struct intel_dp *intel_dp)
 {
-	struct drm_device *dev = encoder->dev;
+	struct drm_device *dev = intel_dp->base.base.dev;
+	struct drm_crtc *crtc = intel_dp->base.base.crtc;
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	u32 dpa_ctl;
 
+	assert_pipe_disabled(dev_priv,
+			     to_intel_crtc(crtc)->pipe);
+
 	dpa_ctl = I915_READ(DP_A);
+	WARN((dpa_ctl & DP_PLL_ENABLE) == 0,
+	     "dp pll off, should be on\n");
+	WARN(dpa_ctl & DP_PORT_EN, "dp port still on, should be off\n");
+
+	/* We can't rely on the value tracked for the DP register in
+	 * intel_dp->DP because link_down must not change that (otherwise link
+	 * re-training will fail). */
 	dpa_ctl &= ~DP_PLL_ENABLE;
 	I915_WRITE(DP_A, dpa_ctl);
 	POSTING_READ(DP_A);
@@ -1282,10 +1257,57 @@
 	}
 }
 
-static void intel_dp_prepare(struct drm_encoder *encoder)
+static bool intel_dp_get_hw_state(struct intel_encoder *encoder,
+				  enum pipe *pipe)
 {
-	struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
+	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
+	struct drm_device *dev = encoder->base.dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	u32 tmp = I915_READ(intel_dp->output_reg);
 
+	if (!(tmp & DP_PORT_EN))
+		return false;
+
+	if (is_cpu_edp(intel_dp) && IS_GEN7(dev)) {
+		*pipe = PORT_TO_PIPE_CPT(tmp);
+	} else if (!HAS_PCH_CPT(dev) || is_cpu_edp(intel_dp)) {
+		*pipe = PORT_TO_PIPE(tmp);
+	} else {
+		u32 trans_sel;
+		u32 trans_dp;
+		int i;
+
+		switch (intel_dp->output_reg) {
+		case PCH_DP_B:
+			trans_sel = TRANS_DP_PORT_SEL_B;
+			break;
+		case PCH_DP_C:
+			trans_sel = TRANS_DP_PORT_SEL_C;
+			break;
+		case PCH_DP_D:
+			trans_sel = TRANS_DP_PORT_SEL_D;
+			break;
+		default:
+			return true;
+		}
+
+		for_each_pipe(i) {
+			trans_dp = I915_READ(TRANS_DP_CTL(i));
+			if ((trans_dp & TRANS_DP_PORT_SEL_MASK) == trans_sel) {
+				*pipe = i;
+				return true;
+			}
+		}
+	}
+
+	DRM_DEBUG_KMS("No pipe for dp port 0x%x found\n", intel_dp->output_reg);
+
+	return true;
+}
+
+static void intel_disable_dp(struct intel_encoder *encoder)
+{
+	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
 
 	/* Make sure the panel is off before trying to change the mode. But also
 	 * ensure that we have vdd while we switch off the panel. */
@@ -1293,14 +1315,31 @@
 	ironlake_edp_backlight_off(intel_dp);
 	intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_ON);
 	ironlake_edp_panel_off(intel_dp);
-	intel_dp_link_down(intel_dp);
+
+	/* cpu edp may only be disabled _after_ the cpu pipe/plane is disabled. */
+	if (!is_cpu_edp(intel_dp))
+		intel_dp_link_down(intel_dp);
 }
 
-static void intel_dp_commit(struct drm_encoder *encoder)
+static void intel_post_disable_dp(struct intel_encoder *encoder)
 {
-	struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
-	struct drm_device *dev = encoder->dev;
-	struct intel_crtc *intel_crtc = to_intel_crtc(intel_dp->base.base.crtc);
+	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
+
+	if (is_cpu_edp(intel_dp)) {
+		intel_dp_link_down(intel_dp);
+		ironlake_edp_pll_off(intel_dp);
+	}
+}
+
+static void intel_enable_dp(struct intel_encoder *encoder)
+{
+	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
+	struct drm_device *dev = encoder->base.dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	uint32_t dp_reg = I915_READ(intel_dp->output_reg);
+
+	if (WARN_ON(dp_reg & DP_PORT_EN))
+		return;
 
 	ironlake_edp_panel_vdd_on(intel_dp);
 	intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_ON);
@@ -1309,47 +1348,14 @@
 	ironlake_edp_panel_vdd_off(intel_dp, true);
 	intel_dp_complete_link_train(intel_dp);
 	ironlake_edp_backlight_on(intel_dp);
-
-	intel_dp->dpms_mode = DRM_MODE_DPMS_ON;
-
-	if (HAS_PCH_CPT(dev))
-		intel_cpt_verify_modeset(dev, intel_crtc->pipe);
 }
 
-static void
-intel_dp_dpms(struct drm_encoder *encoder, int mode)
+static void intel_pre_enable_dp(struct intel_encoder *encoder)
 {
-	struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
-	struct drm_device *dev = encoder->dev;
-	struct drm_i915_private *dev_priv = dev->dev_private;
-	uint32_t dp_reg = I915_READ(intel_dp->output_reg);
+	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
 
-	if (mode != DRM_MODE_DPMS_ON) {
-		/* Switching the panel off requires vdd. */
-		ironlake_edp_panel_vdd_on(intel_dp);
-		ironlake_edp_backlight_off(intel_dp);
-		intel_dp_sink_dpms(intel_dp, mode);
-		ironlake_edp_panel_off(intel_dp);
-		intel_dp_link_down(intel_dp);
-
-		if (is_cpu_edp(intel_dp))
-			ironlake_edp_pll_off(encoder);
-	} else {
-		if (is_cpu_edp(intel_dp))
-			ironlake_edp_pll_on(encoder);
-
-		ironlake_edp_panel_vdd_on(intel_dp);
-		intel_dp_sink_dpms(intel_dp, mode);
-		if (!(dp_reg & DP_PORT_EN)) {
-			intel_dp_start_link_train(intel_dp);
-			ironlake_edp_panel_on(intel_dp);
-			ironlake_edp_panel_vdd_off(intel_dp, true);
-			intel_dp_complete_link_train(intel_dp);
-		} else
-			ironlake_edp_panel_vdd_off(intel_dp, false);
-		ironlake_edp_backlight_on(intel_dp);
-	}
-	intel_dp->dpms_mode = mode;
+	if (is_cpu_edp(intel_dp))
+		ironlake_edp_pll_on(intel_dp);
 }
 
 /*
@@ -1668,6 +1674,45 @@
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	int ret;
 
+	if (HAS_PCH_CPT(dev) && (IS_GEN7(dev) || !is_cpu_edp(intel_dp))) {
+		dp_reg_value &= ~DP_LINK_TRAIN_MASK_CPT;
+
+		switch (dp_train_pat & DP_TRAINING_PATTERN_MASK) {
+		case DP_TRAINING_PATTERN_DISABLE:
+			dp_reg_value |= DP_LINK_TRAIN_OFF_CPT;
+			break;
+		case DP_TRAINING_PATTERN_1:
+			dp_reg_value |= DP_LINK_TRAIN_PAT_1_CPT;
+			break;
+		case DP_TRAINING_PATTERN_2:
+			dp_reg_value |= DP_LINK_TRAIN_PAT_2_CPT;
+			break;
+		case DP_TRAINING_PATTERN_3:
+			DRM_ERROR("DP training pattern 3 not supported\n");
+			dp_reg_value |= DP_LINK_TRAIN_PAT_2_CPT;
+			break;
+		}
+
+	} else {
+		dp_reg_value &= ~DP_LINK_TRAIN_MASK;
+
+		switch (dp_train_pat & DP_TRAINING_PATTERN_MASK) {
+		case DP_TRAINING_PATTERN_DISABLE:
+			dp_reg_value |= DP_LINK_TRAIN_OFF;
+			break;
+		case DP_TRAINING_PATTERN_1:
+			dp_reg_value |= DP_LINK_TRAIN_PAT_1;
+			break;
+		case DP_TRAINING_PATTERN_2:
+			dp_reg_value |= DP_LINK_TRAIN_PAT_2;
+			break;
+		case DP_TRAINING_PATTERN_3:
+			DRM_ERROR("DP training pattern 3 not supported\n");
+			dp_reg_value |= DP_LINK_TRAIN_PAT_2;
+			break;
+		}
+	}
+
 	I915_WRITE(intel_dp->output_reg, dp_reg_value);
 	POSTING_READ(intel_dp->output_reg);
 
@@ -1675,12 +1720,15 @@
 				    DP_TRAINING_PATTERN_SET,
 				    dp_train_pat);
 
-	ret = intel_dp_aux_native_write(intel_dp,
-					DP_TRAINING_LANE0_SET,
-					intel_dp->train_set,
-					intel_dp->lane_count);
-	if (ret != intel_dp->lane_count)
-		return false;
+	if ((dp_train_pat & DP_TRAINING_PATTERN_MASK) !=
+	    DP_TRAINING_PATTERN_DISABLE) {
+		ret = intel_dp_aux_native_write(intel_dp,
+						DP_TRAINING_LANE0_SET,
+						intel_dp->train_set,
+						intel_dp->lane_count);
+		if (ret != intel_dp->lane_count)
+			return false;
+	}
 
 	return true;
 }
@@ -1690,26 +1738,12 @@
 intel_dp_start_link_train(struct intel_dp *intel_dp)
 {
 	struct drm_device *dev = intel_dp->base.base.dev;
-	struct drm_i915_private *dev_priv = dev->dev_private;
-	struct intel_crtc *intel_crtc = to_intel_crtc(intel_dp->base.base.crtc);
 	int i;
 	uint8_t voltage;
 	bool clock_recovery = false;
 	int voltage_tries, loop_tries;
-	u32 reg;
 	uint32_t DP = intel_dp->DP;
 
-	/*
-	 * On CPT we have to enable the port in training pattern 1, which
-	 * will happen below in intel_dp_set_link_train.  Otherwise, enable
-	 * the port and wait for it to become active.
-	 */
-	if (!HAS_PCH_CPT(dev)) {
-		I915_WRITE(intel_dp->output_reg, intel_dp->DP);
-		POSTING_READ(intel_dp->output_reg);
-		intel_wait_for_vblank(dev, intel_crtc->pipe);
-	}
-
 	/* Write the link configuration data */
 	intel_dp_aux_native_write(intel_dp, DP_LINK_BW_SET,
 				  intel_dp->link_configuration,
@@ -1717,10 +1751,6 @@
 
 	DP |= DP_PORT_EN;
 
-	if (HAS_PCH_CPT(dev) && (IS_GEN7(dev) || !is_cpu_edp(intel_dp)))
-		DP &= ~DP_LINK_TRAIN_MASK_CPT;
-	else
-		DP &= ~DP_LINK_TRAIN_MASK;
 	memset(intel_dp->train_set, 0, 4);
 	voltage = 0xff;
 	voltage_tries = 0;
@@ -1744,12 +1774,7 @@
 			DP = (DP & ~(DP_VOLTAGE_MASK|DP_PRE_EMPHASIS_MASK)) | signal_levels;
 		}
 
-		if (HAS_PCH_CPT(dev) && (IS_GEN7(dev) || !is_cpu_edp(intel_dp)))
-			reg = DP | DP_LINK_TRAIN_PAT_1_CPT;
-		else
-			reg = DP | DP_LINK_TRAIN_PAT_1;
-
-		if (!intel_dp_set_link_train(intel_dp, reg,
+		if (!intel_dp_set_link_train(intel_dp, DP,
 					     DP_TRAINING_PATTERN_1 |
 					     DP_LINK_SCRAMBLING_DISABLE))
 			break;
@@ -1804,10 +1829,8 @@
 intel_dp_complete_link_train(struct intel_dp *intel_dp)
 {
 	struct drm_device *dev = intel_dp->base.base.dev;
-	struct drm_i915_private *dev_priv = dev->dev_private;
 	bool channel_eq = false;
 	int tries, cr_tries;
-	u32 reg;
 	uint32_t DP = intel_dp->DP;
 
 	/* channel equalization */
@@ -1836,13 +1859,8 @@
 			DP = (DP & ~(DP_VOLTAGE_MASK|DP_PRE_EMPHASIS_MASK)) | signal_levels;
 		}
 
-		if (HAS_PCH_CPT(dev) && (IS_GEN7(dev) || !is_cpu_edp(intel_dp)))
-			reg = DP | DP_LINK_TRAIN_PAT_2_CPT;
-		else
-			reg = DP | DP_LINK_TRAIN_PAT_2;
-
 		/* channel eq pattern */
-		if (!intel_dp_set_link_train(intel_dp, reg,
+		if (!intel_dp_set_link_train(intel_dp, DP,
 					     DP_TRAINING_PATTERN_2 |
 					     DP_LINK_SCRAMBLING_DISABLE))
 			break;
@@ -1877,15 +1895,7 @@
 		++tries;
 	}
 
-	if (HAS_PCH_CPT(dev) && (IS_GEN7(dev) || !is_cpu_edp(intel_dp)))
-		reg = DP | DP_LINK_TRAIN_OFF_CPT;
-	else
-		reg = DP | DP_LINK_TRAIN_OFF;
-
-	I915_WRITE(intel_dp->output_reg, reg);
-	POSTING_READ(intel_dp->output_reg);
-	intel_dp_aux_native_write_1(intel_dp,
-				    DP_TRAINING_PATTERN_SET, DP_TRAINING_PATTERN_DISABLE);
+	intel_dp_set_link_train(intel_dp, DP, DP_TRAINING_PATTERN_DISABLE);
 }
 
 static void
@@ -1895,18 +1905,11 @@
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	uint32_t DP = intel_dp->DP;
 
-	if ((I915_READ(intel_dp->output_reg) & DP_PORT_EN) == 0)
+	if (WARN_ON((I915_READ(intel_dp->output_reg) & DP_PORT_EN) == 0))
 		return;
 
 	DRM_DEBUG_KMS("\n");
 
-	if (is_edp(intel_dp)) {
-		DP &= ~DP_PLL_ENABLE;
-		I915_WRITE(intel_dp->output_reg, DP);
-		POSTING_READ(intel_dp->output_reg);
-		udelay(100);
-	}
-
 	if (HAS_PCH_CPT(dev) && (IS_GEN7(dev) || !is_cpu_edp(intel_dp))) {
 		DP &= ~DP_LINK_TRAIN_MASK_CPT;
 		I915_WRITE(intel_dp->output_reg, DP | DP_LINK_TRAIN_PAT_IDLE_CPT);
@@ -1918,13 +1921,6 @@
 
 	msleep(17);
 
-	if (is_edp(intel_dp)) {
-		if (HAS_PCH_CPT(dev) && (IS_GEN7(dev) || !is_cpu_edp(intel_dp)))
-			DP |= DP_LINK_TRAIN_OFF_CPT;
-		else
-			DP |= DP_LINK_TRAIN_OFF;
-	}
-
 	if (HAS_PCH_IBX(dev) &&
 	    I915_READ(intel_dp->output_reg) & DP_PIPEB_SELECT) {
 		struct drm_crtc *crtc = intel_dp->base.base.crtc;
@@ -2033,10 +2029,10 @@
 	u8 sink_irq_vector;
 	u8 link_status[DP_LINK_STATUS_SIZE];
 
-	if (intel_dp->dpms_mode != DRM_MODE_DPMS_ON)
+	if (!intel_dp->base.connectors_active)
 		return;
 
-	if (!intel_dp->base.base.crtc)
+	if (WARN_ON(!intel_dp->base.base.crtc))
 		return;
 
 	/* Try to read receiver status if the link appears to be up */
@@ -2160,7 +2156,6 @@
 		ret = drm_add_edid_modes(connector, intel_dp->edid);
 		drm_edid_to_eld(connector,
 				intel_dp->edid);
-		connector->display_info.raw_edid = NULL;
 		return intel_dp->edid_mode_count;
 	}
 
@@ -2206,7 +2201,6 @@
 		edid = intel_dp_get_edid(connector, &intel_dp->adapter);
 		if (edid) {
 			intel_dp->has_audio = drm_detect_monitor_audio(edid);
-			connector->display_info.raw_edid = NULL;
 			kfree(edid);
 		}
 	}
@@ -2271,8 +2265,6 @@
 	edid = intel_dp_get_edid(connector, &intel_dp->adapter);
 	if (edid) {
 		has_audio = drm_detect_monitor_audio(edid);
-
-		connector->display_info.raw_edid = NULL;
 		kfree(edid);
 	}
 
@@ -2326,9 +2318,8 @@
 done:
 	if (intel_dp->base.base.crtc) {
 		struct drm_crtc *crtc = intel_dp->base.base.crtc;
-		drm_crtc_helper_set_mode(crtc, &crtc->mode,
-					 crtc->x, crtc->y,
-					 crtc->fb);
+		intel_set_mode(crtc, &crtc->mode,
+			       crtc->x, crtc->y, crtc->fb);
 	}
 
 	return 0;
@@ -2362,15 +2353,13 @@
 }
 
 static const struct drm_encoder_helper_funcs intel_dp_helper_funcs = {
-	.dpms = intel_dp_dpms,
 	.mode_fixup = intel_dp_mode_fixup,
-	.prepare = intel_dp_prepare,
 	.mode_set = intel_dp_mode_set,
-	.commit = intel_dp_commit,
+	.disable = intel_encoder_noop,
 };
 
 static const struct drm_connector_funcs intel_dp_connector_funcs = {
-	.dpms = drm_helper_connector_dpms,
+	.dpms = intel_connector_dpms,
 	.detect = intel_dp_detect,
 	.fill_modes = drm_helper_probe_single_connector_modes,
 	.set_property = intel_dp_set_property,
@@ -2441,7 +2430,7 @@
 }
 
 void
-intel_dp_init(struct drm_device *dev, int output_reg)
+intel_dp_init(struct drm_device *dev, int output_reg, enum port port)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct drm_connector *connector;
@@ -2456,7 +2445,9 @@
 		return;
 
 	intel_dp->output_reg = output_reg;
-	intel_dp->dpms_mode = -1;
+	intel_dp->port = port;
+	/* Preserve the current hw state. */
+	intel_dp->DP = I915_READ(intel_dp->output_reg);
 
 	intel_connector = kzalloc(sizeof(struct intel_connector), GFP_KERNEL);
 	if (!intel_connector) {
@@ -2483,18 +2474,10 @@
 
 	connector->polled = DRM_CONNECTOR_POLL_HPD;
 
-	if (output_reg == DP_B || output_reg == PCH_DP_B)
-		intel_encoder->clone_mask = (1 << INTEL_DP_B_CLONE_BIT);
-	else if (output_reg == DP_C || output_reg == PCH_DP_C)
-		intel_encoder->clone_mask = (1 << INTEL_DP_C_CLONE_BIT);
-	else if (output_reg == DP_D || output_reg == PCH_DP_D)
-		intel_encoder->clone_mask = (1 << INTEL_DP_D_CLONE_BIT);
+	intel_encoder->cloneable = false;
 
-	if (is_edp(intel_dp)) {
-		intel_encoder->clone_mask = (1 << INTEL_EDP_CLONE_BIT);
-		INIT_DELAYED_WORK(&intel_dp->panel_vdd_work,
-				  ironlake_panel_vdd_work);
-	}
+	INIT_DELAYED_WORK(&intel_dp->panel_vdd_work,
+			  ironlake_panel_vdd_work);
 
 	intel_encoder->crtc_mask = (1 << 0) | (1 << 1) | (1 << 2);
 
@@ -2508,29 +2491,33 @@
 	intel_connector_attach_encoder(intel_connector, intel_encoder);
 	drm_sysfs_connector_add(connector);
 
+	intel_encoder->enable = intel_enable_dp;
+	intel_encoder->pre_enable = intel_pre_enable_dp;
+	intel_encoder->disable = intel_disable_dp;
+	intel_encoder->post_disable = intel_post_disable_dp;
+	intel_encoder->get_hw_state = intel_dp_get_hw_state;
+	intel_connector->get_hw_state = intel_connector_get_hw_state;
+
 	/* Set up the DDC bus. */
-	switch (output_reg) {
-		case DP_A:
-			name = "DPDDC-A";
-			break;
-		case DP_B:
-		case PCH_DP_B:
-			dev_priv->hotplug_supported_mask |=
-				DPB_HOTPLUG_INT_STATUS;
-			name = "DPDDC-B";
-			break;
-		case DP_C:
-		case PCH_DP_C:
-			dev_priv->hotplug_supported_mask |=
-				DPC_HOTPLUG_INT_STATUS;
-			name = "DPDDC-C";
-			break;
-		case DP_D:
-		case PCH_DP_D:
-			dev_priv->hotplug_supported_mask |=
-				DPD_HOTPLUG_INT_STATUS;
-			name = "DPDDC-D";
-			break;
+	switch (port) {
+	case PORT_A:
+		name = "DPDDC-A";
+		break;
+	case PORT_B:
+		dev_priv->hotplug_supported_mask |= DPB_HOTPLUG_INT_STATUS;
+		name = "DPDDC-B";
+		break;
+	case PORT_C:
+		dev_priv->hotplug_supported_mask |= DPC_HOTPLUG_INT_STATUS;
+		name = "DPDDC-C";
+		break;
+	case PORT_D:
+		dev_priv->hotplug_supported_mask |= DPD_HOTPLUG_INT_STATUS;
+		name = "DPDDC-D";
+		break;
+	default:
+		WARN(1, "Invalid port %c\n", port_name(port));
+		break;
 	}
 
 	/* Cache some DPCD data in the eDP case */
diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h
index cd54cf8..351fd71 100644
--- a/drivers/gpu/drm/i915/intel_drv.h
+++ b/drivers/gpu/drm/i915/intel_drv.h
@@ -31,6 +31,7 @@
 #include "drm_crtc.h"
 #include "drm_crtc_helper.h"
 #include "drm_fb_helper.h"
+#include "drm_dp_helper.h"
 
 #define _wait_for(COND, MS, W) ({ \
 	unsigned long timeout__ = jiffies + msecs_to_jiffies(MS);	\
@@ -40,7 +41,11 @@
 			ret__ = -ETIMEDOUT;				\
 			break;						\
 		}							\
-		if (W && drm_can_sleep()) msleep(W);	\
+		if (W && drm_can_sleep()) {				\
+			msleep(W);					\
+		} else {						\
+			cpu_relax();					\
+		}							\
 	}								\
 	ret__;								\
 })
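
With this fix a caller that passes W = 0, or that runs in a context where
sleeping is forbidden, busy-waits with cpu_relax() instead of spinning with
no back-off at all. A usage sketch (the register and bit are made-up names;
only the macro call shape is real):

    /* Poll for up to 50 ms, sleeping 1 ms between reads. */
    if (_wait_for((I915_READ(MY_STATUS_REG) & MY_READY_BIT) != 0, 50, 1))
            DRM_ERROR("timed out waiting for ready bit\n");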
@@ -90,25 +95,6 @@
 #define INTEL_OUTPUT_DISPLAYPORT 7
 #define INTEL_OUTPUT_EDP 8
 
-/* Intel Pipe Clone Bit */
-#define INTEL_HDMIB_CLONE_BIT 1
-#define INTEL_HDMIC_CLONE_BIT 2
-#define INTEL_HDMID_CLONE_BIT 3
-#define INTEL_HDMIE_CLONE_BIT 4
-#define INTEL_HDMIF_CLONE_BIT 5
-#define INTEL_SDVO_NON_TV_CLONE_BIT 6
-#define INTEL_SDVO_TV_CLONE_BIT 7
-#define INTEL_SDVO_LVDS_CLONE_BIT 8
-#define INTEL_ANALOG_CLONE_BIT 9
-#define INTEL_TV_CLONE_BIT 10
-#define INTEL_DP_B_CLONE_BIT 11
-#define INTEL_DP_C_CLONE_BIT 12
-#define INTEL_DP_D_CLONE_BIT 13
-#define INTEL_LVDS_CLONE_BIT 14
-#define INTEL_DVO_TMDS_CLONE_BIT 15
-#define INTEL_DVO_LVDS_CLONE_BIT 16
-#define INTEL_EDP_CLONE_BIT 17
-
 #define INTEL_DVO_CHIP_NONE 0
 #define INTEL_DVO_CHIP_LVDS 1
 #define INTEL_DVO_CHIP_TMDS 2
@@ -151,16 +137,48 @@
 
 struct intel_encoder {
 	struct drm_encoder base;
+	/*
+	 * The new crtc this encoder will be driven from. Only differs from
+	 * base->crtc while a modeset is in progress.
+	 */
+	struct intel_crtc *new_crtc;
+
 	int type;
 	bool needs_tv_clock;
+	/*
+	 * Intel hw has only one MUX where encoders could be cloned, hence a
+	 * simple flag is enough to compute the possible_clones mask.
+	 */
+	bool cloneable;
+	bool connectors_active;
 	void (*hot_plug)(struct intel_encoder *);
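+	/* Modeset sequence hooks: pre_enable and enable are called in that
+	 * order when the pipe is brought up, disable and post_disable when
+	 * it is torn down. */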
+	void (*pre_enable)(struct intel_encoder *);
+	void (*enable)(struct intel_encoder *);
+	void (*disable)(struct intel_encoder *);
+	void (*post_disable)(struct intel_encoder *);
+	/* Read out the current hw state of this encoder, returning true if
+	 * the encoder is active. If the encoder is enabled it also sets the
+	 * pipe it is connected to in the pipe parameter. */
+	bool (*get_hw_state)(struct intel_encoder *, enum pipe *pipe);
 	int crtc_mask;
-	int clone_mask;
 };
 
 struct intel_connector {
 	struct drm_connector base;
+	/*
+	 * The fixed encoder this connector is connected to.
+	 */
 	struct intel_encoder *encoder;
+
+	/*
+	 * The new encoder this connector will be driven by. Only differs from
+	 * encoder while a modeset is in progress.
+	 */
+	struct intel_encoder *new_encoder;
+
+	/* Reads out the current hw state, returning true if the connector is
+	 * enabled and active (i.e. dpms ON state). */
+	bool (*get_hw_state)(struct intel_connector *);
 };
 
 struct intel_crtc {
@@ -168,11 +186,13 @@
 	enum pipe pipe;
 	enum plane plane;
 	u8 lut_r[256], lut_g[256], lut_b[256];
-	int dpms_mode;
-	bool active; /* is the crtc on? independent of the dpms mode */
+	/*
+	 * Whether the crtc and the connected output pipeline are active. Implies
+	 * that crtc->enabled is set, i.e. the current mode configuration has
+	 * some outputs connected to this crtc.
+	 */
+	bool active;
 	bool primary_disabled; /* is the crtc obscured by a plane? */
-	bool busy; /* is scanout buffer being updated frequently? */
-	struct timer_list idle_timer;
 	bool lowfreq_avail;
 	struct intel_overlay *overlay;
 	struct intel_unpin_work *unpin_work;
@@ -311,6 +331,37 @@
 			       struct drm_display_mode *adjusted_mode);
 };
 
+#define DP_RECEIVER_CAP_SIZE		0xf
+#define DP_LINK_CONFIGURATION_SIZE	9
+
+struct intel_dp {
+	struct intel_encoder base;
+	uint32_t output_reg;
+	uint32_t DP;
+	uint8_t  link_configuration[DP_LINK_CONFIGURATION_SIZE];
+	bool has_audio;
+	enum hdmi_force_audio force_audio;
+	enum port port;
+	uint32_t color_range;
+	uint8_t link_bw;
+	uint8_t lane_count;
+	uint8_t dpcd[DP_RECEIVER_CAP_SIZE];
+	struct i2c_adapter adapter;
+	struct i2c_algo_dp_aux_data algo;
+	bool is_pch_edp;
+	uint8_t train_set[4];
+	int panel_power_up_delay;
+	int panel_power_down_delay;
+	int panel_power_cycle_delay;
+	int backlight_on_delay;
+	int backlight_off_delay;
+	struct drm_display_mode *panel_fixed_mode;  /* for eDP */
+	struct delayed_work panel_vdd_work;
+	bool want_panel_vdd;
+	struct edid *edid; /* cached EDID for eDP */
+	int edid_mode_count;
+};
+
 static inline struct drm_crtc *
 intel_get_crtc_for_pipe(struct drm_device *dev, int pipe)
 {
@@ -350,17 +401,21 @@
 extern void intel_attach_broadcast_rgb_property(struct drm_connector *connector);
 
 extern void intel_crt_init(struct drm_device *dev);
-extern void intel_hdmi_init(struct drm_device *dev, int sdvox_reg);
+extern void intel_hdmi_init(struct drm_device *dev,
+			    int sdvox_reg, enum port port);
 extern struct intel_hdmi *enc_to_intel_hdmi(struct drm_encoder *encoder);
 extern void intel_dip_infoframe_csum(struct dip_infoframe *avi_if);
 extern bool intel_sdvo_init(struct drm_device *dev, uint32_t sdvo_reg,
 			    bool is_sdvob);
 extern void intel_dvo_init(struct drm_device *dev);
 extern void intel_tv_init(struct drm_device *dev);
-extern void intel_mark_busy(struct drm_device *dev,
-			    struct drm_i915_gem_object *obj);
+extern void intel_mark_busy(struct drm_device *dev);
+extern void intel_mark_idle(struct drm_device *dev);
+extern void intel_mark_fb_busy(struct drm_i915_gem_object *obj);
+extern void intel_mark_fb_idle(struct drm_i915_gem_object *obj);
 extern bool intel_lvds_init(struct drm_device *dev);
-extern void intel_dp_init(struct drm_device *dev, int dp_reg);
+extern void intel_dp_init(struct drm_device *dev, int output_reg,
+			  enum port port);
 void
 intel_dp_set_m_n(struct drm_crtc *crtc, struct drm_display_mode *mode,
 		 struct drm_display_mode *adjusted_mode);
@@ -373,8 +428,6 @@
 extern void intel_flush_display_plane(struct drm_i915_private *dev_priv,
 				      enum plane plane);
 
-void intel_sanitize_pm(struct drm_device *dev);
-
 /* intel_panel.c */
 extern void intel_fixed_panel_mode(struct drm_display_mode *fixed_mode,
 				   struct drm_display_mode *adjusted_mode);
@@ -391,10 +444,27 @@
 extern void intel_panel_destroy_backlight(struct drm_device *dev);
 extern enum drm_connector_status intel_panel_detect(struct drm_device *dev);
 
+struct intel_set_config {
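+	/* Connector->encoder and encoder->crtc links saved while staging a
+	 * modeset, so the previous routing can be restored on failure. */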
+	struct drm_encoder **save_connector_encoders;
+	struct drm_crtc **save_encoder_crtcs;
+
+	bool fb_changed;
+	bool mode_changed;
+};
+
+extern bool intel_set_mode(struct drm_crtc *crtc, struct drm_display_mode *mode,
+			   int x, int y, struct drm_framebuffer *old_fb);
+extern void intel_modeset_disable(struct drm_device *dev);
 extern void intel_crtc_load_lut(struct drm_crtc *crtc);
-extern void intel_encoder_prepare(struct drm_encoder *encoder);
-extern void intel_encoder_commit(struct drm_encoder *encoder);
+extern void intel_crtc_update_dpms(struct drm_crtc *crtc);
+extern void intel_encoder_noop(struct drm_encoder *encoder);
 extern void intel_encoder_destroy(struct drm_encoder *encoder);
+extern void intel_encoder_dpms(struct intel_encoder *encoder, int mode);
+extern bool intel_encoder_check_is_cloned(struct intel_encoder *encoder);
+extern void intel_connector_dpms(struct drm_connector *, int mode);
+extern bool intel_connector_get_hw_state(struct intel_connector *connector);
+extern void intel_modeset_check_state(struct drm_device *dev);
 
 static inline struct intel_encoder *intel_attached_encoder(struct drm_connector *connector)
 {
@@ -417,12 +487,10 @@
 	bool load_detect_temp;
 	int dpms_mode;
 };
-extern bool intel_get_load_detect_pipe(struct intel_encoder *intel_encoder,
-				       struct drm_connector *connector,
+extern bool intel_get_load_detect_pipe(struct drm_connector *connector,
 				       struct drm_display_mode *mode,
 				       struct intel_load_detect_pipe *old);
-extern void intel_release_load_detect_pipe(struct intel_encoder *intel_encoder,
-					   struct drm_connector *connector,
+extern void intel_release_load_detect_pipe(struct drm_connector *connector,
 					   struct intel_load_detect_pipe *old);
 
 extern void intelfb_restore(void);
@@ -503,7 +571,10 @@
 extern void gen6_gt_check_fifodbg(struct drm_i915_private *dev_priv);
 extern void ironlake_teardown_rc6(struct drm_device *dev);
 
-extern void intel_ddi_dpms(struct drm_encoder *encoder, int mode);
+extern void intel_enable_ddi(struct intel_encoder *encoder);
+extern void intel_disable_ddi(struct intel_encoder *encoder);
+extern bool intel_ddi_get_hw_state(struct intel_encoder *encoder,
+				   enum pipe *pipe);
 extern void intel_ddi_mode_set(struct drm_encoder *encoder,
 				struct drm_display_mode *mode,
 				struct drm_display_mode *adjusted_mode);
diff --git a/drivers/gpu/drm/i915/intel_dvo.c b/drivers/gpu/drm/i915/intel_dvo.c
index 36c542e..4f1fdcc 100644
--- a/drivers/gpu/drm/i915/intel_dvo.c
+++ b/drivers/gpu/drm/i915/intel_dvo.c
@@ -37,6 +37,7 @@
 #define SIL164_ADDR	0x38
 #define CH7xxx_ADDR	0x76
 #define TFP410_ADDR	0x38
+#define NS2501_ADDR	0x38
 
 static const struct intel_dvo_device intel_dvo_devices[] = {
 	{
@@ -74,7 +75,14 @@
 		.slave_addr = 0x75,
 		.gpio = GMBUS_PORT_DPB,
 		.dev_ops = &ch7017_ops,
-	}
+	},
+	{
+		.type = INTEL_DVO_CHIP_TMDS,
+		.name = "ns2501",
+		.dvo_reg = DVOC,
+		.slave_addr = NS2501_ADDR,
+		.dev_ops = &ns2501_ops,
+	}
 };
 
 struct intel_dvo {
@@ -97,22 +105,91 @@
 			    struct intel_dvo, base);
 }
 
-static void intel_dvo_dpms(struct drm_encoder *encoder, int mode)
+static bool intel_dvo_connector_get_hw_state(struct intel_connector *connector)
 {
-	struct drm_i915_private *dev_priv = encoder->dev->dev_private;
-	struct intel_dvo *intel_dvo = enc_to_intel_dvo(encoder);
+	struct intel_dvo *intel_dvo = intel_attached_dvo(&connector->base);
+
+	return intel_dvo->dev.dev_ops->get_hw_state(&intel_dvo->dev);
+}
+
+static bool intel_dvo_get_hw_state(struct intel_encoder *encoder,
+				   enum pipe *pipe)
+{
+	struct drm_device *dev = encoder->base.dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct intel_dvo *intel_dvo = enc_to_intel_dvo(&encoder->base);
+	u32 tmp;
+
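+	/* Both the enable bit and the pipe select bit live in the DVO
+	 * control register. */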
+	tmp = I915_READ(intel_dvo->dev.dvo_reg);
+
+	if (!(tmp & DVO_ENABLE))
+		return false;
+
+	*pipe = PORT_TO_PIPE(tmp);
+
+	return true;
+}
+
+static void intel_disable_dvo(struct intel_encoder *encoder)
+{
+	struct drm_i915_private *dev_priv = encoder->base.dev->dev_private;
+	struct intel_dvo *intel_dvo = enc_to_intel_dvo(&encoder->base);
 	u32 dvo_reg = intel_dvo->dev.dvo_reg;
 	u32 temp = I915_READ(dvo_reg);
 
-	if (mode == DRM_MODE_DPMS_ON) {
-		I915_WRITE(dvo_reg, temp | DVO_ENABLE);
-		I915_READ(dvo_reg);
-		intel_dvo->dev.dev_ops->dpms(&intel_dvo->dev, mode);
-	} else {
-		intel_dvo->dev.dev_ops->dpms(&intel_dvo->dev, mode);
-		I915_WRITE(dvo_reg, temp & ~DVO_ENABLE);
-		I915_READ(dvo_reg);
+	intel_dvo->dev.dev_ops->dpms(&intel_dvo->dev, false);
+	I915_WRITE(dvo_reg, temp & ~DVO_ENABLE);
+	I915_READ(dvo_reg);
+}
+
+static void intel_enable_dvo(struct intel_encoder *encoder)
+{
+	struct drm_i915_private *dev_priv = encoder->base.dev->dev_private;
+	struct intel_dvo *intel_dvo = enc_to_intel_dvo(&encoder->base);
+	u32 dvo_reg = intel_dvo->dev.dvo_reg;
+	u32 temp = I915_READ(dvo_reg);
+
+	I915_WRITE(dvo_reg, temp | DVO_ENABLE);
+	I915_READ(dvo_reg);
+	intel_dvo->dev.dev_ops->dpms(&intel_dvo->dev, true);
+}
+
+static void intel_dvo_dpms(struct drm_connector *connector, int mode)
+{
+	struct intel_dvo *intel_dvo = intel_attached_dvo(connector);
+	struct drm_crtc *crtc;
+
+	/* dvo supports only 2 dpms states. */
+	if (mode != DRM_MODE_DPMS_ON)
+		mode = DRM_MODE_DPMS_OFF;
+
+	if (mode == connector->dpms)
+		return;
+
+	connector->dpms = mode;
+
+	/* Only need to change hw state when actually enabled */
+	crtc = intel_dvo->base.base.crtc;
+	if (!crtc) {
+		intel_dvo->base.connectors_active = false;
+		return;
 	}
+
+	if (mode == DRM_MODE_DPMS_ON) {
+		intel_dvo->base.connectors_active = true;
+
+		intel_crtc_update_dpms(crtc);
+
+		intel_dvo->dev.dev_ops->dpms(&intel_dvo->dev, true);
+	} else {
+		intel_dvo->dev.dev_ops->dpms(&intel_dvo->dev, false);
+
+		intel_dvo->base.connectors_active = false;
+
+		intel_crtc_update_dpms(crtc);
+	}
+
+	intel_modeset_check_state(connector->dev);
 }
 
 static int intel_dvo_mode_valid(struct drm_connector *connector,
@@ -267,15 +344,13 @@
 }
 
 static const struct drm_encoder_helper_funcs intel_dvo_helper_funcs = {
-	.dpms = intel_dvo_dpms,
 	.mode_fixup = intel_dvo_mode_fixup,
-	.prepare = intel_encoder_prepare,
 	.mode_set = intel_dvo_mode_set,
-	.commit = intel_encoder_commit,
+	.disable = intel_encoder_noop,
 };
 
 static const struct drm_connector_funcs intel_dvo_connector_funcs = {
-	.dpms = drm_helper_connector_dpms,
+	.dpms = intel_dvo_dpms,
 	.detect = intel_dvo_detect,
 	.destroy = intel_dvo_destroy,
 	.fill_modes = drm_helper_probe_single_connector_modes,
@@ -364,6 +439,11 @@
 	drm_encoder_init(dev, &intel_encoder->base,
 			 &intel_dvo_enc_funcs, encoder_type);
 
+	intel_encoder->disable = intel_disable_dvo;
+	intel_encoder->enable = intel_enable_dvo;
+	intel_encoder->get_hw_state = intel_dvo_get_hw_state;
+	intel_connector->get_hw_state = intel_dvo_connector_get_hw_state;
+
 	/* Now, try to find a controller */
 	for (i = 0; i < ARRAY_SIZE(intel_dvo_devices); i++) {
 		struct drm_connector *connector = &intel_connector->base;
@@ -396,17 +476,14 @@
 		intel_encoder->crtc_mask = (1 << 0) | (1 << 1);
 		switch (dvo->type) {
 		case INTEL_DVO_CHIP_TMDS:
-			intel_encoder->clone_mask =
-				(1 << INTEL_DVO_TMDS_CLONE_BIT) |
-				(1 << INTEL_ANALOG_CLONE_BIT);
+			intel_encoder->cloneable = true;
 			drm_connector_init(dev, connector,
 					   &intel_dvo_connector_funcs,
 					   DRM_MODE_CONNECTOR_DVII);
 			encoder_type = DRM_MODE_ENCODER_TMDS;
 			break;
 		case INTEL_DVO_CHIP_LVDS:
-			intel_encoder->clone_mask =
-				(1 << INTEL_DVO_LVDS_CLONE_BIT);
+			intel_encoder->cloneable = false;
 			drm_connector_init(dev, connector,
 					   &intel_dvo_connector_funcs,
 					   DRM_MODE_CONNECTOR_LVDS);
diff --git a/drivers/gpu/drm/i915/intel_hdmi.c b/drivers/gpu/drm/i915/intel_hdmi.c
index 12dc330..f9fb47c 100644
--- a/drivers/gpu/drm/i915/intel_hdmi.c
+++ b/drivers/gpu/drm/i915/intel_hdmi.c
@@ -377,6 +377,7 @@
 		port = VIDEO_DIP_PORT_C;
 		break;
 	default:
+		BUG();
 		return;
 	}
 
@@ -435,6 +436,7 @@
 		port = VIDEO_DIP_PORT_D;
 		break;
 	default:
+		BUG();
 		return;
 	}
 
@@ -601,15 +603,36 @@
 	intel_hdmi->set_infoframes(encoder, adjusted_mode);
 }
 
-static void intel_hdmi_dpms(struct drm_encoder *encoder, int mode)
+static bool intel_hdmi_get_hw_state(struct intel_encoder *encoder,
+				    enum pipe *pipe)
 {
-	struct drm_device *dev = encoder->dev;
+	struct drm_device *dev = encoder->base.dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
-	struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder);
+	struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(&encoder->base);
+	u32 tmp;
+
+	tmp = I915_READ(intel_hdmi->sdvox_reg);
+
+	if (!(tmp & SDVO_ENABLE))
+		return false;
+
+	if (HAS_PCH_CPT(dev))
+		*pipe = PORT_TO_PIPE_CPT(tmp);
+	else
+		*pipe = PORT_TO_PIPE(tmp);
+
+	return true;
+}
+
+static void intel_enable_hdmi(struct intel_encoder *encoder)
+{
+	struct drm_device *dev = encoder->base.dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(&encoder->base);
 	u32 temp;
 	u32 enable_bits = SDVO_ENABLE;
 
-	if (intel_hdmi->has_audio || mode != DRM_MODE_DPMS_ON)
+	if (intel_hdmi->has_audio)
 		enable_bits |= SDVO_AUDIO_ENABLE;
 
 	temp = I915_READ(intel_hdmi->sdvox_reg);
@@ -617,30 +640,67 @@
 	/* HW workaround for IBX, we need to move the port to transcoder A
 	 * before disabling it. */
 	if (HAS_PCH_IBX(dev)) {
-		struct drm_crtc *crtc = encoder->crtc;
+		struct drm_crtc *crtc = encoder->base.crtc;
 		int pipe = crtc ? to_intel_crtc(crtc)->pipe : -1;
 
-		if (mode != DRM_MODE_DPMS_ON) {
-			if (temp & SDVO_PIPE_B_SELECT) {
-				temp &= ~SDVO_PIPE_B_SELECT;
-				I915_WRITE(intel_hdmi->sdvox_reg, temp);
-				POSTING_READ(intel_hdmi->sdvox_reg);
+		/* Restore the transcoder select bit. */
+		if (pipe == PIPE_B)
+			enable_bits |= SDVO_PIPE_B_SELECT;
+	}
 
-				/* Again we need to write this twice. */
-				I915_WRITE(intel_hdmi->sdvox_reg, temp);
-				POSTING_READ(intel_hdmi->sdvox_reg);
+	/* HW workaround, need to toggle enable bit off and on for 12bpc, but
+	 * we do this unconditionally since it has proven more stable in testing.
+	 */
+	if (HAS_PCH_SPLIT(dev)) {
+		I915_WRITE(intel_hdmi->sdvox_reg, temp & ~SDVO_ENABLE);
+		POSTING_READ(intel_hdmi->sdvox_reg);
+	}
 
-				/* Transcoder selection bits only update
-				 * effectively on vblank. */
-				if (crtc)
-					intel_wait_for_vblank(dev, pipe);
-				else
-					msleep(50);
-			}
-		} else {
-			/* Restore the transcoder select bit. */
-			if (pipe == PIPE_B)
-				enable_bits |= SDVO_PIPE_B_SELECT;
+	temp |= enable_bits;
+
+	I915_WRITE(intel_hdmi->sdvox_reg, temp);
+	POSTING_READ(intel_hdmi->sdvox_reg);
+
+	/* HW workaround, need to write this twice to work around an issue
+	 * where the first write may get masked.
+	 */
+	if (HAS_PCH_SPLIT(dev)) {
+		I915_WRITE(intel_hdmi->sdvox_reg, temp);
+		POSTING_READ(intel_hdmi->sdvox_reg);
+	}
+}
+
+static void intel_disable_hdmi(struct intel_encoder *encoder)
+{
+	struct drm_device *dev = encoder->base.dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(&encoder->base);
+	u32 temp;
+	u32 enable_bits = SDVO_ENABLE | SDVO_AUDIO_ENABLE;
+
+	temp = I915_READ(intel_hdmi->sdvox_reg);
+
+	/* HW workaround for IBX, we need to move the port to transcoder A
+	 * before disabling it. */
+	if (HAS_PCH_IBX(dev)) {
+		struct drm_crtc *crtc = encoder->base.crtc;
+		int pipe = crtc ? to_intel_crtc(crtc)->pipe : -1;
+
+		if (temp & SDVO_PIPE_B_SELECT) {
+			temp &= ~SDVO_PIPE_B_SELECT;
+			I915_WRITE(intel_hdmi->sdvox_reg, temp);
+			POSTING_READ(intel_hdmi->sdvox_reg);
+
+			/* Again we need to write this twice. */
+			I915_WRITE(intel_hdmi->sdvox_reg, temp);
+			POSTING_READ(intel_hdmi->sdvox_reg);
+
+			/* Transcoder selection bits only update
+			 * effectively on vblank. */
+			if (crtc)
+				intel_wait_for_vblank(dev, pipe);
+			else
+				msleep(50);
 		}
 	}
 
@@ -652,11 +712,7 @@
 		POSTING_READ(intel_hdmi->sdvox_reg);
 	}
 
-	if (mode != DRM_MODE_DPMS_ON) {
-		temp &= ~enable_bits;
-	} else {
-		temp |= enable_bits;
-	}
+	temp &= ~enable_bits;
 
 	I915_WRITE(intel_hdmi->sdvox_reg, temp);
 	POSTING_READ(intel_hdmi->sdvox_reg);
@@ -737,7 +793,6 @@
 						drm_detect_hdmi_monitor(edid);
 			intel_hdmi->has_audio = drm_detect_monitor_audio(edid);
 		}
-		connector->display_info.raw_edid = NULL;
 		kfree(edid);
 	}
 
@@ -778,8 +833,6 @@
 	if (edid) {
 		if (edid->input & DRM_EDID_INPUT_DIGITAL)
 			has_audio = drm_detect_monitor_audio(edid);
-
-		connector->display_info.raw_edid = NULL;
 		kfree(edid);
 	}
 
@@ -833,9 +886,8 @@
 done:
 	if (intel_hdmi->base.base.crtc) {
 		struct drm_crtc *crtc = intel_hdmi->base.base.crtc;
-		drm_crtc_helper_set_mode(crtc, &crtc->mode,
-					 crtc->x, crtc->y,
-					 crtc->fb);
+		intel_set_mode(crtc, &crtc->mode,
+			       crtc->x, crtc->y, crtc->fb);
 	}
 
 	return 0;
@@ -849,23 +901,19 @@
 }
 
 static const struct drm_encoder_helper_funcs intel_hdmi_helper_funcs_hsw = {
-	.dpms = intel_ddi_dpms,
 	.mode_fixup = intel_hdmi_mode_fixup,
-	.prepare = intel_encoder_prepare,
 	.mode_set = intel_ddi_mode_set,
-	.commit = intel_encoder_commit,
+	.disable = intel_encoder_noop,
 };
 
 static const struct drm_encoder_helper_funcs intel_hdmi_helper_funcs = {
-	.dpms = intel_hdmi_dpms,
 	.mode_fixup = intel_hdmi_mode_fixup,
-	.prepare = intel_encoder_prepare,
 	.mode_set = intel_hdmi_mode_set,
-	.commit = intel_encoder_commit,
+	.disable = intel_encoder_noop,
 };
 
 static const struct drm_connector_funcs intel_hdmi_connector_funcs = {
-	.dpms = drm_helper_connector_dpms,
+	.dpms = intel_connector_dpms,
 	.detect = intel_hdmi_detect,
 	.fill_modes = drm_helper_probe_single_connector_modes,
 	.set_property = intel_hdmi_set_property,
@@ -889,7 +937,7 @@
 	intel_attach_broadcast_rgb_property(connector);
 }
 
-void intel_hdmi_init(struct drm_device *dev, int sdvox_reg)
+void intel_hdmi_init(struct drm_device *dev, int sdvox_reg, enum port port)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct drm_connector *connector;
@@ -923,48 +971,25 @@
 	connector->doublescan_allowed = 0;
 	intel_encoder->crtc_mask = (1 << 0) | (1 << 1) | (1 << 2);
 
-	/* Set up the DDC bus. */
-	if (sdvox_reg == SDVOB) {
-		intel_encoder->clone_mask = (1 << INTEL_HDMIB_CLONE_BIT);
+	intel_encoder->cloneable = false;
+
+	intel_hdmi->ddi_port = port;
+	switch (port) {
+	case PORT_B:
 		intel_hdmi->ddc_bus = GMBUS_PORT_DPB;
 		dev_priv->hotplug_supported_mask |= HDMIB_HOTPLUG_INT_STATUS;
-	} else if (sdvox_reg == SDVOC) {
-		intel_encoder->clone_mask = (1 << INTEL_HDMIC_CLONE_BIT);
+		break;
+	case PORT_C:
 		intel_hdmi->ddc_bus = GMBUS_PORT_DPC;
 		dev_priv->hotplug_supported_mask |= HDMIC_HOTPLUG_INT_STATUS;
-	} else if (sdvox_reg == HDMIB) {
-		intel_encoder->clone_mask = (1 << INTEL_HDMID_CLONE_BIT);
-		intel_hdmi->ddc_bus = GMBUS_PORT_DPB;
-		dev_priv->hotplug_supported_mask |= HDMIB_HOTPLUG_INT_STATUS;
-	} else if (sdvox_reg == HDMIC) {
-		intel_encoder->clone_mask = (1 << INTEL_HDMIE_CLONE_BIT);
-		intel_hdmi->ddc_bus = GMBUS_PORT_DPC;
-		dev_priv->hotplug_supported_mask |= HDMIC_HOTPLUG_INT_STATUS;
-	} else if (sdvox_reg == HDMID) {
-		intel_encoder->clone_mask = (1 << INTEL_HDMIF_CLONE_BIT);
+		break;
+	case PORT_D:
 		intel_hdmi->ddc_bus = GMBUS_PORT_DPD;
 		dev_priv->hotplug_supported_mask |= HDMID_HOTPLUG_INT_STATUS;
-	} else if (sdvox_reg == DDI_BUF_CTL(PORT_B)) {
-		DRM_DEBUG_DRIVER("LPT: detected output on DDI B\n");
-		intel_encoder->clone_mask = (1 << INTEL_HDMIB_CLONE_BIT);
-		intel_hdmi->ddc_bus = GMBUS_PORT_DPB;
-		intel_hdmi->ddi_port = PORT_B;
-		dev_priv->hotplug_supported_mask |= HDMIB_HOTPLUG_INT_STATUS;
-	} else if (sdvox_reg == DDI_BUF_CTL(PORT_C)) {
-		DRM_DEBUG_DRIVER("LPT: detected output on DDI C\n");
-		intel_encoder->clone_mask = (1 << INTEL_HDMIC_CLONE_BIT);
-		intel_hdmi->ddc_bus = GMBUS_PORT_DPC;
-		intel_hdmi->ddi_port = PORT_C;
-		dev_priv->hotplug_supported_mask |= HDMIC_HOTPLUG_INT_STATUS;
-	} else if (sdvox_reg == DDI_BUF_CTL(PORT_D)) {
-		DRM_DEBUG_DRIVER("LPT: detected output on DDI D\n");
-		intel_encoder->clone_mask = (1 << INTEL_HDMID_CLONE_BIT);
-		intel_hdmi->ddc_bus = GMBUS_PORT_DPD;
-		intel_hdmi->ddi_port = PORT_D;
-		dev_priv->hotplug_supported_mask |= HDMID_HOTPLUG_INT_STATUS;
-	} else {
-		/* If we got an unknown sdvox_reg, things are pretty much broken
-		 * in a way that we should let the kernel know about it */
+		break;
+	case PORT_A:
+		/* Internal port only for eDP. */
+	default:
 		BUG();
 	}
 
@@ -987,10 +1012,21 @@
 		intel_hdmi->set_infoframes = cpt_set_infoframes;
 	}
 
-	if (IS_HASWELL(dev))
-		drm_encoder_helper_add(&intel_encoder->base, &intel_hdmi_helper_funcs_hsw);
-	else
-		drm_encoder_helper_add(&intel_encoder->base, &intel_hdmi_helper_funcs);
+	if (IS_HASWELL(dev)) {
+		intel_encoder->enable = intel_enable_ddi;
+		intel_encoder->disable = intel_disable_ddi;
+		intel_encoder->get_hw_state = intel_ddi_get_hw_state;
+		drm_encoder_helper_add(&intel_encoder->base,
+				       &intel_hdmi_helper_funcs_hsw);
+	} else {
+		intel_encoder->enable = intel_enable_hdmi;
+		intel_encoder->disable = intel_disable_hdmi;
+		intel_encoder->get_hw_state = intel_hdmi_get_hw_state;
+		drm_encoder_helper_add(&intel_encoder->base,
+				       &intel_hdmi_helper_funcs);
+	}
+	intel_connector->get_hw_state = intel_connector_get_hw_state;
 
 	intel_hdmi_add_properties(intel_hdmi, connector);
 
diff --git a/drivers/gpu/drm/i915/intel_lvds.c b/drivers/gpu/drm/i915/intel_lvds.c
index e9a6f6a..40d72bd 100644
--- a/drivers/gpu/drm/i915/intel_lvds.c
+++ b/drivers/gpu/drm/i915/intel_lvds.c
@@ -65,13 +65,40 @@
 			    struct intel_lvds, base);
 }
 
+static bool intel_lvds_get_hw_state(struct intel_encoder *encoder,
+				    enum pipe *pipe)
+{
+	struct drm_device *dev = encoder->base.dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	u32 lvds_reg, tmp;
+
+	if (HAS_PCH_SPLIT(dev)) {
+		lvds_reg = PCH_LVDS;
+	} else {
+		lvds_reg = LVDS;
+	}
+
+	tmp = I915_READ(lvds_reg);
+
+	if (!(tmp & LVDS_PORT_EN))
+		return false;
+
+	if (HAS_PCH_CPT(dev))
+		*pipe = PORT_TO_PIPE_CPT(tmp);
+	else
+		*pipe = PORT_TO_PIPE(tmp);
+
+	return true;
+}
+
 /**
  * Sets the power state for the panel.
  */
-static void intel_lvds_enable(struct intel_lvds *intel_lvds)
+static void intel_enable_lvds(struct intel_encoder *encoder)
 {
-	struct drm_device *dev = intel_lvds->base.base.dev;
-	struct intel_crtc *intel_crtc = to_intel_crtc(intel_lvds->base.base.crtc);
+	struct drm_device *dev = encoder->base.dev;
+	struct intel_lvds *intel_lvds = to_intel_lvds(&encoder->base);
+	struct intel_crtc *intel_crtc = to_intel_crtc(encoder->base.crtc);
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	u32 ctl_reg, lvds_reg, stat_reg;
 
@@ -111,9 +138,10 @@
 	intel_panel_enable_backlight(dev, intel_crtc->pipe);
 }
 
-static void intel_lvds_disable(struct intel_lvds *intel_lvds)
+static void intel_disable_lvds(struct intel_encoder *encoder)
 {
-	struct drm_device *dev = intel_lvds->base.base.dev;
+	struct drm_device *dev = encoder->base.dev;
+	struct intel_lvds *intel_lvds = to_intel_lvds(&encoder->base);
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	u32 ctl_reg, lvds_reg, stat_reg;
 
@@ -142,18 +170,6 @@
 	POSTING_READ(lvds_reg);
 }
 
-static void intel_lvds_dpms(struct drm_encoder *encoder, int mode)
-{
-	struct intel_lvds *intel_lvds = to_intel_lvds(encoder);
-
-	if (mode == DRM_MODE_DPMS_ON)
-		intel_lvds_enable(intel_lvds);
-	else
-		intel_lvds_disable(intel_lvds);
-
-	/* XXX: We never power down the LVDS pairs. */
-}
-
 static int intel_lvds_mode_valid(struct drm_connector *connector,
 				 struct drm_display_mode *mode)
 {
@@ -234,9 +250,8 @@
 {
 	struct drm_device *dev = encoder->dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
-	struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc);
 	struct intel_lvds *intel_lvds = to_intel_lvds(encoder);
-	struct intel_encoder *tmp_encoder;
+	struct intel_crtc *intel_crtc = intel_lvds->base.new_crtc;
 	u32 pfit_control = 0, pfit_pgm_ratios = 0, border = 0;
 	int pipe;
 
@@ -246,14 +261,8 @@
 		return false;
 	}
 
-	/* Should never happen!! */
-	for_each_encoder_on_crtc(dev, encoder->crtc, tmp_encoder) {
-		if (&tmp_encoder->base != encoder) {
-			DRM_ERROR("Can't enable LVDS and another "
-			       "encoder on the same pipe\n");
-			return false;
-		}
-	}
+	if (intel_encoder_check_is_cloned(&intel_lvds->base))
+		return false;
 
 	/*
 	 * We have timings from the BIOS for the panel, put them in
@@ -405,23 +414,6 @@
 	return true;
 }
 
-static void intel_lvds_prepare(struct drm_encoder *encoder)
-{
-	struct intel_lvds *intel_lvds = to_intel_lvds(encoder);
-
-	intel_lvds_disable(intel_lvds);
-}
-
-static void intel_lvds_commit(struct drm_encoder *encoder)
-{
-	struct intel_lvds *intel_lvds = to_intel_lvds(encoder);
-
-	/* Always do a full power on as we do not know what state
-	 * we were left in.
-	 */
-	intel_lvds_enable(intel_lvds);
-}
-
 static void intel_lvds_mode_set(struct drm_encoder *encoder,
 				struct drm_display_mode *mode,
 				struct drm_display_mode *adjusted_mode)
@@ -535,7 +527,7 @@
 	dev_priv->modeset_on_lid = 0;
 
 	mutex_lock(&dev->mode_config.mutex);
-	drm_helper_resume_force_mode(dev);
+	intel_modeset_check_state(dev);
 	mutex_unlock(&dev->mode_config.mutex);
 
 	return NOTIFY_OK;
@@ -587,8 +579,8 @@
 			 * If the CRTC is enabled, the display will be changed
 			 * according to the new panel fitting mode.
 			 */
-			drm_crtc_helper_set_mode(crtc, &crtc->mode,
-				crtc->x, crtc->y, crtc->fb);
+			intel_set_mode(crtc, &crtc->mode,
+				       crtc->x, crtc->y, crtc->fb);
 		}
 	}
 
@@ -596,11 +588,9 @@
 }
 
 static const struct drm_encoder_helper_funcs intel_lvds_helper_funcs = {
-	.dpms = intel_lvds_dpms,
 	.mode_fixup = intel_lvds_mode_fixup,
-	.prepare = intel_lvds_prepare,
 	.mode_set = intel_lvds_mode_set,
-	.commit = intel_lvds_commit,
+	.disable = intel_encoder_noop,
 };
 
 static const struct drm_connector_helper_funcs intel_lvds_connector_helper_funcs = {
@@ -610,7 +600,7 @@
 };
 
 static const struct drm_connector_funcs intel_lvds_connector_funcs = {
-	.dpms = drm_helper_connector_dpms,
+	.dpms = intel_connector_dpms,
 	.detect = intel_lvds_detect,
 	.fill_modes = drm_helper_probe_single_connector_modes,
 	.set_property = intel_lvds_set_property,
@@ -972,10 +962,15 @@
 	drm_encoder_init(dev, &intel_encoder->base, &intel_lvds_enc_funcs,
 			 DRM_MODE_ENCODER_LVDS);
 
+	intel_encoder->enable = intel_enable_lvds;
+	intel_encoder->disable = intel_disable_lvds;
+	intel_encoder->get_hw_state = intel_lvds_get_hw_state;
+	intel_connector->get_hw_state = intel_connector_get_hw_state;
+
 	intel_connector_attach_encoder(intel_connector, intel_encoder);
 	intel_encoder->type = INTEL_OUTPUT_LVDS;
 
-	intel_encoder->clone_mask = (1 << INTEL_LVDS_CLONE_BIT);
+	intel_encoder->cloneable = false;
 	if (HAS_PCH_SPLIT(dev))
 		intel_encoder->crtc_mask = (1 << 0) | (1 << 1) | (1 << 2);
 	else if (IS_GEN4(dev))
diff --git a/drivers/gpu/drm/i915/intel_modes.c b/drivers/gpu/drm/i915/intel_modes.c
index 29b7259..4bc1c0f 100644
--- a/drivers/gpu/drm/i915/intel_modes.c
+++ b/drivers/gpu/drm/i915/intel_modes.c
@@ -45,7 +45,6 @@
 	drm_mode_connector_update_edid_property(connector, edid);
 	ret = drm_add_edid_modes(connector, edid);
 	drm_edid_to_eld(connector, edid);
-	connector->display_info.raw_edid = NULL;
 	kfree(edid);
 
 	return ret;
diff --git a/drivers/gpu/drm/i915/intel_opregion.c b/drivers/gpu/drm/i915/intel_opregion.c
index 18bd0af..e27c170 100644
--- a/drivers/gpu/drm/i915/intel_opregion.c
+++ b/drivers/gpu/drm/i915/intel_opregion.c
@@ -427,6 +427,25 @@
 	goto end;
 }
 
+static void intel_setup_cadls(struct drm_device *dev)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct intel_opregion *opregion = &dev_priv->opregion;
+	int i = 0;
+	u32 disp_id;
+
+	/* Initialize the CADL field by duplicating the DIDL values.
+	 * Technically, this is not always correct as display outputs may exist
+	 * but not be active. This initialization is necessary for some Clevo
+	 * laptops that check this field before processing the brightness and
+	 * display switching hotkeys. Just like DIDL, CADL is NULL-terminated if
+	 * there are fewer than eight devices. */
+	do {
+		disp_id = ioread32(&opregion->acpi->didl[i]);
+		iowrite32(disp_id, &opregion->acpi->cadl[i]);
+	} while (++i < 8 && disp_id != 0);
+}
+
 void intel_opregion_init(struct drm_device *dev)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
@@ -436,8 +455,10 @@
 		return;
 
 	if (opregion->acpi) {
-		if (drm_core_check_feature(dev, DRIVER_MODESET))
+		if (drm_core_check_feature(dev, DRIVER_MODESET)) {
 			intel_didl_outputs(dev);
+			intel_setup_cadls(dev);
+		}
 
 		/* Notify BIOS we are ready to handle ACPI video ext notifs.
 		 * Right now, all the events are handled by the ACPI video module.
diff --git a/drivers/gpu/drm/i915/intel_overlay.c b/drivers/gpu/drm/i915/intel_overlay.c
index 830d0dd..afd0f30 100644
--- a/drivers/gpu/drm/i915/intel_overlay.c
+++ b/drivers/gpu/drm/i915/intel_overlay.c
@@ -235,54 +235,6 @@
 	return 0;
 }
 
-/* Workaround for i830 bug where pipe a must be enable to change control regs */
-static int
-i830_activate_pipe_a(struct drm_device *dev)
-{
-	drm_i915_private_t *dev_priv = dev->dev_private;
-	struct intel_crtc *crtc;
-	struct drm_crtc_helper_funcs *crtc_funcs;
-	struct drm_display_mode vesa_640x480 = {
-		DRM_MODE("640x480", DRM_MODE_TYPE_DRIVER, 25175, 640, 656,
-			 752, 800, 0, 480, 489, 492, 525, 0,
-			 DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC)
-	}, *mode;
-
-	crtc = to_intel_crtc(dev_priv->pipe_to_crtc_mapping[0]);
-	if (crtc->dpms_mode == DRM_MODE_DPMS_ON)
-		return 0;
-
-	/* most i8xx have pipe a forced on, so don't trust dpms mode */
-	if (I915_READ(_PIPEACONF) & PIPECONF_ENABLE)
-		return 0;
-
-	crtc_funcs = crtc->base.helper_private;
-	if (crtc_funcs->dpms == NULL)
-		return 0;
-
-	DRM_DEBUG_DRIVER("Enabling pipe A in order to enable overlay\n");
-
-	mode = drm_mode_duplicate(dev, &vesa_640x480);
-
-	if (!drm_crtc_helper_set_mode(&crtc->base, mode,
-				       crtc->base.x, crtc->base.y,
-				       crtc->base.fb))
-		return 0;
-
-	crtc_funcs->dpms(&crtc->base, DRM_MODE_DPMS_ON);
-	return 1;
-}
-
-static void
-i830_deactivate_pipe_a(struct drm_device *dev)
-{
-	drm_i915_private_t *dev_priv = dev->dev_private;
-	struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[0];
-	struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private;
-
-	crtc_funcs->dpms(crtc, DRM_MODE_DPMS_OFF);
-}
-
 /* overlay needs to be disabled in OCMD reg */
 static int intel_overlay_on(struct intel_overlay *overlay)
 {
@@ -290,17 +242,12 @@
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct intel_ring_buffer *ring = &dev_priv->ring[RCS];
 	struct drm_i915_gem_request *request;
-	int pipe_a_quirk = 0;
 	int ret;
 
 	BUG_ON(overlay->active);
 	overlay->active = 1;
 
-	if (IS_I830(dev)) {
-		pipe_a_quirk = i830_activate_pipe_a(dev);
-		if (pipe_a_quirk < 0)
-			return pipe_a_quirk;
-	}
+	WARN_ON(IS_I830(dev) && !(dev_priv->quirks & QUIRK_PIPEA_FORCE));
 
 	request = kzalloc(sizeof(*request), GFP_KERNEL);
 	if (request == NULL) {
@@ -322,9 +269,6 @@
 
 	ret = intel_overlay_do_wait_request(overlay, request, NULL);
 out:
-	if (pipe_a_quirk)
-		i830_deactivate_pipe_a(dev);
-
 	return ret;
 }
 
@@ -1439,7 +1383,7 @@
 		}
 		overlay->flip_addr = reg_bo->phys_obj->handle->busaddr;
 	} else {
-		ret = i915_gem_object_pin(reg_bo, PAGE_SIZE, true);
+		ret = i915_gem_object_pin(reg_bo, PAGE_SIZE, true, false);
 		if (ret) {
 			DRM_ERROR("failed to pin overlay register bo\n");
 			goto out_free_bo;
diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c
index ba8a27b..a3e4f8b 100644
--- a/drivers/gpu/drm/i915/intel_pm.c
+++ b/drivers/gpu/drm/i915/intel_pm.c
@@ -31,6 +31,8 @@
 #include "../../../platform/x86/intel_ips.h"
 #include <linux/module.h>
 
+#define FORCEWAKE_ACK_TIMEOUT_MS 2
+
 /* FBC, or Frame Buffer Compression, is a technique employed to compress the
  * framebuffer contents in-memory, aiming at reducing the required bandwidth
  * during in-memory transfers and, therefore, reduce the power packet.
@@ -593,7 +595,7 @@
 		break;
 	}
 
-	dev_priv->r_t = dev_priv->mem_freq;
+	dev_priv->ips.r_t = dev_priv->mem_freq;
 
 	switch (csipll & 0x3ff) {
 	case 0x00c:
@@ -625,11 +627,11 @@
 	}
 
 	if (dev_priv->fsb_freq == 3200) {
-		dev_priv->c_m = 0;
+		dev_priv->ips.c_m = 0;
 	} else if (dev_priv->fsb_freq > 3200 && dev_priv->fsb_freq <= 4800) {
-		dev_priv->c_m = 1;
+		dev_priv->ips.c_m = 1;
 	} else {
-		dev_priv->c_m = 2;
+		dev_priv->ips.c_m = 2;
 	}
 }
 
@@ -2138,7 +2140,7 @@
 		return NULL;
 	}
 
-	ret = i915_gem_object_pin(ctx, 4096, true);
+	ret = i915_gem_object_pin(ctx, 4096, true, false);
 	if (ret) {
 		DRM_ERROR("failed to pin power context: %d\n", ret);
 		goto err_unref;
@@ -2160,11 +2162,22 @@
 	return NULL;
 }
 
+/*
+ * Lock protecting IPS-related data structures
+ */
+DEFINE_SPINLOCK(mchdev_lock);
+
+/* Global for IPS driver to get at the current i915 device. Protected by
+ * mchdev_lock. */
+static struct drm_i915_private *i915_mch_dev;
+
 bool ironlake_set_drps(struct drm_device *dev, u8 val)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	u16 rgvswctl;
 
+	assert_spin_locked(&mchdev_lock);
+
 	rgvswctl = I915_READ16(MEMSWCTL);
 	if (rgvswctl & MEMCTL_CMD_STS) {
 		DRM_DEBUG("gpu busy, RCS change rejected\n");
@@ -2188,6 +2201,8 @@
 	u32 rgvmodectl = I915_READ(MEMMODECTL);
 	u8 fmax, fmin, fstart, vstart;
 
+	spin_lock_irq(&mchdev_lock);
+
 	/* Enable temp reporting */
 	I915_WRITE16(PMMISC, I915_READ(PMMISC) | MCPPCE_EN);
 	I915_WRITE16(TSC1, I915_READ(TSC1) | TSE);
@@ -2211,12 +2226,12 @@
 	vstart = (I915_READ(PXVFREQ_BASE + (fstart * 4)) & PXVFREQ_PX_MASK) >>
 		PXVFREQ_PX_SHIFT;
 
-	dev_priv->fmax = fmax; /* IPS callback will increase this */
-	dev_priv->fstart = fstart;
+	dev_priv->ips.fmax = fmax; /* IPS callback will increase this */
+	dev_priv->ips.fstart = fstart;
 
-	dev_priv->max_delay = fstart;
-	dev_priv->min_delay = fmin;
-	dev_priv->cur_delay = fstart;
+	dev_priv->ips.max_delay = fstart;
+	dev_priv->ips.min_delay = fmin;
+	dev_priv->ips.cur_delay = fstart;
 
 	DRM_DEBUG_DRIVER("fmax: %d, fmin: %d, fstart: %d\n",
 			 fmax, fmin, fstart);
@@ -2233,23 +2248,29 @@
 	rgvmodectl |= MEMMODE_SWMODE_EN;
 	I915_WRITE(MEMMODECTL, rgvmodectl);
 
-	if (wait_for((I915_READ(MEMSWCTL) & MEMCTL_CMD_STS) == 0, 10))
+	if (wait_for_atomic((I915_READ(MEMSWCTL) & MEMCTL_CMD_STS) == 0, 10))
 		DRM_ERROR("stuck trying to change perf mode\n");
-	msleep(1);
+	mdelay(1);
 
 	ironlake_set_drps(dev, fstart);
 
-	dev_priv->last_count1 = I915_READ(0x112e4) + I915_READ(0x112e8) +
+	dev_priv->ips.last_count1 = I915_READ(0x112e4) + I915_READ(0x112e8) +
 		I915_READ(0x112e0);
-	dev_priv->last_time1 = jiffies_to_msecs(jiffies);
-	dev_priv->last_count2 = I915_READ(0x112f4);
-	getrawmonotonic(&dev_priv->last_time2);
+	dev_priv->ips.last_time1 = jiffies_to_msecs(jiffies);
+	dev_priv->ips.last_count2 = I915_READ(0x112f4);
+	getrawmonotonic(&dev_priv->ips.last_time2);
+
+	spin_unlock_irq(&mchdev_lock);
 }
 
 static void ironlake_disable_drps(struct drm_device *dev)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
-	u16 rgvswctl = I915_READ16(MEMSWCTL);
+	u16 rgvswctl;
+
+	spin_lock_irq(&mchdev_lock);
+
+	rgvswctl = I915_READ16(MEMSWCTL);
 
 	/* Ack interrupts, disable EFC interrupt */
 	I915_WRITE(MEMINTREN, I915_READ(MEMINTREN) & ~MEMINT_EVAL_CHG_EN);
@@ -2259,31 +2280,54 @@
 	I915_WRITE(DEIMR, I915_READ(DEIMR) | DE_PCU_EVENT);
 
 	/* Go back to the starting frequency */
-	ironlake_set_drps(dev, dev_priv->fstart);
-	msleep(1);
+	ironlake_set_drps(dev, dev_priv->ips.fstart);
+	mdelay(1);
 	rgvswctl |= MEMCTL_CMD_STS;
 	I915_WRITE(MEMSWCTL, rgvswctl);
-	msleep(1);
+	mdelay(1);
 
+	spin_unlock_irq(&mchdev_lock);
+}
+
+/* There's a funny hw issue where the hw returns all 0 when reading from
+ * GEN6_RP_INTERRUPT_LIMITS. Hence we always need to compute the desired value
+ * ourselves, instead of doing an rmw cycle (which might result in us
+ * clearing all limits and the gpu getting stuck at its current frequency).
+ */
+static u32 gen6_rps_limits(struct drm_i915_private *dev_priv, u8 *val)
+{
+	u32 limits;
+
+	limits = 0;
+
+	if (*val >= dev_priv->rps.max_delay)
+		*val = dev_priv->rps.max_delay;
+	limits |= dev_priv->rps.max_delay << 24;
+
+	/* Only set the down limit when we've reached the lowest level to avoid
+	 * getting more interrupts, otherwise leave this clear. This prevents a
+	 * race in the hw when coming out of rc6: There's a tiny window where
+	 * the hw runs at the minimal clock before selecting the desired
+ * frequency; if the down threshold expires in that window we will not
+	 * receive a down interrupt. */
+	if (*val <= dev_priv->rps.min_delay) {
+		*val = dev_priv->rps.min_delay;
+		limits |= dev_priv->rps.min_delay << 16;
+	}
+
+	return limits;
 }
 
 void gen6_set_rps(struct drm_device *dev, u8 val)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
-	u32 limits;
+	u32 limits = gen6_rps_limits(dev_priv, &val);
 
-	limits = 0;
-	if (val >= dev_priv->max_delay)
-		val = dev_priv->max_delay;
-	else
-		limits |= dev_priv->max_delay << 24;
+	WARN_ON(!mutex_is_locked(&dev->struct_mutex));
+	WARN_ON(val > dev_priv->rps.max_delay);
+	WARN_ON(val < dev_priv->rps.min_delay);
 
-	if (val <= dev_priv->min_delay)
-		val = dev_priv->min_delay;
-	else
-		limits |= dev_priv->min_delay << 16;
-
-	if (val == dev_priv->cur_delay)
+	if (val == dev_priv->rps.cur_delay)
 		return;
 
 	I915_WRITE(GEN6_RPNSWREQ,
@@ -2296,7 +2340,11 @@
 	 */
 	I915_WRITE(GEN6_RP_INTERRUPT_LIMITS, limits);
 
-	dev_priv->cur_delay = val;
+	POSTING_READ(GEN6_RPNSWREQ);
+
+	dev_priv->rps.cur_delay = val;
+
+	trace_intel_gpu_freq_change(val * 50);
 }
 
 static void gen6_disable_rps(struct drm_device *dev)
@@ -2312,40 +2360,40 @@
 	 * register (PMIMR) to mask PM interrupts. The only risk is in leaving
 	 * stale bits in PMIIR and PMIMR which gen6_enable_rps will clean up. */
 
-	spin_lock_irq(&dev_priv->rps_lock);
-	dev_priv->pm_iir = 0;
-	spin_unlock_irq(&dev_priv->rps_lock);
+	spin_lock_irq(&dev_priv->rps.lock);
+	dev_priv->rps.pm_iir = 0;
+	spin_unlock_irq(&dev_priv->rps.lock);
 
 	I915_WRITE(GEN6_PMIIR, I915_READ(GEN6_PMIIR));
 }
 
 int intel_enable_rc6(const struct drm_device *dev)
 {
-	/*
-	 * Respect the kernel parameter if it is set
-	 */
+	/* Respect the kernel parameter if it is set */
 	if (i915_enable_rc6 >= 0)
 		return i915_enable_rc6;
 
-	/*
-	 * Disable RC6 on Ironlake
-	 */
-	if (INTEL_INFO(dev)->gen == 5)
-		return 0;
-
-	/* On Haswell, only RC6 is available. So let's enable it by default to
-	 * provide better testing and coverage since the beginning.
-	 */
-	if (IS_HASWELL(dev))
+	if (INTEL_INFO(dev)->gen == 5) {
+#ifdef CONFIG_INTEL_IOMMU
+		/* Disable rc6 on ilk if VT-d is on. */
+		if (intel_iommu_gfx_mapped)
+			return false;
+#endif
+		DRM_DEBUG_DRIVER("Ironlake: only RC6 available\n");
 		return INTEL_RC6_ENABLE;
+	}
 
-	/*
-	 * Disable rc6 on Sandybridge
-	 */
+	if (IS_HASWELL(dev)) {
+		DRM_DEBUG_DRIVER("Haswell: only RC6 available\n");
+		return INTEL_RC6_ENABLE;
+	}
+
+	/* snb/ivb have more than one rc6 state. */
 	if (INTEL_INFO(dev)->gen == 6) {
 		DRM_DEBUG_DRIVER("Sandybridge: deep RC6 disabled\n");
 		return INTEL_RC6_ENABLE;
 	}
+
 	DRM_DEBUG_DRIVER("RC6 and deep RC6 enabled\n");
 	return (INTEL_RC6_ENABLE | INTEL_RC6p_ENABLE);
 }
@@ -2383,9 +2431,9 @@
 	gt_perf_status = I915_READ(GEN6_GT_PERF_STATUS);
 
 	/* In units of 100MHz */
-	dev_priv->max_delay = rp_state_cap & 0xff;
-	dev_priv->min_delay = (rp_state_cap & 0xff0000) >> 16;
-	dev_priv->cur_delay = 0;
+	dev_priv->rps.max_delay = rp_state_cap & 0xff;
+	dev_priv->rps.min_delay = (rp_state_cap & 0xff0000) >> 16;
+	dev_priv->rps.cur_delay = 0;
 
 	/* disable the counters and set deterministic thresholds */
 	I915_WRITE(GEN6_RC_CONTROL, 0);
@@ -2438,8 +2486,8 @@
 
 	I915_WRITE(GEN6_RP_DOWN_TIMEOUT, 1000000);
 	I915_WRITE(GEN6_RP_INTERRUPT_LIMITS,
-		   dev_priv->max_delay << 24 |
-		   dev_priv->min_delay << 16);
+		   dev_priv->rps.max_delay << 24 |
+		   dev_priv->rps.min_delay << 16);
 
 	I915_WRITE(GEN6_RP_UP_THRESHOLD, 59400);
 	I915_WRITE(GEN6_RP_DOWN_THRESHOLD, 245000);
@@ -2477,7 +2525,7 @@
 		     500))
 		DRM_ERROR("timeout waiting for pcode mailbox to finish\n");
 	if (pcu_mbox & (1<<31)) { /* OC supported */
-		dev_priv->max_delay = pcu_mbox & 0xff;
+		dev_priv->rps.max_delay = pcu_mbox & 0xff;
 		DRM_DEBUG_DRIVER("overclocking supported, adjusting frequency max to %dMHz\n", pcu_mbox * 50);
 	}
 
@@ -2485,10 +2533,10 @@
 
 	/* requires MSI enabled */
 	I915_WRITE(GEN6_PMIER, GEN6_PM_DEFERRED_EVENTS);
-	spin_lock_irq(&dev_priv->rps_lock);
-	WARN_ON(dev_priv->pm_iir != 0);
+	spin_lock_irq(&dev_priv->rps.lock);
+	WARN_ON(dev_priv->rps.pm_iir != 0);
 	I915_WRITE(GEN6_PMIMR, 0);
-	spin_unlock_irq(&dev_priv->rps_lock);
+	spin_unlock_irq(&dev_priv->rps.lock);
 	/* enable all PM interrupts */
 	I915_WRITE(GEN6_PMINTRMSK, 0);
 
@@ -2520,9 +2568,9 @@
 	 * to use for memory access.  We do this by specifying the IA frequency
 	 * the PCU should use as a reference to determine the ring frequency.
 	 */
-	for (gpu_freq = dev_priv->max_delay; gpu_freq >= dev_priv->min_delay;
+	for (gpu_freq = dev_priv->rps.max_delay; gpu_freq >= dev_priv->rps.min_delay;
 	     gpu_freq--) {
-		int diff = dev_priv->max_delay - gpu_freq;
+		int diff = dev_priv->rps.max_delay - gpu_freq;
 
 		/*
 		 * For GPU frequencies less than 750MHz, just use the lowest
@@ -2693,7 +2741,9 @@
 	unsigned long now = jiffies_to_msecs(jiffies), diff1;
 	int i;
 
-	diff1 = now - dev_priv->last_time1;
+	assert_spin_locked(&mchdev_lock);
+
+	diff1 = now - dev_priv->ips.last_time1;
 
 	/* Prevent division-by-zero if we are asking too fast.
 	 * Also, we don't get interesting results if we are polling
 	 * faster than once in 10ms, so just return the saved value
 	 * in such cases.
 	 * in such cases.
 	 */
 	if (diff1 <= 10)
-		return dev_priv->chipset_power;
+		return dev_priv->ips.chipset_power;
 
 	count1 = I915_READ(DMIEC);
 	count2 = I915_READ(DDREC);
@@ -2710,16 +2760,16 @@
 	total_count = count1 + count2 + count3;
 
 	/* FIXME: handle per-counter overflow */
-	if (total_count < dev_priv->last_count1) {
-		diff = ~0UL - dev_priv->last_count1;
+	if (total_count < dev_priv->ips.last_count1) {
+		diff = ~0UL - dev_priv->ips.last_count1;
 		diff += total_count;
 	} else {
-		diff = total_count - dev_priv->last_count1;
+		diff = total_count - dev_priv->ips.last_count1;
 	}
 
 	for (i = 0; i < ARRAY_SIZE(cparams); i++) {
-		if (cparams[i].i == dev_priv->c_m &&
-		    cparams[i].t == dev_priv->r_t) {
+		if (cparams[i].i == dev_priv->ips.c_m &&
+		    cparams[i].t == dev_priv->ips.r_t) {
 			m = cparams[i].m;
 			c = cparams[i].c;
 			break;
@@ -2730,10 +2780,10 @@
 	ret = ((m * diff) + c);
 	ret = div_u64(ret, 10);
 
-	dev_priv->last_count1 = total_count;
-	dev_priv->last_time1 = now;
+	dev_priv->ips.last_count1 = total_count;
+	dev_priv->ips.last_time1 = now;
 
-	dev_priv->chipset_power = ret;
+	dev_priv->ips.chipset_power = ret;
 
 	return ret;
 }
@@ -2894,18 +2944,17 @@
 		return v_table[pxvid].vd;
 }
 
-void i915_update_gfx_val(struct drm_i915_private *dev_priv)
+static void __i915_update_gfx_val(struct drm_i915_private *dev_priv)
 {
 	struct timespec now, diff1;
 	u64 diff;
 	unsigned long diffms;
 	u32 count;
 
-	if (dev_priv->info->gen != 5)
-		return;
+	assert_spin_locked(&mchdev_lock);
 
 	getrawmonotonic(&now);
-	diff1 = timespec_sub(now, dev_priv->last_time2);
+	diff1 = timespec_sub(now, dev_priv->ips.last_time2);
 
 	/* Don't divide by 0 */
 	diffms = diff1.tv_sec * 1000 + diff1.tv_nsec / 1000000;
@@ -2914,20 +2963,32 @@
 
 	count = I915_READ(GFXEC);
 
-	if (count < dev_priv->last_count2) {
-		diff = ~0UL - dev_priv->last_count2;
+	if (count < dev_priv->ips.last_count2) {
+		diff = ~0UL - dev_priv->ips.last_count2;
 		diff += count;
 	} else {
-		diff = count - dev_priv->last_count2;
+		diff = count - dev_priv->ips.last_count2;
 	}
 
-	dev_priv->last_count2 = count;
-	dev_priv->last_time2 = now;
+	dev_priv->ips.last_count2 = count;
+	dev_priv->ips.last_time2 = now;
 
 	/* More magic constants... */
 	diff = diff * 1181;
 	diff = div_u64(diff, diffms * 10);
-	dev_priv->gfx_power = diff;
+	dev_priv->ips.gfx_power = diff;
+}
+
+void i915_update_gfx_val(struct drm_i915_private *dev_priv)
+{
+	if (dev_priv->info->gen != 5)
+		return;
+
+	spin_lock_irq(&mchdev_lock);
+
+	__i915_update_gfx_val(dev_priv);
+
+	spin_unlock_irq(&mchdev_lock);
 }
 
 unsigned long i915_gfx_val(struct drm_i915_private *dev_priv)
@@ -2935,7 +2996,9 @@
 	unsigned long t, corr, state1, corr2, state2;
 	u32 pxvid, ext_v;
 
-	pxvid = I915_READ(PXVFREQ_BASE + (dev_priv->cur_delay * 4));
+	assert_spin_locked(&mchdev_lock);
+
+	pxvid = I915_READ(PXVFREQ_BASE + (dev_priv->rps.cur_delay * 4));
 	pxvid = (pxvid >> 24) & 0x7f;
 	ext_v = pvid_to_extvid(dev_priv, pxvid);
 
@@ -2955,28 +3018,16 @@
 
 	corr = corr * ((150142 * state1) / 10000 - 78642);
 	corr /= 100000;
-	corr2 = (corr * dev_priv->corr);
+	corr2 = (corr * dev_priv->ips.corr);
 
 	state2 = (corr2 * state1) / 10000;
 	state2 /= 100; /* convert to mW */
 
-	i915_update_gfx_val(dev_priv);
+	__i915_update_gfx_val(dev_priv);
 
-	return dev_priv->gfx_power + state2;
+	return dev_priv->ips.gfx_power + state2;
 }
 
-/* Global for IPS driver to get at the current i915 device */
-static struct drm_i915_private *i915_mch_dev;
-/*
- * Lock protecting IPS related data structures
- *   - i915_mch_dev
- *   - dev_priv->max_delay
- *   - dev_priv->min_delay
- *   - dev_priv->fmax
- *   - dev_priv->gpu_busy
- */
-static DEFINE_SPINLOCK(mchdev_lock);
-
 /**
  * i915_read_mch_val - return value for IPS use
  *
@@ -2988,7 +3039,7 @@
 	struct drm_i915_private *dev_priv;
 	unsigned long chipset_val, graphics_val, ret = 0;
 
-	spin_lock(&mchdev_lock);
+	spin_lock_irq(&mchdev_lock);
 	if (!i915_mch_dev)
 		goto out_unlock;
 	dev_priv = i915_mch_dev;
@@ -2999,7 +3050,7 @@
 	ret = chipset_val + graphics_val;
 
 out_unlock:
-	spin_unlock(&mchdev_lock);
+	spin_unlock_irq(&mchdev_lock);
 
 	return ret;
 }
@@ -3015,18 +3066,18 @@
 	struct drm_i915_private *dev_priv;
 	bool ret = true;
 
-	spin_lock(&mchdev_lock);
+	spin_lock_irq(&mchdev_lock);
 	if (!i915_mch_dev) {
 		ret = false;
 		goto out_unlock;
 	}
 	dev_priv = i915_mch_dev;
 
-	if (dev_priv->max_delay > dev_priv->fmax)
-		dev_priv->max_delay--;
+	if (dev_priv->ips.max_delay > dev_priv->ips.fmax)
+		dev_priv->ips.max_delay--;
 
 out_unlock:
-	spin_unlock(&mchdev_lock);
+	spin_unlock_irq(&mchdev_lock);
 
 	return ret;
 }
@@ -3043,18 +3094,18 @@
 	struct drm_i915_private *dev_priv;
 	bool ret = true;
 
-	spin_lock(&mchdev_lock);
+	spin_lock_irq(&mchdev_lock);
 	if (!i915_mch_dev) {
 		ret = false;
 		goto out_unlock;
 	}
 	dev_priv = i915_mch_dev;
 
-	if (dev_priv->max_delay < dev_priv->min_delay)
-		dev_priv->max_delay++;
+	if (dev_priv->ips.max_delay < dev_priv->ips.min_delay)
+		dev_priv->ips.max_delay++;
 
 out_unlock:
-	spin_unlock(&mchdev_lock);
+	spin_unlock_irq(&mchdev_lock);
 
 	return ret;
 }
@@ -3068,17 +3119,20 @@
 bool i915_gpu_busy(void)
 {
 	struct drm_i915_private *dev_priv;
+	struct intel_ring_buffer *ring;
 	bool ret = false;
+	int i;
 
-	spin_lock(&mchdev_lock);
+	spin_lock_irq(&mchdev_lock);
 	if (!i915_mch_dev)
 		goto out_unlock;
 	dev_priv = i915_mch_dev;
 
-	ret = dev_priv->busy;
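+	/* Busy if any ring still has requests outstanding. */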
+	for_each_ring(ring, dev_priv, i)
+		ret |= !list_empty(&ring->request_list);
 
 out_unlock:
-	spin_unlock(&mchdev_lock);
+	spin_unlock_irq(&mchdev_lock);
 
 	return ret;
 }
@@ -3095,20 +3149,20 @@
 	struct drm_i915_private *dev_priv;
 	bool ret = true;
 
-	spin_lock(&mchdev_lock);
+	spin_lock_irq(&mchdev_lock);
 	if (!i915_mch_dev) {
 		ret = false;
 		goto out_unlock;
 	}
 	dev_priv = i915_mch_dev;
 
-	dev_priv->max_delay = dev_priv->fstart;
+	dev_priv->ips.max_delay = dev_priv->ips.fstart;
 
-	if (!ironlake_set_drps(dev_priv->dev, dev_priv->fstart))
+	if (!ironlake_set_drps(dev_priv->dev, dev_priv->ips.fstart))
 		ret = false;
 
 out_unlock:
-	spin_unlock(&mchdev_lock);
+	spin_unlock_irq(&mchdev_lock);
 
 	return ret;
 }
@@ -3136,19 +3190,20 @@
 
 void intel_gpu_ips_init(struct drm_i915_private *dev_priv)
 {
-	spin_lock(&mchdev_lock);
+	/* We only register the i915 ips part with intel-ips once everything is
+	 * set up, to avoid intel-ips sneaking in and reading bogus values. */
+	spin_lock_irq(&mchdev_lock);
 	i915_mch_dev = dev_priv;
-	dev_priv->mchdev_lock = &mchdev_lock;
-	spin_unlock(&mchdev_lock);
+	spin_unlock_irq(&mchdev_lock);
 
 	ips_ping_for_i915_load();
 }
 
 void intel_gpu_ips_teardown(void)
 {
-	spin_lock(&mchdev_lock);
+	spin_lock_irq(&mchdev_lock);
 	i915_mch_dev = NULL;
-	spin_unlock(&mchdev_lock);
+	spin_unlock_irq(&mchdev_lock);
 }
 
 static void intel_init_emon(struct drm_device *dev)
 {
@@ -3218,7 +3273,7 @@
 
 	lcfuse = I915_READ(LCFUSE02);
 
-	dev_priv->corr = (lcfuse & LCFUSE_HIV_MASK);
+	dev_priv->ips.corr = (lcfuse & LCFUSE_HIV_MASK);
 }
 
 void intel_disable_gt_powersave(struct drm_device *dev)
@@ -3731,42 +3786,6 @@
 		dev_priv->display.init_pch_clock_gating(dev);
 }
 
-static void gen6_sanitize_pm(struct drm_device *dev)
-{
-	struct drm_i915_private *dev_priv = dev->dev_private;
-	u32 limits, delay, old;
-
-	gen6_gt_force_wake_get(dev_priv);
-
-	old = limits = I915_READ(GEN6_RP_INTERRUPT_LIMITS);
-	/* Make sure we continue to get interrupts
-	 * until we hit the minimum or maximum frequencies.
-	 */
-	limits &= ~(0x3f << 16 | 0x3f << 24);
-	delay = dev_priv->cur_delay;
-	if (delay < dev_priv->max_delay)
-		limits |= (dev_priv->max_delay & 0x3f) << 24;
-	if (delay > dev_priv->min_delay)
-		limits |= (dev_priv->min_delay & 0x3f) << 16;
-
-	if (old != limits) {
-		/* Note that the known failure case is to read back 0. */
-		DRM_DEBUG_DRIVER("Power management discrepancy: GEN6_RP_INTERRUPT_LIMITS "
-				 "expected %08x, was %08x\n", limits, old);
-		I915_WRITE(GEN6_RP_INTERRUPT_LIMITS, limits);
-	}
-
-	gen6_gt_force_wake_put(dev_priv);
-}
-
-void intel_sanitize_pm(struct drm_device *dev)
-{
-	struct drm_i915_private *dev_priv = dev->dev_private;
-
-	if (dev_priv->display.sanitize_pm)
-		dev_priv->display.sanitize_pm(dev);
-}
-
 /* Starting with Haswell, we have different power wells for
  * different parts of the GPU. This attempts to enable them all.
  */
@@ -3852,7 +3871,6 @@
 				dev_priv->display.update_wm = NULL;
 			}
 			dev_priv->display.init_clock_gating = gen6_init_clock_gating;
-			dev_priv->display.sanitize_pm = gen6_sanitize_pm;
 		} else if (IS_IVYBRIDGE(dev)) {
 			/* FIXME: detect B0+ stepping and use auto training */
 			if (SNB_READ_WM0_LATENCY()) {
@@ -3864,7 +3882,6 @@
 				dev_priv->display.update_wm = NULL;
 			}
 			dev_priv->display.init_clock_gating = ivybridge_init_clock_gating;
-			dev_priv->display.sanitize_pm = gen6_sanitize_pm;
 		} else if (IS_HASWELL(dev)) {
 			if (SNB_READ_WM0_LATENCY()) {
 				dev_priv->display.update_wm = sandybridge_update_wm;
@@ -3876,7 +3893,6 @@
 				dev_priv->display.update_wm = NULL;
 			}
 			dev_priv->display.init_clock_gating = haswell_init_clock_gating;
-			dev_priv->display.sanitize_pm = gen6_sanitize_pm;
 		} else
 			dev_priv->display.update_wm = NULL;
 	} else if (IS_VALLEYVIEW(dev)) {
@@ -3955,14 +3971,16 @@
 	else
 		forcewake_ack = FORCEWAKE_ACK;
 
-	if (wait_for_atomic_us((I915_READ_NOTRACE(forcewake_ack) & 1) == 0, 500))
-		DRM_ERROR("Force wake wait timed out\n");
+	if (wait_for_atomic((I915_READ_NOTRACE(forcewake_ack) & 1) == 0,
+			    FORCEWAKE_ACK_TIMEOUT_MS))
+		DRM_ERROR("Timed out waiting for forcewake old ack to clear.\n");
 
 	I915_WRITE_NOTRACE(FORCEWAKE, 1);
-	POSTING_READ(FORCEWAKE);
+	POSTING_READ(ECOBUS); /* something from same cacheline, but !FORCEWAKE */
 
-	if (wait_for_atomic_us((I915_READ_NOTRACE(forcewake_ack) & 1), 500))
-		DRM_ERROR("Force wake wait timed out\n");
+	if (wait_for_atomic((I915_READ_NOTRACE(forcewake_ack) & 1),
+			    FORCEWAKE_ACK_TIMEOUT_MS))
+		DRM_ERROR("Timed out waiting for forcewake to ack request.\n");
 
 	__gen6_gt_wait_for_thread_c0(dev_priv);
 }
@@ -3976,14 +3994,16 @@
 	else
 		forcewake_ack = FORCEWAKE_MT_ACK;
 
-	if (wait_for_atomic_us((I915_READ_NOTRACE(forcewake_ack) & 1) == 0, 500))
-		DRM_ERROR("Force wake wait timed out\n");
+	if (wait_for_atomic((I915_READ_NOTRACE(forcewake_ack) & 1) == 0,
+			    FORCEWAKE_ACK_TIMEOUT_MS))
+		DRM_ERROR("Timed out waiting for forcewake old ack to clear.\n");
 
 	I915_WRITE_NOTRACE(FORCEWAKE_MT, _MASKED_BIT_ENABLE(1));
-	POSTING_READ(FORCEWAKE_MT);
+	POSTING_READ(ECOBUS); /* something from same cacheline, but !FORCEWAKE */
 
-	if (wait_for_atomic_us((I915_READ_NOTRACE(forcewake_ack) & 1), 500))
-		DRM_ERROR("Force wake wait timed out\n");
+	if (wait_for_atomic((I915_READ_NOTRACE(forcewake_ack) & 1),
+			    FORCEWAKE_ACK_TIMEOUT_MS))
+		DRM_ERROR("Timed out waiting for forcewake to ack request.\n");
 
 	__gen6_gt_wait_for_thread_c0(dev_priv);
 }
@@ -4016,14 +4036,14 @@
 static void __gen6_gt_force_wake_put(struct drm_i915_private *dev_priv)
 {
 	I915_WRITE_NOTRACE(FORCEWAKE, 0);
-	POSTING_READ(FORCEWAKE);
+	/* gen6_gt_check_fifodbg doubles as the POSTING_READ */
 	gen6_gt_check_fifodbg(dev_priv);
 }
 
 static void __gen6_gt_force_wake_mt_put(struct drm_i915_private *dev_priv)
 {
 	I915_WRITE_NOTRACE(FORCEWAKE_MT, _MASKED_BIT_DISABLE(1));
-	POSTING_READ(FORCEWAKE_MT);
+	/* gen6_gt_check_fifodbg doubles as the POSTING_READ */
 	gen6_gt_check_fifodbg(dev_priv);
 }
 
@@ -4062,24 +4082,24 @@
 
 static void vlv_force_wake_get(struct drm_i915_private *dev_priv)
 {
-	/* Already awake? */
-	if ((I915_READ(0x130094) & 0xa1) == 0xa1)
-		return;
+	if (wait_for_atomic((I915_READ_NOTRACE(FORCEWAKE_ACK_VLV) & 1) == 0,
+			    FORCEWAKE_ACK_TIMEOUT_MS))
+		DRM_ERROR("Timed out waiting for forcewake old ack to clear.\n");
 
-	I915_WRITE_NOTRACE(FORCEWAKE_VLV, 0xffffffff);
-	POSTING_READ(FORCEWAKE_VLV);
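+	/* FORCEWAKE_VLV is a masked register: the upper 16 bits of a write
+	 * select which of the lower 16 bits take effect. */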
+	I915_WRITE_NOTRACE(FORCEWAKE_VLV, _MASKED_BIT_ENABLE(1));
 
-	if (wait_for_atomic_us((I915_READ_NOTRACE(FORCEWAKE_ACK_VLV) & 1), 500))
-		DRM_ERROR("Force wake wait timed out\n");
+	if (wait_for_atomic((I915_READ_NOTRACE(FORCEWAKE_ACK_VLV) & 1),
+			    FORCEWAKE_ACK_TIMEOUT_MS))
+		DRM_ERROR("Timed out waiting for forcewake to ack request.\n");
 
 	__gen6_gt_wait_for_thread_c0(dev_priv);
 }
 
 static void vlv_force_wake_put(struct drm_i915_private *dev_priv)
 {
-	I915_WRITE_NOTRACE(FORCEWAKE_VLV, 0xffff0000);
-	/* FIXME: confirm VLV behavior with Punit folks */
-	POSTING_READ(FORCEWAKE_VLV);
+	I915_WRITE_NOTRACE(FORCEWAKE_VLV, _MASKED_BIT_DISABLE(1));
+	/* The below doubles as a POSTING_READ */
+	gen6_gt_check_fifodbg(dev_priv);
 }
 
 void intel_gt_init(struct drm_device *dev)
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
index e2a73b3..984a0c5 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.c
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
@@ -262,6 +262,83 @@
 	return 0;
 }
 
+static int
+gen7_render_ring_cs_stall_wa(struct intel_ring_buffer *ring)
+{
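+	/* Gen7 requires a CS stall (PIPE_CONTROL with CS_STALL |
+	 * STALL_AT_SCOREBOARD) before any PIPE_CONTROL that sets the state
+	 * cache invalidate bit; emit one here. */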
+	int ret;
+
+	ret = intel_ring_begin(ring, 4);
+	if (ret)
+		return ret;
+
+	intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(4));
+	intel_ring_emit(ring, PIPE_CONTROL_CS_STALL |
+			      PIPE_CONTROL_STALL_AT_SCOREBOARD);
+	intel_ring_emit(ring, 0);
+	intel_ring_emit(ring, 0);
+	intel_ring_advance(ring);
+
+	return 0;
+}
+
+static int
+gen7_render_ring_flush(struct intel_ring_buffer *ring,
+		       u32 invalidate_domains, u32 flush_domains)
+{
+	u32 flags = 0;
+	struct pipe_control *pc = ring->private;
+	u32 scratch_addr = pc->gtt_offset + 128;
+	int ret;
+
+	/*
+	 * Ensure that any following seqno writes only happen when the render
+	 * cache is indeed flushed.
+	 *
+	 * Workaround: 4th PIPE_CONTROL command (except the ones with only
+	 * read-cache invalidate bits set) must have the CS_STALL bit set. We
+	 * don't try to be clever and just set it unconditionally.
+	 */
+	flags |= PIPE_CONTROL_CS_STALL;
+
+	/* Just flush everything.  Experiments have shown that reducing the
+	 * number of bits based on the write domains has little performance
+	 * impact.
+	 */
+	if (flush_domains) {
+		flags |= PIPE_CONTROL_RENDER_TARGET_CACHE_FLUSH;
+		flags |= PIPE_CONTROL_DEPTH_CACHE_FLUSH;
+	}
+	if (invalidate_domains) {
+		flags |= PIPE_CONTROL_TLB_INVALIDATE;
+		flags |= PIPE_CONTROL_INSTRUCTION_CACHE_INVALIDATE;
+		flags |= PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE;
+		flags |= PIPE_CONTROL_VF_CACHE_INVALIDATE;
+		flags |= PIPE_CONTROL_CONST_CACHE_INVALIDATE;
+		flags |= PIPE_CONTROL_STATE_CACHE_INVALIDATE;
+		/*
+		 * TLB invalidate requires a post-sync write.
+		 */
+		flags |= PIPE_CONTROL_QW_WRITE;
+
+		/* Workaround: we must issue a pipe_control with CS-stall bit
+		 * set before a pipe_control command that has the state cache
+		 * invalidate bit set. */
+		gen7_render_ring_cs_stall_wa(ring);
+	}
+
+	ret = intel_ring_begin(ring, 4);
+	if (ret)
+		return ret;
+
+	intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(4));
+	intel_ring_emit(ring, flags);
+	intel_ring_emit(ring, scratch_addr | PIPE_CONTROL_GLOBAL_GTT);
+	intel_ring_emit(ring, 0);
+	intel_ring_advance(ring);
+
+	return 0;
+}
+
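
/*
 * gen7_render_ring_flush() above encodes an ordering workaround: any
 * PIPE_CONTROL that sets the state-cache-invalidate bit must be preceded
 * by a PIPE_CONTROL with the CS-stall bit. A stripped-down sketch of that
 * two-stage emission (a real PIPE_CONTROL is four words; the helpers and
 * opcode constant here are illustrative only):
 */
enum {
	FLAG_CS_STALL		= 1 << 0,
	FLAG_STATE_INVALIDATE	= 1 << 1,
};

static void emit_pipe_control(unsigned int *ring, int *pos, unsigned int flags)
{
	ring[(*pos)++] = 0x7a000002;	/* illustrative PIPE_CONTROL header */
	ring[(*pos)++] = flags;
}

static void flush_with_workaround(unsigned int *ring, int *pos, unsigned int flags)
{
	if (flags & FLAG_STATE_INVALIDATE)	/* w/a: emit a stall first */
		emit_pipe_control(ring, pos, FLAG_CS_STALL);
	emit_pipe_control(ring, pos, flags | FLAG_CS_STALL);
}
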
 static void ring_write_tail(struct intel_ring_buffer *ring,
 			    u32 value)
 {
@@ -382,12 +459,12 @@
 
 	i915_gem_object_set_cache_level(obj, I915_CACHE_LLC);
 
-	ret = i915_gem_object_pin(obj, 4096, true);
+	ret = i915_gem_object_pin(obj, 4096, true, false);
 	if (ret)
 		goto err_unref;
 
 	pc->gtt_offset = obj->gtt_offset;
-	pc->cpu_page =  kmap(obj->pages[0]);
+	pc->cpu_page = kmap(sg_page(obj->pages->sgl));
 	if (pc->cpu_page == NULL)
 		goto err_unpin;
 
@@ -414,7 +491,8 @@
 		return;
 
 	obj = pc->obj;
-	kunmap(obj->pages[0]);
+
+	kunmap(sg_page(obj->pages->sgl));
 	i915_gem_object_unpin(obj);
 	drm_gem_object_unreference(&obj->base);
 
@@ -462,7 +540,7 @@
 	if (INTEL_INFO(dev)->gen >= 6)
 		I915_WRITE(INSTPM, _MASKED_BIT_ENABLE(INSTPM_FORCE_ORDERING));
 
-	if (IS_IVYBRIDGE(dev))
+	if (HAS_L3_GPU_CACHE(dev))
 		I915_WRITE_IMR(ring, ~GEN6_RENDER_L3_PARITY_ERROR);
 
 	return ret;
@@ -628,26 +706,24 @@
 }
 
 static u32
-gen6_ring_get_seqno(struct intel_ring_buffer *ring)
+gen6_ring_get_seqno(struct intel_ring_buffer *ring, bool lazy_coherency)
 {
-	struct drm_device *dev = ring->dev;
-
 	/* Workaround to force correct ordering between irq and seqno writes on
 	 * ivb (and maybe also on snb) by reading from a CS register (like
 	 * ACTHD) before reading the status page. */
-	if (IS_GEN6(dev) || IS_GEN7(dev))
+	if (!lazy_coherency)
 		intel_ring_get_active_head(ring);
 	return intel_read_status_page(ring, I915_GEM_HWS_INDEX);
 }
 
 static u32
-ring_get_seqno(struct intel_ring_buffer *ring)
+ring_get_seqno(struct intel_ring_buffer *ring, bool lazy_coherency)
 {
 	return intel_read_status_page(ring, I915_GEM_HWS_INDEX);
 }
 
 static u32
-pc_render_get_seqno(struct intel_ring_buffer *ring)
+pc_render_get_seqno(struct intel_ring_buffer *ring, bool lazy_coherency)
 {
 	struct pipe_control *pc = ring->private;
 	return pc->cpu_page[0];
@@ -852,7 +928,7 @@
 
 	spin_lock_irqsave(&dev_priv->irq_lock, flags);
 	if (ring->irq_refcount++ == 0) {
-		if (IS_IVYBRIDGE(dev) && ring->id == RCS)
+		if (HAS_L3_GPU_CACHE(dev) && ring->id == RCS)
 			I915_WRITE_IMR(ring, ~(ring->irq_enable_mask |
 						GEN6_RENDER_L3_PARITY_ERROR));
 		else
@@ -875,7 +951,7 @@
 
 	spin_lock_irqsave(&dev_priv->irq_lock, flags);
 	if (--ring->irq_refcount == 0) {
-		if (IS_IVYBRIDGE(dev) && ring->id == RCS)
+		if (HAS_L3_GPU_CACHE(dev) && ring->id == RCS)
 			I915_WRITE_IMR(ring, ~GEN6_RENDER_L3_PARITY_ERROR);
 		else
 			I915_WRITE_IMR(ring, ~0);
@@ -951,7 +1027,7 @@
 	if (obj == NULL)
 		return;
 
-	kunmap(obj->pages[0]);
+	kunmap(sg_page(obj->pages->sgl));
 	i915_gem_object_unpin(obj);
 	drm_gem_object_unreference(&obj->base);
 	ring->status_page.obj = NULL;
@@ -972,13 +1048,13 @@
 
 	i915_gem_object_set_cache_level(obj, I915_CACHE_LLC);
 
-	ret = i915_gem_object_pin(obj, 4096, true);
+	ret = i915_gem_object_pin(obj, 4096, true, false);
 	if (ret != 0) {
 		goto err_unref;
 	}
 
 	ring->status_page.gfx_addr = obj->gtt_offset;
-	ring->status_page.page_addr = kmap(obj->pages[0]);
+	ring->status_page.page_addr = kmap(sg_page(obj->pages->sgl));
 	if (ring->status_page.page_addr == NULL) {
 		ret = -ENOMEM;
 		goto err_unpin;
@@ -1010,7 +1086,6 @@
 	ring->dev = dev;
 	INIT_LIST_HEAD(&ring->active_list);
 	INIT_LIST_HEAD(&ring->request_list);
-	INIT_LIST_HEAD(&ring->gpu_write_list);
 	ring->size = 32 * PAGE_SIZE;
 
 	init_waitqueue_head(&ring->irq_queue);
@@ -1030,7 +1105,7 @@
 
 	ring->obj = obj;
 
-	ret = i915_gem_object_pin(obj, PAGE_SIZE, true);
+	ret = i915_gem_object_pin(obj, PAGE_SIZE, true, false);
 	if (ret)
 		goto err_unref;
 
@@ -1379,7 +1454,9 @@
 
 	if (INTEL_INFO(dev)->gen >= 6) {
 		ring->add_request = gen6_add_request;
-		ring->flush = gen6_render_ring_flush;
+		ring->flush = gen7_render_ring_flush;
+		if (INTEL_INFO(dev)->gen == 6)
+			ring->flush = gen6_render_ring_flush;
 		ring->irq_get = gen6_ring_get_irq;
 		ring->irq_put = gen6_ring_put_irq;
 		ring->irq_enable_mask = GT_USER_INTERRUPT;
@@ -1481,7 +1558,6 @@
 	ring->dev = dev;
 	INIT_LIST_HEAD(&ring->active_list);
 	INIT_LIST_HEAD(&ring->request_list);
-	INIT_LIST_HEAD(&ring->gpu_write_list);
 
 	ring->size = size;
 	ring->effective_size = ring->size;
@@ -1574,3 +1650,41 @@
 
 	return intel_init_ring_buffer(dev, ring);
 }
+
+int
+intel_ring_flush_all_caches(struct intel_ring_buffer *ring)
+{
+	int ret;
+
+	if (!ring->gpu_caches_dirty)
+		return 0;
+
+	ret = ring->flush(ring, 0, I915_GEM_GPU_DOMAINS);
+	if (ret)
+		return ret;
+
+	trace_i915_gem_ring_flush(ring, 0, I915_GEM_GPU_DOMAINS);
+
+	ring->gpu_caches_dirty = false;
+	return 0;
+}
+
+int
+intel_ring_invalidate_all_caches(struct intel_ring_buffer *ring)
+{
+	uint32_t flush_domains;
+	int ret;
+
+	flush_domains = 0;
+	if (ring->gpu_caches_dirty)
+		flush_domains = I915_GEM_GPU_DOMAINS;
+
+	ret = ring->flush(ring, I915_GEM_GPU_DOMAINS, flush_domains);
+	if (ret)
+		return ret;
+
+	trace_i915_gem_ring_flush(ring, I915_GEM_GPU_DOMAINS, flush_domains);
+
+	ring->gpu_caches_dirty = false;
+	return 0;
+}
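
/*
 * intel_ring_flush_all_caches() and intel_ring_invalidate_all_caches()
 * above centralize a dirty-flag idiom: flush only when an earlier
 * submission left the GPU caches dirty, and fold any pending flush into
 * the next invalidate. The same bookkeeping in sketch form, with a
 * hypothetical flush callback in place of ring->flush:
 */
#include <stdbool.h>

struct ring_state {
	bool caches_dirty;
	int (*flush)(unsigned int invalidate, unsigned int flush);
};

static int flush_all_caches(struct ring_state *r)
{
	int ret;

	if (!r->caches_dirty)
		return 0;		/* nothing pending, skip the flush */

	ret = r->flush(0, ~0u);		/* flush every write domain */
	if (ret)
		return ret;

	r->caches_dirty = false;
	return 0;
}
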
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.h b/drivers/gpu/drm/i915/intel_ringbuffer.h
index 1d3c81f..2ea7a31 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.h
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.h
@@ -72,7 +72,14 @@
 				  u32	flush_domains);
 	int		(*add_request)(struct intel_ring_buffer *ring,
 				       u32 *seqno);
-	u32		(*get_seqno)(struct intel_ring_buffer *ring);
+	/* Some chipsets are not quite as coherent as advertised and need
+	 * an expensive kick to force a true read of the up-to-date seqno.
+	 * However, the up-to-date seqno is not always required and the last
+	 * seen value is good enough. Note that the seqno will always be
+	 * monotonic, even if not coherent.
+	 */
+	u32		(*get_seqno)(struct intel_ring_buffer *ring,
+				     bool lazy_coherency);
 	int		(*dispatch_execbuffer)(struct intel_ring_buffer *ring,
 					       u32 offset, u32 length);
 	void		(*cleanup)(struct intel_ring_buffer *ring);
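
/*
 * The lazy_coherency parameter documented above gives callers a choice
 * between a cheap, possibly stale seqno read and an expensive coherent
 * one. Calling-convention sketch (hypothetical names, not the i915 API):
 */
#include <stdbool.h>

struct seqno_source {
	unsigned int status_page;	/* last value the GPU wrote back */
};

static unsigned int read_seqno(struct seqno_source *s, bool lazy_coherency)
{
	if (!lazy_coherency) {
		/* A real driver would kick the hardware here, e.g. by
		 * reading a command-streamer register, to force pending
		 * seqno writes to land before the read below. */
	}
	return s->status_page;		/* monotonic even when stale */
}
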
@@ -101,15 +108,6 @@
 	struct list_head request_list;
 
 	/**
-	 * List of objects currently pending a GPU write flush.
-	 *
-	 * All elements on this list will belong to either the
-	 * active_list or flushing_list, last_rendering_seqno can
-	 * be used to differentiate between the two elements.
-	 */
-	struct list_head gpu_write_list;
-
-	/**
 	 * Do we have some not yet emitted requests outstanding?
 	 */
 	u32 outstanding_lazy_request;
@@ -204,6 +202,8 @@
 void intel_ring_advance(struct intel_ring_buffer *ring);
 
 u32 intel_ring_get_seqno(struct intel_ring_buffer *ring);
+int intel_ring_flush_all_caches(struct intel_ring_buffer *ring);
+int intel_ring_invalidate_all_caches(struct intel_ring_buffer *ring);
 
 int intel_init_render_ring_buffer(struct drm_device *dev);
 int intel_init_bsd_ring_buffer(struct drm_device *dev);
diff --git a/drivers/gpu/drm/i915/intel_sdvo.c b/drivers/gpu/drm/i915/intel_sdvo.c
index 123afd3..39c3198 100644
--- a/drivers/gpu/drm/i915/intel_sdvo.c
+++ b/drivers/gpu/drm/i915/intel_sdvo.c
@@ -97,7 +97,7 @@
 	/*
 	 * Hotplug activation bits for this device
 	 */
-	uint8_t hotplug_active[2];
+	uint16_t hotplug_active;
 
 	/**
 	 * This is used to select the color range of RBG outputs in HDMI mode.
@@ -628,6 +628,14 @@
 				    &outputs, sizeof(outputs));
 }
 
+static bool intel_sdvo_get_active_outputs(struct intel_sdvo *intel_sdvo,
+					  u16 *outputs)
+{
+	return intel_sdvo_get_value(intel_sdvo,
+				    SDVO_CMD_GET_ACTIVE_OUTPUTS,
+				    outputs, sizeof(*outputs));
+}
+
 static bool intel_sdvo_set_encoder_power_state(struct intel_sdvo *intel_sdvo,
 					       int mode)
 {
@@ -1142,51 +1150,132 @@
 	intel_sdvo_write_sdvox(intel_sdvo, sdvox);
 }
 
-static void intel_sdvo_dpms(struct drm_encoder *encoder, int mode)
+static bool intel_sdvo_connector_get_hw_state(struct intel_connector *connector)
 {
-	struct drm_device *dev = encoder->dev;
+	struct intel_sdvo_connector *intel_sdvo_connector =
+		to_intel_sdvo_connector(&connector->base);
+	struct intel_sdvo *intel_sdvo = intel_attached_sdvo(&connector->base);
+	u16 active_outputs;
+
+	intel_sdvo_get_active_outputs(intel_sdvo, &active_outputs);
+
+	if (active_outputs & intel_sdvo_connector->output_flag)
+		return true;
+	else
+		return false;
+}
+
+static bool intel_sdvo_get_hw_state(struct intel_encoder *encoder,
+				    enum pipe *pipe)
+{
+	struct drm_device *dev = encoder->base.dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
-	struct intel_sdvo *intel_sdvo = to_intel_sdvo(encoder);
-	struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc);
+	struct intel_sdvo *intel_sdvo = to_intel_sdvo(&encoder->base);
+	u32 tmp;
+
+	tmp = I915_READ(intel_sdvo->sdvo_reg);
+
+	if (!(tmp & SDVO_ENABLE))
+		return false;
+
+	if (HAS_PCH_CPT(dev))
+		*pipe = PORT_TO_PIPE_CPT(tmp);
+	else
+		*pipe = PORT_TO_PIPE(tmp);
+
+	return true;
+}
+
+static void intel_disable_sdvo(struct intel_encoder *encoder)
+{
+	struct drm_i915_private *dev_priv = encoder->base.dev->dev_private;
+	struct intel_sdvo *intel_sdvo = to_intel_sdvo(&encoder->base);
 	u32 temp;
 
+	intel_sdvo_set_active_outputs(intel_sdvo, 0);
+	if (0)
+		intel_sdvo_set_encoder_power_state(intel_sdvo,
+						   DRM_MODE_DPMS_OFF);
+
+	temp = I915_READ(intel_sdvo->sdvo_reg);
+	if ((temp & SDVO_ENABLE) != 0) {
+		intel_sdvo_write_sdvox(intel_sdvo, temp & ~SDVO_ENABLE);
+	}
+}
+
+static void intel_enable_sdvo(struct intel_encoder *encoder)
+{
+	struct drm_device *dev = encoder->base.dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct intel_sdvo *intel_sdvo = to_intel_sdvo(&encoder->base);
+	struct intel_crtc *intel_crtc = to_intel_crtc(encoder->base.crtc);
+	u32 temp;
+	bool input1, input2;
+	int i;
+	u8 status;
+
+	temp = I915_READ(intel_sdvo->sdvo_reg);
+	if ((temp & SDVO_ENABLE) == 0)
+		intel_sdvo_write_sdvox(intel_sdvo, temp | SDVO_ENABLE);
+	for (i = 0; i < 2; i++)
+		intel_wait_for_vblank(dev, intel_crtc->pipe);
+
+	status = intel_sdvo_get_trained_inputs(intel_sdvo, &input1, &input2);
+	/* Warn if the device reported failure to sync.
+	 * A lot of SDVO devices fail to notify of sync; it's
+	 * a given that if the status is a success, we succeeded.
+	 */
+	if (status == SDVO_CMD_STATUS_SUCCESS && !input1) {
+		DRM_DEBUG_KMS("First %s output reported failure to "
+				"sync\n", SDVO_NAME(intel_sdvo));
+	}
+
+	if (0)
+		intel_sdvo_set_encoder_power_state(intel_sdvo,
+						   DRM_MODE_DPMS_ON);
+	intel_sdvo_set_active_outputs(intel_sdvo, intel_sdvo->attached_output);
+}
+
+static void intel_sdvo_dpms(struct drm_connector *connector, int mode)
+{
+	struct drm_crtc *crtc;
+	struct intel_sdvo *intel_sdvo = intel_attached_sdvo(connector);
+
+	/* SDVO supports only 2 DPMS states. */
+	if (mode != DRM_MODE_DPMS_ON)
+		mode = DRM_MODE_DPMS_OFF;
+
+	if (mode == connector->dpms)
+		return;
+
+	connector->dpms = mode;
+
+	/* Only need to change hw state when actually enabled */
+	crtc = intel_sdvo->base.base.crtc;
+	if (!crtc) {
+		intel_sdvo->base.connectors_active = false;
+		return;
+	}
+
 	if (mode != DRM_MODE_DPMS_ON) {
 		intel_sdvo_set_active_outputs(intel_sdvo, 0);
 		if (0)
 			intel_sdvo_set_encoder_power_state(intel_sdvo, mode);
 
-		if (mode == DRM_MODE_DPMS_OFF) {
-			temp = I915_READ(intel_sdvo->sdvo_reg);
-			if ((temp & SDVO_ENABLE) != 0) {
-				intel_sdvo_write_sdvox(intel_sdvo, temp & ~SDVO_ENABLE);
-			}
-		}
+		intel_sdvo->base.connectors_active = false;
+
+		intel_crtc_update_dpms(crtc);
 	} else {
-		bool input1, input2;
-		int i;
-		u8 status;
+		intel_sdvo->base.connectors_active = true;
 
-		temp = I915_READ(intel_sdvo->sdvo_reg);
-		if ((temp & SDVO_ENABLE) == 0)
-			intel_sdvo_write_sdvox(intel_sdvo, temp | SDVO_ENABLE);
-		for (i = 0; i < 2; i++)
-			intel_wait_for_vblank(dev, intel_crtc->pipe);
-
-		status = intel_sdvo_get_trained_inputs(intel_sdvo, &input1, &input2);
-		/* Warn if the device reported failure to sync.
-		 * A lot of SDVO devices fail to notify of sync, but it's
-		 * a given it the status is a success, we succeeded.
-		 */
-		if (status == SDVO_CMD_STATUS_SUCCESS && !input1) {
-			DRM_DEBUG_KMS("First %s output reported failure to "
-					"sync\n", SDVO_NAME(intel_sdvo));
-		}
+		intel_crtc_update_dpms(crtc);
 
 		if (0)
 			intel_sdvo_set_encoder_power_state(intel_sdvo, mode);
 		intel_sdvo_set_active_outputs(intel_sdvo, intel_sdvo->attached_output);
 	}
-	return;
+
+	intel_modeset_check_state(connector->dev);
 }
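
/*
 * The reworked intel_sdvo_dpms() above never touches the hardware when
 * the connector has no CRTC; it records connectors_active and lets
 * intel_crtc_update_dpms() reconcile the pipe state. The control flow in
 * sketch form, with hypothetical types in place of the intel ones:
 */
#include <stdbool.h>

struct connector_state {
	bool active;
	void *crtc;			/* null when not routed to a pipe */
};

static void connector_dpms(struct connector_state *c, bool on,
			   void (*update_crtc)(void *crtc))
{
	if (!c->crtc) {
		c->active = false;	/* track state only, no hw access */
		return;
	}

	c->active = on;
	update_crtc(c->crtc);		/* CRTC code decides what to toggle */
}
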
 
 static int intel_sdvo_mode_valid(struct drm_connector *connector,
@@ -1251,25 +1340,29 @@
 	return true;
 }
 
-static int intel_sdvo_supports_hotplug(struct intel_sdvo *intel_sdvo)
+static uint16_t intel_sdvo_get_hotplug_support(struct intel_sdvo *intel_sdvo)
 {
 	struct drm_device *dev = intel_sdvo->base.base.dev;
-	u8 response[2];
+	uint16_t hotplug;
 
 	/* HW Erratum: SDVO Hotplug is broken on all i945G chips, there's noise
 	 * on the line. */
 	if (IS_I945G(dev) || IS_I945GM(dev))
-		return false;
+		return 0;
 
-	return intel_sdvo_get_value(intel_sdvo, SDVO_CMD_GET_HOT_PLUG_SUPPORT,
-				    &response, 2) && response[0];
+	if (!intel_sdvo_get_value(intel_sdvo, SDVO_CMD_GET_HOT_PLUG_SUPPORT,
+					&hotplug, sizeof(hotplug)))
+		return 0;
+
+	return hotplug;
 }
 
 static void intel_sdvo_enable_hotplug(struct intel_encoder *encoder)
 {
 	struct intel_sdvo *intel_sdvo = to_intel_sdvo(&encoder->base);
 
-	intel_sdvo_write_cmd(intel_sdvo, SDVO_CMD_SET_ACTIVE_HOT_PLUG, &intel_sdvo->hotplug_active, 2);
+	intel_sdvo_write_cmd(intel_sdvo, SDVO_CMD_SET_ACTIVE_HOT_PLUG,
+			&intel_sdvo->hotplug_active, 2);
 }
 
 static bool
@@ -1345,7 +1438,6 @@
 			}
 		} else
 			status = connector_status_disconnected;
-		connector->display_info.raw_edid = NULL;
 		kfree(edid);
 	}
 
@@ -1419,7 +1511,6 @@
 			else
 				ret = connector_status_disconnected;
 
-			connector->display_info.raw_edid = NULL;
 			kfree(edid);
 		} else
 			ret = connector_status_connected;
@@ -1465,7 +1556,6 @@
 			drm_add_edid_modes(connector, edid);
 		}
 
-		connector->display_info.raw_edid = NULL;
 		kfree(edid);
 	}
 }
@@ -1837,8 +1927,8 @@
 done:
 	if (intel_sdvo->base.base.crtc) {
 		struct drm_crtc *crtc = intel_sdvo->base.base.crtc;
-		drm_crtc_helper_set_mode(crtc, &crtc->mode, crtc->x,
-					 crtc->y, crtc->fb);
+		intel_set_mode(crtc, &crtc->mode,
+			       crtc->x, crtc->y, crtc->fb);
 	}
 
 	return 0;
@@ -1846,15 +1936,13 @@
 }
 
 static const struct drm_encoder_helper_funcs intel_sdvo_helper_funcs = {
-	.dpms = intel_sdvo_dpms,
 	.mode_fixup = intel_sdvo_mode_fixup,
-	.prepare = intel_encoder_prepare,
 	.mode_set = intel_sdvo_mode_set,
-	.commit = intel_encoder_commit,
+	.disable = intel_encoder_noop,
 };
 
 static const struct drm_connector_funcs intel_sdvo_connector_funcs = {
-	.dpms = drm_helper_connector_dpms,
+	.dpms = intel_sdvo_dpms,
 	.detect = intel_sdvo_detect,
 	.fill_modes = drm_helper_probe_single_connector_modes,
 	.set_property = intel_sdvo_set_property,
@@ -2026,6 +2114,7 @@
 	connector->base.base.interlace_allowed = 1;
 	connector->base.base.doublescan_allowed = 0;
 	connector->base.base.display_info.subpixel_order = SubPixelHorizontalRGB;
+	connector->base.get_hw_state = intel_sdvo_connector_get_hw_state;
 
 	intel_connector_attach_encoder(&connector->base, &encoder->base);
 	drm_sysfs_connector_add(&connector->base.base);
@@ -2064,17 +2153,18 @@
 
 	intel_connector = &intel_sdvo_connector->base;
 	connector = &intel_connector->base;
-	if (intel_sdvo_supports_hotplug(intel_sdvo) & (1 << device)) {
+	if (intel_sdvo_get_hotplug_support(intel_sdvo) &
+		intel_sdvo_connector->output_flag) {
 		connector->polled = DRM_CONNECTOR_POLL_HPD;
-		intel_sdvo->hotplug_active[0] |= 1 << device;
+		intel_sdvo->hotplug_active |= intel_sdvo_connector->output_flag;
 		/* Some SDVO devices have one-shot hotplug interrupts.
 		 * Ensure that they get re-enabled when an interrupt happens.
 		 */
 		intel_encoder->hot_plug = intel_sdvo_enable_hotplug;
 		intel_sdvo_enable_hotplug(intel_encoder);
-	}
-	else
+	} else {
 		connector->polled = DRM_CONNECTOR_POLL_CONNECT | DRM_CONNECTOR_POLL_DISCONNECT;
+	}
 	encoder->encoder_type = DRM_MODE_ENCODER_TMDS;
 	connector->connector_type = DRM_MODE_CONNECTOR_DVID;
 
@@ -2082,8 +2172,7 @@
 		connector->connector_type = DRM_MODE_CONNECTOR_HDMIA;
 		intel_sdvo->is_hdmi = true;
 	}
-	intel_sdvo->base.clone_mask = ((1 << INTEL_SDVO_NON_TV_CLONE_BIT) |
-				       (1 << INTEL_ANALOG_CLONE_BIT));
+	intel_sdvo->base.cloneable = true;
 
 	intel_sdvo_connector_init(intel_sdvo_connector, intel_sdvo);
 	if (intel_sdvo->is_hdmi)
@@ -2114,7 +2203,7 @@
 
 	intel_sdvo->is_tv = true;
 	intel_sdvo->base.needs_tv_clock = true;
-	intel_sdvo->base.clone_mask = 1 << INTEL_SDVO_TV_CLONE_BIT;
+	intel_sdvo->base.cloneable = false;
 
 	intel_sdvo_connector_init(intel_sdvo_connector, intel_sdvo);
 
@@ -2157,8 +2246,7 @@
 		intel_sdvo_connector->output_flag = SDVO_OUTPUT_RGB1;
 	}
 
-	intel_sdvo->base.clone_mask = ((1 << INTEL_SDVO_NON_TV_CLONE_BIT) |
-				       (1 << INTEL_ANALOG_CLONE_BIT));
+	intel_sdvo->base.cloneable = true;
 
 	intel_sdvo_connector_init(intel_sdvo_connector,
 				  intel_sdvo);
@@ -2190,8 +2278,10 @@
 		intel_sdvo_connector->output_flag = SDVO_OUTPUT_LVDS1;
 	}
 
-	intel_sdvo->base.clone_mask = ((1 << INTEL_ANALOG_CLONE_BIT) |
-				       (1 << INTEL_SDVO_LVDS_CLONE_BIT));
+	/* SDVO LVDS is cloneable because the SDVO encoder does the upscaling,
+	 * as opposed to native LVDS, where we upscale with the panel-fitter
+	 * (and hence only the native LVDS resolution could be cloned). */
+	intel_sdvo->base.cloneable = true;
 
 	intel_sdvo_connector_init(intel_sdvo_connector, intel_sdvo);
 	if (!intel_sdvo_create_enhance_property(intel_sdvo, intel_sdvo_connector))
@@ -2576,6 +2666,10 @@
 
 	drm_encoder_helper_add(&intel_encoder->base, &intel_sdvo_helper_funcs);
 
+	intel_encoder->disable = intel_disable_sdvo;
+	intel_encoder->enable = intel_enable_sdvo;
+	intel_encoder->get_hw_state = intel_sdvo_get_hw_state;
+
 	/* In default case sdvo lvds is false */
 	if (!intel_sdvo_get_capabilities(intel_sdvo, &intel_sdvo->caps))
 		goto err;
@@ -2590,7 +2684,7 @@
 	/* Only enable the hotplug irq if we need it, to work around noisy
 	 * hotplug lines.
 	 */
-	if (intel_sdvo->hotplug_active[0])
+	if (intel_sdvo->hotplug_active)
 		dev_priv->hotplug_supported_mask |= hotplug_mask;
 
 	intel_sdvo_select_ddc_bus(dev_priv, intel_sdvo, sdvo_reg);
diff --git a/drivers/gpu/drm/i915/intel_tv.c b/drivers/gpu/drm/i915/intel_tv.c
index befce6c..d2c5c8f 100644
--- a/drivers/gpu/drm/i915/intel_tv.c
+++ b/drivers/gpu/drm/i915/intel_tv.c
@@ -836,22 +836,37 @@
 			    base);
 }
 
-static void
-intel_tv_dpms(struct drm_encoder *encoder, int mode)
+static bool
+intel_tv_get_hw_state(struct intel_encoder *encoder, enum pipe *pipe)
 {
-	struct drm_device *dev = encoder->dev;
+	struct drm_device *dev = encoder->base.dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	u32 tmp = I915_READ(TV_CTL);
+
+	if (!(tmp & TV_ENC_ENABLE))
+		return false;
+
+	*pipe = PORT_TO_PIPE(tmp);
+
+	return true;
+}
+
+static void
+intel_enable_tv(struct intel_encoder *encoder)
+{
+	struct drm_device *dev = encoder->base.dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
 
-	switch (mode) {
-	case DRM_MODE_DPMS_ON:
-		I915_WRITE(TV_CTL, I915_READ(TV_CTL) | TV_ENC_ENABLE);
-		break;
-	case DRM_MODE_DPMS_STANDBY:
-	case DRM_MODE_DPMS_SUSPEND:
-	case DRM_MODE_DPMS_OFF:
-		I915_WRITE(TV_CTL, I915_READ(TV_CTL) & ~TV_ENC_ENABLE);
-		break;
-	}
+	I915_WRITE(TV_CTL, I915_READ(TV_CTL) | TV_ENC_ENABLE);
+}
+
+static void
+intel_disable_tv(struct intel_encoder *encoder)
+{
+	struct drm_device *dev = encoder->base.dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+
+	I915_WRITE(TV_CTL, I915_READ(TV_CTL) & ~TV_ENC_ENABLE);
 }
 
 static const struct tv_mode *
@@ -895,17 +910,14 @@
 		    const struct drm_display_mode *mode,
 		    struct drm_display_mode *adjusted_mode)
 {
-	struct drm_device *dev = encoder->dev;
 	struct intel_tv *intel_tv = enc_to_intel_tv(encoder);
 	const struct tv_mode *tv_mode = intel_tv_mode_find(intel_tv);
-	struct intel_encoder *other_encoder;
 
 	if (!tv_mode)
 		return false;
 
-	for_each_encoder_on_crtc(dev, encoder->crtc, other_encoder)
-		if (&other_encoder->base != encoder)
-			return false;
+	if (intel_encoder_check_is_cloned(&intel_tv->base))
+		return false;
 
 	adjusted_mode->clock = tv_mode->clock;
 	return true;
@@ -1303,12 +1315,9 @@
 	if (force) {
 		struct intel_load_detect_pipe tmp;
 
-		if (intel_get_load_detect_pipe(&intel_tv->base, connector,
-					       &mode, &tmp)) {
+		if (intel_get_load_detect_pipe(connector, &mode, &tmp)) {
 			type = intel_tv_detect_type(intel_tv, connector);
-			intel_release_load_detect_pipe(&intel_tv->base,
-						       connector,
-						       &tmp);
+			intel_release_load_detect_pipe(connector, &tmp);
 		} else
 			return connector_status_unknown;
 	} else
@@ -1474,22 +1483,20 @@
 	}
 
 	if (changed && crtc)
-		drm_crtc_helper_set_mode(crtc, &crtc->mode, crtc->x,
-				crtc->y, crtc->fb);
+		intel_set_mode(crtc, &crtc->mode,
+			       crtc->x, crtc->y, crtc->fb);
 out:
 	return ret;
 }
 
 static const struct drm_encoder_helper_funcs intel_tv_helper_funcs = {
-	.dpms = intel_tv_dpms,
 	.mode_fixup = intel_tv_mode_fixup,
-	.prepare = intel_encoder_prepare,
 	.mode_set = intel_tv_mode_set,
-	.commit = intel_encoder_commit,
+	.disable = intel_encoder_noop,
 };
 
 static const struct drm_connector_funcs intel_tv_connector_funcs = {
-	.dpms = drm_helper_connector_dpms,
+	.dpms = intel_connector_dpms,
 	.detect = intel_tv_detect,
 	.destroy = intel_tv_destroy,
 	.set_property = intel_tv_set_property,
@@ -1619,10 +1626,15 @@
 	drm_encoder_init(dev, &intel_encoder->base, &intel_tv_enc_funcs,
 			 DRM_MODE_ENCODER_TVDAC);
 
+	intel_encoder->enable = intel_enable_tv;
+	intel_encoder->disable = intel_disable_tv;
+	intel_encoder->get_hw_state = intel_tv_get_hw_state;
+	intel_connector->get_hw_state = intel_connector_get_hw_state;
+
 	intel_connector_attach_encoder(intel_connector, intel_encoder);
 	intel_encoder->type = INTEL_OUTPUT_TVOUT;
 	intel_encoder->crtc_mask = (1 << 0) | (1 << 1);
-	intel_encoder->clone_mask = (1 << INTEL_TV_CLONE_BIT);
+	intel_encoder->cloneable = false;
 	intel_encoder->base.possible_crtcs = ((1 << 0) | (1 << 1));
 	intel_encoder->base.possible_clones = (1 << INTEL_OUTPUT_TVOUT);
 	intel_tv->type = DRM_MODE_CONNECTOR_Unknown;
diff --git a/drivers/gpu/drm/mgag200/mgag200_drv.h b/drivers/gpu/drm/mgag200/mgag200_drv.h
index 6f13b35..d22cbbf 100644
--- a/drivers/gpu/drm/mgag200/mgag200_drv.h
+++ b/drivers/gpu/drm/mgag200/mgag200_drv.h
@@ -195,7 +195,6 @@
 		struct drm_global_reference mem_global_ref;
 		struct ttm_bo_global_ref bo_global_ref;
 		struct ttm_bo_device bdev;
-		atomic_t validate_sequence;
 	} ttm;
 
 	u32 reg_1e24; /* SE model number */
diff --git a/drivers/gpu/drm/mgag200/mgag200_mode.c b/drivers/gpu/drm/mgag200/mgag200_mode.c
index b69642d..c7420e8 100644
--- a/drivers/gpu/drm/mgag200/mgag200_mode.c
+++ b/drivers/gpu/drm/mgag200/mgag200_mode.c
@@ -1399,7 +1399,6 @@
 	if (edid) {
 		drm_mode_connector_update_edid_property(connector, edid);
 		ret = drm_add_edid_modes(connector, edid);
-		connector->display_info.raw_edid = NULL;
 		kfree(edid);
 	}
 	return ret;
diff --git a/drivers/gpu/drm/nouveau/nouveau_dp.c b/drivers/gpu/drm/nouveau/nouveau_dp.c
index 7e289d2..e754aa3 100644
--- a/drivers/gpu/drm/nouveau/nouveau_dp.c
+++ b/drivers/gpu/drm/nouveau/nouveau_dp.c
@@ -289,9 +289,7 @@
 	if (ret)
 		return ret;
 
-	NV_DEBUG_KMS(dev, "status %02x %02x %02x %02x %02x %02x\n",
-		     dp->stat[0], dp->stat[1], dp->stat[2], dp->stat[3],
-		     dp->stat[4], dp->stat[5]);
+	NV_DEBUG_KMS(dev, "status %*ph\n", 6, dp->stat);
 	return 0;
 }
 
diff --git a/drivers/gpu/drm/radeon/atombios_dp.c b/drivers/gpu/drm/radeon/atombios_dp.c
index 3623b98..d48224b 100644
--- a/drivers/gpu/drm/radeon/atombios_dp.c
+++ b/drivers/gpu/drm/radeon/atombios_dp.c
@@ -653,9 +653,7 @@
 		return false;
 	}
 
-	DRM_DEBUG_KMS("link status %02x %02x %02x %02x %02x %02x\n",
-		  link_status[0], link_status[1], link_status[2],
-		  link_status[3], link_status[4], link_status[5]);
+	DRM_DEBUG_KMS("link status %*ph\n", 6, link_status);
 	return true;
 }
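
/*
 * The two debug-print conversions above use the kernel's "%*ph" printf
 * extension, which dumps a short buffer as hex bytes in one conversion
 * instead of six separate "%02x" arguments. Standard C has no such
 * specifier; an equivalent userspace sketch builds the output by hand:
 */
#include <stdio.h>

static void print_link_status(const unsigned char *status, int len)
{
	int i;

	printf("link status");
	for (i = 0; i < len; i++)	/* roughly what "%*ph" expands to */
		printf(" %02x", status[i]);
	printf("\n");
}
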
 
diff --git a/drivers/gpu/drm/shmobile/Kconfig b/drivers/gpu/drm/shmobile/Kconfig
new file mode 100644
index 0000000..7e7d52b
--- /dev/null
+++ b/drivers/gpu/drm/shmobile/Kconfig
@@ -0,0 +1,10 @@
+config DRM_SHMOBILE
+	tristate "DRM Support for SH Mobile"
+	depends on DRM && (SUPERH || ARCH_SHMOBILE)
+	select DRM_KMS_HELPER
+	select DRM_KMS_CMA_HELPER
+	select DRM_GEM_CMA_HELPER
+	help
+	  Choose this option if you have an SH Mobile chipset.
+	  If M is selected the module will be called shmob-drm.
+
diff --git a/drivers/gpu/drm/shmobile/Makefile b/drivers/gpu/drm/shmobile/Makefile
new file mode 100644
index 0000000..4c3eeb3
--- /dev/null
+++ b/drivers/gpu/drm/shmobile/Makefile
@@ -0,0 +1,7 @@
+shmob-drm-y := shmob_drm_backlight.o \
+	       shmob_drm_crtc.o \
+	       shmob_drm_drv.o \
+	       shmob_drm_kms.o \
+	       shmob_drm_plane.o
+
+obj-$(CONFIG_DRM_SHMOBILE)	+= shmob-drm.o
diff --git a/drivers/gpu/drm/shmobile/shmob_drm_backlight.c b/drivers/gpu/drm/shmobile/shmob_drm_backlight.c
new file mode 100644
index 0000000..463aee1
--- /dev/null
+++ b/drivers/gpu/drm/shmobile/shmob_drm_backlight.c
@@ -0,0 +1,90 @@
+/*
+ * shmob_drm_backlight.c  --  SH Mobile DRM Backlight
+ *
+ * Copyright (C) 2012 Renesas Corporation
+ *
+ * Laurent Pinchart (laurent.pinchart@ideasonboard.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <linux/backlight.h>
+
+#include "shmob_drm_backlight.h"
+#include "shmob_drm_crtc.h"
+#include "shmob_drm_drv.h"
+
+static int shmob_drm_backlight_update(struct backlight_device *bdev)
+{
+	struct shmob_drm_connector *scon = bl_get_data(bdev);
+	struct shmob_drm_device *sdev = scon->connector.dev->dev_private;
+	const struct shmob_drm_backlight_data *bdata = &sdev->pdata->backlight;
+	int brightness = bdev->props.brightness;
+
+	if (bdev->props.power != FB_BLANK_UNBLANK ||
+	    bdev->props.state & BL_CORE_SUSPENDED)
+		brightness = 0;
+
+	return bdata->set_brightness(brightness);
+}
+
+static int shmob_drm_backlight_get_brightness(struct backlight_device *bdev)
+{
+	struct shmob_drm_connector *scon = bl_get_data(bdev);
+	struct shmob_drm_device *sdev = scon->connector.dev->dev_private;
+	const struct shmob_drm_backlight_data *bdata = &sdev->pdata->backlight;
+
+	return bdata->get_brightness();
+}
+
+static const struct backlight_ops shmob_drm_backlight_ops = {
+	.options	= BL_CORE_SUSPENDRESUME,
+	.update_status	= shmob_drm_backlight_update,
+	.get_brightness	= shmob_drm_backlight_get_brightness,
+};
+
+void shmob_drm_backlight_dpms(struct shmob_drm_connector *scon, int mode)
+{
+	if (scon->backlight == NULL)
+		return;
+
+	scon->backlight->props.power = mode == DRM_MODE_DPMS_ON
+				     ? FB_BLANK_UNBLANK : FB_BLANK_POWERDOWN;
+	backlight_update_status(scon->backlight);
+}
+
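
/*
 * shmob_drm_backlight_dpms() above maps DRM_MODE_DPMS_ON to
 * FB_BLANK_UNBLANK and every other DPMS mode to FB_BLANK_POWERDOWN.
 * The same mapping as a standalone sketch; the constant values are
 * illustrative, not the real fbdev/DRM definitions:
 */
enum { DPMS_ON = 0, BLANK_UNBLANK = 0, BLANK_POWERDOWN = 4 };

static int dpms_to_blank(int dpms_mode)
{
	/* standby, suspend and off all power the backlight down */
	return dpms_mode == DPMS_ON ? BLANK_UNBLANK : BLANK_POWERDOWN;
}
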
+int shmob_drm_backlight_init(struct shmob_drm_connector *scon)
+{
+	struct shmob_drm_device *sdev = scon->connector.dev->dev_private;
+	const struct shmob_drm_backlight_data *bdata = &sdev->pdata->backlight;
+	struct drm_connector *connector = &scon->connector;
+	struct drm_device *dev = connector->dev;
+	struct backlight_device *backlight;
+
+	if (!bdata->max_brightness)
+		return 0;
+
+	backlight = backlight_device_register(bdata->name, dev->dev, scon,
+					      &shmob_drm_backlight_ops, NULL);
+	if (IS_ERR(backlight)) {
+		dev_err(dev->dev, "unable to register backlight device: %ld\n",
+			PTR_ERR(backlight));
+		return PTR_ERR(backlight);
+	}
+
+	backlight->props.max_brightness = bdata->max_brightness;
+	backlight->props.brightness = bdata->max_brightness;
+	backlight->props.power = FB_BLANK_POWERDOWN;
+	backlight_update_status(backlight);
+
+	scon->backlight = backlight;
+	return 0;
+}
+
+void shmob_drm_backlight_exit(struct shmob_drm_connector *scon)
+{
+	backlight_device_unregister(scon->backlight);
+}
diff --git a/drivers/gpu/drm/shmobile/shmob_drm_backlight.h b/drivers/gpu/drm/shmobile/shmob_drm_backlight.h
new file mode 100644
index 0000000..9477595
--- /dev/null
+++ b/drivers/gpu/drm/shmobile/shmob_drm_backlight.h
@@ -0,0 +1,23 @@
+/*
+ * shmob_drm_backlight.h  --  SH Mobile DRM Backlight
+ *
+ * Copyright (C) 2012 Renesas Corporation
+ *
+ * Laurent Pinchart (laurent.pinchart@ideasonboard.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#ifndef __SHMOB_DRM_BACKLIGHT_H__
+#define __SHMOB_DRM_BACKLIGHT_H__
+
+struct shmob_drm_connector;
+
+void shmob_drm_backlight_dpms(struct shmob_drm_connector *scon, int mode);
+int shmob_drm_backlight_init(struct shmob_drm_connector *scon);
+void shmob_drm_backlight_exit(struct shmob_drm_connector *scon);
+
+#endif /* __SHMOB_DRM_BACKLIGHT_H__ */
diff --git a/drivers/gpu/drm/shmobile/shmob_drm_crtc.c b/drivers/gpu/drm/shmobile/shmob_drm_crtc.c
new file mode 100644
index 0000000..0e7a930
--- /dev/null
+++ b/drivers/gpu/drm/shmobile/shmob_drm_crtc.c
@@ -0,0 +1,763 @@
+/*
+ * shmob_drm_crtc.c  --  SH Mobile DRM CRTCs
+ *
+ * Copyright (C) 2012 Renesas Corporation
+ *
+ * Laurent Pinchart (laurent.pinchart@ideasonboard.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <linux/backlight.h>
+#include <linux/clk.h>
+
+#include <drm/drmP.h>
+#include <drm/drm_crtc.h>
+#include <drm/drm_crtc_helper.h>
+#include <drm/drm_fb_cma_helper.h>
+#include <drm/drm_gem_cma_helper.h>
+
+#include <video/sh_mobile_meram.h>
+
+#include "shmob_drm_backlight.h"
+#include "shmob_drm_crtc.h"
+#include "shmob_drm_drv.h"
+#include "shmob_drm_kms.h"
+#include "shmob_drm_plane.h"
+#include "shmob_drm_regs.h"
+
+/*
+ * TODO: panel support
+ */
+
+/* -----------------------------------------------------------------------------
+ * Clock management
+ */
+
+static void shmob_drm_clk_on(struct shmob_drm_device *sdev)
+{
+	if (sdev->clock)
+		clk_enable(sdev->clock);
+#if 0
+	if (sdev->meram_dev && sdev->meram_dev->pdev)
+		pm_runtime_get_sync(&sdev->meram_dev->pdev->dev);
+#endif
+}
+
+static void shmob_drm_clk_off(struct shmob_drm_device *sdev)
+{
+#if 0
+	if (sdev->meram_dev && sdev->meram_dev->pdev)
+		pm_runtime_put_sync(&sdev->meram_dev->pdev->dev);
+#endif
+	if (sdev->clock)
+		clk_disable(sdev->clock);
+}
+
+/* -----------------------------------------------------------------------------
+ * CRTC
+ */
+
+static void shmob_drm_crtc_setup_geometry(struct shmob_drm_crtc *scrtc)
+{
+	struct drm_crtc *crtc = &scrtc->crtc;
+	struct shmob_drm_device *sdev = crtc->dev->dev_private;
+	const struct shmob_drm_interface_data *idata = &sdev->pdata->iface;
+	const struct drm_display_mode *mode = &crtc->mode;
+	u32 value;
+
+	value = sdev->ldmt1r
+	      | ((mode->flags & DRM_MODE_FLAG_PVSYNC) ? 0 : LDMT1R_VPOL)
+	      | ((mode->flags & DRM_MODE_FLAG_PHSYNC) ? 0 : LDMT1R_HPOL)
+	      | ((idata->flags & SHMOB_DRM_IFACE_FL_DWPOL) ? LDMT1R_DWPOL : 0)
+	      | ((idata->flags & SHMOB_DRM_IFACE_FL_DIPOL) ? LDMT1R_DIPOL : 0)
+	      | ((idata->flags & SHMOB_DRM_IFACE_FL_DAPOL) ? LDMT1R_DAPOL : 0)
+	      | ((idata->flags & SHMOB_DRM_IFACE_FL_HSCNT) ? LDMT1R_HSCNT : 0)
+	      | ((idata->flags & SHMOB_DRM_IFACE_FL_DWCNT) ? LDMT1R_DWCNT : 0);
+	lcdc_write(sdev, LDMT1R, value);
+
+	if (idata->interface >= SHMOB_DRM_IFACE_SYS8A &&
+	    idata->interface <= SHMOB_DRM_IFACE_SYS24) {
+		/* Setup SYS bus. */
+		value = (idata->sys.cs_setup << LDMT2R_CSUP_SHIFT)
+		      | (idata->sys.vsync_active_high ? LDMT2R_RSV : 0)
+		      | (idata->sys.vsync_dir_input ? LDMT2R_VSEL : 0)
+		      | (idata->sys.write_setup << LDMT2R_WCSC_SHIFT)
+		      | (idata->sys.write_cycle << LDMT2R_WCEC_SHIFT)
+		      | (idata->sys.write_strobe << LDMT2R_WCLW_SHIFT);
+		lcdc_write(sdev, LDMT2R, value);
+
+		value = (idata->sys.read_latch << LDMT3R_RDLC_SHIFT)
+		      | (idata->sys.read_setup << LDMT3R_RCSC_SHIFT)
+		      | (idata->sys.read_cycle << LDMT3R_RCEC_SHIFT)
+		      | (idata->sys.read_strobe << LDMT3R_RCLW_SHIFT);
+		lcdc_write(sdev, LDMT3R, value);
+	}
+
+	value = ((mode->hdisplay / 8) << 16)			/* HDCN */
+	      | (mode->htotal / 8);				/* HTCN */
+	lcdc_write(sdev, LDHCNR, value);
+
+	value = (((mode->hsync_end - mode->hsync_start) / 8) << 16) /* HSYNW */
+	      | (mode->hsync_start / 8);			/* HSYNP */
+	lcdc_write(sdev, LDHSYNR, value);
+
+	value = ((mode->hdisplay & 7) << 24) | ((mode->htotal & 7) << 16)
+	      | (((mode->hsync_end - mode->hsync_start) & 7) << 8)
+	      | (mode->hsync_start & 7);
+	lcdc_write(sdev, LDHAJR, value);
+
+	value = ((mode->vdisplay) << 16)			/* VDLN */
+	      | mode->vtotal;					/* VTLN */
+	lcdc_write(sdev, LDVLNR, value);
+
+	value = ((mode->vsync_end - mode->vsync_start) << 16)	/* VSYNW */
+	      | mode->vsync_start;				/* VSYNP */
+	lcdc_write(sdev, LDVSYNR, value);
+}
+
+static void shmob_drm_crtc_start_stop(struct shmob_drm_crtc *scrtc, bool start)
+{
+	struct shmob_drm_device *sdev = scrtc->crtc.dev->dev_private;
+	u32 value;
+
+	value = lcdc_read(sdev, LDCNT2R);
+	if (start)
+		lcdc_write(sdev, LDCNT2R, value | LDCNT2R_DO);
+	else
+		lcdc_write(sdev, LDCNT2R, value & ~LDCNT2R_DO);
+
+	/* Wait until power is applied/stopped. */
+	while (1) {
+		value = lcdc_read(sdev, LDPMR) & LDPMR_LPS;
+		if ((start && value) || (!start && !value))
+			break;
+
+		cpu_relax();
+	}
+
+	if (!start) {
+		/* Stop the dot clock. */
+		lcdc_write(sdev, LDDCKSTPR, LDDCKSTPR_DCKSTP);
+	}
+}
+
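
/*
 * shmob_drm_crtc_start_stop() above spins on LDPMR until the power state
 * matches the request, calling cpu_relax() between reads. The generic
 * shape of that wait, with a hypothetical register pointer (a production
 * loop would also bound the spin with a timeout):
 */
#include <stdbool.h>

static void wait_power_state(volatile unsigned int *pmr, unsigned int mask,
			     bool want_on)
{
	for (;;) {
		bool on = (*pmr & mask) != 0;

		if (on == want_on)
			break;		/* power applied or removed */
		/* cpu_relax() equivalent: just spin in this sketch */
	}
}
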
+/*
+ * shmob_drm_crtc_start - Configure and start the LCDC
+ * @scrtc: the SH Mobile CRTC
+ *
+ * Configure and start the LCDC device. External devices (clocks, MERAM, panels,
+ * ...) are not touched by this function.
+ */
+static void shmob_drm_crtc_start(struct shmob_drm_crtc *scrtc)
+{
+	struct drm_crtc *crtc = &scrtc->crtc;
+	struct shmob_drm_device *sdev = crtc->dev->dev_private;
+	const struct shmob_drm_interface_data *idata = &sdev->pdata->iface;
+	const struct shmob_drm_format_info *format;
+	struct drm_device *dev = sdev->ddev;
+	struct drm_plane *plane;
+	u32 value;
+
+	if (scrtc->started)
+		return;
+
+	format = shmob_drm_format_info(crtc->fb->pixel_format);
+	if (WARN_ON(format == NULL))
+		return;
+
+	/* Enable clocks before accessing the hardware. */
+	shmob_drm_clk_on(sdev);
+
+	/* Reset and enable the LCDC. */
+	lcdc_write(sdev, LDCNT2R, lcdc_read(sdev, LDCNT2R) | LDCNT2R_BR);
+	lcdc_wait_bit(sdev, LDCNT2R, LDCNT2R_BR, 0);
+	lcdc_write(sdev, LDCNT2R, LDCNT2R_ME);
+
+	/* Stop the LCDC first and disable all interrupts. */
+	shmob_drm_crtc_start_stop(scrtc, false);
+	lcdc_write(sdev, LDINTR, 0);
+
+	/* Configure power supply, dot clocks and start them. */
+	lcdc_write(sdev, LDPMR, 0);
+
+	value = sdev->lddckr;
+	if (idata->clk_div) {
+		/* FIXME: sh7724 can only use 42, 48, 54 and 60 for the divider
+		 * denominator.
+		 */
+		lcdc_write(sdev, LDDCKPAT1R, 0);
+		lcdc_write(sdev, LDDCKPAT2R, (1 << (idata->clk_div / 2)) - 1);
+
+		if (idata->clk_div == 1)
+			value |= LDDCKR_MOSEL;
+		else
+			value |= idata->clk_div;
+	}
+
+	lcdc_write(sdev, LDDCKR, value);
+	lcdc_write(sdev, LDDCKSTPR, 0);
+	lcdc_wait_bit(sdev, LDDCKSTPR, ~0, 0);
+
+	/* TODO: Setup SYS panel */
+
+	/* Setup geometry, format, frame buffer memory and operation mode. */
+	shmob_drm_crtc_setup_geometry(scrtc);
+
+	/* TODO: Handle YUV colorspaces. Hardcode REC709 for now. */
+	lcdc_write(sdev, LDDFR, format->lddfr | LDDFR_CF1);
+	lcdc_write(sdev, LDMLSR, scrtc->line_size);
+	lcdc_write(sdev, LDSA1R, scrtc->dma[0]);
+	if (format->yuv)
+		lcdc_write(sdev, LDSA2R, scrtc->dma[1]);
+	lcdc_write(sdev, LDSM1R, 0);
+
+	/* Word and long word swap. */
+	switch (format->fourcc) {
+	case DRM_FORMAT_RGB565:
+	case DRM_FORMAT_NV21:
+	case DRM_FORMAT_NV61:
+	case DRM_FORMAT_NV42:
+		value = LDDDSR_LS | LDDDSR_WS;
+		break;
+	case DRM_FORMAT_RGB888:
+	case DRM_FORMAT_NV12:
+	case DRM_FORMAT_NV16:
+	case DRM_FORMAT_NV24:
+		value = LDDDSR_LS | LDDDSR_WS | LDDDSR_BS;
+		break;
+	case DRM_FORMAT_ARGB8888:
+	default:
+		value = LDDDSR_LS;
+		break;
+	}
+	lcdc_write(sdev, LDDDSR, value);
+
+	/* Setup planes. */
+	list_for_each_entry(plane, &dev->mode_config.plane_list, head) {
+		if (plane->crtc == crtc)
+			shmob_drm_plane_setup(plane);
+	}
+
+	/* Enable the display output. */
+	lcdc_write(sdev, LDCNT1R, LDCNT1R_DE);
+
+	shmob_drm_crtc_start_stop(scrtc, true);
+
+	scrtc->started = true;
+}
+
+static void shmob_drm_crtc_stop(struct shmob_drm_crtc *scrtc)
+{
+	struct drm_crtc *crtc = &scrtc->crtc;
+	struct shmob_drm_device *sdev = crtc->dev->dev_private;
+
+	if (!scrtc->started)
+		return;
+
+	/* Disable the MERAM cache. */
+	if (scrtc->cache) {
+		sh_mobile_meram_cache_free(sdev->meram, scrtc->cache);
+		scrtc->cache = NULL;
+	}
+
+	/* Stop the LCDC. */
+	shmob_drm_crtc_start_stop(scrtc, false);
+
+	/* Disable the display output. */
+	lcdc_write(sdev, LDCNT1R, 0);
+
+	/* Stop clocks. */
+	shmob_drm_clk_off(sdev);
+
+	scrtc->started = false;
+}
+
+void shmob_drm_crtc_suspend(struct shmob_drm_crtc *scrtc)
+{
+	shmob_drm_crtc_stop(scrtc);
+}
+
+void shmob_drm_crtc_resume(struct shmob_drm_crtc *scrtc)
+{
+	if (scrtc->dpms != DRM_MODE_DPMS_ON)
+		return;
+
+	shmob_drm_crtc_start(scrtc);
+}
+
+static void shmob_drm_crtc_compute_base(struct shmob_drm_crtc *scrtc,
+					int x, int y)
+{
+	struct drm_crtc *crtc = &scrtc->crtc;
+	struct drm_framebuffer *fb = crtc->fb;
+	struct shmob_drm_device *sdev = crtc->dev->dev_private;
+	struct drm_gem_cma_object *gem;
+	unsigned int bpp;
+
+	bpp = scrtc->format->yuv ? 8 : scrtc->format->bpp;
+	gem = drm_fb_cma_get_gem_obj(fb, 0);
+	scrtc->dma[0] = gem->paddr + fb->offsets[0]
+		      + y * fb->pitches[0] + x * bpp / 8;
+
+	if (scrtc->format->yuv) {
+		bpp = scrtc->format->bpp - 8;
+		gem = drm_fb_cma_get_gem_obj(fb, 1);
+		scrtc->dma[1] = gem->paddr + fb->offsets[1]
+			      + y / (bpp == 4 ? 2 : 1) * fb->pitches[1]
+			      + x * (bpp == 16 ? 2 : 1);
+	}
+
+	if (scrtc->cache)
+		sh_mobile_meram_cache_update(sdev->meram, scrtc->cache,
+					     scrtc->dma[0], scrtc->dma[1],
+					     &scrtc->dma[0], &scrtc->dma[1]);
+}
+
+static void shmob_drm_crtc_update_base(struct shmob_drm_crtc *scrtc)
+{
+	struct drm_crtc *crtc = &scrtc->crtc;
+	struct shmob_drm_device *sdev = crtc->dev->dev_private;
+
+	shmob_drm_crtc_compute_base(scrtc, crtc->x, crtc->y);
+
+	lcdc_write_mirror(sdev, LDSA1R, scrtc->dma[0]);
+	if (scrtc->format->yuv)
+		lcdc_write_mirror(sdev, LDSA2R, scrtc->dma[1]);
+
+	lcdc_write(sdev, LDRCNTR, lcdc_read(sdev, LDRCNTR) ^ LDRCNTR_MRS);
+}
+
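
/*
 * shmob_drm_crtc_update_base() above is a classic double-buffered register
 * update: write the new DMA addresses to the mirror registers, then flip
 * LDRCNTR_MRS so the LCDC latches them at the next frame boundary. In
 * sketch form (names and the toggle bit are hypothetical):
 */
struct lcdc_regs {
	unsigned int mirror_addr;	/* shadow bank, safe to write anytime */
	unsigned int ctrl;		/* toggling bit 0 swaps the banks */
};

static void update_base(struct lcdc_regs *r, unsigned int new_addr)
{
	r->mirror_addr = new_addr;	/* program the inactive bank */
	r->ctrl ^= 1;			/* switch banks at the next vblank */
}
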
+#define to_shmob_crtc(c)	container_of(c, struct shmob_drm_crtc, crtc)
+
+static void shmob_drm_crtc_dpms(struct drm_crtc *crtc, int mode)
+{
+	struct shmob_drm_crtc *scrtc = to_shmob_crtc(crtc);
+
+	if (scrtc->dpms == mode)
+		return;
+
+	if (mode == DRM_MODE_DPMS_ON)
+		shmob_drm_crtc_start(scrtc);
+	else
+		shmob_drm_crtc_stop(scrtc);
+
+	scrtc->dpms = mode;
+}
+
+static bool shmob_drm_crtc_mode_fixup(struct drm_crtc *crtc,
+				      const struct drm_display_mode *mode,
+				      struct drm_display_mode *adjusted_mode)
+{
+	return true;
+}
+
+static void shmob_drm_crtc_mode_prepare(struct drm_crtc *crtc)
+{
+	shmob_drm_crtc_dpms(crtc, DRM_MODE_DPMS_OFF);
+}
+
+static int shmob_drm_crtc_mode_set(struct drm_crtc *crtc,
+				   struct drm_display_mode *mode,
+				   struct drm_display_mode *adjusted_mode,
+				   int x, int y,
+				   struct drm_framebuffer *old_fb)
+{
+	struct shmob_drm_crtc *scrtc = to_shmob_crtc(crtc);
+	struct shmob_drm_device *sdev = crtc->dev->dev_private;
+	const struct sh_mobile_meram_cfg *mdata = sdev->pdata->meram;
+	const struct shmob_drm_format_info *format;
+	void *cache;
+
+	format = shmob_drm_format_info(crtc->fb->pixel_format);
+	if (format == NULL) {
+		dev_dbg(sdev->dev, "mode_set: unsupported format %08x\n",
+			crtc->fb->pixel_format);
+		return -EINVAL;
+	}
+
+	scrtc->format = format;
+	scrtc->line_size = crtc->fb->pitches[0];
+
+	if (sdev->meram) {
+		/* Enable MERAM cache if configured. We need to de-init
+		 * configured ICBs before we can re-initialize them.
+		 */
+		if (scrtc->cache) {
+			sh_mobile_meram_cache_free(sdev->meram, scrtc->cache);
+			scrtc->cache = NULL;
+		}
+
+		cache = sh_mobile_meram_cache_alloc(sdev->meram, mdata,
+						    crtc->fb->pitches[0],
+						    adjusted_mode->vdisplay,
+						    format->meram,
+						    &scrtc->line_size);
+		if (!IS_ERR(cache))
+			scrtc->cache = cache;
+	}
+
+	shmob_drm_crtc_compute_base(scrtc, x, y);
+
+	return 0;
+}
+
+static void shmob_drm_crtc_mode_commit(struct drm_crtc *crtc)
+{
+	shmob_drm_crtc_dpms(crtc, DRM_MODE_DPMS_ON);
+}
+
+static int shmob_drm_crtc_mode_set_base(struct drm_crtc *crtc, int x, int y,
+					struct drm_framebuffer *old_fb)
+{
+	shmob_drm_crtc_update_base(to_shmob_crtc(crtc));
+
+	return 0;
+}
+
+static const struct drm_crtc_helper_funcs crtc_helper_funcs = {
+	.dpms = shmob_drm_crtc_dpms,
+	.mode_fixup = shmob_drm_crtc_mode_fixup,
+	.prepare = shmob_drm_crtc_mode_prepare,
+	.commit = shmob_drm_crtc_mode_commit,
+	.mode_set = shmob_drm_crtc_mode_set,
+	.mode_set_base = shmob_drm_crtc_mode_set_base,
+};
+
+void shmob_drm_crtc_cancel_page_flip(struct shmob_drm_crtc *scrtc,
+				     struct drm_file *file)
+{
+	struct drm_pending_vblank_event *event;
+	struct drm_device *dev = scrtc->crtc.dev;
+	unsigned long flags;
+
+	/* Destroy the pending vertical blanking event associated with the
+	 * pending page flip, if any, and disable vertical blanking interrupts.
+	 */
+	spin_lock_irqsave(&dev->event_lock, flags);
+	event = scrtc->event;
+	if (event && event->base.file_priv == file) {
+		scrtc->event = NULL;
+		event->base.destroy(&event->base);
+		drm_vblank_put(dev, 0);
+	}
+	spin_unlock_irqrestore(&dev->event_lock, flags);
+}
+
+void shmob_drm_crtc_finish_page_flip(struct shmob_drm_crtc *scrtc)
+{
+	struct drm_pending_vblank_event *event;
+	struct drm_device *dev = scrtc->crtc.dev;
+	struct timeval vblanktime;
+	unsigned long flags;
+
+	spin_lock_irqsave(&dev->event_lock, flags);
+	event = scrtc->event;
+	scrtc->event = NULL;
+	spin_unlock_irqrestore(&dev->event_lock, flags);
+
+	if (event == NULL)
+		return;
+
+	event->event.sequence = drm_vblank_count_and_time(dev, 0, &vblanktime);
+	event->event.tv_sec = vblanktime.tv_sec;
+	event->event.tv_usec = vblanktime.tv_usec;
+
+	spin_lock_irqsave(&dev->event_lock, flags);
+	list_add_tail(&event->base.link, &event->base.file_priv->event_list);
+	wake_up_interruptible(&event->base.file_priv->event_wait);
+	spin_unlock_irqrestore(&dev->event_lock, flags);
+
+	drm_vblank_put(dev, 0);
+}
+
+static int shmob_drm_crtc_page_flip(struct drm_crtc *crtc,
+				    struct drm_framebuffer *fb,
+				    struct drm_pending_vblank_event *event)
+{
+	struct shmob_drm_crtc *scrtc = to_shmob_crtc(crtc);
+	struct drm_device *dev = scrtc->crtc.dev;
+	unsigned long flags;
+
+	spin_lock_irqsave(&dev->event_lock, flags);
+	if (scrtc->event != NULL) {
+		spin_unlock_irqrestore(&dev->event_lock, flags);
+		return -EBUSY;
+	}
+	spin_unlock_irqrestore(&dev->event_lock, flags);
+
+	crtc->fb = fb;
+	shmob_drm_crtc_update_base(scrtc);
+
+	if (event) {
+		event->pipe = 0;
+		spin_lock_irqsave(&dev->event_lock, flags);
+		scrtc->event = event;
+		spin_unlock_irqrestore(&dev->event_lock, flags);
+		drm_vblank_get(dev, 0);
+	}
+
+	return 0;
+}
+
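
/*
 * shmob_drm_crtc_page_flip() above allows exactly one outstanding flip:
 * a second request while scrtc->event is pending fails with -EBUSY, and
 * the event pointer is only ever touched under dev->event_lock. The core
 * check, minus the locking, as a sketch:
 */
struct flip_state {
	void *pending_event;		/* completed by the vblank handler */
};

static int queue_flip(struct flip_state *s, void *event)
{
	if (s->pending_event)
		return -16;		/* -EBUSY: a flip is already queued */
	s->pending_event = event;
	return 0;
}
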
+static const struct drm_crtc_funcs crtc_funcs = {
+	.destroy = drm_crtc_cleanup,
+	.set_config = drm_crtc_helper_set_config,
+	.page_flip = shmob_drm_crtc_page_flip,
+};
+
+int shmob_drm_crtc_create(struct shmob_drm_device *sdev)
+{
+	struct drm_crtc *crtc = &sdev->crtc.crtc;
+	int ret;
+
+	sdev->crtc.dpms = DRM_MODE_DPMS_OFF;
+
+	ret = drm_crtc_init(sdev->ddev, crtc, &crtc_funcs);
+	if (ret < 0)
+		return ret;
+
+	drm_crtc_helper_add(crtc, &crtc_helper_funcs);
+
+	return 0;
+}
+
+/* -----------------------------------------------------------------------------
+ * Encoder
+ */
+
+#define to_shmob_encoder(e) \
+	container_of(e, struct shmob_drm_encoder, encoder)
+
+static void shmob_drm_encoder_dpms(struct drm_encoder *encoder, int mode)
+{
+	struct shmob_drm_encoder *senc = to_shmob_encoder(encoder);
+	struct shmob_drm_device *sdev = encoder->dev->dev_private;
+	struct shmob_drm_connector *scon = &sdev->connector;
+
+	if (senc->dpms == mode)
+		return;
+
+	shmob_drm_backlight_dpms(scon, mode);
+
+	senc->dpms = mode;
+}
+
+static bool shmob_drm_encoder_mode_fixup(struct drm_encoder *encoder,
+					 const struct drm_display_mode *mode,
+					 struct drm_display_mode *adjusted_mode)
+{
+	struct drm_device *dev = encoder->dev;
+	struct shmob_drm_device *sdev = dev->dev_private;
+	struct drm_connector *connector = &sdev->connector.connector;
+	const struct drm_display_mode *panel_mode;
+
+	if (list_empty(&connector->modes)) {
+		dev_dbg(dev->dev, "mode_fixup: empty modes list\n");
+		return false;
+	}
+
+	/* The flat panel mode is fixed, just copy it to the adjusted mode. */
+	panel_mode = list_first_entry(&connector->modes,
+				      struct drm_display_mode, head);
+	drm_mode_copy(adjusted_mode, panel_mode);
+
+	return true;
+}
+
+static void shmob_drm_encoder_mode_prepare(struct drm_encoder *encoder)
+{
+	/* No-op, everything is handled in the CRTC code. */
+}
+
+static void shmob_drm_encoder_mode_set(struct drm_encoder *encoder,
+				       struct drm_display_mode *mode,
+				       struct drm_display_mode *adjusted_mode)
+{
+	/* No-op, everything is handled in the CRTC code. */
+}
+
+static void shmob_drm_encoder_mode_commit(struct drm_encoder *encoder)
+{
+	/* No-op, everything is handled in the CRTC code. */
+}
+
+static const struct drm_encoder_helper_funcs encoder_helper_funcs = {
+	.dpms = shmob_drm_encoder_dpms,
+	.mode_fixup = shmob_drm_encoder_mode_fixup,
+	.prepare = shmob_drm_encoder_mode_prepare,
+	.commit = shmob_drm_encoder_mode_commit,
+	.mode_set = shmob_drm_encoder_mode_set,
+};
+
+static void shmob_drm_encoder_destroy(struct drm_encoder *encoder)
+{
+	drm_encoder_cleanup(encoder);
+}
+
+static const struct drm_encoder_funcs encoder_funcs = {
+	.destroy = shmob_drm_encoder_destroy,
+};
+
+int shmob_drm_encoder_create(struct shmob_drm_device *sdev)
+{
+	struct drm_encoder *encoder = &sdev->encoder.encoder;
+	int ret;
+
+	sdev->encoder.dpms = DRM_MODE_DPMS_OFF;
+
+	encoder->possible_crtcs = 1;
+
+	ret = drm_encoder_init(sdev->ddev, encoder, &encoder_funcs,
+			       DRM_MODE_ENCODER_LVDS);
+	if (ret < 0)
+		return ret;
+
+	drm_encoder_helper_add(encoder, &encoder_helper_funcs);
+
+	return 0;
+}
+
+void shmob_drm_crtc_enable_vblank(struct shmob_drm_device *sdev, bool enable)
+{
+	unsigned long flags;
+	u32 ldintr;
+
+	/* Be careful not to acknowledge any pending interrupt. */
+	spin_lock_irqsave(&sdev->irq_lock, flags);
+	ldintr = lcdc_read(sdev, LDINTR) | LDINTR_STATUS_MASK;
+	if (enable)
+		ldintr |= LDINTR_VEE;
+	else
+		ldintr &= ~LDINTR_VEE;
+	lcdc_write(sdev, LDINTR, ldintr);
+	spin_unlock_irqrestore(&sdev->irq_lock, flags);
+}
+
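
/*
 * shmob_drm_crtc_enable_vblank() above must not clear pending interrupt
 * flags while flipping the enable bit, so it ORs the status mask back in
 * before writing LDINTR. That read-modify-write as a pure function (the
 * mask values are illustrative, not the real LDINTR layout):
 */
#define STATUS_MASK	0x0000ff00u	/* writing 1 keeps a pending flag */
#define VBLANK_ENABLE	0x00000010u

static unsigned int set_vblank_enable(unsigned int ldintr, int enable)
{
	ldintr |= STATUS_MASK;		/* avoid acking pending interrupts */
	if (enable)
		ldintr |= VBLANK_ENABLE;
	else
		ldintr &= ~VBLANK_ENABLE;
	return ldintr;
}
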
+/* -----------------------------------------------------------------------------
+ * Connector
+ */
+
+#define to_shmob_connector(c) \
+	container_of(c, struct shmob_drm_connector, connector)
+
+static int shmob_drm_connector_get_modes(struct drm_connector *connector)
+{
+	struct shmob_drm_device *sdev = connector->dev->dev_private;
+	struct drm_display_mode *mode;
+
+	mode = drm_mode_create(connector->dev);
+	if (mode == NULL)
+		return 0;
+
+	mode->type = DRM_MODE_TYPE_PREFERRED | DRM_MODE_TYPE_DRIVER;
+	mode->clock = sdev->pdata->panel.mode.clock;
+	mode->hdisplay = sdev->pdata->panel.mode.hdisplay;
+	mode->hsync_start = sdev->pdata->panel.mode.hsync_start;
+	mode->hsync_end = sdev->pdata->panel.mode.hsync_end;
+	mode->htotal = sdev->pdata->panel.mode.htotal;
+	mode->vdisplay = sdev->pdata->panel.mode.vdisplay;
+	mode->vsync_start = sdev->pdata->panel.mode.vsync_start;
+	mode->vsync_end = sdev->pdata->panel.mode.vsync_end;
+	mode->vtotal = sdev->pdata->panel.mode.vtotal;
+	mode->flags = sdev->pdata->panel.mode.flags;
+
+	drm_mode_set_name(mode);
+	drm_mode_probed_add(connector, mode);
+
+	connector->display_info.width_mm = sdev->pdata->panel.width_mm;
+	connector->display_info.height_mm = sdev->pdata->panel.height_mm;
+
+	return 1;
+}
+
+static int shmob_drm_connector_mode_valid(struct drm_connector *connector,
+					  struct drm_display_mode *mode)
+{
+	return MODE_OK;
+}
+
+static struct drm_encoder *
+shmob_drm_connector_best_encoder(struct drm_connector *connector)
+{
+	struct shmob_drm_connector *scon = to_shmob_connector(connector);
+
+	return scon->encoder;
+}
+
+static const struct drm_connector_helper_funcs connector_helper_funcs = {
+	.get_modes = shmob_drm_connector_get_modes,
+	.mode_valid = shmob_drm_connector_mode_valid,
+	.best_encoder = shmob_drm_connector_best_encoder,
+};
+
+static void shmob_drm_connector_destroy(struct drm_connector *connector)
+{
+	struct shmob_drm_connector *scon = to_shmob_connector(connector);
+
+	shmob_drm_backlight_exit(scon);
+	drm_sysfs_connector_remove(connector);
+	drm_connector_cleanup(connector);
+}
+
+static enum drm_connector_status
+shmob_drm_connector_detect(struct drm_connector *connector, bool force)
+{
+	return connector_status_connected;
+}
+
+static const struct drm_connector_funcs connector_funcs = {
+	.dpms = drm_helper_connector_dpms,
+	.detect = shmob_drm_connector_detect,
+	.fill_modes = drm_helper_probe_single_connector_modes,
+	.destroy = shmob_drm_connector_destroy,
+};
+
+int shmob_drm_connector_create(struct shmob_drm_device *sdev,
+			       struct drm_encoder *encoder)
+{
+	struct drm_connector *connector = &sdev->connector.connector;
+	int ret;
+
+	sdev->connector.encoder = encoder;
+
+	connector->display_info.width_mm = sdev->pdata->panel.width_mm;
+	connector->display_info.height_mm = sdev->pdata->panel.height_mm;
+
+	ret = drm_connector_init(sdev->ddev, connector, &connector_funcs,
+				 DRM_MODE_CONNECTOR_LVDS);
+	if (ret < 0)
+		return ret;
+
+	drm_connector_helper_add(connector, &connector_helper_funcs);
+	ret = drm_sysfs_connector_add(connector);
+	if (ret < 0)
+		goto err_cleanup;
+
+	ret = shmob_drm_backlight_init(&sdev->connector);
+	if (ret < 0)
+		goto err_sysfs;
+
+	ret = drm_mode_connector_attach_encoder(connector, encoder);
+	if (ret < 0)
+		goto err_backlight;
+
+	connector->encoder = encoder;
+
+	drm_helper_connector_dpms(connector, DRM_MODE_DPMS_OFF);
+	drm_connector_property_set_value(connector,
+		sdev->ddev->mode_config.dpms_property, DRM_MODE_DPMS_OFF);
+
+	return 0;
+
+err_backlight:
+	shmob_drm_backlight_exit(&sdev->connector);
+err_sysfs:
+	drm_sysfs_connector_remove(connector);
+err_cleanup:
+	drm_connector_cleanup(connector);
+	return ret;
+}
diff --git a/drivers/gpu/drm/shmobile/shmob_drm_crtc.h b/drivers/gpu/drm/shmobile/shmob_drm_crtc.h
new file mode 100644
index 0000000..e5bd109
--- /dev/null
+++ b/drivers/gpu/drm/shmobile/shmob_drm_crtc.h
@@ -0,0 +1,60 @@
+/*
+ * shmob_drm_crtc.h  --  SH Mobile DRM CRTCs
+ *
+ * Copyright (C) 2012 Renesas Corporation
+ *
+ * Laurent Pinchart (laurent.pinchart@ideasonboard.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#ifndef __SHMOB_DRM_CRTC_H__
+#define __SHMOB_DRM_CRTC_H__
+
+#include <drm/drmP.h>
+#include <drm/drm_crtc.h>
+
+struct backlight_device;
+struct shmob_drm_device;
+
+struct shmob_drm_crtc {
+	struct drm_crtc crtc;
+
+	struct drm_pending_vblank_event *event;
+	int dpms;
+
+	const struct shmob_drm_format_info *format;
+	void *cache;
+	unsigned long dma[2];
+	unsigned int line_size;
+	bool started;
+};
+
+struct shmob_drm_encoder {
+	struct drm_encoder encoder;
+	int dpms;
+};
+
+struct shmob_drm_connector {
+	struct drm_connector connector;
+	struct drm_encoder *encoder;
+
+	struct backlight_device *backlight;
+};
+
+int shmob_drm_crtc_create(struct shmob_drm_device *sdev);
+void shmob_drm_crtc_enable_vblank(struct shmob_drm_device *sdev, bool enable);
+void shmob_drm_crtc_cancel_page_flip(struct shmob_drm_crtc *scrtc,
+				     struct drm_file *file);
+void shmob_drm_crtc_finish_page_flip(struct shmob_drm_crtc *scrtc);
+void shmob_drm_crtc_suspend(struct shmob_drm_crtc *scrtc);
+void shmob_drm_crtc_resume(struct shmob_drm_crtc *scrtc);
+
+int shmob_drm_encoder_create(struct shmob_drm_device *sdev);
+int shmob_drm_connector_create(struct shmob_drm_device *sdev,
+			       struct drm_encoder *encoder);
+
+#endif /* __SHMOB_DRM_CRTC_H__ */
diff --git a/drivers/gpu/drm/shmobile/shmob_drm_drv.c b/drivers/gpu/drm/shmobile/shmob_drm_drv.c
new file mode 100644
index 0000000..c71d493
--- /dev/null
+++ b/drivers/gpu/drm/shmobile/shmob_drm_drv.c
@@ -0,0 +1,361 @@
+/*
+ * shmob_drm_drv.c  --  SH Mobile DRM driver
+ *
+ * Copyright (C) 2012 Renesas Corporation
+ *
+ * Laurent Pinchart (laurent.pinchart@ideasonboard.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <linux/clk.h>
+#include <linux/io.h>
+#include <linux/mm.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/pm.h>
+#include <linux/slab.h>
+
+#include <drm/drmP.h>
+#include <drm/drm_crtc_helper.h>
+#include <drm/drm_gem_cma_helper.h>
+
+#include "shmob_drm_crtc.h"
+#include "shmob_drm_drv.h"
+#include "shmob_drm_kms.h"
+#include "shmob_drm_plane.h"
+#include "shmob_drm_regs.h"
+
+/* -----------------------------------------------------------------------------
+ * Hardware initialization
+ */
+
+static int __devinit shmob_drm_init_interface(struct shmob_drm_device *sdev)
+{
+	static const u32 ldmt1r[] = {
+		[SHMOB_DRM_IFACE_RGB8] = LDMT1R_MIFTYP_RGB8,
+		[SHMOB_DRM_IFACE_RGB9] = LDMT1R_MIFTYP_RGB9,
+		[SHMOB_DRM_IFACE_RGB12A] = LDMT1R_MIFTYP_RGB12A,
+		[SHMOB_DRM_IFACE_RGB12B] = LDMT1R_MIFTYP_RGB12B,
+		[SHMOB_DRM_IFACE_RGB16] = LDMT1R_MIFTYP_RGB16,
+		[SHMOB_DRM_IFACE_RGB18] = LDMT1R_MIFTYP_RGB18,
+		[SHMOB_DRM_IFACE_RGB24] = LDMT1R_MIFTYP_RGB24,
+		[SHMOB_DRM_IFACE_YUV422] = LDMT1R_MIFTYP_YCBCR,
+		[SHMOB_DRM_IFACE_SYS8A] = LDMT1R_IFM | LDMT1R_MIFTYP_SYS8A,
+		[SHMOB_DRM_IFACE_SYS8B] = LDMT1R_IFM | LDMT1R_MIFTYP_SYS8B,
+		[SHMOB_DRM_IFACE_SYS8C] = LDMT1R_IFM | LDMT1R_MIFTYP_SYS8C,
+		[SHMOB_DRM_IFACE_SYS8D] = LDMT1R_IFM | LDMT1R_MIFTYP_SYS8D,
+		[SHMOB_DRM_IFACE_SYS9] = LDMT1R_IFM | LDMT1R_MIFTYP_SYS9,
+		[SHMOB_DRM_IFACE_SYS12] = LDMT1R_IFM | LDMT1R_MIFTYP_SYS12,
+		[SHMOB_DRM_IFACE_SYS16A] = LDMT1R_IFM | LDMT1R_MIFTYP_SYS16A,
+		[SHMOB_DRM_IFACE_SYS16B] = LDMT1R_IFM | LDMT1R_MIFTYP_SYS16B,
+		[SHMOB_DRM_IFACE_SYS16C] = LDMT1R_IFM | LDMT1R_MIFTYP_SYS16C,
+		[SHMOB_DRM_IFACE_SYS18] = LDMT1R_IFM | LDMT1R_MIFTYP_SYS18,
+		[SHMOB_DRM_IFACE_SYS24] = LDMT1R_IFM | LDMT1R_MIFTYP_SYS24,
+	};
+
+	if (sdev->pdata->iface.interface >= ARRAY_SIZE(ldmt1r)) {
+		dev_err(sdev->dev, "invalid interface type %u\n",
+			sdev->pdata->iface.interface);
+		return -EINVAL;
+	}
+
+	sdev->ldmt1r = ldmt1r[sdev->pdata->iface.interface];
+	return 0;
+}
+
+static int __devinit shmob_drm_setup_clocks(struct shmob_drm_device *sdev,
+					    enum shmob_drm_clk_source clksrc)
+{
+	struct clk *clk;
+	char *clkname;
+
+	switch (clksrc) {
+	case SHMOB_DRM_CLK_BUS:
+		clkname = "bus_clk";
+		sdev->lddckr = LDDCKR_ICKSEL_BUS;
+		break;
+	case SHMOB_DRM_CLK_PERIPHERAL:
+		clkname = "peripheral_clk";
+		sdev->lddckr = LDDCKR_ICKSEL_MIPI;
+		break;
+	case SHMOB_DRM_CLK_EXTERNAL:
+		clkname = NULL;
+		sdev->lddckr = LDDCKR_ICKSEL_HDMI;
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	clk = clk_get(sdev->dev, clkname);
+	if (IS_ERR(clk)) {
+		dev_err(sdev->dev, "cannot get dot clock %s\n", clkname);
+		return PTR_ERR(clk);
+	}
+
+	sdev->clock = clk;
+	return 0;
+}
+
+/* -----------------------------------------------------------------------------
+ * DRM operations
+ */
+
+static int shmob_drm_unload(struct drm_device *dev)
+{
+	struct shmob_drm_device *sdev = dev->dev_private;
+
+	drm_kms_helper_poll_fini(dev);
+	drm_mode_config_cleanup(dev);
+	drm_vblank_cleanup(dev);
+	drm_irq_uninstall(dev);
+
+	if (sdev->clock)
+		clk_put(sdev->clock);
+
+	if (sdev->mmio)
+		iounmap(sdev->mmio);
+
+	dev->dev_private = NULL;
+	kfree(sdev);
+
+	return 0;
+}
+
+static int shmob_drm_load(struct drm_device *dev, unsigned long flags)
+{
+	struct shmob_drm_platform_data *pdata = dev->dev->platform_data;
+	struct platform_device *pdev = dev->platformdev;
+	struct shmob_drm_device *sdev;
+	struct resource *res;
+	unsigned int i;
+	int ret;
+
+	if (pdata == NULL) {
+		dev_err(dev->dev, "no platform data\n");
+		return -EINVAL;
+	}
+
+	sdev = kzalloc(sizeof(*sdev), GFP_KERNEL);
+	if (sdev == NULL) {
+		dev_err(dev->dev, "failed to allocate private data\n");
+		return -ENOMEM;
+	}
+
+	sdev->dev = &pdev->dev;
+	sdev->pdata = pdata;
+	spin_lock_init(&sdev->irq_lock);
+
+	sdev->ddev = dev;
+	dev->dev_private = sdev;
+
+	/* I/O resources and clocks */
+	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	if (res == NULL) {
+		dev_err(&pdev->dev, "failed to get memory resource\n");
+		ret = -EINVAL;
+		goto done;
+	}
+
+	sdev->mmio = ioremap_nocache(res->start, resource_size(res));
+	if (sdev->mmio == NULL) {
+		dev_err(&pdev->dev, "failed to remap memory resource\n");
+		ret = -ENOMEM;
+		goto done;
+	}
+
+	ret = shmob_drm_setup_clocks(sdev, pdata->clk_source);
+	if (ret < 0)
+		goto done;
+
+	ret = shmob_drm_init_interface(sdev);
+	if (ret < 0)
+		goto done;
+
+	ret = shmob_drm_modeset_init(sdev);
+	if (ret < 0) {
+		dev_err(&pdev->dev, "failed to initialize mode setting\n");
+		goto done;
+	}
+
+	for (i = 0; i < 4; ++i) {
+		ret = shmob_drm_plane_create(sdev, i);
+		if (ret < 0) {
+			dev_err(&pdev->dev, "failed to create plane %u\n", i);
+			goto done;
+		}
+	}
+
+	ret = drm_vblank_init(dev, 1);
+	if (ret < 0) {
+		dev_err(&pdev->dev, "failed to initialize vblank\n");
+		goto done;
+	}
+
+	ret = drm_irq_install(dev);
+	if (ret < 0) {
+		dev_err(&pdev->dev, "failed to install IRQ handler\n");
+		goto done;
+	}
+
+done:
+	if (ret)
+		shmob_drm_unload(dev);
+
+	return ret;
+}
+
+static void shmob_drm_preclose(struct drm_device *dev, struct drm_file *file)
+{
+	struct shmob_drm_device *sdev = dev->dev_private;
+
+	shmob_drm_crtc_cancel_page_flip(&sdev->crtc, file);
+}
+
+static irqreturn_t shmob_drm_irq(int irq, void *arg)
+{
+	struct drm_device *dev = arg;
+	struct shmob_drm_device *sdev = dev->dev_private;
+	unsigned long flags;
+	u32 status;
+
+	/* Acknowledge interrupts. Putting interrupt enable and interrupt flag
+	 * bits in the same register is really brain-dead design and requires
+	 * taking a spinlock.
+	 */
+	spin_lock_irqsave(&sdev->irq_lock, flags);
+	status = lcdc_read(sdev, LDINTR);
+	lcdc_write(sdev, LDINTR, status ^ LDINTR_STATUS_MASK);
+	spin_unlock_irqrestore(&sdev->irq_lock, flags);
+
+	if (status & LDINTR_VES) {
+		drm_handle_vblank(dev, 0);
+		shmob_drm_crtc_finish_page_flip(&sdev->crtc);
+	}
+
+	return IRQ_HANDLED;
+}
+
+static int shmob_drm_enable_vblank(struct drm_device *dev, int crtc)
+{
+	struct shmob_drm_device *sdev = dev->dev_private;
+
+	shmob_drm_crtc_enable_vblank(sdev, true);
+
+	return 0;
+}
+
+static void shmob_drm_disable_vblank(struct drm_device *dev, int crtc)
+{
+	struct shmob_drm_device *sdev = dev->dev_private;
+
+	shmob_drm_crtc_enable_vblank(sdev, false);
+}
+
+static const struct file_operations shmob_drm_fops = {
+	.owner		= THIS_MODULE,
+	.open		= drm_open,
+	.release	= drm_release,
+	.unlocked_ioctl	= drm_ioctl,
+#ifdef CONFIG_COMPAT
+	.compat_ioctl	= drm_compat_ioctl,
+#endif
+	.poll		= drm_poll,
+	.read		= drm_read,
+	.fasync		= drm_fasync,
+	.llseek		= no_llseek,
+	.mmap		= drm_gem_cma_mmap,
+};
+
+static struct drm_driver shmob_drm_driver = {
+	.driver_features	= DRIVER_HAVE_IRQ | DRIVER_GEM | DRIVER_MODESET,
+	.load			= shmob_drm_load,
+	.unload			= shmob_drm_unload,
+	.preclose		= shmob_drm_preclose,
+	.irq_handler		= shmob_drm_irq,
+	.get_vblank_counter	= drm_vblank_count,
+	.enable_vblank		= shmob_drm_enable_vblank,
+	.disable_vblank		= shmob_drm_disable_vblank,
+	.gem_free_object	= drm_gem_cma_free_object,
+	.gem_vm_ops		= &drm_gem_cma_vm_ops,
+	.dumb_create		= drm_gem_cma_dumb_create,
+	.dumb_map_offset	= drm_gem_cma_dumb_map_offset,
+	.dumb_destroy		= drm_gem_cma_dumb_destroy,
+	.fops			= &shmob_drm_fops,
+	.name			= "shmob-drm",
+	.desc			= "Renesas SH Mobile DRM",
+	.date			= "20120424",
+	.major			= 1,
+	.minor			= 0,
+};
+
+/* -----------------------------------------------------------------------------
+ * Power management
+ */
+
+#ifdef CONFIG_PM_SLEEP
+static int shmob_drm_pm_suspend(struct device *dev)
+{
+	struct platform_device *pdev = to_platform_device(dev);
+	struct drm_device *ddev = platform_get_drvdata(pdev);
+	struct shmob_drm_device *sdev = ddev->dev_private;
+
+	drm_kms_helper_poll_disable(ddev);
+	shmob_drm_crtc_suspend(&sdev->crtc);
+
+	return 0;
+}
+
+static int shmob_drm_pm_resume(struct device *dev)
+{
+	struct platform_device *pdev = to_platform_device(dev);
+	struct drm_device *ddev = platform_get_drvdata(pdev);
+	struct shmob_drm_device *sdev = ddev->dev_private;
+
+	mutex_lock(&sdev->ddev->mode_config.mutex);
+	shmob_drm_crtc_resume(&sdev->crtc);
+	mutex_unlock(&sdev->ddev->mode_config.mutex);
+
+	drm_kms_helper_poll_enable(sdev->ddev);
+	return 0;
+}
+#endif
+
+static const struct dev_pm_ops shmob_drm_pm_ops = {
+	SET_SYSTEM_SLEEP_PM_OPS(shmob_drm_pm_suspend, shmob_drm_pm_resume)
+};
+
+/* -----------------------------------------------------------------------------
+ * Platform driver
+ */
+
+static int __devinit shmob_drm_probe(struct platform_device *pdev)
+{
+	return drm_platform_init(&shmob_drm_driver, pdev);
+}
+
+static int __devexit shmob_drm_remove(struct platform_device *pdev)
+{
+	drm_platform_exit(&shmob_drm_driver, pdev);
+
+	return 0;
+}
+
+static struct platform_driver shmob_drm_platform_driver = {
+	.probe		= shmob_drm_probe,
+	.remove		= __devexit_p(shmob_drm_remove),
+	.driver		= {
+		.owner	= THIS_MODULE,
+		.name	= "shmob-drm",
+		.pm	= &shmob_drm_pm_ops,
+	},
+};
+
+module_platform_driver(shmob_drm_platform_driver);
+
+MODULE_AUTHOR("Laurent Pinchart <laurent.pinchart@ideasonboard.com>");
+MODULE_DESCRIPTION("Renesas SH Mobile DRM Driver");
+MODULE_LICENSE("GPL");
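
The LDINTR acknowledge in shmob_drm_irq() above writes the status bits XORed
with LDINTR_STATUS_MASK, i.e. it writes 0 to the flags being cleared while
leaving the enable bits untouched; any enable-bit update must take the same
irq_lock and keep the status bits set so a concurrent ack is not clobbered.
A hedged sketch of the enable-side counterpart (the real helper lives in
shmob_drm_crtc.c, which is not part of this excerpt):

/* Sketch of the enable-side counterpart to the LDINTR ack above. The
 * same irq_lock must serialize the read-modify-write, and the status
 * bits are kept set (they are cleared by writing 0) so a pending flag
 * is not acked by accident. Illustrative only.
 */
static void example_enable_vblank_irq(struct shmob_drm_device *sdev,
				      bool enable)
{
	unsigned long flags;
	u32 ldintr;

	spin_lock_irqsave(&sdev->irq_lock, flags);
	ldintr = lcdc_read(sdev, LDINTR) | LDINTR_STATUS_MASK;
	if (enable)
		ldintr |= LDINTR_VEE;	/* vsync-end (vblank) interrupt */
	else
		ldintr &= ~LDINTR_VEE;
	lcdc_write(sdev, LDINTR, ldintr);
	spin_unlock_irqrestore(&sdev->irq_lock, flags);
}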
diff --git a/drivers/gpu/drm/shmobile/shmob_drm_drv.h b/drivers/gpu/drm/shmobile/shmob_drm_drv.h
new file mode 100644
index 0000000..4d46b81
--- /dev/null
+++ b/drivers/gpu/drm/shmobile/shmob_drm_drv.h
@@ -0,0 +1,47 @@
+/*
+ * shmob_drm_drv.h  --  SH Mobile DRM driver
+ *
+ * Copyright (C) 2012 Renesas Corporation
+ *
+ * Laurent Pinchart (laurent.pinchart@ideasonboard.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#ifndef __SHMOB_DRM_DRV_H__
+#define __SHMOB_DRM_DRV_H__
+
+#include <linux/kernel.h>
+#include <linux/platform_data/shmob_drm.h>
+#include <linux/spinlock.h>
+
+#include "shmob_drm_crtc.h"
+
+struct clk;
+struct device;
+struct drm_device;
+struct sh_mobile_meram_info;
+
+struct shmob_drm_device {
+	struct device *dev;
+	const struct shmob_drm_platform_data *pdata;
+
+	void __iomem *mmio;
+	struct clk *clock;
+	struct sh_mobile_meram_info *meram;
+	u32 lddckr;
+	u32 ldmt1r;
+
+	spinlock_t irq_lock;		/* Protects hardware LDINTR register */
+
+	struct drm_device *ddev;
+
+	struct shmob_drm_crtc crtc;
+	struct shmob_drm_encoder encoder;
+	struct shmob_drm_connector connector;
+};
+
+#endif /* __SHMOB_DRM_DRV_H__ */
diff --git a/drivers/gpu/drm/shmobile/shmob_drm_kms.c b/drivers/gpu/drm/shmobile/shmob_drm_kms.c
new file mode 100644
index 0000000..c291ee3
--- /dev/null
+++ b/drivers/gpu/drm/shmobile/shmob_drm_kms.c
@@ -0,0 +1,160 @@
+/*
+ * shmob_drm_kms.c  --  SH Mobile DRM Mode Setting
+ *
+ * Copyright (C) 2012 Renesas Corporation
+ *
+ * Laurent Pinchart (laurent.pinchart@ideasonboard.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <drm/drmP.h>
+#include <drm/drm_crtc.h>
+#include <drm/drm_crtc_helper.h>
+#include <drm/drm_fb_cma_helper.h>
+#include <drm/drm_gem_cma_helper.h>
+
+#include <video/sh_mobile_meram.h>
+
+#include "shmob_drm_crtc.h"
+#include "shmob_drm_drv.h"
+#include "shmob_drm_kms.h"
+#include "shmob_drm_regs.h"
+
+/* -----------------------------------------------------------------------------
+ * Format helpers
+ */
+
+static const struct shmob_drm_format_info shmob_drm_format_infos[] = {
+	{
+		.fourcc = DRM_FORMAT_RGB565,
+		.bpp = 16,
+		.yuv = false,
+		.lddfr = LDDFR_PKF_RGB16,
+		.meram = SH_MOBILE_MERAM_PF_RGB,
+	}, {
+		.fourcc = DRM_FORMAT_RGB888,
+		.bpp = 24,
+		.yuv = false,
+		.lddfr = LDDFR_PKF_RGB24,
+		.meram = SH_MOBILE_MERAM_PF_RGB,
+	}, {
+		.fourcc = DRM_FORMAT_ARGB8888,
+		.bpp = 32,
+		.yuv = false,
+		.lddfr = LDDFR_PKF_ARGB32,
+		.meram = SH_MOBILE_MERAM_PF_RGB,
+	}, {
+		.fourcc = DRM_FORMAT_NV12,
+		.bpp = 12,
+		.yuv = true,
+		.lddfr = LDDFR_CC | LDDFR_YF_420,
+		.meram = SH_MOBILE_MERAM_PF_NV,
+	}, {
+		.fourcc = DRM_FORMAT_NV21,
+		.bpp = 12,
+		.yuv = true,
+		.lddfr = LDDFR_CC | LDDFR_YF_420,
+		.meram = SH_MOBILE_MERAM_PF_NV,
+	}, {
+		.fourcc = DRM_FORMAT_NV16,
+		.bpp = 16,
+		.yuv = true,
+		.lddfr = LDDFR_CC | LDDFR_YF_422,
+		.meram = SH_MOBILE_MERAM_PF_NV,
+	}, {
+		.fourcc = DRM_FORMAT_NV61,
+		.bpp = 16,
+		.yuv = true,
+		.lddfr = LDDFR_CC | LDDFR_YF_422,
+		.meram = SH_MOBILE_MERAM_PF_NV,
+	}, {
+		.fourcc = DRM_FORMAT_NV24,
+		.bpp = 24,
+		.yuv = true,
+		.lddfr = LDDFR_CC | LDDFR_YF_444,
+		.meram = SH_MOBILE_MERAM_PF_NV24,
+	}, {
+		.fourcc = DRM_FORMAT_NV42,
+		.bpp = 24,
+		.yuv = true,
+		.lddfr = LDDFR_CC | LDDFR_YF_444,
+		.meram = SH_MOBILE_MERAM_PF_NV24,
+	},
+};
+
+const struct shmob_drm_format_info *shmob_drm_format_info(u32 fourcc)
+{
+	unsigned int i;
+
+	for (i = 0; i < ARRAY_SIZE(shmob_drm_format_infos); ++i) {
+		if (shmob_drm_format_infos[i].fourcc == fourcc)
+			return &shmob_drm_format_infos[i];
+	}
+
+	return NULL;
+}
+
+/* -----------------------------------------------------------------------------
+ * Frame buffer
+ */
+
+static struct drm_framebuffer *
+shmob_drm_fb_create(struct drm_device *dev, struct drm_file *file_priv,
+		    struct drm_mode_fb_cmd2 *mode_cmd)
+{
+	const struct shmob_drm_format_info *format;
+
+	format = shmob_drm_format_info(mode_cmd->pixel_format);
+	if (format == NULL) {
+		dev_dbg(dev->dev, "unsupported pixel format %08x\n",
+			mode_cmd->pixel_format);
+		return ERR_PTR(-EINVAL);
+	}
+
+	if (mode_cmd->pitches[0] & 7 || mode_cmd->pitches[0] >= 65536) {
+		dev_dbg(dev->dev, "invalid pitch value %u\n",
+			mode_cmd->pitches[0]);
+		return ERR_PTR(-EINVAL);
+	}
+
+	if (format->yuv) {
+		unsigned int chroma_cpp = format->bpp == 24 ? 2 : 1;
+
+		if (mode_cmd->pitches[1] != mode_cmd->pitches[0] * chroma_cpp) {
+			dev_dbg(dev->dev,
+				"luma and chroma pitches do not match\n");
+			return ERR_PTR(-EINVAL);
+		}
+	}
+
+	return drm_fb_cma_create(dev, file_priv, mode_cmd);
+}
+
+static const struct drm_mode_config_funcs shmob_drm_mode_config_funcs = {
+	.fb_create = shmob_drm_fb_create,
+};
+
+int shmob_drm_modeset_init(struct shmob_drm_device *sdev)
+{
+	drm_mode_config_init(sdev->ddev);
+
+	shmob_drm_crtc_create(sdev);
+	shmob_drm_encoder_create(sdev);
+	shmob_drm_connector_create(sdev, &sdev->encoder.encoder);
+
+	drm_kms_helper_poll_init(sdev->ddev);
+
+	sdev->ddev->mode_config.min_width = 0;
+	sdev->ddev->mode_config.min_height = 0;
+	sdev->ddev->mode_config.max_width = 4095;
+	sdev->ddev->mode_config.max_height = 4095;
+	sdev->ddev->mode_config.funcs = &shmob_drm_mode_config_funcs;
+
+	drm_helper_disable_unused_functions(sdev->ddev);
+
+	return 0;
+}
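
The checks in shmob_drm_fb_create() above are easy to trip from userspace, so
here is a minimal standalone sketch of the same pitch rules, assuming the
chroma-doubling convention the driver applies to the 24-bpp NV24/NV42 formats:

/* Standalone sketch of the framebuffer pitch rules enforced by
 * shmob_drm_fb_create(); not driver code, just the same checks
 * expressed as a plain C program for experimentation.
 */
#include <assert.h>
#include <stdbool.h>

/* The luma pitch must be 8-byte aligned and below 65536. */
static bool pitch_ok(unsigned int pitch)
{
	return !(pitch & 7) && pitch < 65536;
}

/* For YUV formats the chroma pitch must match the luma pitch, doubled
 * for the non-subsampled NV24/NV42 case (bpp == 24).
 */
static bool chroma_pitch_ok(unsigned int luma, unsigned int chroma,
			    unsigned int bpp)
{
	unsigned int chroma_cpp = bpp == 24 ? 2 : 1;

	return chroma == luma * chroma_cpp;
}

int main(void)
{
	assert(pitch_ok(1280 * 2));		 /* RGB565, 1280 wide */
	assert(!pitch_ok(1282));		 /* not 8-byte aligned */
	assert(chroma_pitch_ok(1280, 1280, 12)); /* NV12 */
	assert(chroma_pitch_ok(1280, 2560, 24)); /* NV24 */
	return 0;
}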
diff --git a/drivers/gpu/drm/shmobile/shmob_drm_kms.h b/drivers/gpu/drm/shmobile/shmob_drm_kms.h
new file mode 100644
index 0000000..9495c91
--- /dev/null
+++ b/drivers/gpu/drm/shmobile/shmob_drm_kms.h
@@ -0,0 +1,34 @@
+/*
+ * shmob_drm_kms.h  --  SH Mobile DRM Mode Setting
+ *
+ * Copyright (C) 2012 Renesas Corporation
+ *
+ * Laurent Pinchart (laurent.pinchart@ideasonboard.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#ifndef __SHMOB_DRM_KMS_H__
+#define __SHMOB_DRM_KMS_H__
+
+#include <linux/types.h>
+
+struct drm_gem_cma_object;
+struct shmob_drm_device;
+
+struct shmob_drm_format_info {
+	u32 fourcc;
+	unsigned int bpp;
+	bool yuv;
+	u32 lddfr;
+	unsigned int meram;
+};
+
+const struct shmob_drm_format_info *shmob_drm_format_info(u32 fourcc);
+
+int shmob_drm_modeset_init(struct shmob_drm_device *sdev);
+
+#endif /* __SHMOB_DRM_KMS_H__ */
diff --git a/drivers/gpu/drm/shmobile/shmob_drm_plane.c b/drivers/gpu/drm/shmobile/shmob_drm_plane.c
new file mode 100644
index 0000000..e1eb899
--- /dev/null
+++ b/drivers/gpu/drm/shmobile/shmob_drm_plane.c
@@ -0,0 +1,268 @@
+/*
+ * shmob_drm_plane.c  --  SH Mobile DRM Planes
+ *
+ * Copyright (C) 2012 Renesas Corporation
+ *
+ * Laurent Pinchart (laurent.pinchart@ideasonboard.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <drm/drmP.h>
+#include <drm/drm_crtc.h>
+#include <drm/drm_crtc_helper.h>
+#include <drm/drm_fb_cma_helper.h>
+#include <drm/drm_gem_cma_helper.h>
+
+#include <video/sh_mobile_meram.h>
+
+#include "shmob_drm_drv.h"
+#include "shmob_drm_kms.h"
+#include "shmob_drm_plane.h"
+#include "shmob_drm_regs.h"
+
+struct shmob_drm_plane {
+	struct drm_plane plane;
+	unsigned int index;
+	unsigned int alpha;
+
+	const struct shmob_drm_format_info *format;
+	unsigned long dma[2];
+
+	unsigned int src_x;
+	unsigned int src_y;
+	unsigned int crtc_x;
+	unsigned int crtc_y;
+	unsigned int crtc_w;
+	unsigned int crtc_h;
+};
+
+#define to_shmob_plane(p)	container_of(p, struct shmob_drm_plane, plane)
+
+static void shmob_drm_plane_compute_base(struct shmob_drm_plane *splane,
+					 struct drm_framebuffer *fb,
+					 int x, int y)
+{
+	struct drm_gem_cma_object *gem;
+	unsigned int bpp;
+
+	bpp = splane->format->yuv ? 8 : splane->format->bpp;
+	gem = drm_fb_cma_get_gem_obj(fb, 0);
+	splane->dma[0] = gem->paddr + fb->offsets[0]
+		       + y * fb->pitches[0] + x * bpp / 8;
+
+	if (splane->format->yuv) {
+		bpp = splane->format->bpp - 8;
+		gem = drm_fb_cma_get_gem_obj(fb, 1);
+		splane->dma[1] = gem->paddr + fb->offsets[1]
+			       + y / (bpp == 4 ? 2 : 1) * fb->pitches[1]
+			       + x * (bpp == 16 ? 2 : 1);
+	}
+}
+
+static void __shmob_drm_plane_setup(struct shmob_drm_plane *splane,
+				    struct drm_framebuffer *fb)
+{
+	struct shmob_drm_device *sdev = splane->plane.dev->dev_private;
+	u32 format;
+
+	/* TODO: Support ROP3 mode */
+	format = LDBBSIFR_EN | (splane->alpha << LDBBSIFR_LAY_SHIFT);
+
+	switch (splane->format->fourcc) {
+	case DRM_FORMAT_RGB565:
+	case DRM_FORMAT_NV21:
+	case DRM_FORMAT_NV61:
+	case DRM_FORMAT_NV42:
+		format |= LDBBSIFR_SWPL | LDBBSIFR_SWPW;
+		break;
+	case DRM_FORMAT_RGB888:
+	case DRM_FORMAT_NV12:
+	case DRM_FORMAT_NV16:
+	case DRM_FORMAT_NV24:
+		format |= LDBBSIFR_SWPL | LDBBSIFR_SWPW | LDBBSIFR_SWPB;
+		break;
+	case DRM_FORMAT_ARGB8888:
+	default:
+		format |= LDBBSIFR_SWPL;
+		break;
+	}
+
+	switch (splane->format->fourcc) {
+	case DRM_FORMAT_RGB565:
+		format |= LDBBSIFR_AL_1 | LDBBSIFR_RY | LDBBSIFR_RPKF_RGB16;
+		break;
+	case DRM_FORMAT_RGB888:
+		format |= LDBBSIFR_AL_1 | LDBBSIFR_RY | LDBBSIFR_RPKF_RGB24;
+		break;
+	case DRM_FORMAT_ARGB8888:
+		format |= LDBBSIFR_AL_PK | LDBBSIFR_RY | LDBBSIFR_RPKF_ARGB32;
+		break;
+	case DRM_FORMAT_NV12:
+	case DRM_FORMAT_NV21:
+		format |= LDBBSIFR_AL_1 | LDBBSIFR_CHRR_420;
+		break;
+	case DRM_FORMAT_NV16:
+	case DRM_FORMAT_NV61:
+		format |= LDBBSIFR_AL_1 | LDBBSIFR_CHRR_422;
+		break;
+	case DRM_FORMAT_NV24:
+	case DRM_FORMAT_NV42:
+		format |= LDBBSIFR_AL_1 | LDBBSIFR_CHRR_444;
+		break;
+	}
+
+#define plane_reg_dump(sdev, splane, reg) \
+	dev_dbg(sdev->ddev->dev, "%s(%u): %s 0x%08x 0x%08x\n", __func__, \
+		splane->index, #reg, \
+		lcdc_read(sdev, reg(splane->index)), \
+		lcdc_read(sdev, reg(splane->index) + LCDC_SIDE_B_OFFSET))
+
+	plane_reg_dump(sdev, splane, LDBnBSIFR);
+	plane_reg_dump(sdev, splane, LDBnBSSZR);
+	plane_reg_dump(sdev, splane, LDBnBLOCR);
+	plane_reg_dump(sdev, splane, LDBnBSMWR);
+	plane_reg_dump(sdev, splane, LDBnBSAYR);
+	plane_reg_dump(sdev, splane, LDBnBSACR);
+
+	lcdc_write(sdev, LDBCR, LDBCR_UPC(splane->index));
+	dev_dbg(sdev->ddev->dev, "%s(%u): %s 0x%08x\n", __func__, splane->index,
+		"LDBCR", lcdc_read(sdev, LDBCR));
+
+	lcdc_write(sdev, LDBnBSIFR(splane->index), format);
+
+	lcdc_write(sdev, LDBnBSSZR(splane->index),
+		   (splane->crtc_h << LDBBSSZR_BVSS_SHIFT) |
+		   (splane->crtc_w << LDBBSSZR_BHSS_SHIFT));
+	lcdc_write(sdev, LDBnBLOCR(splane->index),
+		   (splane->crtc_y << LDBBLOCR_CVLC_SHIFT) |
+		   (splane->crtc_x << LDBBLOCR_CHLC_SHIFT));
+	lcdc_write(sdev, LDBnBSMWR(splane->index),
+		   fb->pitches[0] << LDBBSMWR_BSMW_SHIFT);
+
+	shmob_drm_plane_compute_base(splane, fb, splane->src_x, splane->src_y);
+
+	lcdc_write(sdev, LDBnBSAYR(splane->index), splane->dma[0]);
+	if (splane->format->yuv)
+		lcdc_write(sdev, LDBnBSACR(splane->index), splane->dma[1]);
+
+	lcdc_write(sdev, LDBCR,
+		   LDBCR_UPF(splane->index) | LDBCR_UPD(splane->index));
+	dev_dbg(sdev->ddev->dev, "%s(%u): %s 0x%08x\n", __func__, splane->index,
+		"LDBCR", lcdc_read(sdev, LDBCR));
+
+	plane_reg_dump(sdev, splane, LDBnBSIFR);
+	plane_reg_dump(sdev, splane, LDBnBSSZR);
+	plane_reg_dump(sdev, splane, LDBnBLOCR);
+	plane_reg_dump(sdev, splane, LDBnBSMWR);
+	plane_reg_dump(sdev, splane, LDBnBSAYR);
+	plane_reg_dump(sdev, splane, LDBnBSACR);
+}
+
+void shmob_drm_plane_setup(struct drm_plane *plane)
+{
+	struct shmob_drm_plane *splane = to_shmob_plane(plane);
+
+	if (plane->fb == NULL || !plane->enabled)
+		return;
+
+	__shmob_drm_plane_setup(splane, plane->fb);
+}
+
+static int
+shmob_drm_plane_update(struct drm_plane *plane, struct drm_crtc *crtc,
+		       struct drm_framebuffer *fb, int crtc_x, int crtc_y,
+		       unsigned int crtc_w, unsigned int crtc_h,
+		       uint32_t src_x, uint32_t src_y,
+		       uint32_t src_w, uint32_t src_h)
+{
+	struct shmob_drm_plane *splane = to_shmob_plane(plane);
+	struct shmob_drm_device *sdev = plane->dev->dev_private;
+	const struct shmob_drm_format_info *format;
+
+	format = shmob_drm_format_info(fb->pixel_format);
+	if (format == NULL) {
+		dev_dbg(sdev->dev, "update_plane: unsupported format %08x\n",
+			fb->pixel_format);
+		return -EINVAL;
+	}
+
+	if (src_w >> 16 != crtc_w || src_h >> 16 != crtc_h) {
+		dev_dbg(sdev->dev, "%s: scaling not supported\n", __func__);
+		return -EINVAL;
+	}
+
+	splane->format = format;
+
+	splane->src_x = src_x >> 16;
+	splane->src_y = src_y >> 16;
+	splane->crtc_x = crtc_x;
+	splane->crtc_y = crtc_y;
+	splane->crtc_w = crtc_w;
+	splane->crtc_h = crtc_h;
+
+	__shmob_drm_plane_setup(splane, fb);
+	return 0;
+}
+
+static int shmob_drm_plane_disable(struct drm_plane *plane)
+{
+	struct shmob_drm_plane *splane = to_shmob_plane(plane);
+	struct shmob_drm_device *sdev = plane->dev->dev_private;
+
+	splane->format = NULL;
+
+	lcdc_write(sdev, LDBnBSIFR(splane->index), 0);
+	return 0;
+}
+
+static void shmob_drm_plane_destroy(struct drm_plane *plane)
+{
+	struct shmob_drm_plane *splane = to_shmob_plane(plane);
+
+	shmob_drm_plane_disable(plane);
+	drm_plane_cleanup(plane);
+	kfree(splane);
+}
+
+static const struct drm_plane_funcs shmob_drm_plane_funcs = {
+	.update_plane = shmob_drm_plane_update,
+	.disable_plane = shmob_drm_plane_disable,
+	.destroy = shmob_drm_plane_destroy,
+};
+
+static const uint32_t formats[] = {
+	DRM_FORMAT_RGB565,
+	DRM_FORMAT_RGB888,
+	DRM_FORMAT_ARGB8888,
+	DRM_FORMAT_NV12,
+	DRM_FORMAT_NV21,
+	DRM_FORMAT_NV16,
+	DRM_FORMAT_NV61,
+	DRM_FORMAT_NV24,
+	DRM_FORMAT_NV42,
+};
+
+int shmob_drm_plane_create(struct shmob_drm_device *sdev, unsigned int index)
+{
+	struct shmob_drm_plane *splane;
+	int ret;
+
+	splane = kzalloc(sizeof(*splane), GFP_KERNEL);
+	if (splane == NULL)
+		return -ENOMEM;
+
+	splane->index = index;
+	splane->alpha = 255;
+
+	ret = drm_plane_init(sdev->ddev, &splane->plane, 1,
+			     &shmob_drm_plane_funcs, formats,
+			     ARRAY_SIZE(formats), false);
+	if (ret < 0)
+		kfree(splane);
+
+	return ret;
+}
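
The address arithmetic in shmob_drm_plane_compute_base() above is dense; a
standalone worked example for NV12 (bpp = 12), with a hypothetical CMA base
address and chroma offset, shows how the 4:2:0 case halves the line index:

/* Worked example of shmob_drm_plane_compute_base() for NV12 (bpp = 12),
 * assuming a 1280-byte pitch and a source window at (16, 8). Pure
 * arithmetic; the addresses are made up for illustration.
 */
#include <stdio.h>

int main(void)
{
	unsigned long paddr = 0x40000000;	/* hypothetical CMA base */
	unsigned int pitch0 = 1280, pitch1 = 1280;
	unsigned int x = 16, y = 8;
	unsigned int bpp;

	/* Luma plane: YUV formats sample 8 bits per pixel in plane 0. */
	bpp = 8;
	unsigned long dma0 = paddr + y * pitch0 + x * bpp / 8;

	/* Chroma plane: bpp - 8 = 4 selects the 4:2:0 case, so the line
	 * index is halved and x is not doubled (doubling only happens
	 * for the 16-chroma-bit NV24/NV42 case).
	 */
	bpp = 12 - 8;
	unsigned long dma1 = paddr + 0x100000 /* offsets[1], hypothetical */
			   + y / (bpp == 4 ? 2 : 1) * pitch1
			   + x * (bpp == 16 ? 2 : 1);

	printf("dma[0] = %#lx, dma[1] = %#lx\n", dma0, dma1);
	return 0;
}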
diff --git a/drivers/gpu/drm/shmobile/shmob_drm_plane.h b/drivers/gpu/drm/shmobile/shmob_drm_plane.h
new file mode 100644
index 0000000..99623d0
--- /dev/null
+++ b/drivers/gpu/drm/shmobile/shmob_drm_plane.h
@@ -0,0 +1,22 @@
+/*
+ * shmob_drm_plane.h  --  SH Mobile DRM Planes
+ *
+ * Copyright (C) 2012 Renesas Corporation
+ *
+ * Laurent Pinchart (laurent.pinchart@ideasonboard.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#ifndef __SHMOB_DRM_PLANE_H__
+#define __SHMOB_DRM_PLANE_H__
+
+struct shmob_drm_device;
+
+int shmob_drm_plane_create(struct shmob_drm_device *sdev, unsigned int index);
+void shmob_drm_plane_setup(struct drm_plane *plane);
+
+#endif /* __SHMOB_DRM_PLANE_H__ */
diff --git a/drivers/gpu/drm/shmobile/shmob_drm_regs.h b/drivers/gpu/drm/shmobile/shmob_drm_regs.h
new file mode 100644
index 0000000..7923cdd
--- /dev/null
+++ b/drivers/gpu/drm/shmobile/shmob_drm_regs.h
@@ -0,0 +1,311 @@
+/*
+ * shmob_drm_regs.h  --  SH Mobile DRM registers
+ *
+ * Copyright (C) 2012 Renesas Corporation
+ *
+ * Laurent Pinchart (laurent.pinchart@ideasonboard.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#ifndef __SHMOB_DRM_REGS_H__
+#define __SHMOB_DRM_REGS_H__
+
+#include <linux/io.h>
+
+/* Register definitions */
+#define LDDCKPAT1R		0x400
+#define LDDCKPAT2R		0x404
+#define LDDCKR			0x410
+#define LDDCKR_ICKSEL_BUS	(0 << 16)
+#define LDDCKR_ICKSEL_MIPI	(1 << 16)
+#define LDDCKR_ICKSEL_HDMI	(2 << 16)
+#define LDDCKR_ICKSEL_EXT	(3 << 16)
+#define LDDCKR_ICKSEL_MASK	(7 << 16)
+#define LDDCKR_MOSEL		(1 << 6)
+#define LDDCKSTPR		0x414
+#define LDDCKSTPR_DCKSTS	(1 << 16)
+#define LDDCKSTPR_DCKSTP	(1 << 0)
+#define LDMT1R			0x418
+#define LDMT1R_VPOL		(1 << 28)
+#define LDMT1R_HPOL		(1 << 27)
+#define LDMT1R_DWPOL		(1 << 26)
+#define LDMT1R_DIPOL		(1 << 25)
+#define LDMT1R_DAPOL		(1 << 24)
+#define LDMT1R_HSCNT		(1 << 17)
+#define LDMT1R_DWCNT		(1 << 16)
+#define LDMT1R_IFM		(1 << 12)
+#define LDMT1R_MIFTYP_RGB8	(0x0 << 0)
+#define LDMT1R_MIFTYP_RGB9	(0x4 << 0)
+#define LDMT1R_MIFTYP_RGB12A	(0x5 << 0)
+#define LDMT1R_MIFTYP_RGB12B	(0x6 << 0)
+#define LDMT1R_MIFTYP_RGB16	(0x7 << 0)
+#define LDMT1R_MIFTYP_RGB18	(0xa << 0)
+#define LDMT1R_MIFTYP_RGB24	(0xb << 0)
+#define LDMT1R_MIFTYP_YCBCR	(0xf << 0)
+#define LDMT1R_MIFTYP_SYS8A	(0x0 << 0)
+#define LDMT1R_MIFTYP_SYS8B	(0x1 << 0)
+#define LDMT1R_MIFTYP_SYS8C	(0x2 << 0)
+#define LDMT1R_MIFTYP_SYS8D	(0x3 << 0)
+#define LDMT1R_MIFTYP_SYS9	(0x4 << 0)
+#define LDMT1R_MIFTYP_SYS12	(0x5 << 0)
+#define LDMT1R_MIFTYP_SYS16A	(0x7 << 0)
+#define LDMT1R_MIFTYP_SYS16B	(0x8 << 0)
+#define LDMT1R_MIFTYP_SYS16C	(0x9 << 0)
+#define LDMT1R_MIFTYP_SYS18	(0xa << 0)
+#define LDMT1R_MIFTYP_SYS24	(0xb << 0)
+#define LDMT1R_MIFTYP_MASK	(0xf << 0)
+#define LDMT2R			0x41c
+#define LDMT2R_CSUP_MASK	(7 << 26)
+#define LDMT2R_CSUP_SHIFT	26
+#define LDMT2R_RSV		(1 << 25)
+#define LDMT2R_VSEL		(1 << 24)
+#define LDMT2R_WCSC_MASK	(0xff << 16)
+#define LDMT2R_WCSC_SHIFT	16
+#define LDMT2R_WCEC_MASK	(0xff << 8)
+#define LDMT2R_WCEC_SHIFT	8
+#define LDMT2R_WCLW_MASK	(0xff << 0)
+#define LDMT2R_WCLW_SHIFT	0
+#define LDMT3R			0x420
+#define LDMT3R_RDLC_MASK	(0x3f << 24)
+#define LDMT3R_RDLC_SHIFT	24
+#define LDMT3R_RCSC_MASK	(0xff << 16)
+#define LDMT3R_RCSC_SHIFT	16
+#define LDMT3R_RCEC_MASK	(0xff << 8)
+#define LDMT3R_RCEC_SHIFT	8
+#define LDMT3R_RCLW_MASK	(0xff << 0)
+#define LDMT3R_RCLW_SHIFT	0
+#define LDDFR			0x424
+#define LDDFR_CF1		(1 << 18)
+#define LDDFR_CF0		(1 << 17)
+#define LDDFR_CC		(1 << 16)
+#define LDDFR_YF_420		(0 << 8)
+#define LDDFR_YF_422		(1 << 8)
+#define LDDFR_YF_444		(2 << 8)
+#define LDDFR_YF_MASK		(3 << 8)
+#define LDDFR_PKF_ARGB32	(0x00 << 0)
+#define LDDFR_PKF_RGB16		(0x03 << 0)
+#define LDDFR_PKF_RGB24		(0x0b << 0)
+#define LDDFR_PKF_MASK		(0x1f << 0)
+#define LDSM1R			0x428
+#define LDSM1R_OS		(1 << 0)
+#define LDSM2R			0x42c
+#define LDSM2R_OSTRG		(1 << 0)
+#define LDSA1R			0x430
+#define LDSA2R			0x434
+#define LDMLSR			0x438
+#define LDWBFR			0x43c
+#define LDWBCNTR		0x440
+#define LDWBAR			0x444
+#define LDHCNR			0x448
+#define LDHSYNR			0x44c
+#define LDVLNR			0x450
+#define LDVSYNR			0x454
+#define LDHPDR			0x458
+#define LDVPDR			0x45c
+#define LDPMR			0x460
+#define LDPMR_LPS		(3 << 0)
+#define LDINTR			0x468
+#define LDINTR_FE		(1 << 10)
+#define LDINTR_VSE		(1 << 9)
+#define LDINTR_VEE		(1 << 8)
+#define LDINTR_FS		(1 << 2)
+#define LDINTR_VSS		(1 << 1)
+#define LDINTR_VES		(1 << 0)
+#define LDINTR_STATUS_MASK	(0xff << 0)
+#define LDSR			0x46c
+#define LDSR_MSS		(1 << 10)
+#define LDSR_MRS		(1 << 8)
+#define LDSR_AS			(1 << 1)
+#define LDCNT1R			0x470
+#define LDCNT1R_DE		(1 << 0)
+#define LDCNT2R			0x474
+#define LDCNT2R_BR		(1 << 8)
+#define LDCNT2R_MD		(1 << 3)
+#define LDCNT2R_SE		(1 << 2)
+#define LDCNT2R_ME		(1 << 1)
+#define LDCNT2R_DO		(1 << 0)
+#define LDRCNTR			0x478
+#define LDRCNTR_SRS		(1 << 17)
+#define LDRCNTR_SRC		(1 << 16)
+#define LDRCNTR_MRS		(1 << 1)
+#define LDRCNTR_MRC		(1 << 0)
+#define LDDDSR			0x47c
+#define LDDDSR_LS		(1 << 2)
+#define LDDDSR_WS		(1 << 1)
+#define LDDDSR_BS		(1 << 0)
+#define LDHAJR			0x4a0
+
+#define LDDWD0R			0x800
+#define LDDWDxR_WDACT		(1 << 28)
+#define LDDWDxR_RSW		(1 << 24)
+#define LDDRDR			0x840
+#define LDDRDR_RSR		(1 << 24)
+#define LDDRDR_DRD_MASK		(0x3ffff << 0)
+#define LDDWAR			0x900
+#define LDDWAR_WA		(1 << 0)
+#define LDDRAR			0x904
+#define LDDRAR_RA		(1 << 0)
+
+#define LDBCR			0xb00
+#define LDBCR_UPC(n)		(1 << ((n) + 16))
+#define LDBCR_UPF(n)		(1 << ((n) + 8))
+#define LDBCR_UPD(n)		(1 << ((n) + 0))
+#define LDBnBSIFR(n)		(0xb20 + (n) * 0x20 + 0x00)
+#define LDBBSIFR_EN		(1 << 31)
+#define LDBBSIFR_VS		(1 << 29)
+#define LDBBSIFR_BRSEL		(1 << 28)
+#define LDBBSIFR_MX		(1 << 27)
+#define LDBBSIFR_MY		(1 << 26)
+#define LDBBSIFR_CV3		(3 << 24)
+#define LDBBSIFR_CV2		(2 << 24)
+#define LDBBSIFR_CV1		(1 << 24)
+#define LDBBSIFR_CV0		(0 << 24)
+#define LDBBSIFR_CV_MASK	(3 << 24)
+#define LDBBSIFR_LAY_MASK	(0xff << 16)
+#define LDBBSIFR_LAY_SHIFT	16
+#define LDBBSIFR_ROP3_MASK	(0xff << 16)
+#define LDBBSIFR_ROP3_SHIFT	16
+#define LDBBSIFR_AL_PL8		(3 << 14)
+#define LDBBSIFR_AL_PL1		(2 << 14)
+#define LDBBSIFR_AL_PK		(1 << 14)
+#define LDBBSIFR_AL_1		(0 << 14)
+#define LDBBSIFR_AL_MASK	(3 << 14)
+#define LDBBSIFR_SWPL		(1 << 10)
+#define LDBBSIFR_SWPW		(1 << 9)
+#define LDBBSIFR_SWPB		(1 << 8)
+#define LDBBSIFR_RY		(1 << 7)
+#define LDBBSIFR_CHRR_420	(2 << 0)
+#define LDBBSIFR_CHRR_422	(1 << 0)
+#define LDBBSIFR_CHRR_444	(0 << 0)
+#define LDBBSIFR_RPKF_ARGB32	(0x00 << 0)
+#define LDBBSIFR_RPKF_RGB16	(0x03 << 0)
+#define LDBBSIFR_RPKF_RGB24	(0x0b << 0)
+#define LDBBSIFR_RPKF_MASK	(0x1f << 0)
+#define LDBnBSSZR(n)		(0xb20 + (n) * 0x20 + 0x04)
+#define LDBBSSZR_BVSS_MASK	(0xfff << 16)
+#define LDBBSSZR_BVSS_SHIFT	16
+#define LDBBSSZR_BHSS_MASK	(0xfff << 0)
+#define LDBBSSZR_BHSS_SHIFT	0
+#define LDBnBLOCR(n)		(0xb20 + (n) * 0x20 + 0x08)
+#define LDBBLOCR_CVLC_MASK	(0xfff << 16)
+#define LDBBLOCR_CVLC_SHIFT	16
+#define LDBBLOCR_CHLC_MASK	(0xfff << 0)
+#define LDBBLOCR_CHLC_SHIFT	0
+#define LDBnBSMWR(n)		(0xb20 + (n) * 0x20 + 0x0c)
+#define LDBBSMWR_BSMWA_MASK	(0xffff << 16)
+#define LDBBSMWR_BSMWA_SHIFT	16
+#define LDBBSMWR_BSMW_MASK	(0xffff << 0)
+#define LDBBSMWR_BSMW_SHIFT	0
+#define LDBnBSAYR(n)		(0xb20 + (n) * 0x20 + 0x10)
+#define LDBBSAYR_FG1A_MASK	(0xff << 24)
+#define LDBBSAYR_FG1A_SHIFT	24
+#define LDBBSAYR_FG1R_MASK	(0xff << 16)
+#define LDBBSAYR_FG1R_SHIFT	16
+#define LDBBSAYR_FG1G_MASK	(0xff << 8)
+#define LDBBSAYR_FG1G_SHIFT	8
+#define LDBBSAYR_FG1B_MASK	(0xff << 0)
+#define LDBBSAYR_FG1B_SHIFT	0
+#define LDBnBSACR(n)		(0xb20 + (n) * 0x20 + 0x14)
+#define LDBBSACR_FG2A_MASK	(0xff << 24)
+#define LDBBSACR_FG2A_SHIFT	24
+#define LDBBSACR_FG2R_MASK	(0xff << 16)
+#define LDBBSACR_FG2R_SHIFT	16
+#define LDBBSACR_FG2G_MASK	(0xff << 8)
+#define LDBBSACR_FG2G_SHIFT	8
+#define LDBBSACR_FG2B_MASK	(0xff << 0)
+#define LDBBSACR_FG2B_SHIFT	0
+#define LDBnBSAAR(n)		(0xb20 + (n) * 0x20 + 0x18)
+#define LDBBSAAR_AP_MASK	(0xff << 24)
+#define LDBBSAAR_AP_SHIFT	24
+#define LDBBSAAR_R_MASK		(0xff << 16)
+#define LDBBSAAR_R_SHIFT	16
+#define LDBBSAAR_GY_MASK	(0xff << 8)
+#define LDBBSAAR_GY_SHIFT	8
+#define LDBBSAAR_B_MASK		(0xff << 0)
+#define LDBBSAAR_B_SHIFT	0
+#define LDBnBPPCR(n)		(0xb20 + (n) * 0x20 + 0x1c)
+#define LDBBPPCR_AP_MASK	(0xff << 24)
+#define LDBBPPCR_AP_SHIFT	24
+#define LDBBPPCR_R_MASK		(0xff << 16)
+#define LDBBPPCR_R_SHIFT	16
+#define LDBBPPCR_GY_MASK	(0xff << 8)
+#define LDBBPPCR_GY_SHIFT	8
+#define LDBBPPCR_B_MASK		(0xff << 0)
+#define LDBBPPCR_B_SHIFT	0
+#define LDBnBBGCL(n)		(0xb10 + (n) * 0x04)
+#define LDBBBGCL_BGA_MASK	(0xff << 24)
+#define LDBBBGCL_BGA_SHIFT	24
+#define LDBBBGCL_BGR_MASK	(0xff << 16)
+#define LDBBBGCL_BGR_SHIFT	16
+#define LDBBBGCL_BGG_MASK	(0xff << 8)
+#define LDBBBGCL_BGG_SHIFT	8
+#define LDBBBGCL_BGB_MASK	(0xff << 0)
+#define LDBBBGCL_BGB_SHIFT	0
+
+#define LCDC_SIDE_B_OFFSET	0x1000
+#define LCDC_MIRROR_OFFSET	0x2000
+
+static inline bool lcdc_is_banked(u32 reg)
+{
+	switch (reg) {
+	case LDMT1R:
+	case LDMT2R:
+	case LDMT3R:
+	case LDDFR:
+	case LDSM1R:
+	case LDSA1R:
+	case LDSA2R:
+	case LDMLSR:
+	case LDWBFR:
+	case LDWBCNTR:
+	case LDWBAR:
+	case LDHCNR:
+	case LDHSYNR:
+	case LDVLNR:
+	case LDVSYNR:
+	case LDHPDR:
+	case LDVPDR:
+	case LDHAJR:
+		return true;
+	default:
+		return reg >= LDBnBBGCL(0) && reg <= LDBnBPPCR(3);
+	}
+}
+
+static inline void lcdc_write_mirror(struct shmob_drm_device *sdev, u32 reg,
+				     u32 data)
+{
+	iowrite32(data, sdev->mmio + reg + LCDC_MIRROR_OFFSET);
+}
+
+static inline void lcdc_write(struct shmob_drm_device *sdev, u32 reg, u32 data)
+{
+	iowrite32(data, sdev->mmio + reg);
+	if (lcdc_is_banked(reg))
+		iowrite32(data, sdev->mmio + reg + LCDC_SIDE_B_OFFSET);
+}
+
+static inline u32 lcdc_read(struct shmob_drm_device *sdev, u32 reg)
+{
+	return ioread32(sdev->mmio + reg);
+}
+
+static inline int lcdc_wait_bit(struct shmob_drm_device *sdev, u32 reg,
+				u32 mask, u32 until)
+{
+	unsigned long timeout = jiffies + msecs_to_jiffies(5);
+
+	while ((lcdc_read(sdev, reg) & mask) != until) {
+		if (time_after(jiffies, timeout))
+			return -ETIMEDOUT;
+		cpu_relax();
+	}
+
+	return 0;
+}
+
+#endif /* __SHMOB_DRM_REGS_H__ */
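
lcdc_wait_bit() above is the handshake primitive for register sequences that
need the controller to acknowledge. A hedged sketch of one plausible use,
stopping the dot clock via LDDCKSTPR; whether a given SoC needs exactly this
sequence is an assumption, not something this patch establishes:

/* Sketch only: request a dot clock stop and wait for the controller to
 * acknowledge, using the LDDCKSTPR bits defined above.
 */
static int example_stop_dotclock(struct shmob_drm_device *sdev)
{
	lcdc_write(sdev, LDDCKSTPR, LDDCKSTPR_DCKSTP);

	/* DCKSTS rises once the clock has actually stopped. */
	return lcdc_wait_bit(sdev, LDDCKSTPR, LDDCKSTPR_DCKSTS,
			     LDDCKSTPR_DCKSTS);
}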
diff --git a/drivers/gpu/drm/ttm/ttm_bo_util.c b/drivers/gpu/drm/ttm/ttm_bo_util.c
index f8187ea..0df71ea 100644
--- a/drivers/gpu/drm/ttm/ttm_bo_util.c
+++ b/drivers/gpu/drm/ttm/ttm_bo_util.c
@@ -472,7 +472,7 @@
 	else
 		tmp = pgprot_noncached(tmp);
 #endif
-#if defined(__sparc__)
+#if defined(__sparc__) || defined(__mips__)
 	if (!(caching_flags & TTM_PL_FLAG_CACHED))
 		tmp = pgprot_noncached(tmp);
 #endif
diff --git a/drivers/gpu/drm/udl/udl_connector.c b/drivers/gpu/drm/udl/udl_connector.c
index ba055e9..2d98ff9 100644
--- a/drivers/gpu/drm/udl/udl_connector.c
+++ b/drivers/gpu/drm/udl/udl_connector.c
@@ -57,11 +57,8 @@
 
 	edid = (struct edid *)udl_get_edid(udl);
 
-	connector->display_info.raw_edid = (char *)edid;
-
 	drm_mode_connector_update_edid_property(connector, edid);
 	ret = drm_add_edid_modes(connector, edid);
-	connector->display_info.raw_edid = NULL;
 	kfree(edid);
 	return ret;
 }
diff --git a/drivers/gpu/drm/udl/udl_fb.c b/drivers/gpu/drm/udl/udl_fb.c
index ce9a611..b8c00ed 100644
--- a/drivers/gpu/drm/udl/udl_fb.c
+++ b/drivers/gpu/drm/udl/udl_fb.c
@@ -13,6 +13,7 @@
 #include <linux/module.h>
 #include <linux/slab.h>
 #include <linux/fb.h>
+#include <linux/dma-buf.h>
 
 #include "drmP.h"
 #include "drm.h"
@@ -377,16 +378,33 @@
 {
 	struct udl_framebuffer *ufb = to_udl_fb(fb);
 	int i;
+	int ret = 0;
 
 	if (!ufb->active_16)
 		return 0;
 
+	if (ufb->obj->base.import_attach) {
+		ret = dma_buf_begin_cpu_access(ufb->obj->base.import_attach->dmabuf,
+					       0, ufb->obj->base.size,
+					       DMA_FROM_DEVICE);
+		if (ret)
+			return ret;
+	}
+
 	for (i = 0; i < num_clips; i++) {
-		udl_handle_damage(ufb, clips[i].x1, clips[i].y1,
+		ret = udl_handle_damage(ufb, clips[i].x1, clips[i].y1,
 				  clips[i].x2 - clips[i].x1,
 				  clips[i].y2 - clips[i].y1);
+		if (ret)
+			break;
 	}
-	return 0;
+
+	if (ufb->obj->base.import_attach) {
+		dma_buf_end_cpu_access(ufb->obj->base.import_attach->dmabuf,
+				       0, ufb->obj->base.size,
+				       DMA_FROM_DEVICE);
+	}
+	return ret;
 }
 
 static void udl_user_framebuffer_destroy(struct drm_framebuffer *fb)
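
The udl_fb.c change above moves the dma-buf CPU-access bracketing out of the
vmap path and into the damage handler, so CPU access is claimed only around
the actual reads. The general shape of that pattern, as a sketch with error
handling trimmed (signatures per the 3.6-era dma-buf API used in this patch):

/* Bracket CPU reads of an imported dma-buf with begin/end_cpu_access
 * rather than holding access for the lifetime of the vmap.
 * Needs <linux/dma-buf.h>; sketch only.
 */
static int example_read_imported_buffer(struct dma_buf *buf, size_t size)
{
	int ret;

	ret = dma_buf_begin_cpu_access(buf, 0, size, DMA_FROM_DEVICE);
	if (ret)
		return ret;

	/* ... read pixel data and queue USB transfers here ... */

	dma_buf_end_cpu_access(buf, 0, size, DMA_FROM_DEVICE);
	return 0;
}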
diff --git a/drivers/gpu/drm/udl/udl_gem.c b/drivers/gpu/drm/udl/udl_gem.c
index 291ecc1..47b2563 100644
--- a/drivers/gpu/drm/udl/udl_gem.c
+++ b/drivers/gpu/drm/udl/udl_gem.c
@@ -181,11 +181,6 @@
 	int ret;
 
 	if (obj->base.import_attach) {
-		ret = dma_buf_begin_cpu_access(obj->base.import_attach->dmabuf,
-					       0, obj->base.size, DMA_BIDIRECTIONAL);
-		if (ret)
-			return -EINVAL;
-
 		obj->vmapping = dma_buf_vmap(obj->base.import_attach->dmabuf);
 		if (!obj->vmapping)
 			return -ENOMEM;
@@ -206,8 +201,6 @@
 {
 	if (obj->base.import_attach) {
 		dma_buf_vunmap(obj->base.import_attach->dmabuf, obj->vmapping);
-		dma_buf_end_cpu_access(obj->base.import_attach->dmabuf, 0,
-				       obj->base.size, DMA_BIDIRECTIONAL);
 		return;
 	}
 
diff --git a/drivers/gpu/drm/udl/udl_main.c b/drivers/gpu/drm/udl/udl_main.c
index 4c2d836..40bf468 100644
--- a/drivers/gpu/drm/udl/udl_main.c
+++ b/drivers/gpu/drm/udl/udl_main.c
@@ -41,11 +41,8 @@
 	total_len = usb_get_descriptor(usbdev, 0x5f, /* vendor specific */
 				    0, desc, MAX_VENDOR_DESCRIPTOR_SIZE);
 	if (total_len > 5) {
-		DRM_INFO("vendor descriptor length:%x data:%02x %02x %02x %02x" \
-			"%02x %02x %02x %02x %02x %02x %02x\n",
-			total_len, desc[0],
-			desc[1], desc[2], desc[3], desc[4], desc[5], desc[6],
-			desc[7], desc[8], desc[9], desc[10]);
+		DRM_INFO("vendor descriptor length:%x data:%*ph\n",
+			total_len, 11, desc);
 
 		if ((desc[0] != total_len) || /* descriptor length */
 		    (desc[1] != 0x5f) ||   /* vendor descriptor type */
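
The %*ph conversion used above is the printk extension for hex-dumping a
small buffer: the field-width argument gives the byte count. A minimal
illustration:

/* Minimal illustration of %*ph (needs <linux/printk.h>): the width
 * argument, 11 here, is the number of bytes dumped from desc.
 */
static void example_dump_descriptor(const u8 *desc)
{
	/* Prints e.g. "desc: 12 5f 01 21 00 04 04 07 00 01 05". */
	pr_info("desc: %*ph\n", 11, desc);
}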
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
index ba2c35d..09f44c5 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
@@ -438,7 +438,6 @@
 		DRM_ERROR("Failed allocating a device private struct.\n");
 		return -ENOMEM;
 	}
-	memset(dev_priv, 0, sizeof(*dev_priv));
 
 	pci_set_master(dev->pdev);
 
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
index c50724b..5474394 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
@@ -483,7 +483,6 @@
 	}
 
 	/* only need to do this once */
-	memset(cmd, 0, fifo_size);
 	cmd->header.id = cpu_to_le32(SVGA_3D_CMD_BLIT_SURFACE_TO_SCREEN);
 	cmd->header.size = cpu_to_le32(fifo_size - sizeof(cmd->header));
 
diff --git a/drivers/staging/omapdrm/omap_connector.c b/drivers/staging/omapdrm/omap_connector.c
index 55e9c86..38be186 100644
--- a/drivers/staging/omapdrm/omap_connector.c
+++ b/drivers/staging/omapdrm/omap_connector.c
@@ -200,14 +200,11 @@
 			drm_mode_connector_update_edid_property(
 					connector, edid);
 			n = drm_add_edid_modes(connector, edid);
-			kfree(connector->display_info.raw_edid);
-			connector->display_info.raw_edid = edid;
 		} else {
 			drm_mode_connector_update_edid_property(
 					connector, NULL);
-			connector->display_info.raw_edid = NULL;
-			kfree(edid);
 		}
+		kfree(edid);
 	} else {
 		struct drm_display_mode *mode = drm_mode_create(dev);
 		struct omap_video_timings timings = {0};
diff --git a/include/drm/drmP.h b/include/drm/drmP.h
index d6b67bb..d5f0c16 100644
--- a/include/drm/drmP.h
+++ b/include/drm/drmP.h
@@ -1367,6 +1367,7 @@
 
 /* Cache management (drm_cache.c) */
 void drm_clflush_pages(struct page *pages[], unsigned long num_pages);
+void drm_clflush_sg(struct sg_table *st);
 void drm_clflush_virt_range(char *addr, unsigned long length);
 
 				/* Locking IOCTL support (drm_lock.h) */
diff --git a/include/drm/drm_crtc.h b/include/drm/drm_crtc.h
index bfacf0d..eb91d52 100644
--- a/include/drm/drm_crtc.h
+++ b/include/drm/drm_crtc.h
@@ -215,8 +215,6 @@
 	u32 color_formats;
 
 	u8 cea_rev;
-
-	char *raw_edid; /* if any */
 };
 
 struct drm_framebuffer_funcs {
@@ -593,6 +591,7 @@
 	int video_latency[2];	/* [0]: progressive, [1]: interlaced */
 	int audio_latency[2];
 	int null_edid_counter; /* needed to workaround some HW bugs where we get all 0s */
+	unsigned bad_edid_counter;
 };
 
 /**
@@ -1035,7 +1034,7 @@
 				int hdisplay, int vdisplay);
 
 extern int drm_edid_header_is_valid(const u8 *raw_edid);
-extern bool drm_edid_block_valid(u8 *raw_edid, int block);
+extern bool drm_edid_block_valid(u8 *raw_edid, int block, bool print_bad_edid);
 extern bool drm_edid_is_valid(struct edid *edid);
 struct drm_display_mode *drm_mode_find_dmt(struct drm_device *dev,
 					   int hsize, int vsize, int fresh,
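
drm_edid_block_valid() gains a print_bad_edid flag so that callers which
retry flaky DDC reads can stay silent until the final attempt. A hedged
sketch of such a caller (hypothetical, not from this patch):

/* Sketch of a retrying EDID reader using the new print_bad_edid flag:
 * quiet on intermediate attempts, warn only on the last one.
 */
static bool example_read_edid_block(u8 *block, int blocknum)
{
	int tries;

	for (tries = 0; tries < 4; tries++) {
		/* ... fetch the block over DDC into block[] ... */
		if (drm_edid_block_valid(block, blocknum, tries == 3))
			return true;
	}
	return false;
}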
diff --git a/include/drm/drm_fb_cma_helper.h b/include/drm/drm_fb_cma_helper.h
new file mode 100644
index 0000000..76c7098
--- /dev/null
+++ b/include/drm/drm_fb_cma_helper.h
@@ -0,0 +1,27 @@
+#ifndef __DRM_FB_CMA_HELPER_H__
+#define __DRM_FB_CMA_HELPER_H__
+
+struct drm_fbdev_cma;
+struct drm_gem_cma_object;
+
+struct drm_framebuffer;
+struct drm_device;
+struct drm_file;
+struct drm_mode_fb_cmd2;
+
+struct drm_fbdev_cma *drm_fbdev_cma_init(struct drm_device *dev,
+	unsigned int preferred_bpp, unsigned int num_crtc,
+	unsigned int max_conn_count);
+void drm_fbdev_cma_fini(struct drm_fbdev_cma *fbdev_cma);
+
+void drm_fbdev_cma_restore_mode(struct drm_fbdev_cma *fbdev_cma);
+void drm_fbdev_cma_hotplug_event(struct drm_fbdev_cma *fbdev_cma);
+
+struct drm_framebuffer *drm_fb_cma_create(struct drm_device *dev,
+	struct drm_file *file_priv, struct drm_mode_fb_cmd2 *mode_cmd);
+
+struct drm_gem_cma_object *drm_fb_cma_get_gem_obj(struct drm_framebuffer *fb,
+	unsigned int plane);
+
+#endif
+
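
A hedged sketch of how a driver is expected to wire these helpers up: create
the fbdev emulation once mode setting is initialized, then forward hotplug
and lastclose events (single CRTC/connector assumed, error handling trimmed,
and the ERR_PTR return convention is an assumption):

/* Sketch of a driver using the CMA fbdev helper declared above. */
static struct drm_fbdev_cma *example_fbdev;

static int example_load(struct drm_device *dev)
{
	/* ... drm_mode_config_init() and CRTC/connector setup ... */
	example_fbdev = drm_fbdev_cma_init(dev, 16, 1, 1);
	return IS_ERR(example_fbdev) ? PTR_ERR(example_fbdev) : 0;
}

static void example_output_poll_changed(struct drm_device *dev)
{
	drm_fbdev_cma_hotplug_event(example_fbdev);
}

static void example_lastclose(struct drm_device *dev)
{
	drm_fbdev_cma_restore_mode(example_fbdev);
}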
diff --git a/include/drm/drm_fourcc.h b/include/drm/drm_fourcc.h
index f462118..646ae5f 100644
--- a/include/drm/drm_fourcc.h
+++ b/include/drm/drm_fourcc.h
@@ -106,6 +106,8 @@
 #define DRM_FORMAT_NV21		fourcc_code('N', 'V', '2', '1') /* 2x2 subsampled Cb:Cr plane */
 #define DRM_FORMAT_NV16		fourcc_code('N', 'V', '1', '6') /* 2x1 subsampled Cr:Cb plane */
 #define DRM_FORMAT_NV61		fourcc_code('N', 'V', '6', '1') /* 2x1 subsampled Cb:Cr plane */
+#define DRM_FORMAT_NV24		fourcc_code('N', 'V', '2', '4') /* non-subsampled Cr:Cb plane */
+#define DRM_FORMAT_NV42		fourcc_code('N', 'V', '4', '2') /* non-subsampled Cb:Cr plane */
 
 /* special NV12 tiled format */
 #define DRM_FORMAT_NV12MT	fourcc_code('T', 'M', '1', '2') /* 2x2 subsampled Cr:Cb plane 64x32 macroblocks */
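
DRM fourcc codes are just the four ASCII characters packed little-endian,
which a tiny standalone program can confirm for the NV24 code added above:

/* Standalone demonstration of DRM fourcc packing for the new NV24 code. */
#include <stdio.h>
#include <stdint.h>

#define fourcc_code(a, b, c, d) ((uint32_t)(a) | ((uint32_t)(b) << 8) | \
				 ((uint32_t)(c) << 16) | ((uint32_t)(d) << 24))

int main(void)
{
	uint32_t nv24 = fourcc_code('N', 'V', '2', '4');

	/* Prints "NV24 = 0x3432564e": the packing is plain arithmetic,
	 * so the value is the same on any host endianness. */
	printf("NV24 = 0x%08x\n", nv24);
	return 0;
}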
diff --git a/include/drm/drm_gem_cma_helper.h b/include/drm/drm_gem_cma_helper.h
new file mode 100644
index 0000000..f0f6b1a
--- /dev/null
+++ b/include/drm/drm_gem_cma_helper.h
@@ -0,0 +1,44 @@
+#ifndef __DRM_GEM_CMA_HELPER_H__
+#define __DRM_GEM_CMA_HELPER_H__
+
+struct drm_gem_cma_object {
+	struct drm_gem_object base;
+	dma_addr_t paddr;
+	void *vaddr;
+};
+
+static inline struct drm_gem_cma_object *
+to_drm_gem_cma_obj(struct drm_gem_object *gem_obj)
+{
+	return container_of(gem_obj, struct drm_gem_cma_object, base);
+}
+
+/* free gem object. */
+void drm_gem_cma_free_object(struct drm_gem_object *gem_obj);
+
+/* create memory region for drm framebuffer. */
+int drm_gem_cma_dumb_create(struct drm_file *file_priv,
+		struct drm_device *drm, struct drm_mode_create_dumb *args);
+
+/* map memory region for drm framebuffer to user space. */
+int drm_gem_cma_dumb_map_offset(struct drm_file *file_priv,
+		struct drm_device *drm, uint32_t handle, uint64_t *offset);
+
+/* mmap the gem object to user space; vm_flags and attributes can be adjusted here. */
+int drm_gem_cma_mmap(struct file *filp, struct vm_area_struct *vma);
+
+/*
+ * destroy the allocated memory region.
+ *	- the gem handle and the physical memory region pointed to by the
+ *	gem object are released by drm_gem_handle_delete().
+ */
+int drm_gem_cma_dumb_destroy(struct drm_file *file_priv,
+		struct drm_device *drm, unsigned int handle);
+
+/* allocate physical memory. */
+struct drm_gem_cma_object *drm_gem_cma_create(struct drm_device *drm,
+		unsigned int size);
+
+extern const struct vm_operations_struct drm_gem_cma_vm_ops;
+
+#endif /* __DRM_GEM_CMA_HELPER_H__ */
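
These dumb-buffer entry points back the generic DRM dumb-buffer ioctls. A
hedged userspace sketch of allocating and mapping one through them (device
path and geometry are assumptions, error checks trimmed):

/* Userspace sketch: allocate a dumb buffer and mmap it. Drivers wire
 * the CMA helpers above into .dumb_create and .dumb_map_offset, so
 * this path exercises them end to end.
 */
#include <fcntl.h>
#include <string.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <xf86drm.h>

int main(void)
{
	int fd = open("/dev/dri/card0", O_RDWR);
	struct drm_mode_create_dumb create;
	struct drm_mode_map_dumb map;
	void *fb;

	memset(&create, 0, sizeof(create));
	create.width = 1280;
	create.height = 720;
	create.bpp = 16;			/* RGB565 */
	ioctl(fd, DRM_IOCTL_MODE_CREATE_DUMB, &create);

	memset(&map, 0, sizeof(map));
	map.handle = create.handle;
	ioctl(fd, DRM_IOCTL_MODE_MAP_DUMB, &map);

	fb = mmap(0, create.size, PROT_READ | PROT_WRITE, MAP_SHARED,
		  fd, map.offset);
	/* ... draw into fb ... */
	return fb == MAP_FAILED;
}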
diff --git a/include/drm/drm_sarea.h b/include/drm/drm_sarea.h
index ee5389d..1d1a858 100644
--- a/include/drm/drm_sarea.h
+++ b/include/drm/drm_sarea.h
@@ -37,6 +37,8 @@
 /* SAREA area needs to be at least a page */
 #if defined(__alpha__)
 #define SAREA_MAX                       0x2000U
+#elif defined(__mips__)
+#define SAREA_MAX                       0x4000U
 #elif defined(__ia64__)
 #define SAREA_MAX                       0x10000U	/* 64kB */
 #else
diff --git a/include/drm/i915_drm.h b/include/drm/i915_drm.h
index 8cc7083..c883300 100644
--- a/include/drm/i915_drm.h
+++ b/include/drm/i915_drm.h
@@ -203,6 +203,9 @@
 #define DRM_I915_GEM_WAIT	0x2c
 #define DRM_I915_GEM_CONTEXT_CREATE	0x2d
 #define DRM_I915_GEM_CONTEXT_DESTROY	0x2e
+#define DRM_I915_GEM_SET_CACHEING	0x2f
+#define DRM_I915_GEM_GET_CACHEING	0x30
+#define DRM_I915_REG_READ		0x31
 
 #define DRM_IOCTL_I915_INIT		DRM_IOW( DRM_COMMAND_BASE + DRM_I915_INIT, drm_i915_init_t)
 #define DRM_IOCTL_I915_FLUSH		DRM_IO ( DRM_COMMAND_BASE + DRM_I915_FLUSH)
@@ -227,6 +230,8 @@
 #define DRM_IOCTL_I915_GEM_PIN		DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_PIN, struct drm_i915_gem_pin)
 #define DRM_IOCTL_I915_GEM_UNPIN	DRM_IOW(DRM_COMMAND_BASE + DRM_I915_GEM_UNPIN, struct drm_i915_gem_unpin)
 #define DRM_IOCTL_I915_GEM_BUSY		DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_BUSY, struct drm_i915_gem_busy)
+#define DRM_IOCTL_I915_GEM_SET_CACHEING		DRM_IOW(DRM_COMMAND_BASE + DRM_I915_GEM_SET_CACHEING, struct drm_i915_gem_cacheing)
+#define DRM_IOCTL_I915_GEM_GET_CACHEING		DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_GET_CACHEING, struct drm_i915_gem_cacheing)
 #define DRM_IOCTL_I915_GEM_THROTTLE	DRM_IO ( DRM_COMMAND_BASE + DRM_I915_GEM_THROTTLE)
 #define DRM_IOCTL_I915_GEM_ENTERVT	DRM_IO(DRM_COMMAND_BASE + DRM_I915_GEM_ENTERVT)
 #define DRM_IOCTL_I915_GEM_LEAVEVT	DRM_IO(DRM_COMMAND_BASE + DRM_I915_GEM_LEAVEVT)
@@ -249,6 +254,7 @@
 #define DRM_IOCTL_I915_GEM_WAIT		DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_WAIT, struct drm_i915_gem_wait)
 #define DRM_IOCTL_I915_GEM_CONTEXT_CREATE	DRM_IOWR (DRM_COMMAND_BASE + DRM_I915_GEM_CONTEXT_CREATE, struct drm_i915_gem_context_create)
 #define DRM_IOCTL_I915_GEM_CONTEXT_DESTROY	DRM_IOW (DRM_COMMAND_BASE + DRM_I915_GEM_CONTEXT_DESTROY, struct drm_i915_gem_context_destroy)
+#define DRM_IOCTL_I915_REG_READ			DRM_IOWR (DRM_COMMAND_BASE + DRM_I915_REG_READ, struct drm_i915_reg_read)
 
 /* Allow drivers to submit batchbuffers directly to hardware, relying
  * on the security mechanisms provided by hardware.
@@ -305,6 +311,9 @@
 #define I915_PARAM_HAS_LLC     	 	 17
 #define I915_PARAM_HAS_ALIASING_PPGTT	 18
 #define I915_PARAM_HAS_WAIT_TIMEOUT	 19
+#define I915_PARAM_HAS_SEMAPHORES	 20
+#define I915_PARAM_HAS_PRIME_VMAP_FLUSH	 21
+#define I915_PARAM_RSVD_FOR_FUTURE_USE	 22
 
 typedef struct drm_i915_getparam {
 	int param;
@@ -698,10 +707,31 @@
 	/** Handle of the buffer to check for busy */
 	__u32 handle;
 
-	/** Return busy status (1 if busy, 0 if idle) */
+	/** Return busy status (1 if busy, 0 if idle).
+	 * The high word is used to indicate on which rings the object
+	 * currently resides:
+	 *  16:31 - busy (r or r/w) rings (16 render, 17 bsd, 18 blt, etc)
+	 */
 	__u32 busy;
 };
 
+#define I915_CACHEING_NONE		0
+#define I915_CACHEING_CACHED		1
+
+struct drm_i915_gem_cacheing {
+	/**
+	 * Handle of the buffer to set/get the caching level of. */
+	__u32 handle;
+
+	/**
+	 * Caching level to apply or return value
+	 *
+	 * bits0-15 are for generic caching control (i.e. the above defined
+	 * values). bits16-31 are reserved for platform-specific variations
+	 * (e.g. l3$ caching on gen7). */
+	__u32 cacheing;
+};
+
 #define I915_TILING_NONE	0
 #define I915_TILING_X		1
 #define I915_TILING_Y		2
@@ -918,4 +948,8 @@
 	__u32 pad;
 };
 
+struct drm_i915_reg_read {
+	__u64 offset;
+	__u64 val; /* Return value */
+};
 #endif				/* _I915_DRM_H_ */
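
The new DRM_IOCTL_I915_REG_READ takes a register offset and returns the
64-bit value. A hedged userspace sketch; the kernel only honours a small
whitelist of offsets, and the one below is a placeholder rather than a
known-good choice:

/* Userspace sketch for DRM_IOCTL_I915_REG_READ. Headers assume libdrm's
 * include path; the register offset is a placeholder.
 */
#include <fcntl.h>
#include <stdio.h>
#include <xf86drm.h>
#include <i915_drm.h>

int main(void)
{
	int fd = open("/dev/dri/card0", O_RDWR);
	struct drm_i915_reg_read reg = {
		.offset = 0x2358,	/* placeholder register offset */
	};

	if (drmIoctl(fd, DRM_IOCTL_I915_REG_READ, &reg) == 0)
		printf("reg 0x%llx = 0x%llx\n",
		       (unsigned long long)reg.offset,
		       (unsigned long long)reg.val);
	return 0;
}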
diff --git a/include/drm/intel-gtt.h b/include/drm/intel-gtt.h
index 8e29d55..2e37e9f 100644
--- a/include/drm/intel-gtt.h
+++ b/include/drm/intel-gtt.h
@@ -30,16 +30,10 @@
 bool intel_enable_gtt(void);
 
 void intel_gtt_chipset_flush(void);
-void intel_gtt_unmap_memory(struct scatterlist *sg_list, int num_sg);
-void intel_gtt_clear_range(unsigned int first_entry, unsigned int num_entries);
-int intel_gtt_map_memory(struct page **pages, unsigned int num_entries,
-			 struct scatterlist **sg_list, int *num_sg);
-void intel_gtt_insert_sg_entries(struct scatterlist *sg_list,
-				 unsigned int sg_len,
+void intel_gtt_insert_sg_entries(struct sg_table *st,
 				 unsigned int pg_start,
 				 unsigned int flags);
-void intel_gtt_insert_pages(unsigned int first_entry, unsigned int num_entries,
-			    struct page **pages, unsigned int flags);
+void intel_gtt_clear_range(unsigned int first_entry, unsigned int num_entries);
 
 /* Special gtt memory types */
 #define AGP_DCACHE_MEMORY	1
diff --git a/include/linux/platform_data/shmob_drm.h b/include/linux/platform_data/shmob_drm.h
new file mode 100644
index 0000000..7c686d3
--- /dev/null
+++ b/include/linux/platform_data/shmob_drm.h
@@ -0,0 +1,99 @@
+/*
+ * shmob_drm.h  --  SH Mobile DRM driver
+ *
+ * Copyright (C) 2012 Renesas Corporation
+ *
+ * Laurent Pinchart (laurent.pinchart@ideasonboard.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#ifndef __SHMOB_DRM_H__
+#define __SHMOB_DRM_H__
+
+#include <linux/kernel.h>
+
+#include <drm/drm_mode.h>
+
+struct sh_mobile_meram_cfg;
+struct sh_mobile_meram_info;
+
+enum shmob_drm_clk_source {
+	SHMOB_DRM_CLK_BUS,
+	SHMOB_DRM_CLK_PERIPHERAL,
+	SHMOB_DRM_CLK_EXTERNAL,
+};
+
+enum shmob_drm_interface {
+	SHMOB_DRM_IFACE_RGB8,		/* 24bpp, 8:8:8 */
+	SHMOB_DRM_IFACE_RGB9,		/* 18bpp, 9:9 */
+	SHMOB_DRM_IFACE_RGB12A,		/* 24bpp, 12:12 */
+	SHMOB_DRM_IFACE_RGB12B,		/* 12bpp */
+	SHMOB_DRM_IFACE_RGB16,		/* 16bpp */
+	SHMOB_DRM_IFACE_RGB18,		/* 18bpp */
+	SHMOB_DRM_IFACE_RGB24,		/* 24bpp */
+	SHMOB_DRM_IFACE_YUV422,		/* 16bpp */
+	SHMOB_DRM_IFACE_SYS8A,		/* 24bpp, 8:8:8 */
+	SHMOB_DRM_IFACE_SYS8B,		/* 18bpp, 8:8:2 */
+	SHMOB_DRM_IFACE_SYS8C,		/* 18bpp, 2:8:8 */
+	SHMOB_DRM_IFACE_SYS8D,		/* 16bpp, 8:8 */
+	SHMOB_DRM_IFACE_SYS9,		/* 18bpp, 9:9 */
+	SHMOB_DRM_IFACE_SYS12,		/* 24bpp, 12:12 */
+	SHMOB_DRM_IFACE_SYS16A,		/* 16bpp */
+	SHMOB_DRM_IFACE_SYS16B,		/* 18bpp, 16:2 */
+	SHMOB_DRM_IFACE_SYS16C,		/* 18bpp, 2:16 */
+	SHMOB_DRM_IFACE_SYS18,		/* 18bpp */
+	SHMOB_DRM_IFACE_SYS24,		/* 24bpp */
+};
+
+struct shmob_drm_backlight_data {
+	const char *name;
+	int max_brightness;
+	int (*get_brightness)(void);
+	int (*set_brightness)(int brightness);
+};
+
+struct shmob_drm_panel_data {
+	unsigned int width_mm;		/* Panel width in mm */
+	unsigned int height_mm;		/* Panel height in mm */
+	struct drm_mode_modeinfo mode;
+};
+
+struct shmob_drm_sys_interface_data {
+	unsigned int read_latch:6;
+	unsigned int read_setup:8;
+	unsigned int read_cycle:8;
+	unsigned int read_strobe:8;
+	unsigned int write_setup:8;
+	unsigned int write_cycle:8;
+	unsigned int write_strobe:8;
+	unsigned int cs_setup:3;
+	unsigned int vsync_active_high:1;
+	unsigned int vsync_dir_input:1;
+};
+
+#define SHMOB_DRM_IFACE_FL_DWPOL (1 << 0) /* Rising edge dot clock data latch */
+#define SHMOB_DRM_IFACE_FL_DIPOL (1 << 1) /* Active low display enable */
+#define SHMOB_DRM_IFACE_FL_DAPOL (1 << 2) /* Active low display data */
+#define SHMOB_DRM_IFACE_FL_HSCNT (1 << 3) /* Disable HSYNC during VBLANK */
+#define SHMOB_DRM_IFACE_FL_DWCNT (1 << 4) /* Disable dotclock during blanking */
+
+struct shmob_drm_interface_data {
+	enum shmob_drm_interface interface;
+	struct shmob_drm_sys_interface_data sys;
+	unsigned int clk_div;
+	unsigned int flags;
+};
+
+struct shmob_drm_platform_data {
+	enum shmob_drm_clk_source clk_source;
+	struct shmob_drm_interface_data iface;
+	struct shmob_drm_panel_data panel;
+	struct shmob_drm_backlight_data backlight;
+	const struct sh_mobile_meram_cfg *meram;
+};
+
+#endif /* __SHMOB_DRM_H__ */
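
The platform data above travels from board code to the driver via the
platform device named "shmob-drm". A hedged board-file sketch with entirely
hypothetical panel numbers:

/* Board-file sketch: register a "shmob-drm" platform device with the
 * platform data defined above. All panel numbers are hypothetical.
 */
#include <linux/platform_device.h>
#include <linux/platform_data/shmob_drm.h>

static struct shmob_drm_platform_data example_lcd_pdata = {
	.clk_source = SHMOB_DRM_CLK_BUS,
	.iface = {
		.interface = SHMOB_DRM_IFACE_RGB16,
		.clk_div = 5,
	},
	.panel = {
		.width_mm = 44,
		.height_mm = 79,
		.mode = {
			.clock = 33260,		/* kHz */
			.hdisplay = 480,
			.hsync_start = 500,
			.hsync_end = 502,
			.htotal = 538,
			.vdisplay = 800,
			.vsync_start = 804,
			.vsync_end = 806,
			.vtotal = 829,
		},
	},
};

static struct platform_device example_lcd_device = {
	.name = "shmob-drm",
	.id = 0,
	.dev = {
		.platform_data = &example_lcd_pdata,
	},
};

/* called from the board's init code: */
/* platform_device_register(&example_lcd_device); */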