Merge branch 'drm-nouveau-next' of git://anongit.freedesktop.org/git/nouveau/linux-2.6 into drm-fixes

A couple of BIOS parser fixes (one for ancient chips, another for newer ones, the latter important in Optimus configs), another to make sure KMS is enabled on certain Optimus configs, and a TTM failure-path fix.

* 'drm-nouveau-next' of git://anongit.freedesktop.org/git/nouveau/linux-2.6:
  drm/nouveau/ttm: prevent double-free in nouveau_sgdma_create_ttm() failure path
  drm/nouveau/bios/init: fix thinko in INIT_CONFIGURE_MEM
  drm/nouveau/kms: enable for non-vga pci classes
  drm/nouveau/bios/init: stub opcode 0xaa
diff --git a/Documentation/devicetree/bindings/gpu/samsung-rotator.txt b/Documentation/devicetree/bindings/gpu/samsung-rotator.txt
new file mode 100644
index 0000000..82cd1ed
--- /dev/null
+++ b/Documentation/devicetree/bindings/gpu/samsung-rotator.txt
@@ -0,0 +1,27 @@
+* Samsung Image Rotator
+
+Required properties:
+  - compatible : value should be one of the following:
+	(a) "samsung,exynos4210-rotator" for Rotator IP in Exynos4210
+	(b) "samsung,exynos4212-rotator" for Rotator IP in Exynos4212/4412
+	(c) "samsung,exynos5250-rotator" for Rotator IP in Exynos5250
+
+  - reg : Physical base address of the IP registers and length of memory
+	  mapped region.
+
+  - interrupts : Interrupt specifier for rotator interrupt, according to format
+		 specific to interrupt parent.
+
+  - clocks : Clock specifier for rotator clock, according to generic clock
+	     bindings. (See Documentation/devicetree/bindings/clock/exynos*.txt)
+
+  - clock-names : Names of clocks. For exynos rotator, it should be "rotator".
+
+Example:
+	rotator@12810000 {
+		compatible = "samsung,exynos4210-rotator";
+		reg = <0x12810000 0x1000>;
+		interrupts = <0 83 0>;
+		clocks = <&clock 278>;
+		clock-names = "rotator";
+	};
diff --git a/arch/x86/kernel/early-quirks.c b/arch/x86/kernel/early-quirks.c
index 63bdb29..b3cd3eb 100644
--- a/arch/x86/kernel/early-quirks.c
+++ b/arch/x86/kernel/early-quirks.c
@@ -12,6 +12,7 @@
 #include <linux/pci.h>
 #include <linux/acpi.h>
 #include <linux/pci_ids.h>
+#include <drm/i915_drm.h>
 #include <asm/pci-direct.h>
 #include <asm/dma.h>
 #include <asm/io_apic.h>
@@ -216,6 +217,157 @@
 
 }
 
+/*
+ * Systems with Intel graphics controllers set aside memory exclusively
+ * for gfx driver use.  This memory is not marked in the E820 as reserved
+ * or as RAM, and so is subject to overlap from E820 manipulation later
+ * in the boot process.  On some systems, MMIO space is allocated on top,
+ * despite the efforts of the "RAM buffer" approach, which simply rounds
+ * memory boundaries up to 64M to try to catch space that may decode
+ * as RAM and so is not suitable for MMIO.
+ *
+ * And yes, so far on current devices the base addr is always under 4G.
+ */
+static u32 __init intel_stolen_base(int num, int slot, int func)
+{
+	u32 base;
+
+	/*
+	 * For the PCI IDs in this quirk, the stolen base is always
+	 * in 0x5c, aka the BDSM register (yes that's really what
+	 * it's called).
+	 */
+	base = read_pci_config(num, slot, func, 0x5c);
+	base &= ~((1<<20) - 1);
+
+	return base;
+}
+
+#define KB(x)	((x) * 1024)
+#define MB(x)	(KB (KB (x)))
+#define GB(x)	(MB (KB (x)))
+
+static size_t __init gen3_stolen_size(int num, int slot, int func)
+{
+	size_t stolen_size;
+	u16 gmch_ctrl;
+
+	gmch_ctrl = read_pci_config_16(0, 0, 0, I830_GMCH_CTRL);
+
+	switch (gmch_ctrl & I855_GMCH_GMS_MASK) {
+	case I855_GMCH_GMS_STOLEN_1M:
+		stolen_size = MB(1);
+		break;
+	case I855_GMCH_GMS_STOLEN_4M:
+		stolen_size = MB(4);
+		break;
+	case I855_GMCH_GMS_STOLEN_8M:
+		stolen_size = MB(8);
+		break;
+	case I855_GMCH_GMS_STOLEN_16M:
+		stolen_size = MB(16);
+		break;
+	case I855_GMCH_GMS_STOLEN_32M:
+		stolen_size = MB(32);
+		break;
+	case I915_GMCH_GMS_STOLEN_48M:
+		stolen_size = MB(48);
+		break;
+	case I915_GMCH_GMS_STOLEN_64M:
+		stolen_size = MB(64);
+		break;
+	case G33_GMCH_GMS_STOLEN_128M:
+		stolen_size = MB(128);
+		break;
+	case G33_GMCH_GMS_STOLEN_256M:
+		stolen_size = MB(256);
+		break;
+	case INTEL_GMCH_GMS_STOLEN_96M:
+		stolen_size = MB(96);
+		break;
+	case INTEL_GMCH_GMS_STOLEN_160M:
+		stolen_size = MB(160);
+		break;
+	case INTEL_GMCH_GMS_STOLEN_224M:
+		stolen_size = MB(224);
+		break;
+	case INTEL_GMCH_GMS_STOLEN_352M:
+		stolen_size = MB(352);
+		break;
+	default:
+		stolen_size = 0;
+		break;
+	}
+
+	return stolen_size;
+}
+
+static size_t __init gen6_stolen_size(int num, int slot, int func)
+{
+	u16 gmch_ctrl;
+
+	gmch_ctrl = read_pci_config_16(num, slot, func, SNB_GMCH_CTRL);
+	gmch_ctrl >>= SNB_GMCH_GMS_SHIFT;
+	gmch_ctrl &= SNB_GMCH_GMS_MASK;
+
+	return gmch_ctrl << 25; /* 32 MB units */
+}
+
+typedef size_t (*stolen_size_fn)(int num, int slot, int func);
+
+static struct pci_device_id intel_stolen_ids[] __initdata = {
+	INTEL_I915G_IDS(gen3_stolen_size),
+	INTEL_I915GM_IDS(gen3_stolen_size),
+	INTEL_I945G_IDS(gen3_stolen_size),
+	INTEL_I945GM_IDS(gen3_stolen_size),
+	INTEL_VLV_M_IDS(gen3_stolen_size),
+	INTEL_VLV_D_IDS(gen3_stolen_size),
+	INTEL_PINEVIEW_IDS(gen3_stolen_size),
+	INTEL_I965G_IDS(gen3_stolen_size),
+	INTEL_G33_IDS(gen3_stolen_size),
+	INTEL_I965GM_IDS(gen3_stolen_size),
+	INTEL_GM45_IDS(gen3_stolen_size),
+	INTEL_G45_IDS(gen3_stolen_size),
+	INTEL_IRONLAKE_D_IDS(gen3_stolen_size),
+	INTEL_IRONLAKE_M_IDS(gen3_stolen_size),
+	INTEL_SNB_D_IDS(gen6_stolen_size),
+	INTEL_SNB_M_IDS(gen6_stolen_size),
+	INTEL_IVB_M_IDS(gen6_stolen_size),
+	INTEL_IVB_D_IDS(gen6_stolen_size),
+	INTEL_HSW_D_IDS(gen6_stolen_size),
+	INTEL_HSW_M_IDS(gen6_stolen_size),
+};
+
+static void __init intel_graphics_stolen(int num, int slot, int func)
+{
+	size_t size;
+	int i;
+	u32 start;
+	u16 device, subvendor, subdevice;
+
+	device = read_pci_config_16(num, slot, func, PCI_DEVICE_ID);
+	subvendor = read_pci_config_16(num, slot, func,
+				       PCI_SUBSYSTEM_VENDOR_ID);
+	subdevice = read_pci_config_16(num, slot, func, PCI_SUBSYSTEM_ID);
+
+	for (i = 0; i < ARRAY_SIZE(intel_stolen_ids); i++) {
+		if (intel_stolen_ids[i].device == device) {
+			stolen_size_fn stolen_size =
+				(stolen_size_fn)intel_stolen_ids[i].driver_data;
+			size = stolen_size(num, slot, func);
+			start = intel_stolen_base(num, slot, func);
+			if (size && start) {
+				/* Mark this space as reserved */
+				e820_add_region(start, size, E820_RESERVED);
+				sanitize_e820_map(e820.map,
+						  ARRAY_SIZE(e820.map),
+						  &e820.nr_map);
+			}
+			return;
+		}
+	}
+}
+
 #define QFLAG_APPLY_ONCE 	0x1
 #define QFLAG_APPLIED		0x2
 #define QFLAG_DONE		(QFLAG_APPLY_ONCE|QFLAG_APPLIED)
@@ -251,6 +403,8 @@
 	  PCI_BASE_CLASS_BRIDGE, 0, intel_remapping_check },
 	{ PCI_VENDOR_ID_INTEL, 0x3406, PCI_CLASS_BRIDGE_HOST,
 	  PCI_BASE_CLASS_BRIDGE, 0, intel_remapping_check },
+	{ PCI_VENDOR_ID_INTEL, PCI_ANY_ID, PCI_CLASS_DISPLAY_VGA, PCI_ANY_ID,
+	  QFLAG_APPLY_ONCE, intel_graphics_stolen },
 	{}
 };
 
diff --git a/drivers/gpu/drm/ast/ast_drv.h b/drivers/gpu/drm/ast/ast_drv.h
index 796dbb2..8492b68e 100644
--- a/drivers/gpu/drm/ast/ast_drv.h
+++ b/drivers/gpu/drm/ast/ast_drv.h
@@ -177,7 +177,7 @@
 
 static inline void ast_open_key(struct ast_private *ast)
 {
-	ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xA1, 0xFF, 0x04);
+	ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0x80, 0xA8);
 }
 
 #define AST_VIDMEM_SIZE_8M    0x00800000
diff --git a/drivers/gpu/drm/exynos/Kconfig b/drivers/gpu/drm/exynos/Kconfig
index 772c62a..4752f22 100644
--- a/drivers/gpu/drm/exynos/Kconfig
+++ b/drivers/gpu/drm/exynos/Kconfig
@@ -1,11 +1,12 @@
 config DRM_EXYNOS
 	tristate "DRM Support for Samsung SoC EXYNOS Series"
-	depends on DRM && (PLAT_SAMSUNG || ARCH_MULTIPLATFORM)
+	depends on OF && DRM && (PLAT_SAMSUNG || ARCH_MULTIPLATFORM)
 	select DRM_KMS_HELPER
 	select FB_CFB_FILLRECT
 	select FB_CFB_COPYAREA
 	select FB_CFB_IMAGEBLIT
 	select VT_HW_CONSOLE_BINDING if FRAMEBUFFER_CONSOLE
+	select VIDEOMODE_HELPERS
 	help
 	  Choose this option if you have a Samsung SoC EXYNOS chipset.
 	  If M is selected the module will be called exynosdrm.
@@ -24,9 +25,8 @@
 
 config DRM_EXYNOS_FIMD
 	bool "Exynos DRM FIMD"
-	depends on OF && DRM_EXYNOS && !FB_S3C && !ARCH_MULTIPLATFORM
+	depends on DRM_EXYNOS && !FB_S3C && !ARCH_MULTIPLATFORM
 	select FB_MODE_HELPERS
-	select VIDEOMODE_HELPERS
 	help
 	  Choose this option if you want to use Exynos FIMD for DRM.
 
diff --git a/drivers/gpu/drm/exynos/exynos_ddc.c b/drivers/gpu/drm/exynos/exynos_ddc.c
index 30ef41b..6a8c84e 100644
--- a/drivers/gpu/drm/exynos/exynos_ddc.c
+++ b/drivers/gpu/drm/exynos/exynos_ddc.c
@@ -15,7 +15,7 @@
 
 #include <linux/kernel.h>
 #include <linux/i2c.h>
-
+#include <linux/of.h>
 
 #include "exynos_drm_drv.h"
 #include "exynos_hdmi.h"
@@ -41,13 +41,6 @@
 	return 0;
 }
 
-static struct i2c_device_id ddc_idtable[] = {
-	{"s5p_ddc", 0},
-	{"exynos5-hdmiddc", 0},
-	{ },
-};
-
-#ifdef CONFIG_OF
 static struct of_device_id hdmiddc_match_types[] = {
 	{
 		.compatible = "samsung,exynos5-hdmiddc",
@@ -57,15 +50,13 @@
 		/* end node */
 	}
 };
-#endif
 
 struct i2c_driver ddc_driver = {
 	.driver = {
 		.name = "exynos-hdmiddc",
 		.owner = THIS_MODULE,
-		.of_match_table = of_match_ptr(hdmiddc_match_types),
+		.of_match_table = hdmiddc_match_types,
 	},
-	.id_table	= ddc_idtable,
 	.probe		= s5p_ddc_probe,
 	.remove		= s5p_ddc_remove,
 	.command		= NULL,
diff --git a/drivers/gpu/drm/exynos/exynos_drm_buf.c b/drivers/gpu/drm/exynos/exynos_drm_buf.c
index b8ac06d..3445a0f 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_buf.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_buf.c
@@ -149,10 +149,8 @@
 	DRM_DEBUG_KMS("desired size = 0x%x\n", size);
 
 	buffer = kzalloc(sizeof(*buffer), GFP_KERNEL);
-	if (!buffer) {
-		DRM_ERROR("failed to allocate exynos_drm_gem_buf.\n");
+	if (!buffer)
 		return NULL;
-	}
 
 	buffer->size = size;
 	return buffer;
@@ -161,11 +159,6 @@
 void exynos_drm_fini_buf(struct drm_device *dev,
 				struct exynos_drm_gem_buf *buffer)
 {
-	if (!buffer) {
-		DRM_DEBUG_KMS("buffer is null.\n");
-		return;
-	}
-
 	kfree(buffer);
 	buffer = NULL;
 }
diff --git a/drivers/gpu/drm/exynos/exynos_drm_connector.c b/drivers/gpu/drm/exynos/exynos_drm_connector.c
index 02a8bc5..e082efb 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_connector.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_connector.c
@@ -17,6 +17,7 @@
 #include <drm/exynos_drm.h>
 #include "exynos_drm_drv.h"
 #include "exynos_drm_encoder.h"
+#include "exynos_drm_connector.h"
 
 #define to_exynos_connector(x)	container_of(x, struct exynos_drm_connector,\
 				drm_connector)
@@ -28,35 +29,6 @@
 	uint32_t		dpms;
 };
 
-/* convert exynos_video_timings to drm_display_mode */
-static inline void
-convert_to_display_mode(struct drm_display_mode *mode,
-			struct exynos_drm_panel_info *panel)
-{
-	struct fb_videomode *timing = &panel->timing;
-
-	mode->clock = timing->pixclock / 1000;
-	mode->vrefresh = timing->refresh;
-
-	mode->hdisplay = timing->xres;
-	mode->hsync_start = mode->hdisplay + timing->right_margin;
-	mode->hsync_end = mode->hsync_start + timing->hsync_len;
-	mode->htotal = mode->hsync_end + timing->left_margin;
-
-	mode->vdisplay = timing->yres;
-	mode->vsync_start = mode->vdisplay + timing->lower_margin;
-	mode->vsync_end = mode->vsync_start + timing->vsync_len;
-	mode->vtotal = mode->vsync_end + timing->upper_margin;
-	mode->width_mm = panel->width_mm;
-	mode->height_mm = panel->height_mm;
-
-	if (timing->vmode & FB_VMODE_INTERLACED)
-		mode->flags |= DRM_MODE_FLAG_INTERLACE;
-
-	if (timing->vmode & FB_VMODE_DOUBLE)
-		mode->flags |= DRM_MODE_FLAG_DBLSCAN;
-}
-
 static int exynos_drm_connector_get_modes(struct drm_connector *connector)
 {
 	struct exynos_drm_connector *exynos_connector =
@@ -111,7 +83,9 @@
 			return 0;
 		}
 
-		convert_to_display_mode(mode, panel);
+		drm_display_mode_from_videomode(&panel->vm, mode);
+		mode->width_mm = panel->width_mm;
+		mode->height_mm = panel->height_mm;
 		connector->display_info.width_mm = mode->width_mm;
 		connector->display_info.height_mm = mode->height_mm;
 
@@ -278,10 +252,8 @@
 	int err;
 
 	exynos_connector = kzalloc(sizeof(*exynos_connector), GFP_KERNEL);
-	if (!exynos_connector) {
-		DRM_ERROR("failed to allocate connector\n");
+	if (!exynos_connector)
 		return NULL;
-	}
 
 	connector = &exynos_connector->drm_connector;
 
diff --git a/drivers/gpu/drm/exynos/exynos_drm_crtc.c b/drivers/gpu/drm/exynos/exynos_drm_crtc.c
index 14f5c1d..ebc0150 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_crtc.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_crtc.c
@@ -15,6 +15,7 @@
 #include <drm/drmP.h>
 #include <drm/drm_crtc_helper.h>
 
+#include "exynos_drm_crtc.h"
 #include "exynos_drm_drv.h"
 #include "exynos_drm_encoder.h"
 #include "exynos_drm_plane.h"
@@ -324,10 +325,8 @@
 	struct drm_crtc *crtc;
 
 	exynos_crtc = kzalloc(sizeof(*exynos_crtc), GFP_KERNEL);
-	if (!exynos_crtc) {
-		DRM_ERROR("failed to allocate exynos crtc\n");
+	if (!exynos_crtc)
 		return -ENOMEM;
-	}
 
 	exynos_crtc->pipe = nr;
 	exynos_crtc->dpms = DRM_MODE_DPMS_OFF;
diff --git a/drivers/gpu/drm/exynos/exynos_drm_dmabuf.c b/drivers/gpu/drm/exynos/exynos_drm_dmabuf.c
index fd76449..59827cc 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_dmabuf.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_dmabuf.c
@@ -11,6 +11,7 @@
 
 #include <drm/drmP.h>
 #include <drm/exynos_drm.h>
+#include "exynos_drm_dmabuf.h"
 #include "exynos_drm_drv.h"
 #include "exynos_drm_gem.h"
 
@@ -230,7 +231,6 @@
 
 	buffer = kzalloc(sizeof(*buffer), GFP_KERNEL);
 	if (!buffer) {
-		DRM_ERROR("failed to allocate exynos_drm_gem_buf.\n");
 		ret = -ENOMEM;
 		goto err_unmap_attach;
 	}
diff --git a/drivers/gpu/drm/exynos/exynos_drm_drv.c b/drivers/gpu/drm/exynos/exynos_drm_drv.c
index df81d3c..bb82ef7 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_drv.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_drv.c
@@ -47,10 +47,8 @@
 	int nr;
 
 	private = kzalloc(sizeof(struct exynos_drm_private), GFP_KERNEL);
-	if (!private) {
-		DRM_ERROR("failed to allocate private\n");
+	if (!private)
 		return -ENOMEM;
-	}
 
 	INIT_LIST_HEAD(&private->pageflip_event_list);
 	dev->dev_private = (void *)private;
diff --git a/drivers/gpu/drm/exynos/exynos_drm_encoder.c b/drivers/gpu/drm/exynos/exynos_drm_encoder.c
index a99a033..06f1b2a 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_encoder.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_encoder.c
@@ -324,10 +324,8 @@
 		return NULL;
 
 	exynos_encoder = kzalloc(sizeof(*exynos_encoder), GFP_KERNEL);
-	if (!exynos_encoder) {
-		DRM_ERROR("failed to allocate encoder\n");
+	if (!exynos_encoder)
 		return NULL;
-	}
 
 	exynos_encoder->dpms = DRM_MODE_DPMS_OFF;
 	exynos_encoder->manager = manager;
diff --git a/drivers/gpu/drm/exynos/exynos_drm_fb.c b/drivers/gpu/drm/exynos/exynos_drm_fb.c
index c2d149f..ea39e0e 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_fb.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_fb.c
@@ -156,10 +156,8 @@
 	}
 
 	exynos_fb = kzalloc(sizeof(*exynos_fb), GFP_KERNEL);
-	if (!exynos_fb) {
-		DRM_ERROR("failed to allocate exynos drm framebuffer\n");
+	if (!exynos_fb)
 		return ERR_PTR(-ENOMEM);
-	}
 
 	drm_helper_mode_fill_fb_struct(&exynos_fb->fb, mode_cmd);
 	exynos_fb->exynos_gem_obj[0] = exynos_gem_obj;
@@ -220,10 +218,8 @@
 	int i, ret;
 
 	exynos_fb = kzalloc(sizeof(*exynos_fb), GFP_KERNEL);
-	if (!exynos_fb) {
-		DRM_ERROR("failed to allocate exynos drm framebuffer\n");
+	if (!exynos_fb)
 		return ERR_PTR(-ENOMEM);
-	}
 
 	obj = drm_gem_object_lookup(dev, file_priv, mode_cmd->handles[0]);
 	if (!obj) {
diff --git a/drivers/gpu/drm/exynos/exynos_drm_fbdev.c b/drivers/gpu/drm/exynos/exynos_drm_fbdev.c
index 8e60bd6..78e868b 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_fbdev.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_fbdev.c
@@ -16,9 +16,11 @@
 #include <drm/drm_crtc.h>
 #include <drm/drm_fb_helper.h>
 #include <drm/drm_crtc_helper.h>
+#include <drm/exynos_drm.h>
 
 #include "exynos_drm_drv.h"
 #include "exynos_drm_fb.h"
+#include "exynos_drm_fbdev.h"
 #include "exynos_drm_gem.h"
 #include "exynos_drm_iommu.h"
 
@@ -165,8 +167,18 @@
 
 	size = mode_cmd.pitches[0] * mode_cmd.height;
 
-	/* 0 means to allocate physically continuous memory */
-	exynos_gem_obj = exynos_drm_gem_create(dev, 0, size);
+	exynos_gem_obj = exynos_drm_gem_create(dev, EXYNOS_BO_CONTIG, size);
+	/*
+	 * If physically contiguous memory allocation fails and IOMMU is
+	 * supported, fall back to allocating the buffer from non-contiguous
+	 * memory.
+	 */
+	if (IS_ERR(exynos_gem_obj) && is_drm_iommu_supported(dev)) {
+		dev_warn(&pdev->dev, "contiguous FB allocation failed, falling back to non-contiguous\n");
+		exynos_gem_obj = exynos_drm_gem_create(dev, EXYNOS_BO_NONCONTIG,
+							size);
+	}
+
 	if (IS_ERR(exynos_gem_obj)) {
 		ret = PTR_ERR(exynos_gem_obj);
 		goto err_release_framebuffer;
@@ -236,10 +248,8 @@
 		return 0;
 
 	fbdev = kzalloc(sizeof(*fbdev), GFP_KERNEL);
-	if (!fbdev) {
-		DRM_ERROR("failed to allocate drm fbdev.\n");
+	if (!fbdev)
 		return -ENOMEM;
-	}
 
 	private->fb_helper = helper = &fbdev->drm_fb_helper;
 	helper->funcs = &exynos_drm_fb_helper_funcs;
diff --git a/drivers/gpu/drm/exynos/exynos_drm_fimc.c b/drivers/gpu/drm/exynos/exynos_drm_fimc.c
index 6e047bd..8adfc8f 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_fimc.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_fimc.c
@@ -17,10 +17,12 @@
 #include <linux/regmap.h>
 #include <linux/clk.h>
 #include <linux/pm_runtime.h>
+#include <linux/of.h>
 
 #include <drm/drmP.h>
 #include <drm/exynos_drm.h>
 #include "regs-fimc.h"
+#include "exynos_drm_drv.h"
 #include "exynos_drm_ipp.h"
 #include "exynos_drm_fimc.h"
 
@@ -1343,10 +1345,8 @@
 	struct drm_exynos_ipp_prop_list *prop_list;
 
 	prop_list = devm_kzalloc(ippdrv->dev, sizeof(*prop_list), GFP_KERNEL);
-	if (!prop_list) {
-		DRM_ERROR("failed to alloc property list.\n");
+	if (!prop_list)
 		return -ENOMEM;
-	}
 
 	prop_list->version = 1;
 	prop_list->writeback = 1;
diff --git a/drivers/gpu/drm/exynos/exynos_drm_fimd.c b/drivers/gpu/drm/exynos/exynos_drm_fimd.c
index 1c263da..868a14d 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_fimd.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_fimd.c
@@ -16,10 +16,12 @@
 #include <linux/kernel.h>
 #include <linux/platform_device.h>
 #include <linux/clk.h>
+#include <linux/of.h>
 #include <linux/of_device.h>
 #include <linux/pm_runtime.h>
 
 #include <video/of_display_timing.h>
+#include <video/of_videomode.h>
 #include <video/samsung_fimd.h>
 #include <drm/exynos_drm.h>
 
@@ -35,6 +37,8 @@
  * CPU Interface.
  */
 
+#define FIMD_DEFAULT_FRAMERATE 60
+
 /* position control register for hardware window 0, 2 ~ 4.*/
 #define VIDOSD_A(win)		(VIDOSD_BASE + 0x00 + (win) * 16)
 #define VIDOSD_B(win)		(VIDOSD_BASE + 0x04 + (win) * 16)
@@ -65,11 +69,13 @@
 
 	unsigned int has_shadowcon:1;
 	unsigned int has_clksel:1;
+	unsigned int has_limited_fmt:1;
 };
 
 static struct fimd_driver_data s3c64xx_fimd_driver_data = {
 	.timing_base = 0x0,
 	.has_clksel = 1,
+	.has_limited_fmt = 1,
 };
 
 static struct fimd_driver_data exynos4_fimd_driver_data = {
@@ -90,6 +96,7 @@
 	unsigned int		fb_width;
 	unsigned int		fb_height;
 	unsigned int		bpp;
+	unsigned int		pixel_format;
 	dma_addr_t		dma_addr;
 	unsigned int		buf_offsize;
 	unsigned int		line_size;	/* bytes */
@@ -115,11 +122,10 @@
 	wait_queue_head_t		wait_vsync_queue;
 	atomic_t			wait_vsync_event;
 
-	struct exynos_drm_panel_info *panel;
+	struct exynos_drm_panel_info panel;
 	struct fimd_driver_data *driver_data;
 };
 
-#ifdef CONFIG_OF
 static const struct of_device_id fimd_driver_dt_match[] = {
 	{ .compatible = "samsung,s3c6400-fimd",
 	  .data = &s3c64xx_fimd_driver_data },
@@ -129,21 +135,14 @@
 	  .data = &exynos5_fimd_driver_data },
 	{},
 };
-#endif
 
 static inline struct fimd_driver_data *drm_fimd_get_driver_data(
 	struct platform_device *pdev)
 {
-#ifdef CONFIG_OF
 	const struct of_device_id *of_id =
 			of_match_device(fimd_driver_dt_match, &pdev->dev);
 
-	if (of_id)
-		return (struct fimd_driver_data *)of_id->data;
-#endif
-
-	return (struct fimd_driver_data *)
-		platform_get_device_id(pdev)->driver_data;
+	return (struct fimd_driver_data *)of_id->data;
 }
 
 static bool fimd_display_is_connected(struct device *dev)
@@ -157,7 +156,7 @@
 {
 	struct fimd_context *ctx = get_fimd_context(dev);
 
-	return ctx->panel;
+	return &ctx->panel;
 }
 
 static int fimd_check_mode(struct device *dev, struct drm_display_mode *mode)
@@ -237,8 +236,8 @@
 static void fimd_commit(struct device *dev)
 {
 	struct fimd_context *ctx = get_fimd_context(dev);
-	struct exynos_drm_panel_info *panel = ctx->panel;
-	struct fb_videomode *timing = &panel->timing;
+	struct exynos_drm_panel_info *panel = &ctx->panel;
+	struct videomode *vm = &panel->vm;
 	struct fimd_driver_data *driver_data;
 	u32 val;
 
@@ -250,22 +249,22 @@
 	writel(ctx->vidcon1, ctx->regs + driver_data->timing_base + VIDCON1);
 
 	/* setup vertical timing values. */
-	val = VIDTCON0_VBPD(timing->upper_margin - 1) |
-	       VIDTCON0_VFPD(timing->lower_margin - 1) |
-	       VIDTCON0_VSPW(timing->vsync_len - 1);
+	val = VIDTCON0_VBPD(vm->vback_porch - 1) |
+	       VIDTCON0_VFPD(vm->vfront_porch - 1) |
+	       VIDTCON0_VSPW(vm->vsync_len - 1);
 	writel(val, ctx->regs + driver_data->timing_base + VIDTCON0);
 
 	/* setup horizontal timing values.  */
-	val = VIDTCON1_HBPD(timing->left_margin - 1) |
-	       VIDTCON1_HFPD(timing->right_margin - 1) |
-	       VIDTCON1_HSPW(timing->hsync_len - 1);
+	val = VIDTCON1_HBPD(vm->hback_porch - 1) |
+	       VIDTCON1_HFPD(vm->hfront_porch - 1) |
+	       VIDTCON1_HSPW(vm->hsync_len - 1);
 	writel(val, ctx->regs + driver_data->timing_base + VIDTCON1);
 
 	/* setup horizontal and vertical display size. */
-	val = VIDTCON2_LINEVAL(timing->yres - 1) |
-	       VIDTCON2_HOZVAL(timing->xres - 1) |
-	       VIDTCON2_LINEVAL_E(timing->yres - 1) |
-	       VIDTCON2_HOZVAL_E(timing->xres - 1);
+	val = VIDTCON2_LINEVAL(vm->vactive - 1) |
+	       VIDTCON2_HOZVAL(vm->hactive - 1) |
+	       VIDTCON2_LINEVAL_E(vm->vactive - 1) |
+	       VIDTCON2_HOZVAL_E(vm->hactive - 1);
 	writel(val, ctx->regs + driver_data->timing_base + VIDTCON2);
 
 	/* setup clock source, clock divider, enable dma. */
@@ -396,6 +395,7 @@
 	win_data->fb_height = overlay->fb_height;
 	win_data->dma_addr = overlay->dma_addr[0] + offset;
 	win_data->bpp = overlay->bpp;
+	win_data->pixel_format = overlay->pixel_format;
 	win_data->buf_offsize = (overlay->fb_width - overlay->crtc_width) *
 				(overlay->bpp >> 3);
 	win_data->line_size = overlay->crtc_width * (overlay->bpp >> 3);
@@ -417,39 +417,38 @@
 
 	val = WINCONx_ENWIN;
 
-	switch (win_data->bpp) {
-	case 1:
-		val |= WINCON0_BPPMODE_1BPP;
-		val |= WINCONx_BITSWP;
-		val |= WINCONx_BURSTLEN_4WORD;
-		break;
-	case 2:
-		val |= WINCON0_BPPMODE_2BPP;
-		val |= WINCONx_BITSWP;
-		val |= WINCONx_BURSTLEN_8WORD;
-		break;
-	case 4:
-		val |= WINCON0_BPPMODE_4BPP;
-		val |= WINCONx_BITSWP;
-		val |= WINCONx_BURSTLEN_8WORD;
-		break;
-	case 8:
+	/*
+	 * On s3c64xx, window 0 doesn't support an alpha channel.
+	 * So if the requested format is ARGB8888, change it to XRGB8888.
+	 */
+	if (ctx->driver_data->has_limited_fmt && !win) {
+		if (win_data->pixel_format == DRM_FORMAT_ARGB8888)
+			win_data->pixel_format = DRM_FORMAT_XRGB8888;
+	}
+
+	switch (win_data->pixel_format) {
+	case DRM_FORMAT_C8:
 		val |= WINCON0_BPPMODE_8BPP_PALETTE;
 		val |= WINCONx_BURSTLEN_8WORD;
 		val |= WINCONx_BYTSWP;
 		break;
-	case 16:
+	case DRM_FORMAT_XRGB1555:
+		val |= WINCON0_BPPMODE_16BPP_1555;
+		val |= WINCONx_HAWSWP;
+		val |= WINCONx_BURSTLEN_16WORD;
+		break;
+	case DRM_FORMAT_RGB565:
 		val |= WINCON0_BPPMODE_16BPP_565;
 		val |= WINCONx_HAWSWP;
 		val |= WINCONx_BURSTLEN_16WORD;
 		break;
-	case 24:
+	case DRM_FORMAT_XRGB8888:
 		val |= WINCON0_BPPMODE_24BPP_888;
 		val |= WINCONx_WSWP;
 		val |= WINCONx_BURSTLEN_16WORD;
 		break;
-	case 32:
-		val |= WINCON1_BPPMODE_28BPP_A4888
+	case DRM_FORMAT_ARGB8888:
+		val |= WINCON1_BPPMODE_25BPP_A1888
 			| WINCON1_BLD_PIX | WINCON1_ALPHA_SEL;
 		val |= WINCONx_WSWP;
 		val |= WINCONx_BURSTLEN_16WORD;
@@ -746,45 +745,54 @@
 		drm_iommu_detach_device(drm_dev, dev);
 }
 
-static int fimd_calc_clkdiv(struct fimd_context *ctx,
-			    struct fb_videomode *timing)
+static int fimd_configure_clocks(struct fimd_context *ctx, struct device *dev)
 {
-	unsigned long clk = clk_get_rate(ctx->lcd_clk);
-	u32 retrace;
-	u32 clkdiv;
-	u32 best_framerate = 0;
-	u32 framerate;
+	struct videomode *vm = &ctx->panel.vm;
+	unsigned long clk;
 
-	retrace = timing->left_margin + timing->hsync_len +
-				timing->right_margin + timing->xres;
-	retrace *= timing->upper_margin + timing->vsync_len +
-				timing->lower_margin + timing->yres;
-
-	/* default framerate is 60Hz */
-	if (!timing->refresh)
-		timing->refresh = 60;
-
-	clk /= retrace;
-
-	for (clkdiv = 1; clkdiv < 0x100; clkdiv++) {
-		int tmp;
-
-		/* get best framerate */
-		framerate = clk / clkdiv;
-		tmp = timing->refresh - framerate;
-		if (tmp < 0) {
-			best_framerate = framerate;
-			continue;
-		} else {
-			if (!best_framerate)
-				best_framerate = framerate;
-			else if (tmp < (best_framerate - framerate))
-				best_framerate = framerate;
-			break;
-		}
+	ctx->bus_clk = devm_clk_get(dev, "fimd");
+	if (IS_ERR(ctx->bus_clk)) {
+		dev_err(dev, "failed to get bus clock\n");
+		return PTR_ERR(ctx->bus_clk);
 	}
 
-	return clkdiv;
+	ctx->lcd_clk = devm_clk_get(dev, "sclk_fimd");
+	if (IS_ERR(ctx->lcd_clk)) {
+		dev_err(dev, "failed to get lcd clock\n");
+		return PTR_ERR(ctx->lcd_clk);
+	}
+
+	clk = clk_get_rate(ctx->lcd_clk);
+	if (clk == 0) {
+		dev_err(dev, "error getting sclk_fimd clock rate\n");
+		return -EINVAL;
+	}
+
+	if (vm->pixelclock == 0) {
+		unsigned long c;
+		c = vm->hactive + vm->hback_porch + vm->hfront_porch +
+		    vm->hsync_len;
+		c *= vm->vactive + vm->vback_porch + vm->vfront_porch +
+		     vm->vsync_len;
+		vm->pixelclock = c * FIMD_DEFAULT_FRAMERATE;
+		if (vm->pixelclock == 0) {
+			dev_err(dev, "incorrect display timings\n");
+			return -EINVAL;
+		}
+		dev_warn(dev, "pixel clock recalculated to %luHz (%dHz frame rate)\n",
+			 vm->pixelclock, FIMD_DEFAULT_FRAMERATE);
+	}
+	ctx->clkdiv = DIV_ROUND_UP(clk, vm->pixelclock);
+	if (ctx->clkdiv > 256) {
+		dev_warn(dev, "calculated pixel clock divider too high (%u), lowered to 256\n",
+			 ctx->clkdiv);
+		ctx->clkdiv = 256;
+	}
+	vm->pixelclock = clk / ctx->clkdiv;
+	DRM_DEBUG_KMS("pixel clock = %lu, clkdiv = %d\n", vm->pixelclock,
+		      ctx->clkdiv);
+
+	return 0;
 }
 
 static void fimd_clear_win(struct fimd_context *ctx, int win)
@@ -876,59 +884,53 @@
 	return 0;
 }
 
+static int fimd_get_platform_data(struct fimd_context *ctx, struct device *dev)
+{
+	struct videomode *vm;
+	int ret;
+
+	vm = &ctx->panel.vm;
+	ret = of_get_videomode(dev->of_node, vm, OF_USE_NATIVE_MODE);
+	if (ret) {
+		DRM_ERROR("failed: of_get_videomode() : %d\n", ret);
+		return ret;
+	}
+
+	if (vm->flags & DISPLAY_FLAGS_VSYNC_LOW)
+		ctx->vidcon1 |= VIDCON1_INV_VSYNC;
+	if (vm->flags & DISPLAY_FLAGS_HSYNC_LOW)
+		ctx->vidcon1 |= VIDCON1_INV_HSYNC;
+	if (vm->flags & DISPLAY_FLAGS_DE_LOW)
+		ctx->vidcon1 |= VIDCON1_INV_VDEN;
+	if (vm->flags & DISPLAY_FLAGS_PIXDATA_NEGEDGE)
+		ctx->vidcon1 |= VIDCON1_INV_VCLK;
+
+	return 0;
+}
+
 static int fimd_probe(struct platform_device *pdev)
 {
 	struct device *dev = &pdev->dev;
 	struct fimd_context *ctx;
 	struct exynos_drm_subdrv *subdrv;
-	struct exynos_drm_fimd_pdata *pdata;
-	struct exynos_drm_panel_info *panel;
 	struct resource *res;
 	int win;
 	int ret = -EINVAL;
 
-	if (dev->of_node) {
-		pdata = devm_kzalloc(dev, sizeof(*pdata), GFP_KERNEL);
-		if (!pdata) {
-			DRM_ERROR("memory allocation for pdata failed\n");
-			return -ENOMEM;
-		}
-
-		ret = of_get_fb_videomode(dev->of_node, &pdata->panel.timing,
-					OF_USE_NATIVE_MODE);
-		if (ret) {
-			DRM_ERROR("failed: of_get_fb_videomode() : %d\n", ret);
-			return ret;
-		}
-	} else {
-		pdata = dev->platform_data;
-		if (!pdata) {
-			DRM_ERROR("no platform data specified\n");
-			return -EINVAL;
-		}
-	}
-
-	panel = &pdata->panel;
-	if (!panel) {
-		dev_err(dev, "panel is null.\n");
-		return -EINVAL;
-	}
+	if (!dev->of_node)
+		return -ENODEV;
 
 	ctx = devm_kzalloc(dev, sizeof(*ctx), GFP_KERNEL);
 	if (!ctx)
 		return -ENOMEM;
 
-	ctx->bus_clk = devm_clk_get(dev, "fimd");
-	if (IS_ERR(ctx->bus_clk)) {
-		dev_err(dev, "failed to get bus clock\n");
-		return PTR_ERR(ctx->bus_clk);
-	}
+	ret = fimd_get_platform_data(ctx, dev);
+	if (ret)
+		return ret;
 
-	ctx->lcd_clk = devm_clk_get(dev, "sclk_fimd");
-	if (IS_ERR(ctx->lcd_clk)) {
-		dev_err(dev, "failed to get lcd clock\n");
-		return PTR_ERR(ctx->lcd_clk);
-	}
+	ret = fimd_configure_clocks(ctx, dev);
+	if (ret)
+		return ret;
 
 	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
 
@@ -952,10 +954,6 @@
 	}
 
 	ctx->driver_data = drm_fimd_get_driver_data(pdev);
-	ctx->vidcon0 = pdata->vidcon0;
-	ctx->vidcon1 = pdata->vidcon1;
-	ctx->default_win = pdata->default_win;
-	ctx->panel = panel;
 	DRM_INIT_WAITQUEUE(&ctx->wait_vsync_queue);
 	atomic_set(&ctx->wait_vsync_event, 0);
 
@@ -973,12 +971,6 @@
 	pm_runtime_enable(dev);
 	pm_runtime_get_sync(dev);
 
-	ctx->clkdiv = fimd_calc_clkdiv(ctx, &panel->timing);
-	panel->timing.pixclock = clk_get_rate(ctx->lcd_clk) / ctx->clkdiv;
-
-	DRM_DEBUG_KMS("pixel clock = %d, clkdiv = %d\n",
-			panel->timing.pixclock, ctx->clkdiv);
-
 	for (win = 0; win < WINDOWS_NR; win++)
 		fimd_clear_win(ctx, win);
 
@@ -1067,20 +1059,6 @@
 }
 #endif
 
-static struct platform_device_id fimd_driver_ids[] = {
-	{
-		.name		= "s3c64xx-fb",
-		.driver_data	= (unsigned long)&s3c64xx_fimd_driver_data,
-	}, {
-		.name		= "exynos4-fb",
-		.driver_data	= (unsigned long)&exynos4_fimd_driver_data,
-	}, {
-		.name		= "exynos5-fb",
-		.driver_data	= (unsigned long)&exynos5_fimd_driver_data,
-	},
-	{},
-};
-
 static const struct dev_pm_ops fimd_pm_ops = {
 	SET_SYSTEM_SLEEP_PM_OPS(fimd_suspend, fimd_resume)
 	SET_RUNTIME_PM_OPS(fimd_runtime_suspend, fimd_runtime_resume, NULL)
@@ -1089,11 +1067,10 @@
 struct platform_driver fimd_driver = {
 	.probe		= fimd_probe,
 	.remove		= fimd_remove,
-	.id_table       = fimd_driver_ids,
 	.driver		= {
 		.name	= "exynos4-fb",
 		.owner	= THIS_MODULE,
 		.pm	= &fimd_pm_ops,
-		.of_match_table = of_match_ptr(fimd_driver_dt_match),
+		.of_match_table = fimd_driver_dt_match,
 	},
 };
diff --git a/drivers/gpu/drm/exynos/exynos_drm_g2d.c b/drivers/gpu/drm/exynos/exynos_drm_g2d.c
index eddea49..3271fd4 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_g2d.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_g2d.c
@@ -23,6 +23,7 @@
 #include <drm/drmP.h>
 #include <drm/exynos_drm.h>
 #include "exynos_drm_drv.h"
+#include "exynos_drm_g2d.h"
 #include "exynos_drm_gem.h"
 #include "exynos_drm_iommu.h"
 
@@ -446,10 +447,8 @@
 	}
 
 	g2d_userptr = kzalloc(sizeof(*g2d_userptr), GFP_KERNEL);
-	if (!g2d_userptr) {
-		DRM_ERROR("failed to allocate g2d_userptr.\n");
+	if (!g2d_userptr)
 		return ERR_PTR(-ENOMEM);
-	}
 
 	atomic_set(&g2d_userptr->refcount, 1);
 
@@ -499,7 +498,6 @@
 
 	sgt = kzalloc(sizeof(*sgt), GFP_KERNEL);
 	if (!sgt) {
-		DRM_ERROR("failed to allocate sg table.\n");
 		ret = -ENOMEM;
 		goto err_free_userptr;
 	}
@@ -808,17 +806,8 @@
 	int ret;
 
 	ret = pm_runtime_get_sync(g2d->dev);
-	if (ret < 0) {
-		dev_warn(g2d->dev, "failed pm power on.\n");
+	if (ret < 0)
 		return;
-	}
-
-	ret = clk_prepare_enable(g2d->gate_clk);
-	if (ret < 0) {
-		dev_warn(g2d->dev, "failed to enable clock.\n");
-		pm_runtime_put_sync(g2d->dev);
-		return;
-	}
 
 	writel_relaxed(node->dma_addr, g2d->regs + G2D_DMA_SFR_BASE_ADDR);
 	writel_relaxed(G2D_DMA_START, g2d->regs + G2D_DMA_COMMAND);
@@ -871,7 +860,6 @@
 					    runqueue_work);
 
 	mutex_lock(&g2d->runqueue_mutex);
-	clk_disable_unprepare(g2d->gate_clk);
 	pm_runtime_put_sync(g2d->dev);
 
 	complete(&g2d->runqueue_node->complete);
@@ -1096,8 +1084,6 @@
 
 		e = kzalloc(sizeof(*node->event), GFP_KERNEL);
 		if (!e) {
-			dev_err(dev, "failed to allocate event\n");
-
 			spin_lock_irqsave(&drm_dev->event_lock, flags);
 			file->event_space += sizeof(e->event);
 			spin_unlock_irqrestore(&drm_dev->event_lock, flags);
@@ -1327,10 +1313,8 @@
 	struct exynos_drm_g2d_private *g2d_priv;
 
 	g2d_priv = kzalloc(sizeof(*g2d_priv), GFP_KERNEL);
-	if (!g2d_priv) {
-		dev_err(dev, "failed to allocate g2d private data\n");
+	if (!g2d_priv)
 		return -ENOMEM;
-	}
 
 	g2d_priv->dev = dev;
 	file_priv->g2d_priv = g2d_priv;
@@ -1386,10 +1370,8 @@
 	int ret;
 
 	g2d = devm_kzalloc(dev, sizeof(*g2d), GFP_KERNEL);
-	if (!g2d) {
-		dev_err(dev, "failed to allocate driver data\n");
+	if (!g2d)
 		return -ENOMEM;
-	}
 
 	g2d->runqueue_slab = kmem_cache_create("g2d_runqueue_slab",
 			sizeof(struct g2d_runqueue_node), 0, 0, NULL);
@@ -1524,14 +1506,38 @@
 }
 #endif
 
-static SIMPLE_DEV_PM_OPS(g2d_pm_ops, g2d_suspend, g2d_resume);
+#ifdef CONFIG_PM_RUNTIME
+static int g2d_runtime_suspend(struct device *dev)
+{
+	struct g2d_data *g2d = dev_get_drvdata(dev);
 
-#ifdef CONFIG_OF
+	clk_disable_unprepare(g2d->gate_clk);
+
+	return 0;
+}
+
+static int g2d_runtime_resume(struct device *dev)
+{
+	struct g2d_data *g2d = dev_get_drvdata(dev);
+	int ret;
+
+	ret = clk_prepare_enable(g2d->gate_clk);
+	if (ret < 0)
+		dev_warn(dev, "failed to enable clock.\n");
+
+	return ret;
+}
+#endif
+
+static const struct dev_pm_ops g2d_pm_ops = {
+	SET_SYSTEM_SLEEP_PM_OPS(g2d_suspend, g2d_resume)
+	SET_RUNTIME_PM_OPS(g2d_runtime_suspend, g2d_runtime_resume, NULL)
+};
+
 static const struct of_device_id exynos_g2d_match[] = {
 	{ .compatible = "samsung,exynos5250-g2d" },
 	{},
 };
-#endif
 
 struct platform_driver g2d_driver = {
 	.probe		= g2d_probe,
@@ -1540,6 +1546,6 @@
 		.name	= "s5p-g2d",
 		.owner	= THIS_MODULE,
 		.pm	= &g2d_pm_ops,
-		.of_match_table = of_match_ptr(exynos_g2d_match),
+		.of_match_table = exynos_g2d_match,
 	},
 };
diff --git a/drivers/gpu/drm/exynos/exynos_drm_gem.c b/drivers/gpu/drm/exynos/exynos_drm_gem.c
index f3c6f40..49f9cd2 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_gem.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_gem.c
@@ -18,6 +18,7 @@
 #include "exynos_drm_drv.h"
 #include "exynos_drm_gem.h"
 #include "exynos_drm_buf.h"
+#include "exynos_drm_iommu.h"
 
 static unsigned int convert_to_vm_err_msg(int msg)
 {
@@ -191,10 +192,8 @@
 	int ret;
 
 	exynos_gem_obj = kzalloc(sizeof(*exynos_gem_obj), GFP_KERNEL);
-	if (!exynos_gem_obj) {
-		DRM_ERROR("failed to allocate exynos gem object\n");
+	if (!exynos_gem_obj)
 		return NULL;
-	}
 
 	exynos_gem_obj->size = size;
 	obj = &exynos_gem_obj->base;
@@ -668,6 +667,18 @@
 
 	exynos_gem_obj = exynos_drm_gem_create(dev, EXYNOS_BO_CONTIG |
 						EXYNOS_BO_WC, args->size);
+	/*
+	 * If physically contiguous memory allocation fails and IOMMU is
+	 * supported, fall back to allocating the buffer from non-contiguous
+	 * memory.
+	 */
+	if (IS_ERR(exynos_gem_obj) && is_drm_iommu_supported(dev)) {
+		dev_warn(dev->dev, "contiguous FB allocation failed, falling back to non-contiguous\n");
+		exynos_gem_obj = exynos_drm_gem_create(dev,
+					EXYNOS_BO_NONCONTIG | EXYNOS_BO_WC,
+					args->size);
+	}
+
 	if (IS_ERR(exynos_gem_obj))
 		return PTR_ERR(exynos_gem_obj);
 
diff --git a/drivers/gpu/drm/exynos/exynos_drm_gsc.c b/drivers/gpu/drm/exynos/exynos_drm_gsc.c
index 90b8a1a..cd6aebd 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_gsc.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_gsc.c
@@ -20,6 +20,7 @@
 #include <drm/drmP.h>
 #include <drm/exynos_drm.h>
 #include "regs-gsc.h"
+#include "exynos_drm_drv.h"
 #include "exynos_drm_ipp.h"
 #include "exynos_drm_gsc.h"
 
@@ -1337,10 +1338,8 @@
 	struct drm_exynos_ipp_prop_list *prop_list;
 
 	prop_list = devm_kzalloc(ippdrv->dev, sizeof(*prop_list), GFP_KERNEL);
-	if (!prop_list) {
-		DRM_ERROR("failed to alloc property list.\n");
+	if (!prop_list)
 		return -ENOMEM;
-	}
 
 	prop_list->version = 1;
 	prop_list->writeback = 1;
diff --git a/drivers/gpu/drm/exynos/exynos_drm_hdmi.c b/drivers/gpu/drm/exynos/exynos_drm_hdmi.c
index 8d3bc01..8548b97 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_hdmi.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_hdmi.c
@@ -403,10 +403,8 @@
 	struct drm_hdmi_context *ctx;
 
 	ctx = devm_kzalloc(dev, sizeof(*ctx), GFP_KERNEL);
-	if (!ctx) {
-		DRM_LOG_KMS("failed to alloc common hdmi context.\n");
+	if (!ctx)
 		return -ENOMEM;
-	}
 
 	subdrv = &ctx->subdrv;
 
diff --git a/drivers/gpu/drm/exynos/exynos_drm_iommu.c b/drivers/gpu/drm/exynos/exynos_drm_iommu.c
index 3799d5c..fb8db03 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_iommu.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_iommu.c
@@ -47,10 +47,16 @@
 
 	dev->dma_parms = devm_kzalloc(dev, sizeof(*dev->dma_parms),
 					GFP_KERNEL);
+	if (!dev->dma_parms)
+		goto error;
+
 	dma_set_max_seg_size(dev, 0xffffffffu);
 	dev->archdata.mapping = mapping;
 
 	return 0;
+error:
+	arm_iommu_release_mapping(mapping);
+	return -ENOMEM;
 }
 
 /*
@@ -91,6 +97,9 @@
 	subdrv_dev->dma_parms = devm_kzalloc(subdrv_dev,
 					sizeof(*subdrv_dev->dma_parms),
 					GFP_KERNEL);
+	if (!subdrv_dev->dma_parms)
+		return -ENOMEM;
+
 	dma_set_max_seg_size(subdrv_dev, 0xffffffffu);
 
 	ret = arm_iommu_attach_device(subdrv_dev, dev->archdata.mapping);
diff --git a/drivers/gpu/drm/exynos/exynos_drm_ipp.c b/drivers/gpu/drm/exynos/exynos_drm_ipp.c
index d2b6ab4..824e070 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_ipp.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_ipp.c
@@ -408,10 +408,8 @@
 	struct drm_exynos_ipp_cmd_work *cmd_work;
 
 	cmd_work = kzalloc(sizeof(*cmd_work), GFP_KERNEL);
-	if (!cmd_work) {
-		DRM_ERROR("failed to alloc cmd_work.\n");
+	if (!cmd_work)
 		return ERR_PTR(-ENOMEM);
-	}
 
 	INIT_WORK((struct work_struct *)cmd_work, ipp_sched_cmd);
 
@@ -423,10 +421,8 @@
 	struct drm_exynos_ipp_event_work *event_work;
 
 	event_work = kzalloc(sizeof(*event_work), GFP_KERNEL);
-	if (!event_work) {
-		DRM_ERROR("failed to alloc event_work.\n");
+	if (!event_work)
 		return ERR_PTR(-ENOMEM);
-	}
 
 	INIT_WORK((struct work_struct *)event_work, ipp_sched_event);
 
@@ -482,10 +478,8 @@
 
 	/* allocate command node */
 	c_node = kzalloc(sizeof(*c_node), GFP_KERNEL);
-	if (!c_node) {
-		DRM_ERROR("failed to allocate map node.\n");
+	if (!c_node)
 		return -ENOMEM;
-	}
 
 	/* create property id */
 	ret = ipp_create_id(&ctx->prop_idr, &ctx->prop_lock, c_node,
@@ -694,10 +688,8 @@
 	mutex_lock(&c_node->mem_lock);
 
 	m_node = kzalloc(sizeof(*m_node), GFP_KERNEL);
-	if (!m_node) {
-		DRM_ERROR("failed to allocate queue node.\n");
+	if (!m_node)
 		goto err_unlock;
-	}
 
 	/* clear base address for error handling */
 	memset(&buf_info, 0x0, sizeof(buf_info));
@@ -798,9 +790,7 @@
 	DRM_DEBUG_KMS("ops_id[%d]buf_id[%d]\n", qbuf->ops_id, qbuf->buf_id);
 
 	e = kzalloc(sizeof(*e), GFP_KERNEL);
-
 	if (!e) {
-		DRM_ERROR("failed to allocate event.\n");
 		spin_lock_irqsave(&drm_dev->event_lock, flags);
 		file->event_space += sizeof(e->event);
 		spin_unlock_irqrestore(&drm_dev->event_lock, flags);
@@ -1780,10 +1770,8 @@
 	struct exynos_drm_ipp_private *priv;
 
 	priv = kzalloc(sizeof(*priv), GFP_KERNEL);
-	if (!priv) {
-		DRM_ERROR("failed to allocate priv.\n");
+	if (!priv)
 		return -ENOMEM;
-	}
 	priv->dev = dev;
 	file_priv->ipp_priv = priv;
 
diff --git a/drivers/gpu/drm/exynos/exynos_drm_plane.c b/drivers/gpu/drm/exynos/exynos_drm_plane.c
index 6ee55e6..fcb0652 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_plane.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_plane.c
@@ -16,6 +16,7 @@
 #include "exynos_drm_encoder.h"
 #include "exynos_drm_fb.h"
 #include "exynos_drm_gem.h"
+#include "exynos_drm_plane.h"
 
 #define to_exynos_plane(x)	container_of(x, struct exynos_plane, base)
 
@@ -264,10 +265,8 @@
 	int err;
 
 	exynos_plane = kzalloc(sizeof(struct exynos_plane), GFP_KERNEL);
-	if (!exynos_plane) {
-		DRM_ERROR("failed to allocate plane\n");
+	if (!exynos_plane)
 		return NULL;
-	}
 
 	err = drm_plane_init(dev, &exynos_plane->base, possible_crtcs,
 			      &exynos_plane_funcs, formats, ARRAY_SIZE(formats),
diff --git a/drivers/gpu/drm/exynos/exynos_drm_rotator.c b/drivers/gpu/drm/exynos/exynos_drm_rotator.c
index 49669aa..7b90168 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_rotator.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_rotator.c
@@ -21,6 +21,7 @@
 #include <drm/exynos_drm.h>
 #include "regs-rotator.h"
 #include "exynos_drm.h"
+#include "exynos_drm_drv.h"
 #include "exynos_drm_ipp.h"
 
 /*
@@ -471,10 +472,8 @@
 	struct drm_exynos_ipp_prop_list *prop_list;
 
 	prop_list = devm_kzalloc(ippdrv->dev, sizeof(*prop_list), GFP_KERNEL);
-	if (!prop_list) {
-		DRM_ERROR("failed to alloc property list.\n");
+	if (!prop_list)
 		return -ENOMEM;
-	}
 
 	prop_list->version = 1;
 	prop_list->flip = (1 << EXYNOS_DRM_FLIP_VERTICAL) |
@@ -631,21 +630,96 @@
 	return 0;
 }
 
+static struct rot_limit_table rot_limit_tbl_4210 = {
+	.ycbcr420_2p = {
+		.min_w = 32,
+		.min_h = 32,
+		.max_w = SZ_64K,
+		.max_h = SZ_64K,
+		.align = 3,
+	},
+	.rgb888 = {
+		.min_w = 8,
+		.min_h = 8,
+		.max_w = SZ_16K,
+		.max_h = SZ_16K,
+		.align = 2,
+	},
+};
+
+static struct rot_limit_table rot_limit_tbl_4x12 = {
+	.ycbcr420_2p = {
+		.min_w = 32,
+		.min_h = 32,
+		.max_w = SZ_32K,
+		.max_h = SZ_32K,
+		.align = 3,
+	},
+	.rgb888 = {
+		.min_w = 8,
+		.min_h = 8,
+		.max_w = SZ_8K,
+		.max_h = SZ_8K,
+		.align = 2,
+	},
+};
+
+static struct rot_limit_table rot_limit_tbl_5250 = {
+	.ycbcr420_2p = {
+		.min_w = 32,
+		.min_h = 32,
+		.max_w = SZ_32K,
+		.max_h = SZ_32K,
+		.align = 3,
+	},
+	.rgb888 = {
+		.min_w = 8,
+		.min_h = 8,
+		.max_w = SZ_8K,
+		.max_h = SZ_8K,
+		.align = 1,
+	},
+};
+
+static const struct of_device_id exynos_rotator_match[] = {
+	{
+		.compatible = "samsung,exynos4210-rotator",
+		.data = &rot_limit_tbl_4210,
+	},
+	{
+		.compatible = "samsung,exynos4212-rotator",
+		.data = &rot_limit_tbl_4x12,
+	},
+	{
+		.compatible = "samsung,exynos5250-rotator",
+		.data = &rot_limit_tbl_5250,
+	},
+	{},
+};
+
 static int rotator_probe(struct platform_device *pdev)
 {
 	struct device *dev = &pdev->dev;
 	struct rot_context *rot;
 	struct exynos_drm_ippdrv *ippdrv;
+	const struct of_device_id *match;
 	int ret;
 
-	rot = devm_kzalloc(dev, sizeof(*rot), GFP_KERNEL);
-	if (!rot) {
-		dev_err(dev, "failed to allocate rot\n");
-		return -ENOMEM;
+	if (!dev->of_node) {
+		dev_err(dev, "cannot find of_node.\n");
+		return -ENODEV;
 	}
 
-	rot->limit_tbl = (struct rot_limit_table *)
-				platform_get_device_id(pdev)->driver_data;
+	rot = devm_kzalloc(dev, sizeof(*rot), GFP_KERNEL);
+	if (!rot)
+		return -ENOMEM;
+
+	match = of_match_node(exynos_rotator_match, dev->of_node);
+	if (!match) {
+		dev_err(dev, "failed to match node\n");
+		return -ENODEV;
+	}
+	rot->limit_tbl = (struct rot_limit_table *)match->data;
 
 	rot->regs_res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
 	rot->regs = devm_ioremap_resource(dev, rot->regs_res);
@@ -717,31 +791,6 @@
 	return 0;
 }
 
-static struct rot_limit_table rot_limit_tbl = {
-	.ycbcr420_2p = {
-		.min_w = 32,
-		.min_h = 32,
-		.max_w = SZ_32K,
-		.max_h = SZ_32K,
-		.align = 3,
-	},
-	.rgb888 = {
-		.min_w = 8,
-		.min_h = 8,
-		.max_w = SZ_8K,
-		.max_h = SZ_8K,
-		.align = 2,
-	},
-};
-
-static struct platform_device_id rotator_driver_ids[] = {
-	{
-		.name		= "exynos-rot",
-		.driver_data	= (unsigned long)&rot_limit_tbl,
-	},
-	{},
-};
-
 static int rotator_clk_crtl(struct rot_context *rot, bool enable)
 {
 	if (enable) {
@@ -803,10 +852,10 @@
 struct platform_driver rotator_driver = {
 	.probe		= rotator_probe,
 	.remove		= rotator_remove,
-	.id_table	= rotator_driver_ids,
 	.driver		= {
 		.name	= "exynos-rot",
 		.owner	= THIS_MODULE,
 		.pm	= &rotator_pm_ops,
+		.of_match_table = exynos_rotator_match,
 	},
 };
diff --git a/drivers/gpu/drm/exynos/exynos_drm_vidi.c b/drivers/gpu/drm/exynos/exynos_drm_vidi.c
index c57c565..4400330 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_vidi.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_vidi.c
@@ -23,6 +23,7 @@
 #include "exynos_drm_drv.h"
 #include "exynos_drm_crtc.h"
 #include "exynos_drm_encoder.h"
+#include "exynos_drm_vidi.h"
 
 /* vidi has totally three virtual windows. */
 #define WINDOWS_NR		3
diff --git a/drivers/gpu/drm/exynos/exynos_hdmi.c b/drivers/gpu/drm/exynos/exynos_hdmi.c
index 2f5c694..a0e10ae 100644
--- a/drivers/gpu/drm/exynos/exynos_hdmi.c
+++ b/drivers/gpu/drm/exynos/exynos_hdmi.c
@@ -32,6 +32,7 @@
 #include <linux/clk.h>
 #include <linux/regulator/consumer.h>
 #include <linux/io.h>
+#include <linux/of.h>
 #include <linux/of_gpio.h>
 
 #include <drm/exynos_drm.h>
@@ -1824,10 +1825,8 @@
 
 	res->regul_bulk = devm_kzalloc(dev, ARRAY_SIZE(supply) *
 		sizeof(res->regul_bulk[0]), GFP_KERNEL);
-	if (!res->regul_bulk) {
-		DRM_ERROR("failed to get memory for regulators\n");
+	if (!res->regul_bulk)
 		goto fail;
-	}
 	for (i = 0; i < ARRAY_SIZE(supply); ++i) {
 		res->regul_bulk[i].supply = supply[i];
 		res->regul_bulk[i].consumer = NULL;
@@ -1859,7 +1858,6 @@
 		hdmi_hdmiphy = hdmiphy;
 }
 
-#ifdef CONFIG_OF
 static struct s5p_hdmi_platform_data *drm_hdmi_dt_parse_pdata
 					(struct device *dev)
 {
@@ -1868,10 +1866,8 @@
 	u32 value;
 
 	pd = devm_kzalloc(dev, sizeof(*pd), GFP_KERNEL);
-	if (!pd) {
-		DRM_ERROR("memory allocation for pdata failed\n");
+	if (!pd)
 		goto err_data;
-	}
 
 	if (!of_find_property(np, "hpd-gpio", &value)) {
 		DRM_ERROR("no hpd gpio property found\n");
@@ -1885,33 +1881,7 @@
 err_data:
 	return NULL;
 }
-#else
-static struct s5p_hdmi_platform_data *drm_hdmi_dt_parse_pdata
-					(struct device *dev)
-{
-	return NULL;
-}
-#endif
 
-static struct platform_device_id hdmi_driver_types[] = {
-	{
-		.name		= "s5pv210-hdmi",
-		.driver_data    = HDMI_TYPE13,
-	}, {
-		.name		= "exynos4-hdmi",
-		.driver_data    = HDMI_TYPE13,
-	}, {
-		.name		= "exynos4-hdmi14",
-		.driver_data	= HDMI_TYPE14,
-	}, {
-		.name		= "exynos5-hdmi",
-		.driver_data	= HDMI_TYPE14,
-	}, {
-		/* end node */
-	}
-};
-
-#ifdef CONFIG_OF
 static struct of_device_id hdmi_match_types[] = {
 	{
 		.compatible = "samsung,exynos5-hdmi",
@@ -1923,7 +1893,6 @@
 		/* end node */
 	}
 };
-#endif
 
 static int hdmi_probe(struct platform_device *pdev)
 {
@@ -1932,36 +1901,23 @@
 	struct hdmi_context *hdata;
 	struct s5p_hdmi_platform_data *pdata;
 	struct resource *res;
+	const struct of_device_id *match;
 	int ret;
 
-	if (dev->of_node) {
-		pdata = drm_hdmi_dt_parse_pdata(dev);
-		if (IS_ERR(pdata)) {
-			DRM_ERROR("failed to parse dt\n");
-			return PTR_ERR(pdata);
-		}
-	} else {
-		pdata = dev->platform_data;
-	}
+	if (!dev->of_node)
+		return -ENODEV;
 
-	if (!pdata) {
-		DRM_ERROR("no platform data specified\n");
+	pdata = drm_hdmi_dt_parse_pdata(dev);
+	if (!pdata)
 		return -EINVAL;
-	}
 
-	drm_hdmi_ctx = devm_kzalloc(dev, sizeof(*drm_hdmi_ctx),
-								GFP_KERNEL);
-	if (!drm_hdmi_ctx) {
-		DRM_ERROR("failed to allocate common hdmi context.\n");
+	drm_hdmi_ctx = devm_kzalloc(dev, sizeof(*drm_hdmi_ctx), GFP_KERNEL);
+	if (!drm_hdmi_ctx)
 		return -ENOMEM;
-	}
 
-	hdata = devm_kzalloc(dev, sizeof(struct hdmi_context),
-								GFP_KERNEL);
-	if (!hdata) {
-		DRM_ERROR("out of memory\n");
+	hdata = devm_kzalloc(dev, sizeof(struct hdmi_context), GFP_KERNEL);
+	if (!hdata)
 		return -ENOMEM;
-	}
 
 	mutex_init(&hdata->hdmi_mutex);
 
@@ -1970,23 +1926,15 @@
 
 	platform_set_drvdata(pdev, drm_hdmi_ctx);
 
-	if (dev->of_node) {
-		const struct of_device_id *match;
-		match = of_match_node(of_match_ptr(hdmi_match_types),
-					dev->of_node);
-		if (match == NULL)
-			return -ENODEV;
-		hdata->type = (enum hdmi_type)match->data;
-	} else {
-		hdata->type = (enum hdmi_type)platform_get_device_id
-					(pdev)->driver_data;
-	}
+	match = of_match_node(hdmi_match_types, dev->of_node);
+	if (!match)
+		return -ENODEV;
+	hdata->type = (enum hdmi_type)match->data;
 
 	hdata->hpd_gpio = pdata->hpd_gpio;
 	hdata->dev = dev;
 
 	ret = hdmi_resources_init(hdata);
-
 	if (ret) {
 		DRM_ERROR("hdmi_resources_init failed\n");
 		return -EINVAL;
@@ -2141,11 +2089,10 @@
 struct platform_driver hdmi_driver = {
 	.probe		= hdmi_probe,
 	.remove		= hdmi_remove,
-	.id_table = hdmi_driver_types,
 	.driver		= {
 		.name	= "exynos-hdmi",
 		.owner	= THIS_MODULE,
 		.pm	= &hdmi_pm_ops,
-		.of_match_table = of_match_ptr(hdmi_match_types),
+		.of_match_table = hdmi_match_types,
 	},
 };
diff --git a/drivers/gpu/drm/exynos/exynos_hdmiphy.c b/drivers/gpu/drm/exynos/exynos_hdmiphy.c
index 6e320ae..59abb14 100644
--- a/drivers/gpu/drm/exynos/exynos_hdmiphy.c
+++ b/drivers/gpu/drm/exynos/exynos_hdmiphy.c
@@ -15,6 +15,7 @@
 
 #include <linux/kernel.h>
 #include <linux/i2c.h>
+#include <linux/of.h>
 
 #include "exynos_drm_drv.h"
 #include "exynos_hdmi.h"
@@ -39,13 +40,6 @@
 	return 0;
 }
 
-static const struct i2c_device_id hdmiphy_id[] = {
-	{ "s5p_hdmiphy", 0 },
-	{ "exynos5-hdmiphy", 0 },
-	{ },
-};
-
-#ifdef CONFIG_OF
 static struct of_device_id hdmiphy_match_types[] = {
 	{
 		.compatible = "samsung,exynos5-hdmiphy",
@@ -57,15 +51,13 @@
 		/* end node */
 	}
 };
-#endif
 
 struct i2c_driver hdmiphy_driver = {
 	.driver = {
 		.name	= "exynos-hdmiphy",
 		.owner	= THIS_MODULE,
-		.of_match_table = of_match_ptr(hdmiphy_match_types),
+		.of_match_table = hdmiphy_match_types,
 	},
-	.id_table = hdmiphy_id,
 	.probe		= hdmiphy_probe,
 	.remove		= hdmiphy_remove,
 	.command		= NULL,
diff --git a/drivers/gpu/drm/exynos/exynos_mixer.c b/drivers/gpu/drm/exynos/exynos_mixer.c
index c9a137c..63bc5f9 100644
--- a/drivers/gpu/drm/exynos/exynos_mixer.c
+++ b/drivers/gpu/drm/exynos/exynos_mixer.c
@@ -30,6 +30,7 @@
 #include <linux/pm_runtime.h>
 #include <linux/clk.h>
 #include <linux/regulator/consumer.h>
+#include <linux/of.h>
 
 #include <drm/exynos_drm.h>
 
@@ -1185,16 +1186,12 @@
 
 	drm_hdmi_ctx = devm_kzalloc(dev, sizeof(*drm_hdmi_ctx),
 								GFP_KERNEL);
-	if (!drm_hdmi_ctx) {
-		DRM_ERROR("failed to allocate common hdmi context.\n");
+	if (!drm_hdmi_ctx)
 		return -ENOMEM;
-	}
 
 	ctx = devm_kzalloc(dev, sizeof(*ctx), GFP_KERNEL);
-	if (!ctx) {
-		DRM_ERROR("failed to alloc mixer context.\n");
+	if (!ctx)
 		return -ENOMEM;
-	}
 
 	mutex_init(&ctx->mixer_mutex);
 
diff --git a/drivers/gpu/drm/i2c/tda998x_drv.c b/drivers/gpu/drm/i2c/tda998x_drv.c
index c2bd711..b1f8fc6 100644
--- a/drivers/gpu/drm/i2c/tda998x_drv.c
+++ b/drivers/gpu/drm/i2c/tda998x_drv.c
@@ -596,6 +596,10 @@
 		cts_n = CTS_N_M(3) | CTS_N_K(3);
 		ca_i2s = CA_I2S_CA_I2S(0);
 		break;
+
+	default:
+		BUG();
+		return;
 	}
 
 	reg_write(encoder, REG_AIP_CLKSEL, clksel_aip);
diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
index 55ab924..a6f4cb5 100644
--- a/drivers/gpu/drm/i915/i915_debugfs.c
+++ b/drivers/gpu/drm/i915/i915_debugfs.c
@@ -857,7 +857,7 @@
 		u32 gt_perf_status = I915_READ(GEN6_GT_PERF_STATUS);
 		u32 rp_state_limits = I915_READ(GEN6_RP_STATE_LIMITS);
 		u32 rp_state_cap = I915_READ(GEN6_RP_STATE_CAP);
-		u32 rpstat, cagf;
+		u32 rpstat, cagf, reqf;
 		u32 rpupei, rpcurup, rpprevup;
 		u32 rpdownei, rpcurdown, rpprevdown;
 		int max_freq;
@@ -869,6 +869,14 @@
 
 		gen6_gt_force_wake_get(dev_priv);
 
+		reqf = I915_READ(GEN6_RPNSWREQ);
+		reqf &= ~GEN6_TURBO_DISABLE;
+		if (IS_HASWELL(dev))
+			reqf >>= 24;
+		else
+			reqf >>= 25;
+		reqf *= GT_FREQUENCY_MULTIPLIER;
+
 		rpstat = I915_READ(GEN6_RPSTAT1);
 		rpupei = I915_READ(GEN6_RP_CUR_UP_EI);
 		rpcurup = I915_READ(GEN6_RP_CUR_UP);
@@ -893,6 +901,7 @@
 			   gt_perf_status & 0xff);
 		seq_printf(m, "Render p-state limit: %d\n",
 			   rp_state_limits & 0xff);
+		seq_printf(m, "RPNSWREQ: %dMHz\n", reqf);
 		seq_printf(m, "CAGF: %dMHz\n", cagf);
 		seq_printf(m, "RP CUR UP EI: %dus\n", rpupei &
 			   GEN6_CURICONT_MASK);
diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c
index fdaa091..9b265a4 100644
--- a/drivers/gpu/drm/i915/i915_dma.c
+++ b/drivers/gpu/drm/i915/i915_dma.c
@@ -1290,9 +1290,12 @@
 	 * then we do not take part in VGA arbitration and the
 	 * vga_client_register() fails with -ENODEV.
 	 */
-	ret = vga_client_register(dev->pdev, dev, NULL, i915_vga_set_decode);
-	if (ret && ret != -ENODEV)
-		goto out;
+	if (!HAS_PCH_SPLIT(dev)) {
+		ret = vga_client_register(dev->pdev, dev, NULL,
+					  i915_vga_set_decode);
+		if (ret && ret != -ENODEV)
+			goto out;
+	}
 
 	intel_register_dsm_handler();
 
@@ -1348,6 +1351,12 @@
 	 */
 	intel_fbdev_initial_config(dev);
 
+	/*
+	 * Must do this after fbcon init so that
+	 * vgacon_save_screen() works during the handover.
+	 */
+	i915_disable_vga_mem(dev);
+
 	/* Only enable hotplug handling once the fbdev is fully set up. */
 	dev_priv->enable_hotplug_processing = true;
 
diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
index ccb28ea..69d8ed5 100644
--- a/drivers/gpu/drm/i915/i915_drv.c
+++ b/drivers/gpu/drm/i915/i915_drv.c
@@ -157,25 +157,6 @@
 static struct drm_driver driver;
 extern int intel_agp_enabled;
 
-#define INTEL_VGA_DEVICE(id, info) {		\
-	.class = PCI_BASE_CLASS_DISPLAY << 16,	\
-	.class_mask = 0xff0000,			\
-	.vendor = 0x8086,			\
-	.device = id,				\
-	.subvendor = PCI_ANY_ID,		\
-	.subdevice = PCI_ANY_ID,		\
-	.driver_data = (unsigned long) info }
-
-#define INTEL_QUANTA_VGA_DEVICE(info) {		\
-	.class = PCI_BASE_CLASS_DISPLAY << 16,	\
-	.class_mask = 0xff0000,			\
-	.vendor = 0x8086,			\
-	.device = 0x16a,			\
-	.subvendor = 0x152d,			\
-	.subdevice = 0x8990,			\
-	.driver_data = (unsigned long) info }
-
-
 static const struct intel_device_info intel_i830_info = {
 	.gen = 2, .is_mobile = 1, .cursor_needs_physical = 1, .num_pipes = 2,
 	.has_overlay = 1, .overlay_needs_physical = 1,
@@ -350,118 +331,41 @@
 	.has_vebox_ring = 1,
 };
 
+/*
+ * Make sure any device matches here are from most specific to most
+ * general.  For example, since the Quanta match is based on the subsystem
+ * and subvendor IDs, we need it to come before the more general IVB
+ * PCI ID matches, otherwise we'll use the wrong info struct above.
+ */
+#define INTEL_PCI_IDS \
+	INTEL_I830_IDS(&intel_i830_info),	\
+	INTEL_I845G_IDS(&intel_845g_info),	\
+	INTEL_I85X_IDS(&intel_i85x_info),	\
+	INTEL_I865G_IDS(&intel_i865g_info),	\
+	INTEL_I915G_IDS(&intel_i915g_info),	\
+	INTEL_I915GM_IDS(&intel_i915gm_info),	\
+	INTEL_I945G_IDS(&intel_i945g_info),	\
+	INTEL_I945GM_IDS(&intel_i945gm_info),	\
+	INTEL_I965G_IDS(&intel_i965g_info),	\
+	INTEL_G33_IDS(&intel_g33_info),		\
+	INTEL_I965GM_IDS(&intel_i965gm_info),	\
+	INTEL_GM45_IDS(&intel_gm45_info), 	\
+	INTEL_G45_IDS(&intel_g45_info), 	\
+	INTEL_PINEVIEW_IDS(&intel_pineview_info),	\
+	INTEL_IRONLAKE_D_IDS(&intel_ironlake_d_info),	\
+	INTEL_IRONLAKE_M_IDS(&intel_ironlake_m_info),	\
+	INTEL_SNB_D_IDS(&intel_sandybridge_d_info),	\
+	INTEL_SNB_M_IDS(&intel_sandybridge_m_info),	\
+	INTEL_IVB_Q_IDS(&intel_ivybridge_q_info), /* must be first IVB */ \
+	INTEL_IVB_M_IDS(&intel_ivybridge_m_info),	\
+	INTEL_IVB_D_IDS(&intel_ivybridge_d_info),	\
+	INTEL_HSW_D_IDS(&intel_haswell_d_info), \
+	INTEL_HSW_M_IDS(&intel_haswell_m_info), \
+	INTEL_VLV_M_IDS(&intel_valleyview_m_info),	\
+	INTEL_VLV_D_IDS(&intel_valleyview_d_info)
+
 static const struct pci_device_id pciidlist[] = {		/* aka */
-	INTEL_VGA_DEVICE(0x3577, &intel_i830_info),		/* I830_M */
-	INTEL_VGA_DEVICE(0x2562, &intel_845g_info),		/* 845_G */
-	INTEL_VGA_DEVICE(0x3582, &intel_i85x_info),		/* I855_GM */
-	INTEL_VGA_DEVICE(0x358e, &intel_i85x_info),
-	INTEL_VGA_DEVICE(0x2572, &intel_i865g_info),		/* I865_G */
-	INTEL_VGA_DEVICE(0x2582, &intel_i915g_info),		/* I915_G */
-	INTEL_VGA_DEVICE(0x258a, &intel_i915g_info),		/* E7221_G */
-	INTEL_VGA_DEVICE(0x2592, &intel_i915gm_info),		/* I915_GM */
-	INTEL_VGA_DEVICE(0x2772, &intel_i945g_info),		/* I945_G */
-	INTEL_VGA_DEVICE(0x27a2, &intel_i945gm_info),		/* I945_GM */
-	INTEL_VGA_DEVICE(0x27ae, &intel_i945gm_info),		/* I945_GME */
-	INTEL_VGA_DEVICE(0x2972, &intel_i965g_info),		/* I946_GZ */
-	INTEL_VGA_DEVICE(0x2982, &intel_i965g_info),		/* G35_G */
-	INTEL_VGA_DEVICE(0x2992, &intel_i965g_info),		/* I965_Q */
-	INTEL_VGA_DEVICE(0x29a2, &intel_i965g_info),		/* I965_G */
-	INTEL_VGA_DEVICE(0x29b2, &intel_g33_info),		/* Q35_G */
-	INTEL_VGA_DEVICE(0x29c2, &intel_g33_info),		/* G33_G */
-	INTEL_VGA_DEVICE(0x29d2, &intel_g33_info),		/* Q33_G */
-	INTEL_VGA_DEVICE(0x2a02, &intel_i965gm_info),		/* I965_GM */
-	INTEL_VGA_DEVICE(0x2a12, &intel_i965gm_info),		/* I965_GME */
-	INTEL_VGA_DEVICE(0x2a42, &intel_gm45_info),		/* GM45_G */
-	INTEL_VGA_DEVICE(0x2e02, &intel_g45_info),		/* IGD_E_G */
-	INTEL_VGA_DEVICE(0x2e12, &intel_g45_info),		/* Q45_G */
-	INTEL_VGA_DEVICE(0x2e22, &intel_g45_info),		/* G45_G */
-	INTEL_VGA_DEVICE(0x2e32, &intel_g45_info),		/* G41_G */
-	INTEL_VGA_DEVICE(0x2e42, &intel_g45_info),		/* B43_G */
-	INTEL_VGA_DEVICE(0x2e92, &intel_g45_info),		/* B43_G.1 */
-	INTEL_VGA_DEVICE(0xa001, &intel_pineview_info),
-	INTEL_VGA_DEVICE(0xa011, &intel_pineview_info),
-	INTEL_VGA_DEVICE(0x0042, &intel_ironlake_d_info),
-	INTEL_VGA_DEVICE(0x0046, &intel_ironlake_m_info),
-	INTEL_VGA_DEVICE(0x0102, &intel_sandybridge_d_info),
-	INTEL_VGA_DEVICE(0x0112, &intel_sandybridge_d_info),
-	INTEL_VGA_DEVICE(0x0122, &intel_sandybridge_d_info),
-	INTEL_VGA_DEVICE(0x0106, &intel_sandybridge_m_info),
-	INTEL_VGA_DEVICE(0x0116, &intel_sandybridge_m_info),
-	INTEL_VGA_DEVICE(0x0126, &intel_sandybridge_m_info),
-	INTEL_VGA_DEVICE(0x010A, &intel_sandybridge_d_info),
-	INTEL_VGA_DEVICE(0x0156, &intel_ivybridge_m_info), /* GT1 mobile */
-	INTEL_VGA_DEVICE(0x0166, &intel_ivybridge_m_info), /* GT2 mobile */
-	INTEL_VGA_DEVICE(0x0152, &intel_ivybridge_d_info), /* GT1 desktop */
-	INTEL_VGA_DEVICE(0x0162, &intel_ivybridge_d_info), /* GT2 desktop */
-	INTEL_VGA_DEVICE(0x015a, &intel_ivybridge_d_info), /* GT1 server */
-	INTEL_QUANTA_VGA_DEVICE(&intel_ivybridge_q_info), /* Quanta transcode */
-	INTEL_VGA_DEVICE(0x016a, &intel_ivybridge_d_info), /* GT2 server */
-	INTEL_VGA_DEVICE(0x0402, &intel_haswell_d_info), /* GT1 desktop */
-	INTEL_VGA_DEVICE(0x0412, &intel_haswell_d_info), /* GT2 desktop */
-	INTEL_VGA_DEVICE(0x0422, &intel_haswell_d_info), /* GT3 desktop */
-	INTEL_VGA_DEVICE(0x040a, &intel_haswell_d_info), /* GT1 server */
-	INTEL_VGA_DEVICE(0x041a, &intel_haswell_d_info), /* GT2 server */
-	INTEL_VGA_DEVICE(0x042a, &intel_haswell_d_info), /* GT3 server */
-	INTEL_VGA_DEVICE(0x0406, &intel_haswell_m_info), /* GT1 mobile */
-	INTEL_VGA_DEVICE(0x0416, &intel_haswell_m_info), /* GT2 mobile */
-	INTEL_VGA_DEVICE(0x0426, &intel_haswell_m_info), /* GT2 mobile */
-	INTEL_VGA_DEVICE(0x040B, &intel_haswell_d_info), /* GT1 reserved */
-	INTEL_VGA_DEVICE(0x041B, &intel_haswell_d_info), /* GT2 reserved */
-	INTEL_VGA_DEVICE(0x042B, &intel_haswell_d_info), /* GT3 reserved */
-	INTEL_VGA_DEVICE(0x040E, &intel_haswell_d_info), /* GT1 reserved */
-	INTEL_VGA_DEVICE(0x041E, &intel_haswell_d_info), /* GT2 reserved */
-	INTEL_VGA_DEVICE(0x042E, &intel_haswell_d_info), /* GT3 reserved */
-	INTEL_VGA_DEVICE(0x0C02, &intel_haswell_d_info), /* SDV GT1 desktop */
-	INTEL_VGA_DEVICE(0x0C12, &intel_haswell_d_info), /* SDV GT2 desktop */
-	INTEL_VGA_DEVICE(0x0C22, &intel_haswell_d_info), /* SDV GT3 desktop */
-	INTEL_VGA_DEVICE(0x0C0A, &intel_haswell_d_info), /* SDV GT1 server */
-	INTEL_VGA_DEVICE(0x0C1A, &intel_haswell_d_info), /* SDV GT2 server */
-	INTEL_VGA_DEVICE(0x0C2A, &intel_haswell_d_info), /* SDV GT3 server */
-	INTEL_VGA_DEVICE(0x0C06, &intel_haswell_m_info), /* SDV GT1 mobile */
-	INTEL_VGA_DEVICE(0x0C16, &intel_haswell_m_info), /* SDV GT2 mobile */
-	INTEL_VGA_DEVICE(0x0C26, &intel_haswell_m_info), /* SDV GT3 mobile */
-	INTEL_VGA_DEVICE(0x0C0B, &intel_haswell_d_info), /* SDV GT1 reserved */
-	INTEL_VGA_DEVICE(0x0C1B, &intel_haswell_d_info), /* SDV GT2 reserved */
-	INTEL_VGA_DEVICE(0x0C2B, &intel_haswell_d_info), /* SDV GT3 reserved */
-	INTEL_VGA_DEVICE(0x0C0E, &intel_haswell_d_info), /* SDV GT1 reserved */
-	INTEL_VGA_DEVICE(0x0C1E, &intel_haswell_d_info), /* SDV GT2 reserved */
-	INTEL_VGA_DEVICE(0x0C2E, &intel_haswell_d_info), /* SDV GT3 reserved */
-	INTEL_VGA_DEVICE(0x0A02, &intel_haswell_d_info), /* ULT GT1 desktop */
-	INTEL_VGA_DEVICE(0x0A12, &intel_haswell_d_info), /* ULT GT2 desktop */
-	INTEL_VGA_DEVICE(0x0A22, &intel_haswell_d_info), /* ULT GT3 desktop */
-	INTEL_VGA_DEVICE(0x0A0A, &intel_haswell_d_info), /* ULT GT1 server */
-	INTEL_VGA_DEVICE(0x0A1A, &intel_haswell_d_info), /* ULT GT2 server */
-	INTEL_VGA_DEVICE(0x0A2A, &intel_haswell_d_info), /* ULT GT3 server */
-	INTEL_VGA_DEVICE(0x0A06, &intel_haswell_m_info), /* ULT GT1 mobile */
-	INTEL_VGA_DEVICE(0x0A16, &intel_haswell_m_info), /* ULT GT2 mobile */
-	INTEL_VGA_DEVICE(0x0A26, &intel_haswell_m_info), /* ULT GT3 mobile */
-	INTEL_VGA_DEVICE(0x0A0B, &intel_haswell_d_info), /* ULT GT1 reserved */
-	INTEL_VGA_DEVICE(0x0A1B, &intel_haswell_d_info), /* ULT GT2 reserved */
-	INTEL_VGA_DEVICE(0x0A2B, &intel_haswell_d_info), /* ULT GT3 reserved */
-	INTEL_VGA_DEVICE(0x0A0E, &intel_haswell_m_info), /* ULT GT1 reserved */
-	INTEL_VGA_DEVICE(0x0A1E, &intel_haswell_m_info), /* ULT GT2 reserved */
-	INTEL_VGA_DEVICE(0x0A2E, &intel_haswell_m_info), /* ULT GT3 reserved */
-	INTEL_VGA_DEVICE(0x0D02, &intel_haswell_d_info), /* CRW GT1 desktop */
-	INTEL_VGA_DEVICE(0x0D12, &intel_haswell_d_info), /* CRW GT2 desktop */
-	INTEL_VGA_DEVICE(0x0D22, &intel_haswell_d_info), /* CRW GT3 desktop */
-	INTEL_VGA_DEVICE(0x0D0A, &intel_haswell_d_info), /* CRW GT1 server */
-	INTEL_VGA_DEVICE(0x0D1A, &intel_haswell_d_info), /* CRW GT2 server */
-	INTEL_VGA_DEVICE(0x0D2A, &intel_haswell_d_info), /* CRW GT3 server */
-	INTEL_VGA_DEVICE(0x0D06, &intel_haswell_m_info), /* CRW GT1 mobile */
-	INTEL_VGA_DEVICE(0x0D16, &intel_haswell_m_info), /* CRW GT2 mobile */
-	INTEL_VGA_DEVICE(0x0D26, &intel_haswell_m_info), /* CRW GT3 mobile */
-	INTEL_VGA_DEVICE(0x0D0B, &intel_haswell_d_info), /* CRW GT1 reserved */
-	INTEL_VGA_DEVICE(0x0D1B, &intel_haswell_d_info), /* CRW GT2 reserved */
-	INTEL_VGA_DEVICE(0x0D2B, &intel_haswell_d_info), /* CRW GT3 reserved */
-	INTEL_VGA_DEVICE(0x0D0E, &intel_haswell_d_info), /* CRW GT1 reserved */
-	INTEL_VGA_DEVICE(0x0D1E, &intel_haswell_d_info), /* CRW GT2 reserved */
-	INTEL_VGA_DEVICE(0x0D2E, &intel_haswell_d_info), /* CRW GT3 reserved */
-	INTEL_VGA_DEVICE(0x0f30, &intel_valleyview_m_info),
-	INTEL_VGA_DEVICE(0x0f31, &intel_valleyview_m_info),
-	INTEL_VGA_DEVICE(0x0f32, &intel_valleyview_m_info),
-	INTEL_VGA_DEVICE(0x0f33, &intel_valleyview_m_info),
-	INTEL_VGA_DEVICE(0x0157, &intel_valleyview_m_info),
-	INTEL_VGA_DEVICE(0x0155, &intel_valleyview_d_info),
+	INTEL_PCI_IDS,
 	{0, 0, 0}
 };
 
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 52a3785..35874b3 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -1236,6 +1236,13 @@
 
 	unsigned int fsb_freq, mem_freq, is_ddr3;
 
+	/**
+	 * wq - Driver workqueue for GEM.
+	 *
+	 * NOTE: Work items scheduled here are not allowed to grab any modeset
+	 * locks, for otherwise the flushing done in the pageflip code will
+	 * result in deadlocks.
+	 */
 	struct workqueue_struct *wq;
 
 	/* Display functions */
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 2d1cb10..d9e337f 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -212,7 +212,7 @@
 void *i915_gem_object_alloc(struct drm_device *dev)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
-	return kmem_cache_alloc(dev_priv->slab, GFP_KERNEL | __GFP_ZERO);
+	return kmem_cache_zalloc(dev_priv->slab, GFP_KERNEL);
 }
 
 void i915_gem_object_free(struct drm_i915_gem_object *obj)
@@ -1695,6 +1695,7 @@
 __i915_gem_shrink(struct drm_i915_private *dev_priv, long target,
 		  bool purgeable_only)
 {
+	struct list_head still_bound_list;
 	struct drm_i915_gem_object *obj, *next;
 	long count = 0;
 
@@ -1709,23 +1710,55 @@
 		}
 	}
 
-	list_for_each_entry_safe(obj, next, &dev_priv->mm.bound_list,
-				 global_list) {
+	/*
+	 * As we may completely rewrite the bound list whilst unbinding
+	 * (due to retiring requests), we have to strictly process only
+	 * one element of the list at a time, and recheck the list
+	 * on every iteration.
+	 */
+	INIT_LIST_HEAD(&still_bound_list);
+	while (count < target && !list_empty(&dev_priv->mm.bound_list)) {
 		struct i915_vma *vma, *v;
 
+		obj = list_first_entry(&dev_priv->mm.bound_list,
+				       typeof(*obj), global_list);
+		list_move_tail(&obj->global_list, &still_bound_list);
+
 		if (!i915_gem_object_is_purgeable(obj) && purgeable_only)
 			continue;
 
+		/*
+		 * Hold a reference whilst we unbind this object, as we may
+		 * end up waiting for and retiring requests. This might
+		 * release the final reference (held by the active list)
+		 * and result in the object being freed from under us.
+		 *
+		 * Note 1: Shrinking the bound list is special since only active
+		 * (and hence bound) objects can contain such limbo objects, so
+		 * we don't need special tricks for shrinking the unbound list.
+		 * The only other place where we have to be careful with active
+		 * objects suddenly disappearing due to retiring requests is the
+		 * eviction code.
+		 *
+		 * Note 2: Even though the bound list doesn't hold a reference
+		 * to the object we can safely grab one here: The final object
+		 * unreferencing and the bound_list are both protected by the
+		 * dev->struct_mutex and so we won't ever be able to observe an
+		 * object on the bound_list with a reference count of 0.
+		 */
+		drm_gem_object_reference(&obj->base);
+
 		list_for_each_entry_safe(vma, v, &obj->vma_list, vma_link)
 			if (i915_vma_unbind(vma))
 				break;
 
-		if (!i915_gem_object_put_pages(obj)) {
+		if (i915_gem_object_put_pages(obj) == 0)
 			count += obj->base.size >> PAGE_SHIFT;
-			if (count >= target)
-				return count;
-		}
+
+		drm_gem_object_unreference(&obj->base);
 	}
+	list_splice(&still_bound_list, &dev_priv->mm.bound_list);
 
 	return count;
 }
@@ -1774,7 +1807,6 @@
 
 	page_count = obj->base.size / PAGE_SIZE;
 	if (sg_alloc_table(st, page_count, GFP_KERNEL)) {
-		sg_free_table(st);
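+		/* sg_alloc_table() frees its partial allocations on failure,
+		 * so calling sg_free_table() here as well was a double free.
+		 */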
 		kfree(st);
 		return -ENOMEM;
 	}
diff --git a/drivers/gpu/drm/i915/i915_gem_dmabuf.c b/drivers/gpu/drm/i915/i915_gem_dmabuf.c
index e918b05..7d5752f 100644
--- a/drivers/gpu/drm/i915/i915_gem_dmabuf.c
+++ b/drivers/gpu/drm/i915/i915_gem_dmabuf.c
@@ -42,27 +42,24 @@
 
 	ret = i915_mutex_lock_interruptible(obj->base.dev);
 	if (ret)
-		return ERR_PTR(ret);
+		goto err;
 
 	ret = i915_gem_object_get_pages(obj);
-	if (ret) {
-		st = ERR_PTR(ret);
-		goto out;
-	}
+	if (ret)
+		goto err_unlock;
+
+	i915_gem_object_pin_pages(obj);
 
 	/* Copy sg so that we make an independent mapping */
 	st = kmalloc(sizeof(struct sg_table), GFP_KERNEL);
 	if (st == NULL) {
-		st = ERR_PTR(-ENOMEM);
-		goto out;
+		ret = -ENOMEM;
+		goto err_unpin;
 	}
 
 	ret = sg_alloc_table(st, obj->pages->nents, GFP_KERNEL);
-	if (ret) {
-		kfree(st);
-		st = ERR_PTR(ret);
-		goto out;
-	}
+	if (ret)
+		goto err_free;
 
 	src = obj->pages->sgl;
 	dst = st->sgl;
@@ -73,17 +70,23 @@
 	}
 
 	if (!dma_map_sg(attachment->dev, st->sgl, st->nents, dir)) {
-		sg_free_table(st);
-		kfree(st);
-		st = ERR_PTR(-ENOMEM);
-		goto out;
+		ret = -ENOMEM;
+		goto err_free_sg;
 	}
 
-	i915_gem_object_pin_pages(obj);
-
-out:
 	mutex_unlock(&obj->base.dev->struct_mutex);
 	return st;
+
+err_free_sg:
+	sg_free_table(st);
+err_free:
+	kfree(st);
+err_unpin:
+	i915_gem_object_unpin_pages(obj);
+err_unlock:
+	mutex_unlock(&obj->base.dev->struct_mutex);
+err:
+	return ERR_PTR(ret);
 }
 
 static void i915_gem_unmap_dma_buf(struct dma_buf_attachment *attachment,
diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
index 792c52a..bf34577 100644
--- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
@@ -310,6 +310,9 @@
 	else
 		ret = relocate_entry_gtt(obj, reloc);
 
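+	/* Don't update the user's presumed_offset if writing the relocation failed. */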
+	if (ret)
+		return ret;
+
 	/* and update the user's relocation entry */
 	reloc->presumed_offset = target_offset;
 
diff --git a/drivers/gpu/drm/i915/i915_gem_stolen.c b/drivers/gpu/drm/i915/i915_gem_stolen.c
index 9969d10b..e15a1d9 100644
--- a/drivers/gpu/drm/i915/i915_gem_stolen.c
+++ b/drivers/gpu/drm/i915/i915_gem_stolen.c
@@ -201,6 +201,9 @@
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	int bios_reserved = 0;
 
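+	/* Nothing to set up if no stolen memory was reported. */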
+	if (dev_priv->gtt.stolen_size == 0)
+		return 0;
+
 	dev_priv->mm.stolen_base = i915_stolen_to_physical(dev);
 	if (dev_priv->mm.stolen_base == 0)
 		return 0;
diff --git a/drivers/gpu/drm/i915/i915_gpu_error.c b/drivers/gpu/drm/i915/i915_gpu_error.c
index 558e568..aba9d74 100644
--- a/drivers/gpu/drm/i915/i915_gpu_error.c
+++ b/drivers/gpu/drm/i915/i915_gpu_error.c
@@ -641,7 +641,7 @@
 		if (WARN_ON(ring->id != RCS))
 			return NULL;
 
-		obj = ring->private;
+		obj = ring->scratch.obj;
 		if (acthd >= i915_gem_obj_ggtt_offset(obj) &&
 		    acthd < i915_gem_obj_ggtt_offset(obj) + obj->base.size)
 			return i915_error_object_create(dev_priv, obj);
diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
index a03b445..83cce0c 100644
--- a/drivers/gpu/drm/i915/i915_irq.c
+++ b/drivers/gpu/drm/i915/i915_irq.c
@@ -1027,8 +1027,13 @@
 		dev_priv->display.hpd_irq_setup(dev);
 	spin_unlock(&dev_priv->irq_lock);
 
-	queue_work(dev_priv->wq,
-		   &dev_priv->hotplug_work);
+	/*
+	 * Our hotplug handler can grab modeset locks (by calling down into the
+	 * fb helpers). Hence it must not be run on our own dev_priv->wq work
+	 * queue, for otherwise the flush_work in the pageflip code will
+	 * deadlock.
+	 */
+	schedule_work(&dev_priv->hotplug_work);
 }
 
 static void gmbus_irq_handler(struct drm_device *dev)
@@ -1655,7 +1660,13 @@
 			wake_up_all(&ring->irq_queue);
 	}
 
-	queue_work(dev_priv->wq, &dev_priv->gpu_error.work);
+	/*
+	 * Our reset work can grab modeset locks (since it needs to reset the
+	 * state of outstanding pageflips). Hence it must not be run on our own
+	 * dev_priv->wq work queue, for otherwise the flush_work in the pageflip
+	 * code will deadlock.
+	 */
+	schedule_work(&dev_priv->gpu_error.work);
 }
 
 static void __always_unused i915_pageflip_stall_check(struct drm_device *dev, int pipe)
@@ -2027,9 +2038,9 @@
 
 	for_each_ring(ring, dev_priv, i) {
 		if (ring->hangcheck.score > FIRE) {
-			DRM_ERROR("%s on %s\n",
-				  stuck[i] ? "stuck" : "no progress",
-				  ring->name);
+			DRM_INFO("%s on %s\n",
+				 stuck[i] ? "stuck" : "no progress",
+				 ring->name);
 			rings_hung++;
 		}
 	}
diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
index b6a58f7..c159e1a 100644
--- a/drivers/gpu/drm/i915/i915_reg.h
+++ b/drivers/gpu/drm/i915/i915_reg.h
@@ -33,21 +33,6 @@
 #define _MASKED_BIT_ENABLE(a) (((a) << 16) | (a))
 #define _MASKED_BIT_DISABLE(a) ((a) << 16)
 
-/*
- * The Bridge device's PCI config space has information about the
- * fb aperture size and the amount of pre-reserved memory.
- * This is all handled in the intel-gtt.ko module. i915.ko only
- * cares about the vga bit for the vga rbiter.
- */
-#define INTEL_GMCH_CTRL		0x52
-#define INTEL_GMCH_VGA_DISABLE  (1 << 1)
-#define SNB_GMCH_CTRL		0x50
-#define    SNB_GMCH_GGMS_SHIFT	8 /* GTT Graphics Memory Size */
-#define    SNB_GMCH_GGMS_MASK	0x3
-#define    SNB_GMCH_GMS_SHIFT   3 /* Graphics Mode Select */
-#define    SNB_GMCH_GMS_MASK    0x1f
-
-
 /* PCI config space */
 
 #define HPLLCC	0xc0 /* 855 only */
@@ -245,6 +230,7 @@
 *   address/value pairs. Don't overdo it, though, x <= 2^4 must hold!
  */
 #define MI_LOAD_REGISTER_IMM(x)	MI_INSTR(0x22, 2*x-1)
+#define MI_STORE_REGISTER_MEM(x) MI_INSTR(0x24, 2*x-1)
 #define MI_FLUSH_DW		MI_INSTR(0x26, 1) /* for GEN6 */
 #define   MI_FLUSH_DW_STORE_INDEX	(1<<21)
 #define   MI_INVALIDATE_TLB		(1<<18)
@@ -693,6 +679,23 @@
 #define   FPGA_DBG_RM_NOCLAIM	(1<<31)
 
 #define DERRMR		0x44050
+#define   DERRMR_PIPEA_SCANLINE		(1<<0)
+#define   DERRMR_PIPEA_PRI_FLIP_DONE	(1<<1)
+#define   DERRMR_PIPEA_SPR_FLIP_DONE	(1<<2)
+#define   DERRMR_PIPEA_VBLANK		(1<<3)
+#define   DERRMR_PIPEA_HBLANK		(1<<5)
+#define   DERRMR_PIPEB_SCANLINE 	(1<<8)
+#define   DERRMR_PIPEB_PRI_FLIP_DONE	(1<<9)
+#define   DERRMR_PIPEB_SPR_FLIP_DONE	(1<<10)
+#define   DERRMR_PIPEB_VBLANK		(1<<11)
+#define   DERRMR_PIPEB_HBLANK		(1<<13)
+/* Note that PIPEC is not a simple translation of PIPEA/PIPEB */
+#define   DERRMR_PIPEC_SCANLINE		(1<<14)
+#define   DERRMR_PIPEC_PRI_FLIP_DONE	(1<<15)
+#define   DERRMR_PIPEC_SPR_FLIP_DONE	(1<<20)
+#define   DERRMR_PIPEC_VBLANK		(1<<21)
+#define   DERRMR_PIPEC_HBLANK		(1<<22)
+
 
 /* GM45+ chicken bits -- debug workaround bits that may be required
  * for various sorts of correct behavior.  The top 16 bits of each are
@@ -3310,6 +3313,7 @@
 #define   MCURSOR_PIPE_A	0x00
 #define   MCURSOR_PIPE_B	(1 << 28)
 #define   MCURSOR_GAMMA_ENABLE  (1 << 26)
+#define   CURSOR_TRICKLE_FEED_DISABLE	(1 << 14)
 #define _CURABASE		(dev_priv->info->display_mmio_offset + 0x70084)
 #define _CURAPOS		(dev_priv->info->display_mmio_offset + 0x70088)
 #define   CURSOR_POS_MASK       0x007FF
diff --git a/drivers/gpu/drm/i915/i915_sysfs.c b/drivers/gpu/drm/i915/i915_sysfs.c
index a777e7f..c8c4112 100644
--- a/drivers/gpu/drm/i915/i915_sysfs.c
+++ b/drivers/gpu/drm/i915/i915_sysfs.c
@@ -224,6 +224,18 @@
 	return snprintf(buf, PAGE_SIZE, "%d\n", ret);
 }
 
+static ssize_t vlv_rpe_freq_mhz_show(struct device *kdev,
+				     struct device_attribute *attr, char *buf)
+{
+	struct drm_minor *minor = container_of(kdev, struct drm_minor, kdev);
+	struct drm_device *dev = minor->dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+
+	return snprintf(buf, PAGE_SIZE, "%d\n",
+			vlv_gpu_freq(dev_priv->mem_freq,
+				     dev_priv->rps.rpe_delay));
+}
+
 static ssize_t gt_max_freq_mhz_show(struct device *kdev, struct device_attribute *attr, char *buf)
 {
 	struct drm_minor *minor = container_of(kdev, struct drm_minor, kdev);
@@ -366,6 +378,7 @@
 static DEVICE_ATTR(gt_max_freq_mhz, S_IRUGO | S_IWUSR, gt_max_freq_mhz_show, gt_max_freq_mhz_store);
 static DEVICE_ATTR(gt_min_freq_mhz, S_IRUGO | S_IWUSR, gt_min_freq_mhz_show, gt_min_freq_mhz_store);
 
+static DEVICE_ATTR(vlv_rpe_freq_mhz, S_IRUGO, vlv_rpe_freq_mhz_show, NULL);
 
 static ssize_t gt_rp_mhz_show(struct device *kdev, struct device_attribute *attr, char *buf);
 static DEVICE_ATTR(gt_RP0_freq_mhz, S_IRUGO, gt_rp_mhz_show, NULL);
@@ -409,6 +422,14 @@
 	NULL,
 };
 
+static const struct attribute *vlv_attrs[] = {
+	&dev_attr_gt_cur_freq_mhz.attr,
+	&dev_attr_gt_max_freq_mhz.attr,
+	&dev_attr_gt_min_freq_mhz.attr,
+	&dev_attr_vlv_rpe_freq_mhz.attr,
+	NULL,
+};
+
 static ssize_t error_state_read(struct file *filp, struct kobject *kobj,
 				struct bin_attribute *attr, char *buf,
 				loff_t off, size_t count)
@@ -492,11 +513,13 @@
 			DRM_ERROR("l3 parity sysfs setup failed\n");
 	}
 
-	if (INTEL_INFO(dev)->gen >= 6) {
+	ret = 0;
+	if (IS_VALLEYVIEW(dev))
+		ret = sysfs_create_files(&dev->primary->kdev.kobj, vlv_attrs);
+	else if (INTEL_INFO(dev)->gen >= 6)
 		ret = sysfs_create_files(&dev->primary->kdev.kobj, gen6_attrs);
-		if (ret)
-			DRM_ERROR("gen6 sysfs setup failed\n");
-	}
+	if (ret)
+		DRM_ERROR("RPS sysfs setup failed\n");
 
 	ret = sysfs_create_bin_file(&dev->primary->kdev.kobj,
 				    &error_state_attr);
@@ -507,7 +530,10 @@
 void i915_teardown_sysfs(struct drm_device *dev)
 {
 	sysfs_remove_bin_file(&dev->primary->kdev.kobj, &error_state_attr);
-	sysfs_remove_files(&dev->primary->kdev.kobj, gen6_attrs);
+	if (IS_VALLEYVIEW(dev))
+		sysfs_remove_files(&dev->primary->kdev.kobj, vlv_attrs);
+	else
+		sysfs_remove_files(&dev->primary->kdev.kobj, gen6_attrs);
 	device_remove_bin_file(&dev->primary->kdev,  &dpf_attrs);
 #ifdef CONFIG_PM
 	sysfs_unmerge_group(&dev->primary->kdev.kobj, &rc6_attr_group);
diff --git a/drivers/gpu/drm/i915/intel_crt.c b/drivers/gpu/drm/i915/intel_crt.c
index b5a3875..ea9022e 100644
--- a/drivers/gpu/drm/i915/intel_crt.c
+++ b/drivers/gpu/drm/i915/intel_crt.c
@@ -688,7 +688,7 @@
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct intel_crt *crt = intel_attached_crt(connector);
 
-	if (HAS_PCH_SPLIT(dev)) {
+	if (INTEL_INFO(dev)->gen >= 5) {
 		u32 adpa;
 
 		adpa = I915_READ(crt->adpa_reg);
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index 38452d8..2489d0b 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -2077,8 +2077,10 @@
 	else
 		dspcntr &= ~DISPPLANE_TILED;
 
-	/* must disable */
-	dspcntr |= DISPPLANE_TRICKLE_FEED_DISABLE;
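+	/* Trickle feed stays enabled on Haswell; everywhere else it must still be disabled. */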
+	if (IS_HASWELL(dev))
+		dspcntr &= ~DISPPLANE_TRICKLE_FEED_DISABLE;
+	else
+		dspcntr |= DISPPLANE_TRICKLE_FEED_DISABLE;
 
 	I915_WRITE(reg, dspcntr);
 
@@ -6762,8 +6764,10 @@
 			cntl &= ~(CURSOR_MODE | MCURSOR_GAMMA_ENABLE);
 			cntl |= CURSOR_MODE_DISABLE;
 		}
-		if (IS_HASWELL(dev))
+		if (IS_HASWELL(dev)) {
 			cntl |= CURSOR_PIPE_CSC_ENABLE;
+			cntl &= ~CURSOR_TRICKLE_FEED_DISABLE;
+		}
 		I915_WRITE(CURCNTR_IVB(pipe), cntl);
 
 		intel_crtc->cursor_visible = visible;
@@ -7309,8 +7313,7 @@
 		}
 	}
 
-	pipe_config->adjusted_mode.clock = clock.dot *
-		pipe_config->pixel_multiplier;
+	pipe_config->adjusted_mode.clock = clock.dot;
 }
 
 static void ironlake_crtc_clock_get(struct intel_crtc *crtc,
@@ -7828,12 +7831,6 @@
 	return ret;
 }
 
-/*
- * On gen7 we currently use the blit ring because (in early silicon at least)
- * the render ring doesn't give us interrpts for page flip completion, which
- * means clients will hang after the first flip is queued.  Fortunately the
- * blit ring generates interrupts properly, so use it instead.
- */
 static int intel_gen7_queue_flip(struct drm_device *dev,
 				 struct drm_crtc *crtc,
 				 struct drm_framebuffer *fb,
@@ -7842,9 +7839,13 @@
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
-	struct intel_ring_buffer *ring = &dev_priv->ring[BCS];
+	struct intel_ring_buffer *ring;
 	uint32_t plane_bit = 0;
-	int ret;
+	int len, ret;
+
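+	/* Use the render ring if it last wrote the object; otherwise (and on
+	 * VLV) fall back to the blitter ring for the flip.
+	 */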
+	ring = obj->ring;
+	if (IS_VALLEYVIEW(dev) || ring == NULL || ring->id != RCS)
+		ring = &dev_priv->ring[BCS];
 
 	ret = intel_pin_and_fence_fb_obj(dev, obj, ring);
 	if (ret)
@@ -7866,10 +7867,34 @@
 		goto err_unpin;
 	}
 
-	ret = intel_ring_begin(ring, 4);
+	len = 4;
+	if (ring->id == RCS)
+		len += 6;
+
+	ret = intel_ring_begin(ring, len);
 	if (ret)
 		goto err_unpin;
 
+	/* Unmask the flip-done completion message. Note that the bspec says that
+	 * we should do this for both the BCS and RCS, and that we must not unmask
+	 * more than one flip event at any time (or ensure that one flip message
+	 * can be sent by waiting for flip-done prior to queueing new flips).
+	 * Experimentation says that BCS works despite DERRMR masking all
+	 * flip-done completion events and that unmasking all planes at once
+	 * for the RCS also doesn't appear to drop events. Setting the DERRMR
+	 * to zero does lead to lockups within MI_DISPLAY_FLIP.
+	 */
+	if (ring->id == RCS) {
+		intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(1));
+		intel_ring_emit(ring, DERRMR);
+		intel_ring_emit(ring, ~(DERRMR_PIPEA_PRI_FLIP_DONE |
+					DERRMR_PIPEB_PRI_FLIP_DONE |
+					DERRMR_PIPEC_PRI_FLIP_DONE));
+		intel_ring_emit(ring, MI_STORE_REGISTER_MEM(1));
+		intel_ring_emit(ring, DERRMR);
+		intel_ring_emit(ring, ring->scratch.gtt_offset + 256);
+	}
+
 	intel_ring_emit(ring, MI_DISPLAY_FLIP_I915 | plane_bit);
 	intel_ring_emit(ring, (fb->pitches[0] | obj->tiling_mode));
 	intel_ring_emit(ring, i915_gem_obj_ggtt_offset(obj) + intel_crtc->dspaddr_offset);
@@ -10022,6 +10047,33 @@
 	POSTING_READ(vga_reg);
 }
 
+static void i915_enable_vga_mem(struct drm_device *dev)
+{
+	/* Enable VGA memory on Intel HD */
+	if (HAS_PCH_SPLIT(dev)) {
+		vga_get_uninterruptible(dev->pdev, VGA_RSRC_LEGACY_IO);
+		outb(inb(VGA_MSR_READ) | VGA_MSR_MEM_EN, VGA_MSR_WRITE);
+		vga_set_legacy_decoding(dev->pdev, VGA_RSRC_LEGACY_IO |
+						   VGA_RSRC_LEGACY_MEM |
+						   VGA_RSRC_NORMAL_IO |
+						   VGA_RSRC_NORMAL_MEM);
+		vga_put(dev->pdev, VGA_RSRC_LEGACY_IO);
+	}
+}
+
+void i915_disable_vga_mem(struct drm_device *dev)
+{
+	/* Disable VGA memory on Intel HD */
+	if (HAS_PCH_SPLIT(dev)) {
+		vga_get_uninterruptible(dev->pdev, VGA_RSRC_LEGACY_IO);
+		outb(inb(VGA_MSR_READ) & ~VGA_MSR_MEM_EN, VGA_MSR_WRITE);
+		vga_set_legacy_decoding(dev->pdev, VGA_RSRC_LEGACY_IO |
+						   VGA_RSRC_NORMAL_IO |
+						   VGA_RSRC_NORMAL_MEM);
+		vga_put(dev->pdev, VGA_RSRC_LEGACY_IO);
+	}
+}
+
 void intel_modeset_init_hw(struct drm_device *dev)
 {
 	intel_init_power_well(dev);
@@ -10300,6 +10352,7 @@
 	if (I915_READ(vga_reg) != VGA_DISP_DISABLE) {
 		DRM_DEBUG_KMS("Something enabled VGA plane, disabling it\n");
 		i915_disable_vga(dev);
+		i915_disable_vga_mem(dev);
 	}
 }
 
@@ -10513,6 +10566,8 @@
 
 	intel_disable_fbc(dev);
 
+	i915_enable_vga_mem(dev);
+
 	intel_disable_gt_powersave(dev);
 
 	ironlake_teardown_rc6(dev);
diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h
index 1760808..a47799e 100644
--- a/drivers/gpu/drm/i915/intel_drv.h
+++ b/drivers/gpu/drm/i915/intel_drv.h
@@ -551,7 +551,7 @@
 			    struct drm_display_mode *fixed_mode);
 extern void intel_panel_fini(struct intel_panel *panel);
 
-extern void intel_fixed_panel_mode(struct drm_display_mode *fixed_mode,
+extern void intel_fixed_panel_mode(const struct drm_display_mode *fixed_mode,
 				   struct drm_display_mode *adjusted_mode);
 extern void intel_pch_panel_fitting(struct intel_crtc *crtc,
 				    struct intel_crtc_config *pipe_config,
@@ -792,5 +792,6 @@
 extern void hsw_pc8_restore_interrupts(struct drm_device *dev);
 extern void intel_aux_display_runtime_get(struct drm_i915_private *dev_priv);
 extern void intel_aux_display_runtime_put(struct drm_i915_private *dev_priv);
+extern void i915_disable_vga_mem(struct drm_device *dev);
 
 #endif /* __INTEL_DRV_H__ */
diff --git a/drivers/gpu/drm/i915/intel_lvds.c b/drivers/gpu/drm/i915/intel_lvds.c
index 4d33278..831a5c0 100644
--- a/drivers/gpu/drm/i915/intel_lvds.c
+++ b/drivers/gpu/drm/i915/intel_lvds.c
@@ -128,8 +128,8 @@
 	struct drm_device *dev = encoder->base.dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
-	struct drm_display_mode *fixed_mode =
-		lvds_encoder->attached_connector->base.panel.fixed_mode;
+	const struct drm_display_mode *adjusted_mode =
+		&crtc->config.adjusted_mode;
 	int pipe = crtc->pipe;
 	u32 temp;
 
@@ -183,9 +183,9 @@
 			temp &= ~LVDS_ENABLE_DITHER;
 	}
 	temp &= ~(LVDS_HSYNC_POLARITY | LVDS_VSYNC_POLARITY);
-	if (fixed_mode->flags & DRM_MODE_FLAG_NHSYNC)
+	if (adjusted_mode->flags & DRM_MODE_FLAG_NHSYNC)
 		temp |= LVDS_HSYNC_POLARITY;
-	if (fixed_mode->flags & DRM_MODE_FLAG_NVSYNC)
+	if (adjusted_mode->flags & DRM_MODE_FLAG_NVSYNC)
 		temp |= LVDS_VSYNC_POLARITY;
 
 	I915_WRITE(lvds_encoder->reg, temp);
diff --git a/drivers/gpu/drm/i915/intel_opregion.c b/drivers/gpu/drm/i915/intel_opregion.c
index cfb8fb6..119771f 100644
--- a/drivers/gpu/drm/i915/intel_opregion.c
+++ b/drivers/gpu/drm/i915/intel_opregion.c
@@ -173,7 +173,7 @@
 		return ASLE_BACKLIGHT_FAILED;
 
 	intel_panel_set_backlight(dev, bclp, 255);
-	iowrite32((bclp*0x64)/0xff | ASLE_CBLV_VALID, &asle->cblv);
+	iowrite32(DIV_ROUND_UP(bclp * 100, 255) | ASLE_CBLV_VALID, &asle->cblv);
 
 	return 0;
 }
diff --git a/drivers/gpu/drm/i915/intel_panel.c b/drivers/gpu/drm/i915/intel_panel.c
index a43c33b..42114ec 100644
--- a/drivers/gpu/drm/i915/intel_panel.c
+++ b/drivers/gpu/drm/i915/intel_panel.c
@@ -36,20 +36,12 @@
 #define PCI_LBPC 0xf4 /* legacy/combination backlight modes */
 
 void
-intel_fixed_panel_mode(struct drm_display_mode *fixed_mode,
+intel_fixed_panel_mode(const struct drm_display_mode *fixed_mode,
 		       struct drm_display_mode *adjusted_mode)
 {
-	adjusted_mode->hdisplay = fixed_mode->hdisplay;
-	adjusted_mode->hsync_start = fixed_mode->hsync_start;
-	adjusted_mode->hsync_end = fixed_mode->hsync_end;
-	adjusted_mode->htotal = fixed_mode->htotal;
+	drm_mode_copy(adjusted_mode, fixed_mode);
 
-	adjusted_mode->vdisplay = fixed_mode->vdisplay;
-	adjusted_mode->vsync_start = fixed_mode->vsync_start;
-	adjusted_mode->vsync_end = fixed_mode->vsync_end;
-	adjusted_mode->vtotal = fixed_mode->vtotal;
-
-	adjusted_mode->clock = fixed_mode->clock;
+	drm_mode_set_crtcinfo(adjusted_mode, 0);
 }
 
 /* adjusted_mode has been preset to be the panel's fixed mode */
diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c
index 4605682..0c115cc 100644
--- a/drivers/gpu/drm/i915/intel_pm.c
+++ b/drivers/gpu/drm/i915/intel_pm.c
@@ -3447,14 +3447,24 @@
 static void gen6_enable_rps_interrupts(struct drm_device *dev)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
+	u32 enabled_intrs;
 
 	spin_lock_irq(&dev_priv->irq_lock);
 	WARN_ON(dev_priv->rps.pm_iir);
 	snb_enable_pm_irq(dev_priv, GEN6_PM_RPS_EVENTS);
 	I915_WRITE(GEN6_PMIIR, GEN6_PM_RPS_EVENTS);
 	spin_unlock_irq(&dev_priv->irq_lock);
+
 	/* only unmask PM interrupts we need. Mask all others. */
-	I915_WRITE(GEN6_PMINTRMSK, ~GEN6_PM_RPS_EVENTS);
+	enabled_intrs = GEN6_PM_RPS_EVENTS;
+
+	/* IVB and SNB hard hang on a looping batchbuffer
+	 * if GEN6_PM_RP_UP_EI_EXPIRED is masked.
+	 */
+	if (INTEL_INFO(dev)->gen <= 7 && !IS_HASWELL(dev))
+		enabled_intrs |= GEN6_PM_RP_UP_EI_EXPIRED;
+
+	I915_WRITE(GEN6_PMINTRMSK, ~enabled_intrs);
 }
 
 static void gen6_enable_rps(struct drm_device *dev)
@@ -4950,8 +4960,6 @@
 			I915_READ(GEN7_SQ_CHICKEN_MBCUNIT_CONFIG) |
 			GEN7_SQ_CHICKEN_MBCUNIT_SQINTMOB);
 
-	g4x_disable_trickle_feed(dev);
-
 	/* WaVSRefCountFullforceMissDisable:hsw */
 	gen7_setup_fixed_func_scheduler(dev_priv);
 
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
index f05ccea..460ee10 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.c
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
@@ -33,16 +33,6 @@
 #include "i915_trace.h"
 #include "intel_drv.h"
 
-/*
- * 965+ support PIPE_CONTROL commands, which provide finer grained control
- * over cache flushing.
- */
-struct pipe_control {
-	struct drm_i915_gem_object *obj;
-	volatile u32 *cpu_page;
-	u32 gtt_offset;
-};
-
 static inline int ring_space(struct intel_ring_buffer *ring)
 {
 	int space = (ring->head & HEAD_ADDR) - (ring->tail + I915_RING_FREE_SPACE);
@@ -175,8 +165,7 @@
 static int
 intel_emit_post_sync_nonzero_flush(struct intel_ring_buffer *ring)
 {
-	struct pipe_control *pc = ring->private;
-	u32 scratch_addr = pc->gtt_offset + 128;
+	u32 scratch_addr = ring->scratch.gtt_offset + 128;
 	int ret;
 
 
@@ -213,8 +202,7 @@
                          u32 invalidate_domains, u32 flush_domains)
 {
 	u32 flags = 0;
-	struct pipe_control *pc = ring->private;
-	u32 scratch_addr = pc->gtt_offset + 128;
+	u32 scratch_addr = ring->scratch.gtt_offset + 128;
 	int ret;
 
 	/* Force SNB workarounds for PIPE_CONTROL flushes */
@@ -306,8 +294,7 @@
 		       u32 invalidate_domains, u32 flush_domains)
 {
 	u32 flags = 0;
-	struct pipe_control *pc = ring->private;
-	u32 scratch_addr = pc->gtt_offset + 128;
+	u32 scratch_addr = ring->scratch.gtt_offset + 128;
 	int ret;
 
 	/*
@@ -481,68 +468,43 @@
 static int
 init_pipe_control(struct intel_ring_buffer *ring)
 {
-	struct pipe_control *pc;
-	struct drm_i915_gem_object *obj;
 	int ret;
 
-	if (ring->private)
+	if (ring->scratch.obj)
 		return 0;
 
-	pc = kmalloc(sizeof(*pc), GFP_KERNEL);
-	if (!pc)
-		return -ENOMEM;
-
-	obj = i915_gem_alloc_object(ring->dev, 4096);
-	if (obj == NULL) {
+	ring->scratch.obj = i915_gem_alloc_object(ring->dev, 4096);
+	if (ring->scratch.obj == NULL) {
 		DRM_ERROR("Failed to allocate seqno page\n");
 		ret = -ENOMEM;
 		goto err;
 	}
 
-	i915_gem_object_set_cache_level(obj, I915_CACHE_LLC);
+	i915_gem_object_set_cache_level(ring->scratch.obj, I915_CACHE_LLC);
 
-	ret = i915_gem_obj_ggtt_pin(obj, 4096, true, false);
+	ret = i915_gem_obj_ggtt_pin(ring->scratch.obj, 4096, true, false);
 	if (ret)
 		goto err_unref;
 
-	pc->gtt_offset = i915_gem_obj_ggtt_offset(obj);
-	pc->cpu_page = kmap(sg_page(obj->pages->sgl));
-	if (pc->cpu_page == NULL) {
+	ring->scratch.gtt_offset = i915_gem_obj_ggtt_offset(ring->scratch.obj);
+	ring->scratch.cpu_page = kmap(sg_page(ring->scratch.obj->pages->sgl));
+	if (ring->scratch.cpu_page == NULL) {
 		ret = -ENOMEM;
 		goto err_unpin;
 	}
 
 	DRM_DEBUG_DRIVER("%s pipe control offset: 0x%08x\n",
-			 ring->name, pc->gtt_offset);
-
-	pc->obj = obj;
-	ring->private = pc;
+			 ring->name, ring->scratch.gtt_offset);
 	return 0;
 
 err_unpin:
-	i915_gem_object_unpin(obj);
+	i915_gem_object_unpin(ring->scratch.obj);
 err_unref:
-	drm_gem_object_unreference(&obj->base);
+	drm_gem_object_unreference(&ring->scratch.obj->base);
 err:
-	kfree(pc);
 	return ret;
 }
 
-static void
-cleanup_pipe_control(struct intel_ring_buffer *ring)
-{
-	struct pipe_control *pc = ring->private;
-	struct drm_i915_gem_object *obj;
-
-	obj = pc->obj;
-
-	kunmap(sg_page(obj->pages->sgl));
-	i915_gem_object_unpin(obj);
-	drm_gem_object_unreference(&obj->base);
-
-	kfree(pc);
-}
-
 static int init_render_ring(struct intel_ring_buffer *ring)
 {
 	struct drm_device *dev = ring->dev;
@@ -607,16 +569,16 @@
 {
 	struct drm_device *dev = ring->dev;
 
-	if (!ring->private)
+	if (ring->scratch.obj == NULL)
 		return;
 
-	if (HAS_BROKEN_CS_TLB(dev))
-		drm_gem_object_unreference(to_gem_object(ring->private));
+	if (INTEL_INFO(dev)->gen >= 5) {
+		kunmap(sg_page(ring->scratch.obj->pages->sgl));
+		i915_gem_object_unpin(ring->scratch.obj);
+	}
 
-	if (INTEL_INFO(dev)->gen >= 5)
-		cleanup_pipe_control(ring);
-
-	ring->private = NULL;
+	drm_gem_object_unreference(&ring->scratch.obj->base);
+	ring->scratch.obj = NULL;
 }
 
 static void
@@ -742,8 +704,7 @@
 static int
 pc_render_add_request(struct intel_ring_buffer *ring)
 {
-	struct pipe_control *pc = ring->private;
-	u32 scratch_addr = pc->gtt_offset + 128;
+	u32 scratch_addr = ring->scratch.gtt_offset + 128;
 	int ret;
 
 	/* For Ironlake, MI_USER_INTERRUPT was deprecated and apparently
@@ -761,7 +722,7 @@
 	intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(4) | PIPE_CONTROL_QW_WRITE |
 			PIPE_CONTROL_WRITE_FLUSH |
 			PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE);
-	intel_ring_emit(ring, pc->gtt_offset | PIPE_CONTROL_GLOBAL_GTT);
+	intel_ring_emit(ring, ring->scratch.gtt_offset | PIPE_CONTROL_GLOBAL_GTT);
 	intel_ring_emit(ring, ring->outstanding_lazy_request);
 	intel_ring_emit(ring, 0);
 	PIPE_CONTROL_FLUSH(ring, scratch_addr);
@@ -780,7 +741,7 @@
 			PIPE_CONTROL_WRITE_FLUSH |
 			PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE |
 			PIPE_CONTROL_NOTIFY);
-	intel_ring_emit(ring, pc->gtt_offset | PIPE_CONTROL_GLOBAL_GTT);
+	intel_ring_emit(ring, ring->scratch.gtt_offset | PIPE_CONTROL_GLOBAL_GTT);
 	intel_ring_emit(ring, ring->outstanding_lazy_request);
 	intel_ring_emit(ring, 0);
 	intel_ring_advance(ring);
@@ -814,15 +775,13 @@
 static u32
 pc_render_get_seqno(struct intel_ring_buffer *ring, bool lazy_coherency)
 {
-	struct pipe_control *pc = ring->private;
-	return pc->cpu_page[0];
+	return ring->scratch.cpu_page[0];
 }
 
 static void
 pc_render_set_seqno(struct intel_ring_buffer *ring, u32 seqno)
 {
-	struct pipe_control *pc = ring->private;
-	pc->cpu_page[0] = seqno;
+	ring->scratch.cpu_page[0] = seqno;
 }
 
 static bool
@@ -1141,8 +1100,7 @@
 		intel_ring_emit(ring, MI_NOOP);
 		intel_ring_advance(ring);
 	} else {
-		struct drm_i915_gem_object *obj = ring->private;
-		u32 cs_offset = i915_gem_obj_ggtt_offset(obj);
+		u32 cs_offset = ring->scratch.gtt_offset;
 
 		if (len > I830_BATCH_LIMIT)
 			return -ENOSPC;
@@ -1835,7 +1793,8 @@
 			return ret;
 		}
 
-		ring->private = obj;
+		ring->scratch.obj = obj;
+		ring->scratch.gtt_offset = i915_gem_obj_ggtt_offset(obj);
 	}
 
 	return intel_init_ring_buffer(dev, ring);
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.h b/drivers/gpu/drm/i915/intel_ringbuffer.h
index 432ad53..68b1ca974 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.h
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.h
@@ -155,7 +155,11 @@
 
 	struct intel_ring_hangcheck hangcheck;
 
-	void *private;
+	struct {
+		struct drm_i915_gem_object *obj;
+		u32 gtt_offset;
+		volatile u32 *cpu_page;
+	} scratch;
 };
 
 static inline bool
diff --git a/drivers/gpu/drm/i915/intel_sdvo.c b/drivers/gpu/drm/i915/intel_sdvo.c
index 317e058..85037b9 100644
--- a/drivers/gpu/drm/i915/intel_sdvo.c
+++ b/drivers/gpu/drm/i915/intel_sdvo.c
@@ -1151,11 +1151,10 @@
 {
 	struct drm_device *dev = intel_encoder->base.dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
-	struct drm_crtc *crtc = intel_encoder->base.crtc;
-	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+	struct intel_crtc *crtc = to_intel_crtc(intel_encoder->base.crtc);
 	struct drm_display_mode *adjusted_mode =
-		&intel_crtc->config.adjusted_mode;
-	struct drm_display_mode *mode = &intel_crtc->config.requested_mode;
+		&crtc->config.adjusted_mode;
+	struct drm_display_mode *mode = &crtc->config.requested_mode;
 	struct intel_sdvo *intel_sdvo = to_sdvo(intel_encoder);
 	u32 sdvox;
 	struct intel_sdvo_in_out_map in_out;
@@ -1213,13 +1212,15 @@
 	 * adjusted_mode.
 	 */
 	intel_sdvo_get_dtd_from_mode(&input_dtd, adjusted_mode);
+	input_dtd.part1.clock /= crtc->config.pixel_multiplier;
+
 	if (intel_sdvo->is_tv || intel_sdvo->is_lvds)
 		input_dtd.part2.sdvo_flags = intel_sdvo->dtd_sdvo_flags;
 	if (!intel_sdvo_set_input_timing(intel_sdvo, &input_dtd))
 		DRM_INFO("Setting input timings on %s failed\n",
 			 SDVO_NAME(intel_sdvo));
 
-	switch (intel_crtc->config.pixel_multiplier) {
+	switch (crtc->config.pixel_multiplier) {
 	default:
 		WARN(1, "unknown pixel multiplier specified\n");
 	case 1: rate = SDVO_CLOCK_RATE_MULT_1X; break;
@@ -1252,9 +1253,9 @@
 	}
 
 	if (INTEL_PCH_TYPE(dev) >= PCH_CPT)
-		sdvox |= SDVO_PIPE_SEL_CPT(intel_crtc->pipe);
+		sdvox |= SDVO_PIPE_SEL_CPT(crtc->pipe);
 	else
-		sdvox |= SDVO_PIPE_SEL(intel_crtc->pipe);
+		sdvox |= SDVO_PIPE_SEL(crtc->pipe);
 
 	if (intel_sdvo->has_hdmi_audio)
 		sdvox |= SDVO_AUDIO_ENABLE;
@@ -1264,7 +1265,7 @@
 	} else if (IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev)) {
 		/* done in crtc_mode_set as it lives inside the dpll register */
 	} else {
-		sdvox |= (intel_crtc->config.pixel_multiplier - 1)
+		sdvox |= (crtc->config.pixel_multiplier - 1)
 			<< SDVO_PORT_MULTIPLY_SHIFT;
 	}
 
diff --git a/drivers/gpu/drm/i915/intel_sprite.c b/drivers/gpu/drm/i915/intel_sprite.c
index 78b621c..ad6ec4b 100644
--- a/drivers/gpu/drm/i915/intel_sprite.c
+++ b/drivers/gpu/drm/i915/intel_sprite.c
@@ -260,8 +260,11 @@
 	if (obj->tiling_mode != I915_TILING_NONE)
 		sprctl |= SPRITE_TILED;
 
-	/* must disable */
-	sprctl |= SPRITE_TRICKLE_FEED_DISABLE;
+	if (IS_HASWELL(dev))
+		sprctl &= ~SPRITE_TRICKLE_FEED_DISABLE;
+	else
+		sprctl |= SPRITE_TRICKLE_FEED_DISABLE;
+
 	sprctl |= SPRITE_ENABLE;
 
 	if (IS_HASWELL(dev))
diff --git a/drivers/gpu/drm/i915/intel_uncore.c b/drivers/gpu/drm/i915/intel_uncore.c
index 8f5bc86..8649f1c 100644
--- a/drivers/gpu/drm/i915/intel_uncore.c
+++ b/drivers/gpu/drm/i915/intel_uncore.c
@@ -261,7 +261,7 @@
 	}
 }
 
-void intel_uncore_sanitize(struct drm_device *dev)
+static void intel_uncore_forcewake_reset(struct drm_device *dev)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
 
@@ -272,6 +272,11 @@
 		if (IS_IVYBRIDGE(dev) || IS_HASWELL(dev))
 			__gen6_gt_force_wake_mt_reset(dev_priv);
 	}
+}
+
+void intel_uncore_sanitize(struct drm_device *dev)
+{
+	intel_uncore_forcewake_reset(dev);
 
 	/* BIOS often leaves RC6 enabled, but disable it for hw init */
 	intel_disable_gt_powersave(dev);
@@ -549,6 +554,8 @@
 	/* Spin waiting for the device to ack the reset request */
 	ret = wait_for((__raw_i915_read32(dev_priv, GEN6_GDRST) & GEN6_GRDOM_FULL) == 0, 500);
 
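+	/* Forcewake state does not survive the reset; clear it before restoring below. */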
+	intel_uncore_forcewake_reset(dev);
+
 	/* If reset with a user forcewake, try to restore, otherwise turn it off */
 	if (dev_priv->uncore.forcewake_count)
 		dev_priv->uncore.funcs.force_wake_get(dev_priv);
diff --git a/drivers/gpu/drm/nouveau/nouveau_drm.c b/drivers/gpu/drm/nouveau/nouveau_drm.c
index 8863644..e893c53 100644
--- a/drivers/gpu/drm/nouveau/nouveau_drm.c
+++ b/drivers/gpu/drm/nouveau/nouveau_drm.c
@@ -636,7 +636,8 @@
 		nouveau_fbcon_set_suspend(drm_dev, 0);
 
 	nouveau_fbcon_zfill_all(drm_dev);
-	nouveau_display_resume(drm_dev);
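+	/* Only resume the display when some CRTCs were set up. */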
+	if (drm_dev->mode_config.num_crtc)
+		nouveau_display_resume(drm_dev);
 	nv_suspend_set_printk_level(NV_DBG_DEBUG);
 	return 0;
 }
@@ -671,7 +672,8 @@
 	if (drm_dev->mode_config.num_crtc)
 		nouveau_fbcon_set_suspend(drm_dev, 0);
 	nouveau_fbcon_zfill_all(drm_dev);
-	nouveau_display_resume(drm_dev);
+	if (drm_dev->mode_config.num_crtc)
+		nouveau_display_resume(drm_dev);
 	nv_suspend_set_printk_level(NV_DBG_DEBUG);
 	return 0;
 }
@@ -906,7 +908,8 @@
 	pci_set_master(pdev);
 
 	ret = nouveau_do_resume(drm_dev);
-	nouveau_display_resume(drm_dev);
+	if (drm_dev->mode_config.num_crtc)
+		nouveau_display_resume(drm_dev);
 	drm_kms_helper_poll_enable(drm_dev);
 	/* do magic */
 	nv_mask(device, 0x88488, (1 << 25), (1 << 25));
diff --git a/drivers/gpu/drm/radeon/atombios_encoders.c b/drivers/gpu/drm/radeon/atombios_encoders.c
index dfac796..32923d2 100644
--- a/drivers/gpu/drm/radeon/atombios_encoders.c
+++ b/drivers/gpu/drm/radeon/atombios_encoders.c
@@ -707,8 +707,9 @@
 	switch (connector->connector_type) {
 	case DRM_MODE_CONNECTOR_DVII:
 	case DRM_MODE_CONNECTOR_HDMIB: /* HDMI-B is basically DL-DVI; analog works fine */
-		if (drm_detect_hdmi_monitor(radeon_connector->edid) &&
-		    radeon_audio)
+		if ((radeon_connector->audio == RADEON_AUDIO_ENABLE) ||
+		    (drm_detect_hdmi_monitor(radeon_connector->edid) &&
+		     (radeon_connector->audio == RADEON_AUDIO_AUTO)))
 			return ATOM_ENCODER_MODE_HDMI;
 		else if (radeon_connector->use_digital)
 			return ATOM_ENCODER_MODE_DVI;
@@ -718,8 +719,9 @@
 	case DRM_MODE_CONNECTOR_DVID:
 	case DRM_MODE_CONNECTOR_HDMIA:
 	default:
-		if (drm_detect_hdmi_monitor(radeon_connector->edid) &&
-		    radeon_audio)
+		if ((radeon_connector->audio == RADEON_AUDIO_ENABLE) ||
+		    (drm_detect_hdmi_monitor(radeon_connector->edid) &&
+		     (radeon_connector->audio == RADEON_AUDIO_AUTO)))
 			return ATOM_ENCODER_MODE_HDMI;
 		else
 			return ATOM_ENCODER_MODE_DVI;
@@ -732,8 +734,9 @@
 		if ((dig_connector->dp_sink_type == CONNECTOR_OBJECT_ID_DISPLAYPORT) ||
 		    (dig_connector->dp_sink_type == CONNECTOR_OBJECT_ID_eDP))
 			return ATOM_ENCODER_MODE_DP;
-		else if (drm_detect_hdmi_monitor(radeon_connector->edid) &&
-			 radeon_audio)
+		else if ((radeon_connector->audio == RADEON_AUDIO_ENABLE) ||
+			 (drm_detect_hdmi_monitor(radeon_connector->edid) &&
+			  (radeon_connector->audio == RADEON_AUDIO_AUTO)))
 			return ATOM_ENCODER_MODE_HDMI;
 		else
 			return ATOM_ENCODER_MODE_DVI;
@@ -1647,8 +1650,12 @@
 			atombios_dig_encoder_setup(encoder, ATOM_ENABLE, 0);
 			atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_SETUP, 0, 0);
 			atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_ENABLE, 0, 0);
-			/* some early dce3.2 boards have a bug in their transmitter control table */
-			if ((rdev->family != CHIP_RV710) && (rdev->family != CHIP_RV730))
+			/* some dce3.x boards have a bug in their transmitter control table.
+			 * ACTION_ENABLE_OUTPUT can probably be dropped since ACTION_ENABLE
+			 * does the same thing and more.
+			 */
+			if ((rdev->family != CHIP_RV710) && (rdev->family != CHIP_RV730) &&
+			    (rdev->family != CHIP_RS880))
 				atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_ENABLE_OUTPUT, 0, 0);
 		}
 		if (ENCODER_MODE_IS_DP(atombios_get_encoder_mode(encoder)) && connector) {
diff --git a/drivers/gpu/drm/radeon/btc_dpm.c b/drivers/gpu/drm/radeon/btc_dpm.c
index 084e694..05ff315 100644
--- a/drivers/gpu/drm/radeon/btc_dpm.c
+++ b/drivers/gpu/drm/radeon/btc_dpm.c
@@ -2340,12 +2340,6 @@
 		return ret;
 	}
 
-	ret = rv770_dpm_force_performance_level(rdev, RADEON_DPM_FORCED_LEVEL_AUTO);
-	if (ret) {
-		DRM_ERROR("rv770_dpm_force_performance_level failed\n");
-		return ret;
-	}
-
 	return 0;
 }
 
diff --git a/drivers/gpu/drm/radeon/ci_dpm.c b/drivers/gpu/drm/radeon/ci_dpm.c
index 916630f..8996274 100644
--- a/drivers/gpu/drm/radeon/ci_dpm.c
+++ b/drivers/gpu/drm/radeon/ci_dpm.c
@@ -4208,6 +4208,7 @@
 	pi->pspp_notify_required = false;
 	if (target_link_speed > current_link_speed) {
 		switch (target_link_speed) {
+#ifdef CONFIG_ACPI
 		case RADEON_PCIE_GEN3:
 			if (radeon_acpi_pcie_performance_request(rdev, PCIE_PERF_REQ_PECI_GEN3, false) == 0)
 				break;
@@ -4217,6 +4218,7 @@
 		case RADEON_PCIE_GEN2:
 			if (radeon_acpi_pcie_performance_request(rdev, PCIE_PERF_REQ_PECI_GEN2, false) == 0)
 				break;
+#endif
 		default:
 			pi->force_pcie_gen = ci_get_current_pcie_speed(rdev);
 			break;
@@ -4248,7 +4250,9 @@
 		    (ci_get_current_pcie_speed(rdev) > 0))
 			return;
 
+#ifdef CONFIG_ACPI
 		radeon_acpi_pcie_performance_request(rdev, request, false);
+#endif
 	}
 }
 
@@ -4744,12 +4748,6 @@
 	if (pi->pcie_performance_request)
 		ci_notify_link_speed_change_after_state_change(rdev, new_ps, old_ps);
 
-	ret = ci_dpm_force_performance_level(rdev, RADEON_DPM_FORCED_LEVEL_AUTO);
-	if (ret) {
-		DRM_ERROR("ci_dpm_force_performance_level failed\n");
-		return ret;
-	}
-
 	cik_update_cg(rdev, (RADEON_CG_BLOCK_GFX |
 			     RADEON_CG_BLOCK_MC |
 			     RADEON_CG_BLOCK_SDMA |
diff --git a/drivers/gpu/drm/radeon/ci_smc.c b/drivers/gpu/drm/radeon/ci_smc.c
index 53b43dd..252e10a 100644
--- a/drivers/gpu/drm/radeon/ci_smc.c
+++ b/drivers/gpu/drm/radeon/ci_smc.c
@@ -47,10 +47,11 @@
 			 u32 smc_start_address,
 			 const u8 *src, u32 byte_count, u32 limit)
 {
+	unsigned long flags;
 	u32 data, original_data;
 	u32 addr;
 	u32 extra_shift;
-	int ret;
+	int ret = 0;
 
 	if (smc_start_address & 3)
 		return -EINVAL;
@@ -59,13 +60,14 @@
 
 	addr = smc_start_address;
 
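+	/* Serialize access to the indexed SMC_IND_INDEX_0/SMC_IND_DATA_0
+	 * register pair for the duration of the transfer.
+	 */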
+	spin_lock_irqsave(&rdev->smc_idx_lock, flags);
 	while (byte_count >= 4) {
 		/* SMC address space is BE */
 		data = (src[0] << 24) | (src[1] << 16) | (src[2] << 8) | src[3];
 
 		ret = ci_set_smc_sram_address(rdev, addr, limit);
 		if (ret)
-			return ret;
+			goto done;
 
 		WREG32(SMC_IND_DATA_0, data);
 
@@ -80,7 +82,7 @@
 
 		ret = ci_set_smc_sram_address(rdev, addr, limit);
 		if (ret)
-			return ret;
+			goto done;
 
 		original_data = RREG32(SMC_IND_DATA_0);
 
@@ -97,11 +99,15 @@
 
 		ret = ci_set_smc_sram_address(rdev, addr, limit);
 		if (ret)
-			return ret;
+			goto done;
 
 		WREG32(SMC_IND_DATA_0, data);
 	}
-	return 0;
+
+done:
+	spin_unlock_irqrestore(&rdev->smc_idx_lock, flags);
+
+	return ret;
 }
 
 void ci_start_smc(struct radeon_device *rdev)
@@ -197,6 +203,7 @@
 
 int ci_load_smc_ucode(struct radeon_device *rdev, u32 limit)
 {
+	unsigned long flags;
 	u32 ucode_start_address;
 	u32 ucode_size;
 	const u8 *src;
@@ -219,6 +226,7 @@
 		return -EINVAL;
 
 	src = (const u8 *)rdev->smc_fw->data;
+	spin_lock_irqsave(&rdev->smc_idx_lock, flags);
 	WREG32(SMC_IND_INDEX_0, ucode_start_address);
 	WREG32_P(SMC_IND_ACCESS_CNTL, AUTO_INCREMENT_IND_0, ~AUTO_INCREMENT_IND_0);
 	while (ucode_size >= 4) {
@@ -231,6 +239,7 @@
 		ucode_size -= 4;
 	}
 	WREG32_P(SMC_IND_ACCESS_CNTL, 0, ~AUTO_INCREMENT_IND_0);
+	spin_unlock_irqrestore(&rdev->smc_idx_lock, flags);
 
 	return 0;
 }
@@ -238,25 +247,29 @@
 int ci_read_smc_sram_dword(struct radeon_device *rdev,
 			   u32 smc_address, u32 *value, u32 limit)
 {
+	unsigned long flags;
 	int ret;
 
+	spin_lock_irqsave(&rdev->smc_idx_lock, flags);
 	ret = ci_set_smc_sram_address(rdev, smc_address, limit);
-	if (ret)
-		return ret;
+	if (ret == 0)
+		*value = RREG32(SMC_IND_DATA_0);
+	spin_unlock_irqrestore(&rdev->smc_idx_lock, flags);
 
-	*value = RREG32(SMC_IND_DATA_0);
-	return 0;
+	return ret;
 }
 
 int ci_write_smc_sram_dword(struct radeon_device *rdev,
 			    u32 smc_address, u32 value, u32 limit)
 {
+	unsigned long flags;
 	int ret;
 
+	spin_lock_irqsave(&rdev->smc_idx_lock, flags);
 	ret = ci_set_smc_sram_address(rdev, smc_address, limit);
-	if (ret)
-		return ret;
+	if (ret == 0)
+		WREG32(SMC_IND_DATA_0, value);
+	spin_unlock_irqrestore(&rdev->smc_idx_lock, flags);
 
-	WREG32(SMC_IND_DATA_0, value);
-	return 0;
+	return ret;
 }
diff --git a/drivers/gpu/drm/radeon/cik.c b/drivers/gpu/drm/radeon/cik.c
index a3bba05..adbdb65 100644
--- a/drivers/gpu/drm/radeon/cik.c
+++ b/drivers/gpu/drm/radeon/cik.c
@@ -77,6 +77,8 @@
 static void cik_program_aspm(struct radeon_device *rdev);
 static void cik_init_pg(struct radeon_device *rdev);
 static void cik_init_cg(struct radeon_device *rdev);
+static void cik_enable_gui_idle_interrupt(struct radeon_device *rdev,
+					  bool enable);
 
 /* get temperature in millidegrees */
 int ci_get_temp(struct radeon_device *rdev)
@@ -120,20 +122,27 @@
  */
 u32 cik_pciep_rreg(struct radeon_device *rdev, u32 reg)
 {
+	unsigned long flags;
 	u32 r;
 
+	spin_lock_irqsave(&rdev->pciep_idx_lock, flags);
 	WREG32(PCIE_INDEX, reg);
 	(void)RREG32(PCIE_INDEX);
 	r = RREG32(PCIE_DATA);
+	spin_unlock_irqrestore(&rdev->pciep_idx_lock, flags);
 	return r;
 }
 
 void cik_pciep_wreg(struct radeon_device *rdev, u32 reg, u32 v)
 {
+	unsigned long flags;
+
+	spin_lock_irqsave(&rdev->pciep_idx_lock, flags);
 	WREG32(PCIE_INDEX, reg);
 	(void)RREG32(PCIE_INDEX);
 	WREG32(PCIE_DATA, v);
 	(void)RREG32(PCIE_DATA);
+	spin_unlock_irqrestore(&rdev->pciep_idx_lock, flags);
 }
 
 static const u32 spectre_rlc_save_restore_register_list[] =
@@ -2722,7 +2731,8 @@
 		} else if ((rdev->pdev->device == 0x1309) ||
 			   (rdev->pdev->device == 0x130A) ||
 			   (rdev->pdev->device == 0x130D) ||
-			   (rdev->pdev->device == 0x1313)) {
+			   (rdev->pdev->device == 0x1313) ||
+			   (rdev->pdev->device == 0x131D)) {
 			rdev->config.cik.max_cu_per_sh = 6;
 			rdev->config.cik.max_backends_per_se = 2;
 		} else if ((rdev->pdev->device == 0x1306) ||
@@ -4013,6 +4023,8 @@
 {
 	int r;
 
+	cik_enable_gui_idle_interrupt(rdev, false);
+
 	r = cik_cp_load_microcode(rdev);
 	if (r)
 		return r;
@@ -4024,6 +4036,8 @@
 	if (r)
 		return r;
 
+	cik_enable_gui_idle_interrupt(rdev, true);
+
 	return 0;
 }
 
@@ -5376,7 +5390,9 @@
 void cik_update_cg(struct radeon_device *rdev,
 		   u32 block, bool enable)
 {
+
 	if (block & RADEON_CG_BLOCK_GFX) {
+		cik_enable_gui_idle_interrupt(rdev, false);
 		/* order matters! */
 		if (enable) {
 			cik_enable_mgcg(rdev, true);
@@ -5385,6 +5401,7 @@
 			cik_enable_cgcg(rdev, false);
 			cik_enable_mgcg(rdev, false);
 		}
+		cik_enable_gui_idle_interrupt(rdev, true);
 	}
 
 	if (block & RADEON_CG_BLOCK_MC) {
@@ -5541,7 +5558,7 @@
 {
 	u32 data, orig;
 
-	if (enable && (rdev->pg_flags & RADEON_PG_SUPPORT_GFX_CG)) {
+	if (enable && (rdev->pg_flags & RADEON_PG_SUPPORT_GFX_PG)) {
 		orig = data = RREG32(RLC_PG_CNTL);
 		data |= GFX_PG_ENABLE;
 		if (orig != data)
@@ -5805,7 +5822,7 @@
 	if (rdev->pg_flags) {
 		cik_enable_sck_slowdown_on_pu(rdev, true);
 		cik_enable_sck_slowdown_on_pd(rdev, true);
-		if (rdev->pg_flags & RADEON_PG_SUPPORT_GFX_CG) {
+		if (rdev->pg_flags & RADEON_PG_SUPPORT_GFX_PG) {
 			cik_init_gfx_cgpg(rdev);
 			cik_enable_cp_pg(rdev, true);
 			cik_enable_gds_pg(rdev, true);
@@ -5819,7 +5836,7 @@
 {
 	if (rdev->pg_flags) {
 		cik_update_gfx_pg(rdev, false);
-		if (rdev->pg_flags & RADEON_PG_SUPPORT_GFX_CG) {
+		if (rdev->pg_flags & RADEON_PG_SUPPORT_GFX_PG) {
 			cik_enable_cp_pg(rdev, false);
 			cik_enable_gds_pg(rdev, false);
 		}
@@ -5895,7 +5912,9 @@
 	u32 tmp;
 
 	/* gfx ring */
-	WREG32(CP_INT_CNTL_RING0, CNTX_BUSY_INT_ENABLE | CNTX_EMPTY_INT_ENABLE);
+	tmp = RREG32(CP_INT_CNTL_RING0) &
+		(CNTX_BUSY_INT_ENABLE | CNTX_EMPTY_INT_ENABLE);
+	WREG32(CP_INT_CNTL_RING0, tmp);
 	/* sdma */
 	tmp = RREG32(SDMA0_CNTL + SDMA0_REGISTER_OFFSET) & ~TRAP_ENABLE;
 	WREG32(SDMA0_CNTL + SDMA0_REGISTER_OFFSET, tmp);
@@ -6036,8 +6055,7 @@
  */
 int cik_irq_set(struct radeon_device *rdev)
 {
-	u32 cp_int_cntl = CNTX_BUSY_INT_ENABLE | CNTX_EMPTY_INT_ENABLE |
-		PRIV_INSTR_INT_ENABLE | PRIV_REG_INT_ENABLE;
+	u32 cp_int_cntl;
 	u32 cp_m1p0, cp_m1p1, cp_m1p2, cp_m1p3;
 	u32 cp_m2p0, cp_m2p1, cp_m2p2, cp_m2p3;
 	u32 crtc1 = 0, crtc2 = 0, crtc3 = 0, crtc4 = 0, crtc5 = 0, crtc6 = 0;
@@ -6058,6 +6076,10 @@
 		return 0;
 	}
 
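+	/* Keep the currently programmed context busy/empty interrupt enables
+	 * instead of unconditionally setting them.
+	 */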
+	cp_int_cntl = RREG32(CP_INT_CNTL_RING0) &
+		(CNTX_BUSY_INT_ENABLE | CNTX_EMPTY_INT_ENABLE);
+	cp_int_cntl |= PRIV_INSTR_INT_ENABLE | PRIV_REG_INT_ENABLE;
+
 	hpd1 = RREG32(DC_HPD1_INT_CONTROL) & ~DC_HPDx_INT_EN;
 	hpd2 = RREG32(DC_HPD2_INT_CONTROL) & ~DC_HPDx_INT_EN;
 	hpd3 = RREG32(DC_HPD3_INT_CONTROL) & ~DC_HPDx_INT_EN;
diff --git a/drivers/gpu/drm/radeon/cypress_dpm.c b/drivers/gpu/drm/radeon/cypress_dpm.c
index 95a66db..91bb470 100644
--- a/drivers/gpu/drm/radeon/cypress_dpm.c
+++ b/drivers/gpu/drm/radeon/cypress_dpm.c
@@ -2014,12 +2014,6 @@
 	if (eg_pi->pcie_performance_request)
 		cypress_notify_link_speed_change_after_state_change(rdev, new_ps, old_ps);
 
-	ret = rv770_dpm_force_performance_level(rdev, RADEON_DPM_FORCED_LEVEL_AUTO);
-	if (ret) {
-		DRM_ERROR("rv770_dpm_force_performance_level failed\n");
-		return ret;
-	}
-
 	return 0;
 }
 
diff --git a/drivers/gpu/drm/radeon/dce6_afmt.c b/drivers/gpu/drm/radeon/dce6_afmt.c
index 8953255e..85a69d2 100644
--- a/drivers/gpu/drm/radeon/dce6_afmt.c
+++ b/drivers/gpu/drm/radeon/dce6_afmt.c
@@ -28,22 +28,30 @@
 static u32 dce6_endpoint_rreg(struct radeon_device *rdev,
 			      u32 block_offset, u32 reg)
 {
+	unsigned long flags;
 	u32 r;
 
+	spin_lock_irqsave(&rdev->end_idx_lock, flags);
 	WREG32(AZ_F0_CODEC_ENDPOINT_INDEX + block_offset, reg);
 	r = RREG32(AZ_F0_CODEC_ENDPOINT_DATA + block_offset);
+	spin_unlock_irqrestore(&rdev->end_idx_lock, flags);
+
 	return r;
 }
 
 static void dce6_endpoint_wreg(struct radeon_device *rdev,
 			       u32 block_offset, u32 reg, u32 v)
 {
+	unsigned long flags;
+
+	spin_lock_irqsave(&rdev->end_idx_lock, flags);
 	if (ASIC_IS_DCE8(rdev))
 		WREG32(AZ_F0_CODEC_ENDPOINT_INDEX + block_offset, reg);
 	else
 		WREG32(AZ_F0_CODEC_ENDPOINT_INDEX + block_offset,
 		       AZ_ENDPOINT_REG_WRITE_EN | AZ_ENDPOINT_REG_INDEX(reg));
 	WREG32(AZ_F0_CODEC_ENDPOINT_DATA + block_offset, v);
+	spin_unlock_irqrestore(&rdev->end_idx_lock, flags);
 }
 
 #define RREG32_ENDPOINT(block, reg) dce6_endpoint_rreg(rdev, (block), (reg))
@@ -86,12 +94,12 @@
 	struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
 	struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
 	u32 offset = dig->afmt->offset;
-	u32 id = dig->afmt->pin->id;
 
 	if (!dig->afmt->pin)
 		return;
 
-	WREG32(AFMT_AUDIO_SRC_CONTROL + offset, AFMT_AUDIO_SRC_SELECT(id));
+	WREG32(AFMT_AUDIO_SRC_CONTROL + offset,
+	       AFMT_AUDIO_SRC_SELECT(dig->afmt->pin->id));
 }
 
 void dce6_afmt_write_speaker_allocation(struct drm_encoder *encoder)
diff --git a/drivers/gpu/drm/radeon/kv_dpm.c b/drivers/gpu/drm/radeon/kv_dpm.c
index ecd6080..7139906 100644
--- a/drivers/gpu/drm/radeon/kv_dpm.c
+++ b/drivers/gpu/drm/radeon/kv_dpm.c
@@ -40,6 +40,7 @@
 static void kv_enable_new_levels(struct radeon_device *rdev);
 static void kv_program_nbps_index_settings(struct radeon_device *rdev,
 					   struct radeon_ps *new_rps);
+static int kv_set_enabled_level(struct radeon_device *rdev, u32 level);
 static int kv_set_enabled_levels(struct radeon_device *rdev);
 static int kv_force_dpm_highest(struct radeon_device *rdev);
 static int kv_force_dpm_lowest(struct radeon_device *rdev);
@@ -519,7 +520,7 @@
 
 static void kv_program_vc(struct radeon_device *rdev)
 {
-	WREG32_SMC(CG_FTV_0, 0x3FFFC000);
+	WREG32_SMC(CG_FTV_0, 0x3FFFC100);
 }
 
 static void kv_clear_vc(struct radeon_device *rdev)
@@ -638,7 +639,10 @@
 
 static int kv_unforce_levels(struct radeon_device *rdev)
 {
-	return kv_notify_message_to_smu(rdev, PPSMC_MSG_NoForcedLevel);
+	if (rdev->family == CHIP_KABINI)
+		return kv_notify_message_to_smu(rdev, PPSMC_MSG_NoForcedLevel);
+	else
+		return kv_set_enabled_levels(rdev);
 }
 
 static int kv_update_sclk_t(struct radeon_device *rdev)
@@ -667,9 +671,8 @@
 		&rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk;
 
 	if (table && table->count) {
-		for (i = pi->graphics_dpm_level_count - 1; i >= 0; i--) {
-			if ((table->entries[i].clk == pi->boot_pl.sclk) ||
-			    (i == 0))
+		for (i = pi->graphics_dpm_level_count - 1; i > 0; i--) {
+			if (table->entries[i].clk == pi->boot_pl.sclk)
 				break;
 		}
 
@@ -682,9 +685,8 @@
 		if (table->num_max_dpm_entries == 0)
 			return -EINVAL;
 
-		for (i = pi->graphics_dpm_level_count - 1; i >= 0; i--) {
-			if ((table->entries[i].sclk_frequency == pi->boot_pl.sclk) ||
-			    (i == 0))
+		for (i = pi->graphics_dpm_level_count - 1; i > 0; i--) {
+			if (table->entries[i].sclk_frequency == pi->boot_pl.sclk)
 				break;
 		}
 
@@ -1078,6 +1080,13 @@
 					PPSMC_MSG_EnableULV : PPSMC_MSG_DisableULV);
 }
 
+static void kv_reset_acp_boot_level(struct radeon_device *rdev)
+{
+	struct kv_power_info *pi = kv_get_pi(rdev);
+
+	pi->acp_boot_level = 0xff;
+}
+
 static void kv_update_current_ps(struct radeon_device *rdev,
 				 struct radeon_ps *rps)
 {
@@ -1100,6 +1109,18 @@
 	pi->requested_rps.ps_priv = &pi->requested_ps;
 }
 
+void kv_dpm_enable_bapm(struct radeon_device *rdev, bool enable)
+{
+	struct kv_power_info *pi = kv_get_pi(rdev);
+	int ret;
+
+	if (pi->bapm_enable) {
+		ret = kv_smc_bapm_enable(rdev, enable);
+		if (ret)
+			DRM_ERROR("kv_smc_bapm_enable failed\n");
+	}
+}
+
 int kv_dpm_enable(struct radeon_device *rdev)
 {
 	struct kv_power_info *pi = kv_get_pi(rdev);
@@ -1192,6 +1213,8 @@
 		return ret;
 	}
 
+	kv_reset_acp_boot_level(rdev);
+
 	if (rdev->irq.installed &&
 	    r600_is_internal_thermal_sensor(rdev->pm.int_thermal_type)) {
 		ret = kv_set_thermal_temperature_range(rdev, R600_TEMP_RANGE_MIN, R600_TEMP_RANGE_MAX);
@@ -1203,6 +1226,12 @@
 		radeon_irq_set(rdev);
 	}
 
+	ret = kv_smc_bapm_enable(rdev, false);
+	if (ret) {
+		DRM_ERROR("kv_smc_bapm_enable failed\n");
+		return ret;
+	}
+
 	/* powerdown unused blocks for now */
 	kv_dpm_powergate_acp(rdev, true);
 	kv_dpm_powergate_samu(rdev, true);
@@ -1226,6 +1255,8 @@
 			     RADEON_CG_BLOCK_BIF |
 			     RADEON_CG_BLOCK_HDP), false);
 
+	kv_smc_bapm_enable(rdev, false);
+
 	/* powerup blocks */
 	kv_dpm_powergate_acp(rdev, false);
 	kv_dpm_powergate_samu(rdev, false);
@@ -1450,6 +1481,39 @@
 	return kv_enable_samu_dpm(rdev, !gate);
 }
 
+static u8 kv_get_acp_boot_level(struct radeon_device *rdev)
+{
+	u8 i;
+	struct radeon_clock_voltage_dependency_table *table =
+		&rdev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table;
+
+	for (i = 0; i < table->count; i++) {
+		if (table->entries[i].clk >= 0) /* XXX */
+			break;
+	}
+
+	if (i >= table->count)
+		i = table->count - 1;
+
+	return i;
+}
+
+static void kv_update_acp_boot_level(struct radeon_device *rdev)
+{
+	struct kv_power_info *pi = kv_get_pi(rdev);
+	u8 acp_boot_level;
+
+	if (!pi->caps_stable_p_state) {
+		acp_boot_level = kv_get_acp_boot_level(rdev);
+		if (acp_boot_level != pi->acp_boot_level) {
+			pi->acp_boot_level = acp_boot_level;
+			kv_send_msg_to_smc_with_parameter(rdev,
+							  PPSMC_MSG_ACPDPM_SetEnabledMask,
+							  (1 << pi->acp_boot_level));
+		}
+	}
+}
+
 static int kv_update_acp_dpm(struct radeon_device *rdev, bool gate)
 {
 	struct kv_power_info *pi = kv_get_pi(rdev);
@@ -1461,7 +1525,7 @@
 		if (pi->caps_stable_p_state)
 			pi->acp_boot_level = table->count - 1;
 		else
-			pi->acp_boot_level = 0;
+			pi->acp_boot_level = kv_get_acp_boot_level(rdev);
 
 		ret = kv_copy_bytes_to_smc(rdev,
 					   pi->dpm_table_start +
@@ -1588,13 +1652,11 @@
 			}
 		}
 
-		for (i = pi->graphics_dpm_level_count - 1; i >= 0; i--) {
-			if ((table->entries[i].clk <= new_ps->levels[new_ps->num_levels -1].sclk) ||
-			    (i == 0)) {
-				pi->highest_valid = i;
+		for (i = pi->graphics_dpm_level_count - 1; i > 0; i--) {
+			if (table->entries[i].clk <= new_ps->levels[new_ps->num_levels - 1].sclk)
 				break;
-			}
 		}
+		pi->highest_valid = i;
 
 		if (pi->lowest_valid > pi->highest_valid) {
 			if ((new_ps->levels[0].sclk - table->entries[pi->highest_valid].clk) >
@@ -1615,14 +1677,12 @@
 			}
 		}
 
-		for (i = pi->graphics_dpm_level_count - 1; i >= 0; i--) {
+		for (i = pi->graphics_dpm_level_count - 1; i > 0; i--) {
 			if (table->entries[i].sclk_frequency <=
-			    new_ps->levels[new_ps->num_levels - 1].sclk ||
-			    i == 0) {
-				pi->highest_valid = i;
+			    new_ps->levels[new_ps->num_levels - 1].sclk)
 				break;
-			}
 		}
+		pi->highest_valid = i;
 
 		if (pi->lowest_valid > pi->highest_valid) {
 			if ((new_ps->levels[0].sclk -
@@ -1724,6 +1784,14 @@
 			     RADEON_CG_BLOCK_BIF |
 			     RADEON_CG_BLOCK_HDP), false);
 
+	if (pi->bapm_enable) {
+		ret = kv_smc_bapm_enable(rdev, rdev->pm.dpm.ac_power);
+		if (ret) {
+			DRM_ERROR("kv_smc_bapm_enable failed\n");
+			return ret;
+		}
+	}
+
 	if (rdev->family == CHIP_KABINI) {
 		if (pi->enable_dpm) {
 			kv_set_valid_clock_range(rdev, new_ps);
@@ -1775,6 +1843,7 @@
 				return ret;
 			}
 #endif
+			kv_update_acp_boot_level(rdev);
 			kv_update_sclk_t(rdev);
 			kv_enable_nb_dpm(rdev);
 		}
@@ -1785,7 +1854,6 @@
 			     RADEON_CG_BLOCK_BIF |
 			     RADEON_CG_BLOCK_HDP), true);
 
-	rdev->pm.dpm.forced_level = RADEON_DPM_FORCED_LEVEL_AUTO;
 	return 0;
 }
 
@@ -1806,12 +1874,23 @@
 
 void kv_dpm_reset_asic(struct radeon_device *rdev)
 {
-	kv_force_lowest_valid(rdev);
-	kv_init_graphics_levels(rdev);
-	kv_program_bootup_state(rdev);
-	kv_upload_dpm_settings(rdev);
-	kv_force_lowest_valid(rdev);
-	kv_unforce_levels(rdev);
+	struct kv_power_info *pi = kv_get_pi(rdev);
+
+	if (rdev->family == CHIP_KABINI) {
+		kv_force_lowest_valid(rdev);
+		kv_init_graphics_levels(rdev);
+		kv_program_bootup_state(rdev);
+		kv_upload_dpm_settings(rdev);
+		kv_force_lowest_valid(rdev);
+		kv_unforce_levels(rdev);
+	} else {
+		kv_init_graphics_levels(rdev);
+		kv_program_bootup_state(rdev);
+		kv_freeze_sclk_dpm(rdev, true);
+		kv_upload_dpm_settings(rdev);
+		kv_freeze_sclk_dpm(rdev, false);
+		kv_set_enabled_level(rdev, pi->graphics_boot_level);
+	}
 }
 
 //XXX use sumo_dpm_display_configuration_changed
@@ -1871,12 +1950,15 @@
 	if (ret)
 		return ret;
 
-	for (i = SMU7_MAX_LEVELS_GRAPHICS - 1; i >= 0; i--) {
+	for (i = SMU7_MAX_LEVELS_GRAPHICS - 1; i > 0; i--) {
 		if (enable_mask & (1 << i))
 			break;
 	}
 
-	return kv_send_msg_to_smc_with_parameter(rdev, PPSMC_MSG_DPM_ForceState, i);
+	if (rdev->family == CHIP_KABINI)
+		return kv_send_msg_to_smc_with_parameter(rdev, PPSMC_MSG_DPM_ForceState, i);
+	else
+		return kv_set_enabled_level(rdev, i);
 }
 
 static int kv_force_dpm_lowest(struct radeon_device *rdev)
@@ -1893,7 +1975,10 @@
 			break;
 	}
 
-	return kv_send_msg_to_smc_with_parameter(rdev, PPSMC_MSG_DPM_ForceState, i);
+	if (rdev->family == CHIP_KABINI)
+		return kv_send_msg_to_smc_with_parameter(rdev, PPSMC_MSG_DPM_ForceState, i);
+	else
+		return kv_set_enabled_level(rdev, i);
 }
 
 static u8 kv_get_sleep_divider_id_from_clock(struct radeon_device *rdev,
@@ -1911,9 +1996,9 @@
 	if (!pi->caps_sclk_ds)
 		return 0;
 
-	for (i = KV_MAX_DEEPSLEEP_DIVIDER_ID; i <= 0; i--) {
+	for (i = KV_MAX_DEEPSLEEP_DIVIDER_ID; i > 0; i--) {
 		temp = sclk / sumo_get_sleep_divider_from_id(i);
-		if ((temp >= min) || (i == 0))
+		if (temp >= min)
 			break;
 	}
 
@@ -2039,12 +2124,12 @@
 		ps->dpmx_nb_ps_lo = 0x1;
 		ps->dpmx_nb_ps_hi = 0x0;
 	} else {
-		ps->dpm0_pg_nb_ps_lo = 0x1;
+		ps->dpm0_pg_nb_ps_lo = 0x3;
 		ps->dpm0_pg_nb_ps_hi = 0x0;
-		ps->dpmx_nb_ps_lo = 0x2;
-		ps->dpmx_nb_ps_hi = 0x1;
+		ps->dpmx_nb_ps_lo = 0x3;
+		ps->dpmx_nb_ps_hi = 0x0;
 
-		if (pi->sys_info.nb_dpm_enable && pi->battery_state) {
+		if (pi->sys_info.nb_dpm_enable) {
 			force_high = (mclk >= pi->sys_info.nbp_memory_clock[3]) ||
 				pi->video_start || (rdev->pm.dpm.new_active_crtc_count >= 3) ||
 				pi->disable_nb_ps3_in_battery;
@@ -2210,6 +2295,15 @@
 	}
 }
 
+static int kv_set_enabled_level(struct radeon_device *rdev, u32 level)
+{
+	u32 new_mask = (1 << level);
+
+	return kv_send_msg_to_smc_with_parameter(rdev,
+						 PPSMC_MSG_SCLKDPM_SetEnabledMask,
+						 new_mask);
+}
+
 static int kv_set_enabled_levels(struct radeon_device *rdev)
 {
 	struct kv_power_info *pi = kv_get_pi(rdev);
diff --git a/drivers/gpu/drm/radeon/kv_dpm.h b/drivers/gpu/drm/radeon/kv_dpm.h
index 32bb079..8cef752 100644
--- a/drivers/gpu/drm/radeon/kv_dpm.h
+++ b/drivers/gpu/drm/radeon/kv_dpm.h
@@ -192,6 +192,7 @@
 int kv_read_smc_sram_dword(struct radeon_device *rdev, u32 smc_address,
 			   u32 *value, u32 limit);
 int kv_smc_dpm_enable(struct radeon_device *rdev, bool enable);
+int kv_smc_bapm_enable(struct radeon_device *rdev, bool enable);
 int kv_copy_bytes_to_smc(struct radeon_device *rdev,
 			 u32 smc_start_address,
 			 const u8 *src, u32 byte_count, u32 limit);
diff --git a/drivers/gpu/drm/radeon/kv_smc.c b/drivers/gpu/drm/radeon/kv_smc.c
index 34a226d..0000b59 100644
--- a/drivers/gpu/drm/radeon/kv_smc.c
+++ b/drivers/gpu/drm/radeon/kv_smc.c
@@ -107,6 +107,14 @@
 		return kv_notify_message_to_smu(rdev, PPSMC_MSG_DPM_Disable);
 }
 
+int kv_smc_bapm_enable(struct radeon_device *rdev, bool enable)
+{
+	if (enable)
+		return kv_notify_message_to_smu(rdev, PPSMC_MSG_EnableBAPM);
+	else
+		return kv_notify_message_to_smu(rdev, PPSMC_MSG_DisableBAPM);
+}
+
 int kv_copy_bytes_to_smc(struct radeon_device *rdev,
 			 u32 smc_start_address,
 			 const u8 *src, u32 byte_count, u32 limit)
diff --git a/drivers/gpu/drm/radeon/ni_dpm.c b/drivers/gpu/drm/radeon/ni_dpm.c
index f7b625c..6c398a4 100644
--- a/drivers/gpu/drm/radeon/ni_dpm.c
+++ b/drivers/gpu/drm/radeon/ni_dpm.c
@@ -3865,12 +3865,6 @@
 		return ret;
 	}
 
-	ret = ni_dpm_force_performance_level(rdev, RADEON_DPM_FORCED_LEVEL_AUTO);
-	if (ret) {
-		DRM_ERROR("ni_dpm_force_performance_level failed\n");
-		return ret;
-	}
-
 	return 0;
 }
 
diff --git a/drivers/gpu/drm/radeon/ppsmc.h b/drivers/gpu/drm/radeon/ppsmc.h
index 6828428..5670b82 100644
--- a/drivers/gpu/drm/radeon/ppsmc.h
+++ b/drivers/gpu/drm/radeon/ppsmc.h
@@ -163,6 +163,8 @@
 #define PPSMC_MSG_VCEPowerON                ((uint32_t) 0x10f)
 #define PPSMC_MSG_DCE_RemoveVoltageAdjustment   ((uint32_t) 0x11d)
 #define PPSMC_MSG_DCE_AllowVoltageAdjustment    ((uint32_t) 0x11e)
+#define PPSMC_MSG_EnableBAPM                ((uint32_t) 0x120)
+#define PPSMC_MSG_DisableBAPM               ((uint32_t) 0x121)
 #define PPSMC_MSG_UVD_DPM_Config            ((uint32_t) 0x124)
 
 
diff --git a/drivers/gpu/drm/radeon/r100.c b/drivers/gpu/drm/radeon/r100.c
index 9fc61dd..2417571 100644
--- a/drivers/gpu/drm/radeon/r100.c
+++ b/drivers/gpu/drm/radeon/r100.c
@@ -2853,21 +2853,28 @@
 
 uint32_t r100_pll_rreg(struct radeon_device *rdev, uint32_t reg)
 {
+	unsigned long flags;
 	uint32_t data;
 
+	spin_lock_irqsave(&rdev->pll_idx_lock, flags);
 	WREG8(RADEON_CLOCK_CNTL_INDEX, reg & 0x3f);
 	r100_pll_errata_after_index(rdev);
 	data = RREG32(RADEON_CLOCK_CNTL_DATA);
 	r100_pll_errata_after_data(rdev);
+	spin_unlock_irqrestore(&rdev->pll_idx_lock, flags);
 	return data;
 }
 
 void r100_pll_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v)
 {
+	unsigned long flags;
+
+	spin_lock_irqsave(&rdev->pll_idx_lock, flags);
 	WREG8(RADEON_CLOCK_CNTL_INDEX, ((reg & 0x3f) | RADEON_PLL_WR_EN));
 	r100_pll_errata_after_index(rdev);
 	WREG32(RADEON_CLOCK_CNTL_DATA, v);
 	r100_pll_errata_after_data(rdev);
+	spin_unlock_irqrestore(&rdev->pll_idx_lock, flags);
 }
 
 static void r100_set_safe_registers(struct radeon_device *rdev)
diff --git a/drivers/gpu/drm/radeon/r420.c b/drivers/gpu/drm/radeon/r420.c
index 4e796ec..6edf2b3 100644
--- a/drivers/gpu/drm/radeon/r420.c
+++ b/drivers/gpu/drm/radeon/r420.c
@@ -160,18 +160,25 @@
 
 u32 r420_mc_rreg(struct radeon_device *rdev, u32 reg)
 {
+	unsigned long flags;
 	u32 r;
 
+	spin_lock_irqsave(&rdev->mc_idx_lock, flags);
 	WREG32(R_0001F8_MC_IND_INDEX, S_0001F8_MC_IND_ADDR(reg));
 	r = RREG32(R_0001FC_MC_IND_DATA);
+	spin_unlock_irqrestore(&rdev->mc_idx_lock, flags);
 	return r;
 }
 
 void r420_mc_wreg(struct radeon_device *rdev, u32 reg, u32 v)
 {
+	unsigned long flags;
+
+	spin_lock_irqsave(&rdev->mc_idx_lock, flags);
 	WREG32(R_0001F8_MC_IND_INDEX, S_0001F8_MC_IND_ADDR(reg) |
 		S_0001F8_MC_IND_WR_EN(1));
 	WREG32(R_0001FC_MC_IND_DATA, v);
+	spin_unlock_irqrestore(&rdev->mc_idx_lock, flags);
 }
 
 static void r420_debugfs(struct radeon_device *rdev)
diff --git a/drivers/gpu/drm/radeon/r600.c b/drivers/gpu/drm/radeon/r600.c
index ea4d3734..2a1b187 100644
--- a/drivers/gpu/drm/radeon/r600.c
+++ b/drivers/gpu/drm/radeon/r600.c
@@ -119,6 +119,11 @@
 	return rdev->clock.spll.reference_freq;
 }
 
+int r600_set_uvd_clocks(struct radeon_device *rdev, u32 vclk, u32 dclk)
+{
+	return 0;
+}
+
 /* get temperature in millidegrees */
 int rv6xx_get_temp(struct radeon_device *rdev)
 {
@@ -1045,20 +1050,27 @@
 
 uint32_t rs780_mc_rreg(struct radeon_device *rdev, uint32_t reg)
 {
+	unsigned long flags;
 	uint32_t r;
 
+	spin_lock_irqsave(&rdev->mc_idx_lock, flags);
 	WREG32(R_0028F8_MC_INDEX, S_0028F8_MC_IND_ADDR(reg));
 	r = RREG32(R_0028FC_MC_DATA);
 	WREG32(R_0028F8_MC_INDEX, ~C_0028F8_MC_IND_ADDR);
+	spin_unlock_irqrestore(&rdev->mc_idx_lock, flags);
 	return r;
 }
 
 void rs780_mc_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v)
 {
+	unsigned long flags;
+
+	spin_lock_irqsave(&rdev->mc_idx_lock, flags);
 	WREG32(R_0028F8_MC_INDEX, S_0028F8_MC_IND_ADDR(reg) |
 		S_0028F8_MC_IND_WR_EN(1));
 	WREG32(R_0028FC_MC_DATA, v);
 	WREG32(R_0028F8_MC_INDEX, 0x7F);
+	spin_unlock_irqrestore(&rdev->mc_idx_lock, flags);
 }
 
 static void r600_mc_program(struct radeon_device *rdev)
@@ -2092,20 +2104,27 @@
  */
 u32 r600_pciep_rreg(struct radeon_device *rdev, u32 reg)
 {
+	unsigned long flags;
 	u32 r;
 
+	spin_lock_irqsave(&rdev->pciep_idx_lock, flags);
 	WREG32(PCIE_PORT_INDEX, ((reg) & 0xff));
 	(void)RREG32(PCIE_PORT_INDEX);
 	r = RREG32(PCIE_PORT_DATA);
+	spin_unlock_irqrestore(&rdev->pciep_idx_lock, flags);
 	return r;
 }
 
 void r600_pciep_wreg(struct radeon_device *rdev, u32 reg, u32 v)
 {
+	unsigned long flags;
+
+	spin_lock_irqsave(&rdev->pciep_idx_lock, flags);
 	WREG32(PCIE_PORT_INDEX, ((reg) & 0xff));
 	(void)RREG32(PCIE_PORT_INDEX);
 	WREG32(PCIE_PORT_DATA, (v));
 	(void)RREG32(PCIE_PORT_DATA);
+	spin_unlock_irqrestore(&rdev->pciep_idx_lock, flags);
 }
 
 /*
diff --git a/drivers/gpu/drm/radeon/r600_dpm.c b/drivers/gpu/drm/radeon/r600_dpm.c
index fa0de46..e65f211 100644
--- a/drivers/gpu/drm/radeon/r600_dpm.c
+++ b/drivers/gpu/drm/radeon/r600_dpm.c
@@ -1219,30 +1219,20 @@
 
 void r600_free_extended_power_table(struct radeon_device *rdev)
 {
-	if (rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk.entries)
-		kfree(rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk.entries);
-	if (rdev->pm.dpm.dyn_state.vddci_dependency_on_mclk.entries)
-		kfree(rdev->pm.dpm.dyn_state.vddci_dependency_on_mclk.entries);
-	if (rdev->pm.dpm.dyn_state.vddc_dependency_on_mclk.entries)
-		kfree(rdev->pm.dpm.dyn_state.vddc_dependency_on_mclk.entries);
-	if (rdev->pm.dpm.dyn_state.mvdd_dependency_on_mclk.entries)
-		kfree(rdev->pm.dpm.dyn_state.mvdd_dependency_on_mclk.entries);
-	if (rdev->pm.dpm.dyn_state.cac_leakage_table.entries)
-		kfree(rdev->pm.dpm.dyn_state.cac_leakage_table.entries);
-	if (rdev->pm.dpm.dyn_state.phase_shedding_limits_table.entries)
-		kfree(rdev->pm.dpm.dyn_state.phase_shedding_limits_table.entries);
-	if (rdev->pm.dpm.dyn_state.ppm_table)
-		kfree(rdev->pm.dpm.dyn_state.ppm_table);
-	if (rdev->pm.dpm.dyn_state.cac_tdp_table)
-		kfree(rdev->pm.dpm.dyn_state.cac_tdp_table);
-	if (rdev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.entries)
-		kfree(rdev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.entries);
-	if (rdev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.entries)
-		kfree(rdev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.entries);
-	if (rdev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table.entries)
-		kfree(rdev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table.entries);
-	if (rdev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table.entries)
-		kfree(rdev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table.entries);
+	struct radeon_dpm_dynamic_state *dyn_state = &rdev->pm.dpm.dyn_state;
+
+	kfree(dyn_state->vddc_dependency_on_sclk.entries);
+	kfree(dyn_state->vddci_dependency_on_mclk.entries);
+	kfree(dyn_state->vddc_dependency_on_mclk.entries);
+	kfree(dyn_state->mvdd_dependency_on_mclk.entries);
+	kfree(dyn_state->cac_leakage_table.entries);
+	kfree(dyn_state->phase_shedding_limits_table.entries);
+	kfree(dyn_state->ppm_table);
+	kfree(dyn_state->cac_tdp_table);
+	kfree(dyn_state->vce_clock_voltage_dependency_table.entries);
+	kfree(dyn_state->uvd_clock_voltage_dependency_table.entries);
+	kfree(dyn_state->samu_clock_voltage_dependency_table.entries);
+	kfree(dyn_state->acp_clock_voltage_dependency_table.entries);
 }
 
 enum radeon_pcie_gen r600_get_pcie_gen_support(struct radeon_device *rdev,
diff --git a/drivers/gpu/drm/radeon/r600d.h b/drivers/gpu/drm/radeon/r600d.h
index 454f90a..e673fe2 100644
--- a/drivers/gpu/drm/radeon/r600d.h
+++ b/drivers/gpu/drm/radeon/r600d.h
@@ -1040,7 +1040,7 @@
 #       define HDMI0_AVI_INFO_CONT   (1 << 1)
 #       define HDMI0_AUDIO_INFO_SEND (1 << 4)
 #       define HDMI0_AUDIO_INFO_CONT (1 << 5)
-#       define HDMI0_AUDIO_INFO_SOURCE (1 << 6) /* 0 - sound block; 1 - hmdi regs */
+#       define HDMI0_AUDIO_INFO_SOURCE (1 << 6) /* 0 - sound block; 1 - hdmi regs */
 #       define HDMI0_AUDIO_INFO_UPDATE (1 << 7)
 #       define HDMI0_MPEG_INFO_SEND  (1 << 8)
 #       define HDMI0_MPEG_INFO_CONT  (1 << 9)
diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h
index ff8b564..a400ac1 100644
--- a/drivers/gpu/drm/radeon/radeon.h
+++ b/drivers/gpu/drm/radeon/radeon.h
@@ -181,7 +181,7 @@
 #define RADEON_CG_SUPPORT_HDP_MGCG		(1 << 16)
 
 /* PG flags */
-#define RADEON_PG_SUPPORT_GFX_CG		(1 << 0)
+#define RADEON_PG_SUPPORT_GFX_PG		(1 << 0)
 #define RADEON_PG_SUPPORT_GFX_SMG		(1 << 1)
 #define RADEON_PG_SUPPORT_GFX_DMG		(1 << 2)
 #define RADEON_PG_SUPPORT_UVD			(1 << 3)
@@ -1778,6 +1778,7 @@
 		int (*force_performance_level)(struct radeon_device *rdev, enum radeon_dpm_forced_level level);
 		bool (*vblank_too_short)(struct radeon_device *rdev);
 		void (*powergate_uvd)(struct radeon_device *rdev, bool gate);
+		void (*enable_bapm)(struct radeon_device *rdev, bool enable);
 	} dpm;
 	/* pageflipping */
 	struct {
@@ -2110,6 +2111,28 @@
 	resource_size_t			rmmio_size;
 	/* protects concurrent MM_INDEX/DATA based register access */
 	spinlock_t mmio_idx_lock;
+	/* protects concurrent SMC based register access */
+	spinlock_t smc_idx_lock;
+	/* protects concurrent PLL register access */
+	spinlock_t pll_idx_lock;
+	/* protects concurrent MC register access */
+	spinlock_t mc_idx_lock;
+	/* protects concurrent PCIE register access */
+	spinlock_t pcie_idx_lock;
+	/* protects concurrent PCIE_PORT register access */
+	spinlock_t pciep_idx_lock;
+	/* protects concurrent PIF register access */
+	spinlock_t pif_idx_lock;
+	/* protects concurrent CG register access */
+	spinlock_t cg_idx_lock;
+	/* protects concurrent UVD register access */
+	spinlock_t uvd_idx_lock;
+	/* protects concurrent RCU register access */
+	spinlock_t rcu_idx_lock;
+	/* protects concurrent DIDT register access */
+	spinlock_t didt_idx_lock;
+	/* protects concurrent ENDPOINT (audio) register access */
+	spinlock_t end_idx_lock;
 	void __iomem			*rmmio;
 	radeon_rreg_t			mc_rreg;
 	radeon_wreg_t			mc_wreg;
@@ -2277,123 +2300,179 @@
  */
 static inline uint32_t rv370_pcie_rreg(struct radeon_device *rdev, uint32_t reg)
 {
+	unsigned long flags;
 	uint32_t r;
 
+	spin_lock_irqsave(&rdev->pcie_idx_lock, flags);
 	WREG32(RADEON_PCIE_INDEX, ((reg) & rdev->pcie_reg_mask));
 	r = RREG32(RADEON_PCIE_DATA);
+	spin_unlock_irqrestore(&rdev->pcie_idx_lock, flags);
 	return r;
 }
 
 static inline void rv370_pcie_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v)
 {
+	unsigned long flags;
+
+	spin_lock_irqsave(&rdev->pcie_idx_lock, flags);
 	WREG32(RADEON_PCIE_INDEX, ((reg) & rdev->pcie_reg_mask));
 	WREG32(RADEON_PCIE_DATA, (v));
+	spin_unlock_irqrestore(&rdev->pcie_idx_lock, flags);
 }
 
 static inline u32 tn_smc_rreg(struct radeon_device *rdev, u32 reg)
 {
+	unsigned long flags;
 	u32 r;
 
+	spin_lock_irqsave(&rdev->smc_idx_lock, flags);
 	WREG32(TN_SMC_IND_INDEX_0, (reg));
 	r = RREG32(TN_SMC_IND_DATA_0);
+	spin_unlock_irqrestore(&rdev->smc_idx_lock, flags);
 	return r;
 }
 
 static inline void tn_smc_wreg(struct radeon_device *rdev, u32 reg, u32 v)
 {
+	unsigned long flags;
+
+	spin_lock_irqsave(&rdev->smc_idx_lock, flags);
 	WREG32(TN_SMC_IND_INDEX_0, (reg));
 	WREG32(TN_SMC_IND_DATA_0, (v));
+	spin_unlock_irqrestore(&rdev->smc_idx_lock, flags);
 }
 
 static inline u32 r600_rcu_rreg(struct radeon_device *rdev, u32 reg)
 {
+	unsigned long flags;
 	u32 r;
 
+	spin_lock_irqsave(&rdev->rcu_idx_lock, flags);
 	WREG32(R600_RCU_INDEX, ((reg) & 0x1fff));
 	r = RREG32(R600_RCU_DATA);
+	spin_unlock_irqrestore(&rdev->rcu_idx_lock, flags);
 	return r;
 }
 
 static inline void r600_rcu_wreg(struct radeon_device *rdev, u32 reg, u32 v)
 {
+	unsigned long flags;
+
+	spin_lock_irqsave(&rdev->rcu_idx_lock, flags);
 	WREG32(R600_RCU_INDEX, ((reg) & 0x1fff));
 	WREG32(R600_RCU_DATA, (v));
+	spin_unlock_irqrestore(&rdev->rcu_idx_lock, flags);
 }
 
 static inline u32 eg_cg_rreg(struct radeon_device *rdev, u32 reg)
 {
+	unsigned long flags;
 	u32 r;
 
+	spin_lock_irqsave(&rdev->cg_idx_lock, flags);
 	WREG32(EVERGREEN_CG_IND_ADDR, ((reg) & 0xffff));
 	r = RREG32(EVERGREEN_CG_IND_DATA);
+	spin_unlock_irqrestore(&rdev->cg_idx_lock, flags);
 	return r;
 }
 
 static inline void eg_cg_wreg(struct radeon_device *rdev, u32 reg, u32 v)
 {
+	unsigned long flags;
+
+	spin_lock_irqsave(&rdev->cg_idx_lock, flags);
 	WREG32(EVERGREEN_CG_IND_ADDR, ((reg) & 0xffff));
 	WREG32(EVERGREEN_CG_IND_DATA, (v));
+	spin_unlock_irqrestore(&rdev->cg_idx_lock, flags);
 }
 
 static inline u32 eg_pif_phy0_rreg(struct radeon_device *rdev, u32 reg)
 {
+	unsigned long flags;
 	u32 r;
 
+	spin_lock_irqsave(&rdev->pif_idx_lock, flags);
 	WREG32(EVERGREEN_PIF_PHY0_INDEX, ((reg) & 0xffff));
 	r = RREG32(EVERGREEN_PIF_PHY0_DATA);
+	spin_unlock_irqrestore(&rdev->pif_idx_lock, flags);
 	return r;
 }
 
 static inline void eg_pif_phy0_wreg(struct radeon_device *rdev, u32 reg, u32 v)
 {
+	unsigned long flags;
+
+	spin_lock_irqsave(&rdev->pif_idx_lock, flags);
 	WREG32(EVERGREEN_PIF_PHY0_INDEX, ((reg) & 0xffff));
 	WREG32(EVERGREEN_PIF_PHY0_DATA, (v));
+	spin_unlock_irqrestore(&rdev->pif_idx_lock, flags);
 }
 
 static inline u32 eg_pif_phy1_rreg(struct radeon_device *rdev, u32 reg)
 {
+	unsigned long flags;
 	u32 r;
 
+	spin_lock_irqsave(&rdev->pif_idx_lock, flags);
 	WREG32(EVERGREEN_PIF_PHY1_INDEX, ((reg) & 0xffff));
 	r = RREG32(EVERGREEN_PIF_PHY1_DATA);
+	spin_unlock_irqrestore(&rdev->pif_idx_lock, flags);
 	return r;
 }
 
 static inline void eg_pif_phy1_wreg(struct radeon_device *rdev, u32 reg, u32 v)
 {
+	unsigned long flags;
+
+	spin_lock_irqsave(&rdev->pif_idx_lock, flags);
 	WREG32(EVERGREEN_PIF_PHY1_INDEX, ((reg) & 0xffff));
 	WREG32(EVERGREEN_PIF_PHY1_DATA, (v));
+	spin_unlock_irqrestore(&rdev->pif_idx_lock, flags);
 }
 
 static inline u32 r600_uvd_ctx_rreg(struct radeon_device *rdev, u32 reg)
 {
+	unsigned long flags;
 	u32 r;
 
+	spin_lock_irqsave(&rdev->uvd_idx_lock, flags);
 	WREG32(R600_UVD_CTX_INDEX, ((reg) & 0x1ff));
 	r = RREG32(R600_UVD_CTX_DATA);
+	spin_unlock_irqrestore(&rdev->uvd_idx_lock, flags);
 	return r;
 }
 
 static inline void r600_uvd_ctx_wreg(struct radeon_device *rdev, u32 reg, u32 v)
 {
+	unsigned long flags;
+
+	spin_lock_irqsave(&rdev->uvd_idx_lock, flags);
 	WREG32(R600_UVD_CTX_INDEX, ((reg) & 0x1ff));
 	WREG32(R600_UVD_CTX_DATA, (v));
+	spin_unlock_irqrestore(&rdev->uvd_idx_lock, flags);
 }
 
 
 static inline u32 cik_didt_rreg(struct radeon_device *rdev, u32 reg)
 {
+	unsigned long flags;
 	u32 r;
 
+	spin_lock_irqsave(&rdev->didt_idx_lock, flags);
 	WREG32(CIK_DIDT_IND_INDEX, (reg));
 	r = RREG32(CIK_DIDT_IND_DATA);
+	spin_unlock_irqrestore(&rdev->didt_idx_lock, flags);
 	return r;
 }
 
 static inline void cik_didt_wreg(struct radeon_device *rdev, u32 reg, u32 v)
 {
+	unsigned long flags;
+
+	spin_lock_irqsave(&rdev->didt_idx_lock, flags);
 	WREG32(CIK_DIDT_IND_INDEX, (reg));
 	WREG32(CIK_DIDT_IND_DATA, (v));
+	spin_unlock_irqrestore(&rdev->didt_idx_lock, flags);
 }
 
 void r100_pll_errata_after_index(struct radeon_device *rdev);
@@ -2569,6 +2648,7 @@
 #define radeon_dpm_force_performance_level(rdev, l) rdev->asic->dpm.force_performance_level((rdev), (l))
 #define radeon_dpm_vblank_too_short(rdev) rdev->asic->dpm.vblank_too_short((rdev))
 #define radeon_dpm_powergate_uvd(rdev, g) rdev->asic->dpm.powergate_uvd((rdev), (g))
+#define radeon_dpm_enable_bapm(rdev, e) rdev->asic->dpm.enable_bapm((rdev), (e))
 
 /* Common functions */
 /* AGP */
diff --git a/drivers/gpu/drm/radeon/radeon_asic.c b/drivers/gpu/drm/radeon/radeon_asic.c
index 630853b..5003385 100644
--- a/drivers/gpu/drm/radeon/radeon_asic.c
+++ b/drivers/gpu/drm/radeon/radeon_asic.c
@@ -1037,6 +1037,7 @@
 		.set_pcie_lanes = &r600_set_pcie_lanes,
 		.set_clock_gating = NULL,
 		.get_temperature = &rv6xx_get_temp,
+		.set_uvd_clocks = &r600_set_uvd_clocks,
 	},
 	.dpm = {
 		.init = &rv6xx_dpm_init,
@@ -1126,6 +1127,7 @@
 		.set_pcie_lanes = NULL,
 		.set_clock_gating = NULL,
 		.get_temperature = &rv6xx_get_temp,
+		.set_uvd_clocks = &r600_set_uvd_clocks,
 	},
 	.dpm = {
 		.init = &rs780_dpm_init,
@@ -1141,6 +1143,7 @@
 		.get_mclk = &rs780_dpm_get_mclk,
 		.print_power_state = &rs780_dpm_print_power_state,
 		.debugfs_print_current_performance_level = &rs780_dpm_debugfs_print_current_performance_level,
+		.force_performance_level = &rs780_dpm_force_performance_level,
 	},
 	.pflip = {
 		.pre_page_flip = &rs600_pre_page_flip,
@@ -1791,6 +1794,7 @@
 		.print_power_state = &trinity_dpm_print_power_state,
 		.debugfs_print_current_performance_level = &trinity_dpm_debugfs_print_current_performance_level,
 		.force_performance_level = &trinity_dpm_force_performance_level,
+		.enable_bapm = &trinity_dpm_enable_bapm,
 	},
 	.pflip = {
 		.pre_page_flip = &evergreen_pre_page_flip,
@@ -2166,6 +2170,7 @@
 		.debugfs_print_current_performance_level = &kv_dpm_debugfs_print_current_performance_level,
 		.force_performance_level = &kv_dpm_force_performance_level,
 		.powergate_uvd = &kv_dpm_powergate_uvd,
+		.enable_bapm = &kv_dpm_enable_bapm,
 	},
 	.pflip = {
 		.pre_page_flip = &evergreen_pre_page_flip,
@@ -2390,7 +2395,7 @@
 				RADEON_CG_SUPPORT_HDP_LS |
 				RADEON_CG_SUPPORT_HDP_MGCG;
 			rdev->pg_flags = 0 |
-				/*RADEON_PG_SUPPORT_GFX_CG | */
+				/*RADEON_PG_SUPPORT_GFX_PG | */
 				RADEON_PG_SUPPORT_SDMA;
 			break;
 		case CHIP_OLAND:
@@ -2479,7 +2484,7 @@
 				RADEON_CG_SUPPORT_HDP_LS |
 				RADEON_CG_SUPPORT_HDP_MGCG;
 			rdev->pg_flags = 0;
-				/*RADEON_PG_SUPPORT_GFX_CG |
+				/*RADEON_PG_SUPPORT_GFX_PG |
 				RADEON_PG_SUPPORT_GFX_SMG |
 				RADEON_PG_SUPPORT_GFX_DMG |
 				RADEON_PG_SUPPORT_UVD |
@@ -2507,7 +2512,7 @@
 				RADEON_CG_SUPPORT_HDP_LS |
 				RADEON_CG_SUPPORT_HDP_MGCG;
 			rdev->pg_flags = 0;
-				/*RADEON_PG_SUPPORT_GFX_CG |
+				/*RADEON_PG_SUPPORT_GFX_PG |
 				RADEON_PG_SUPPORT_GFX_SMG |
 				RADEON_PG_SUPPORT_UVD |
 				RADEON_PG_SUPPORT_VCE |
diff --git a/drivers/gpu/drm/radeon/radeon_asic.h b/drivers/gpu/drm/radeon/radeon_asic.h
index 818bbe6..70c29d5 100644
--- a/drivers/gpu/drm/radeon/radeon_asic.h
+++ b/drivers/gpu/drm/radeon/radeon_asic.h
@@ -389,6 +389,7 @@
 u32 r600_get_xclk(struct radeon_device *rdev);
 uint64_t r600_get_gpu_clock_counter(struct radeon_device *rdev);
 int rv6xx_get_temp(struct radeon_device *rdev);
+int r600_set_uvd_clocks(struct radeon_device *rdev, u32 vclk, u32 dclk);
 int r600_dpm_pre_set_power_state(struct radeon_device *rdev);
 void r600_dpm_post_set_power_state(struct radeon_device *rdev);
 /* r600 dma */
@@ -428,6 +429,8 @@
 				 struct radeon_ps *ps);
 void rs780_dpm_debugfs_print_current_performance_level(struct radeon_device *rdev,
 						       struct seq_file *m);
+int rs780_dpm_force_performance_level(struct radeon_device *rdev,
+				      enum radeon_dpm_forced_level level);
 
 /*
  * rv770,rv730,rv710,rv740
@@ -625,6 +628,7 @@
 							 struct seq_file *m);
 int trinity_dpm_force_performance_level(struct radeon_device *rdev,
 					enum radeon_dpm_forced_level level);
+void trinity_dpm_enable_bapm(struct radeon_device *rdev, bool enable);
 
 /* DCE6 - SI */
 void dce6_bandwidth_update(struct radeon_device *rdev);
@@ -781,6 +785,7 @@
 int kv_dpm_force_performance_level(struct radeon_device *rdev,
 				   enum radeon_dpm_forced_level level);
 void kv_dpm_powergate_uvd(struct radeon_device *rdev, bool gate);
+void kv_dpm_enable_bapm(struct radeon_device *rdev, bool enable);
 
 /* uvd v1.0 */
 uint32_t uvd_v1_0_get_rptr(struct radeon_device *rdev,
diff --git a/drivers/gpu/drm/radeon/radeon_connectors.c b/drivers/gpu/drm/radeon/radeon_connectors.c
index 2399f25..79159b5 100644
--- a/drivers/gpu/drm/radeon/radeon_connectors.c
+++ b/drivers/gpu/drm/radeon/radeon_connectors.c
@@ -396,6 +396,21 @@
 		}
 	}
 
+	if (property == rdev->mode_info.audio_property) {
+		struct radeon_connector *radeon_connector = to_radeon_connector(connector);
+		/* need to find digital encoder on connector */
+		encoder = radeon_find_encoder(connector, DRM_MODE_ENCODER_TMDS);
+		if (!encoder)
+			return 0;
+
+		radeon_encoder = to_radeon_encoder(encoder);
+
+		if (radeon_connector->audio != val) {
+			radeon_connector->audio = val;
+			radeon_property_change_mode(&radeon_encoder->base);
+		}
+	}
+
 	if (property == rdev->mode_info.underscan_property) {
 		/* need to find digital encoder on connector */
 		encoder = radeon_find_encoder(connector, DRM_MODE_ENCODER_TMDS);
@@ -1420,7 +1435,7 @@
 				if (radeon_dp_getdpcd(radeon_connector))
 					ret = connector_status_connected;
 			} else {
-				/* try non-aux ddc (DP to DVI/HMDI/etc. adapter) */
+				/* try non-aux ddc (DP to DVI/HDMI/etc. adapter) */
 				if (radeon_ddc_probe(radeon_connector, false))
 					ret = connector_status_connected;
 			}
@@ -1489,6 +1504,24 @@
 	.force = radeon_dvi_force,
 };
 
+static const struct drm_connector_funcs radeon_edp_connector_funcs = {
+	.dpms = drm_helper_connector_dpms,
+	.detect = radeon_dp_detect,
+	.fill_modes = drm_helper_probe_single_connector_modes,
+	.set_property = radeon_lvds_set_property,
+	.destroy = radeon_dp_connector_destroy,
+	.force = radeon_dvi_force,
+};
+
+static const struct drm_connector_funcs radeon_lvds_bridge_connector_funcs = {
+	.dpms = drm_helper_connector_dpms,
+	.detect = radeon_dp_detect,
+	.fill_modes = drm_helper_probe_single_connector_modes,
+	.set_property = radeon_lvds_set_property,
+	.destroy = radeon_dp_connector_destroy,
+	.force = radeon_dvi_force,
+};
+
 void
 radeon_add_atom_connector(struct drm_device *dev,
 			  uint32_t connector_id,
@@ -1580,8 +1613,6 @@
 			goto failed;
 		radeon_dig_connector->igp_lane_info = igp_lane_info;
 		radeon_connector->con_priv = radeon_dig_connector;
-		drm_connector_init(dev, &radeon_connector->base, &radeon_dp_connector_funcs, connector_type);
-		drm_connector_helper_add(&radeon_connector->base, &radeon_dp_connector_helper_funcs);
 		if (i2c_bus->valid) {
 			/* add DP i2c bus */
 			if (connector_type == DRM_MODE_CONNECTOR_eDP)
@@ -1598,6 +1629,10 @@
 		case DRM_MODE_CONNECTOR_VGA:
 		case DRM_MODE_CONNECTOR_DVIA:
 		default:
+			drm_connector_init(dev, &radeon_connector->base,
+					   &radeon_dp_connector_funcs, connector_type);
+			drm_connector_helper_add(&radeon_connector->base,
+						 &radeon_dp_connector_helper_funcs);
 			connector->interlace_allowed = true;
 			connector->doublescan_allowed = true;
 			radeon_connector->dac_load_detect = true;
@@ -1610,6 +1645,10 @@
 		case DRM_MODE_CONNECTOR_HDMIA:
 		case DRM_MODE_CONNECTOR_HDMIB:
 		case DRM_MODE_CONNECTOR_DisplayPort:
+			drm_connector_init(dev, &radeon_connector->base,
+					   &radeon_dp_connector_funcs, connector_type);
+			drm_connector_helper_add(&radeon_connector->base,
+						 &radeon_dp_connector_helper_funcs);
 			drm_object_attach_property(&radeon_connector->base.base,
 						      rdev->mode_info.underscan_property,
 						      UNDERSCAN_OFF);
@@ -1619,6 +1658,9 @@
 			drm_object_attach_property(&radeon_connector->base.base,
 						      rdev->mode_info.underscan_vborder_property,
 						      0);
+			drm_object_attach_property(&radeon_connector->base.base,
+						   rdev->mode_info.audio_property,
+						   RADEON_AUDIO_DISABLE);
 			subpixel_order = SubPixelHorizontalRGB;
 			connector->interlace_allowed = true;
 			if (connector_type == DRM_MODE_CONNECTOR_HDMIB)
@@ -1634,6 +1676,10 @@
 			break;
 		case DRM_MODE_CONNECTOR_LVDS:
 		case DRM_MODE_CONNECTOR_eDP:
+			drm_connector_init(dev, &radeon_connector->base,
+					   &radeon_lvds_bridge_connector_funcs, connector_type);
+			drm_connector_helper_add(&radeon_connector->base,
+						 &radeon_dp_connector_helper_funcs);
 			drm_object_attach_property(&radeon_connector->base.base,
 						      dev->mode_config.scaling_mode_property,
 						      DRM_MODE_SCALE_FULLSCREEN);
@@ -1708,6 +1754,11 @@
 							      rdev->mode_info.underscan_vborder_property,
 							      0);
 			}
+			if (ASIC_IS_DCE2(rdev)) {
+				drm_object_attach_property(&radeon_connector->base.base,
+							      rdev->mode_info.audio_property,
+							      RADEON_AUDIO_DISABLE);
+			}
 			if (connector_type == DRM_MODE_CONNECTOR_DVII) {
 				radeon_connector->dac_load_detect = true;
 				drm_object_attach_property(&radeon_connector->base.base,
@@ -1748,6 +1799,11 @@
 							      rdev->mode_info.underscan_vborder_property,
 							      0);
 			}
+			if (ASIC_IS_DCE2(rdev)) {
+				drm_object_attach_property(&radeon_connector->base.base,
+							      rdev->mode_info.audio_property,
+							      RADEON_AUDIO_DISABLE);
+			}
 			subpixel_order = SubPixelHorizontalRGB;
 			connector->interlace_allowed = true;
 			if (connector_type == DRM_MODE_CONNECTOR_HDMIB)
@@ -1787,6 +1843,11 @@
 							      rdev->mode_info.underscan_vborder_property,
 							      0);
 			}
+			if (ASIC_IS_DCE2(rdev)) {
+				drm_object_attach_property(&radeon_connector->base.base,
+							      rdev->mode_info.audio_property,
+							      RADEON_AUDIO_DISABLE);
+			}
 			connector->interlace_allowed = true;
 			/* in theory with a DP to VGA converter... */
 			connector->doublescan_allowed = false;
@@ -1797,7 +1858,7 @@
 				goto failed;
 			radeon_dig_connector->igp_lane_info = igp_lane_info;
 			radeon_connector->con_priv = radeon_dig_connector;
-			drm_connector_init(dev, &radeon_connector->base, &radeon_dp_connector_funcs, connector_type);
+			drm_connector_init(dev, &radeon_connector->base, &radeon_edp_connector_funcs, connector_type);
 			drm_connector_helper_add(&radeon_connector->base, &radeon_dp_connector_helper_funcs);
 			if (i2c_bus->valid) {
 				/* add DP i2c bus */
diff --git a/drivers/gpu/drm/radeon/radeon_cs.c b/drivers/gpu/drm/radeon/radeon_cs.c
index a560844..ac6ece6 100644
--- a/drivers/gpu/drm/radeon/radeon_cs.c
+++ b/drivers/gpu/drm/radeon/radeon_cs.c
@@ -28,6 +28,7 @@
 #include <drm/radeon_drm.h>
 #include "radeon_reg.h"
 #include "radeon.h"
+#include "radeon_trace.h"
 
 static int radeon_cs_parser_relocs(struct radeon_cs_parser *p)
 {
@@ -80,9 +81,11 @@
 		p->relocs[i].lobj.bo = p->relocs[i].robj;
 		p->relocs[i].lobj.written = !!r->write_domain;
 
-		/* the first reloc of an UVD job is the
-		   msg and that must be in VRAM */
-		if (p->ring == R600_RING_TYPE_UVD_INDEX && i == 0) {
+		/* the first reloc of an UVD job is the msg and that must be in
+		   VRAM, also put everything into VRAM on AGP cards to avoid
+		   image corruption */
+		if (p->ring == R600_RING_TYPE_UVD_INDEX &&
+		    (i == 0 || p->rdev->flags & RADEON_IS_AGP)) {
 			/* TODO: is this still needed for NI+ ? */
 			p->relocs[i].lobj.domain =
 				RADEON_GEM_DOMAIN_VRAM;
@@ -559,6 +562,8 @@
 		return r;
 	}
 
+	trace_radeon_cs(&parser);
+
 	r = radeon_cs_ib_chunk(rdev, &parser);
 	if (r) {
 		goto out;
diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c
index 16cb879..e29faa7 100644
--- a/drivers/gpu/drm/radeon/radeon_device.c
+++ b/drivers/gpu/drm/radeon/radeon_device.c
@@ -1249,6 +1249,17 @@
 	/* Registers mapping */
 	/* TODO: block userspace mapping of io register */
 	spin_lock_init(&rdev->mmio_idx_lock);
+	spin_lock_init(&rdev->smc_idx_lock);
+	spin_lock_init(&rdev->pll_idx_lock);
+	spin_lock_init(&rdev->mc_idx_lock);
+	spin_lock_init(&rdev->pcie_idx_lock);
+	spin_lock_init(&rdev->pciep_idx_lock);
+	spin_lock_init(&rdev->pif_idx_lock);
+	spin_lock_init(&rdev->cg_idx_lock);
+	spin_lock_init(&rdev->uvd_idx_lock);
+	spin_lock_init(&rdev->rcu_idx_lock);
+	spin_lock_init(&rdev->didt_idx_lock);
+	spin_lock_init(&rdev->end_idx_lock);
 	if (rdev->family >= CHIP_BONAIRE) {
 		rdev->rmmio_base = pci_resource_start(rdev->pdev, 5);
 		rdev->rmmio_size = pci_resource_len(rdev->pdev, 5);
diff --git a/drivers/gpu/drm/radeon/radeon_display.c b/drivers/gpu/drm/radeon/radeon_display.c
index b055bdd..0d1aa05 100644
--- a/drivers/gpu/drm/radeon/radeon_display.c
+++ b/drivers/gpu/drm/radeon/radeon_display.c
@@ -1172,6 +1172,12 @@
 	{ UNDERSCAN_AUTO, "auto" },
 };
 
+static struct drm_prop_enum_list radeon_audio_enum_list[] =
+{	{ RADEON_AUDIO_DISABLE, "off" },
+	{ RADEON_AUDIO_ENABLE, "on" },
+	{ RADEON_AUDIO_AUTO, "auto" },
+};
+
 static int radeon_modeset_create_props(struct radeon_device *rdev)
 {
 	int sz;
@@ -1222,6 +1228,12 @@
 	if (!rdev->mode_info.underscan_vborder_property)
 		return -ENOMEM;
 
+	sz = ARRAY_SIZE(radeon_audio_enum_list);
+	rdev->mode_info.audio_property =
+		drm_property_create_enum(rdev->ddev, 0,
+					 "audio",
+					 radeon_audio_enum_list, sz);
+
 	return 0;
 }
 
diff --git a/drivers/gpu/drm/radeon/radeon_drv.c b/drivers/gpu/drm/radeon/radeon_drv.c
index cb4445f..cdd12dc 100644
--- a/drivers/gpu/drm/radeon/radeon_drv.c
+++ b/drivers/gpu/drm/radeon/radeon_drv.c
@@ -153,7 +153,7 @@
 int radeon_testing = 0;
 int radeon_connector_table = 0;
 int radeon_tv = 1;
-int radeon_audio = 0;
+int radeon_audio = 1;
 int radeon_disp_priority = 0;
 int radeon_hw_i2c = 0;
 int radeon_pcie_gen2 = -1;
diff --git a/drivers/gpu/drm/radeon/radeon_mode.h b/drivers/gpu/drm/radeon/radeon_mode.h
index d908d8d..ef63d3f 100644
--- a/drivers/gpu/drm/radeon/radeon_mode.h
+++ b/drivers/gpu/drm/radeon/radeon_mode.h
@@ -247,6 +247,8 @@
 	struct drm_property *underscan_property;
 	struct drm_property *underscan_hborder_property;
 	struct drm_property *underscan_vborder_property;
+	/* audio */
+	struct drm_property *audio_property;
 	/* hardcoded DFP edid from BIOS */
 	struct edid *bios_hardcoded_edid;
 	int bios_hardcoded_edid_size;
@@ -471,6 +473,12 @@
 	u8 cd_mux_state;
 };
 
+enum radeon_connector_audio {
+	RADEON_AUDIO_DISABLE = 0,
+	RADEON_AUDIO_ENABLE = 1,
+	RADEON_AUDIO_AUTO = 2
+};
+
 struct radeon_connector {
 	struct drm_connector base;
 	uint32_t connector_id;
@@ -489,6 +497,7 @@
 	struct radeon_hpd hpd;
 	struct radeon_router router;
 	struct radeon_i2c_chan *router_bus;
+	enum radeon_connector_audio audio;
 };
 
 struct radeon_framebuffer {
diff --git a/drivers/gpu/drm/radeon/radeon_pm.c b/drivers/gpu/drm/radeon/radeon_pm.c
index d755536..87e1d69 100644
--- a/drivers/gpu/drm/radeon/radeon_pm.c
+++ b/drivers/gpu/drm/radeon/radeon_pm.c
@@ -67,7 +67,16 @@
 
 void radeon_pm_acpi_event_handler(struct radeon_device *rdev)
 {
-	if (rdev->pm.pm_method == PM_METHOD_PROFILE) {
+	if ((rdev->pm.pm_method == PM_METHOD_DPM) && rdev->pm.dpm_enabled) {
+		mutex_lock(&rdev->pm.mutex);
+		if (power_supply_is_system_supplied() > 0)
+			rdev->pm.dpm.ac_power = true;
+		else
+			rdev->pm.dpm.ac_power = false;
+		if (rdev->asic->dpm.enable_bapm)
+			radeon_dpm_enable_bapm(rdev, rdev->pm.dpm.ac_power);
+		mutex_unlock(&rdev->pm.mutex);
+	} else if (rdev->pm.pm_method == PM_METHOD_PROFILE) {
 		if (rdev->pm.profile == PM_PROFILE_AUTO) {
 			mutex_lock(&rdev->pm.mutex);
 			radeon_pm_update_profile(rdev);
@@ -333,7 +342,7 @@
 				     struct device_attribute *attr,
 				     char *buf)
 {
-	struct drm_device *ddev = pci_get_drvdata(to_pci_dev(dev));
+	struct drm_device *ddev = dev_get_drvdata(dev);
 	struct radeon_device *rdev = ddev->dev_private;
 	int cp = rdev->pm.profile;
 
@@ -349,7 +358,7 @@
 				     const char *buf,
 				     size_t count)
 {
-	struct drm_device *ddev = pci_get_drvdata(to_pci_dev(dev));
+	struct drm_device *ddev = dev_get_drvdata(dev);
 	struct radeon_device *rdev = ddev->dev_private;
 
 	mutex_lock(&rdev->pm.mutex);
@@ -383,7 +392,7 @@
 				    struct device_attribute *attr,
 				    char *buf)
 {
-	struct drm_device *ddev = pci_get_drvdata(to_pci_dev(dev));
+	struct drm_device *ddev = dev_get_drvdata(dev);
 	struct radeon_device *rdev = ddev->dev_private;
 	int pm = rdev->pm.pm_method;
 
@@ -397,7 +406,7 @@
 				    const char *buf,
 				    size_t count)
 {
-	struct drm_device *ddev = pci_get_drvdata(to_pci_dev(dev));
+	struct drm_device *ddev = dev_get_drvdata(dev);
 	struct radeon_device *rdev = ddev->dev_private;
 
 	/* we don't support the legacy modes with dpm */
@@ -433,7 +442,7 @@
 				    struct device_attribute *attr,
 				    char *buf)
 {
-	struct drm_device *ddev = pci_get_drvdata(to_pci_dev(dev));
+	struct drm_device *ddev = dev_get_drvdata(dev);
 	struct radeon_device *rdev = ddev->dev_private;
 	enum radeon_pm_state_type pm = rdev->pm.dpm.user_state;
 
@@ -447,7 +456,7 @@
 				    const char *buf,
 				    size_t count)
 {
-	struct drm_device *ddev = pci_get_drvdata(to_pci_dev(dev));
+	struct drm_device *ddev = dev_get_drvdata(dev);
 	struct radeon_device *rdev = ddev->dev_private;
 
 	mutex_lock(&rdev->pm.mutex);
@@ -472,7 +481,7 @@
 						       struct device_attribute *attr,
 						       char *buf)
 {
-	struct drm_device *ddev = pci_get_drvdata(to_pci_dev(dev));
+	struct drm_device *ddev = dev_get_drvdata(dev);
 	struct radeon_device *rdev = ddev->dev_private;
 	enum radeon_dpm_forced_level level = rdev->pm.dpm.forced_level;
 
@@ -486,7 +495,7 @@
 						       const char *buf,
 						       size_t count)
 {
-	struct drm_device *ddev = pci_get_drvdata(to_pci_dev(dev));
+	struct drm_device *ddev = dev_get_drvdata(dev);
 	struct radeon_device *rdev = ddev->dev_private;
 	enum radeon_dpm_forced_level level;
 	int ret = 0;
@@ -524,7 +533,7 @@
 				      struct device_attribute *attr,
 				      char *buf)
 {
-	struct drm_device *ddev = pci_get_drvdata(to_pci_dev(dev));
+	struct drm_device *ddev = dev_get_drvdata(dev);
 	struct radeon_device *rdev = ddev->dev_private;
 	int temp;
 
@@ -536,6 +545,23 @@
 	return snprintf(buf, PAGE_SIZE, "%d\n", temp);
 }
 
+static ssize_t radeon_hwmon_show_temp_thresh(struct device *dev,
+					     struct device_attribute *attr,
+					     char *buf)
+{
+	struct drm_device *ddev = dev_get_drvdata(dev);
+	struct radeon_device *rdev = ddev->dev_private;
+	int hyst = to_sensor_dev_attr(attr)->index;
+	int temp;
+
+	if (hyst)
+		temp = rdev->pm.dpm.thermal.min_temp;
+	else
+		temp = rdev->pm.dpm.thermal.max_temp;
+
+	return snprintf(buf, PAGE_SIZE, "%d\n", temp);
+}
+
 static ssize_t radeon_hwmon_show_name(struct device *dev,
 				      struct device_attribute *attr,
 				      char *buf)
@@ -544,16 +570,37 @@
 }
 
 static SENSOR_DEVICE_ATTR(temp1_input, S_IRUGO, radeon_hwmon_show_temp, NULL, 0);
+static SENSOR_DEVICE_ATTR(temp1_crit, S_IRUGO, radeon_hwmon_show_temp_thresh, NULL, 0);
+static SENSOR_DEVICE_ATTR(temp1_crit_hyst, S_IRUGO, radeon_hwmon_show_temp_thresh, NULL, 1);
 static SENSOR_DEVICE_ATTR(name, S_IRUGO, radeon_hwmon_show_name, NULL, 0);
 
 static struct attribute *hwmon_attributes[] = {
 	&sensor_dev_attr_temp1_input.dev_attr.attr,
+	&sensor_dev_attr_temp1_crit.dev_attr.attr,
+	&sensor_dev_attr_temp1_crit_hyst.dev_attr.attr,
 	&sensor_dev_attr_name.dev_attr.attr,
 	NULL
 };
 
+static umode_t hwmon_attributes_visible(struct kobject *kobj,
+					struct attribute *attr, int index)
+{
+	struct device *dev = container_of(kobj, struct device, kobj);
+	struct drm_device *ddev = dev_get_drvdata(dev);
+	struct radeon_device *rdev = ddev->dev_private;
+
+	/* Skip limit attributes if DPM is not enabled */
+	if (rdev->pm.pm_method != PM_METHOD_DPM &&
+	    (attr == &sensor_dev_attr_temp1_crit.dev_attr.attr ||
+	     attr == &sensor_dev_attr_temp1_crit_hyst.dev_attr.attr))
+		return 0;
+
+	return attr->mode;
+}
+
 static const struct attribute_group hwmon_attrgroup = {
 	.attrs = hwmon_attributes,
+	.is_visible = hwmon_attributes_visible,
 };
 
 static int radeon_hwmon_init(struct radeon_device *rdev)
@@ -870,10 +917,13 @@
 
 	radeon_dpm_post_set_power_state(rdev);
 
-	/* force low perf level for thermal */
-	if (rdev->pm.dpm.thermal_active &&
-	    rdev->asic->dpm.force_performance_level) {
-		radeon_dpm_force_performance_level(rdev, RADEON_DPM_FORCED_LEVEL_LOW);
+	if (rdev->asic->dpm.force_performance_level) {
+		if (rdev->pm.dpm.thermal_active)
+			/* force low perf level for thermal */
+			radeon_dpm_force_performance_level(rdev, RADEON_DPM_FORCED_LEVEL_LOW);
+		else
+			/* otherwise, enable auto */
+			radeon_dpm_force_performance_level(rdev, RADEON_DPM_FORCED_LEVEL_AUTO);
 	}
 
 done:
@@ -1102,9 +1152,10 @@
 {
 	int ret;
 
-	/* default to performance state */
+	/* default to balanced state */
 	rdev->pm.dpm.state = POWER_STATE_TYPE_BALANCED;
 	rdev->pm.dpm.user_state = POWER_STATE_TYPE_BALANCED;
+	rdev->pm.dpm.forced_level = RADEON_DPM_FORCED_LEVEL_AUTO;
 	rdev->pm.default_sclk = rdev->clock.default_sclk;
 	rdev->pm.default_mclk = rdev->clock.default_mclk;
 	rdev->pm.current_sclk = rdev->clock.default_sclk;
diff --git a/drivers/gpu/drm/radeon/radeon_trace.h b/drivers/gpu/drm/radeon/radeon_trace.h
index eafd816..f7e3678 100644
--- a/drivers/gpu/drm/radeon/radeon_trace.h
+++ b/drivers/gpu/drm/radeon/radeon_trace.h
@@ -27,6 +27,26 @@
 	    TP_printk("bo=%p, pages=%u", __entry->bo, __entry->pages)
 );
 
+TRACE_EVENT(radeon_cs,
+	    TP_PROTO(struct radeon_cs_parser *p),
+	    TP_ARGS(p),
+	    TP_STRUCT__entry(
+			     __field(u32, ring)
+			     __field(u32, dw)
+			     __field(u32, fences)
+			     ),
+
+	    TP_fast_assign(
+			   __entry->ring = p->ring;
+			   __entry->dw = p->chunks[p->chunk_ib_idx].length_dw;
+			   __entry->fences = radeon_fence_count_emitted(
+				p->rdev, p->ring);
+			   ),
+	    TP_printk("ring=%u, dw=%u, fences=%u",
+		      __entry->ring, __entry->dw,
+		      __entry->fences)
+);
+
 DECLARE_EVENT_CLASS(radeon_fence_request,
 
 	    TP_PROTO(struct drm_device *dev, u32 seqno),
@@ -53,13 +73,6 @@
 	    TP_ARGS(dev, seqno)
 );
 
-DEFINE_EVENT(radeon_fence_request, radeon_fence_retire,
-
-	    TP_PROTO(struct drm_device *dev, u32 seqno),
-
-	    TP_ARGS(dev, seqno)
-);
-
 DEFINE_EVENT(radeon_fence_request, radeon_fence_wait_begin,
 
 	    TP_PROTO(struct drm_device *dev, u32 seqno),
diff --git a/drivers/gpu/drm/radeon/rs400.c b/drivers/gpu/drm/radeon/rs400.c
index b8074a8..9566b59 100644
--- a/drivers/gpu/drm/radeon/rs400.c
+++ b/drivers/gpu/drm/radeon/rs400.c
@@ -274,19 +274,26 @@
 
 uint32_t rs400_mc_rreg(struct radeon_device *rdev, uint32_t reg)
 {
+	unsigned long flags;
 	uint32_t r;
 
+	spin_lock_irqsave(&rdev->mc_idx_lock, flags);
 	WREG32(RS480_NB_MC_INDEX, reg & 0xff);
 	r = RREG32(RS480_NB_MC_DATA);
 	WREG32(RS480_NB_MC_INDEX, 0xff);
+	spin_unlock_irqrestore(&rdev->mc_idx_lock, flags);
 	return r;
 }
 
 void rs400_mc_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v)
 {
+	unsigned long flags;
+
+	spin_lock_irqsave(&rdev->mc_idx_lock, flags);
 	WREG32(RS480_NB_MC_INDEX, ((reg) & 0xff) | RS480_NB_MC_IND_WR_EN);
 	WREG32(RS480_NB_MC_DATA, (v));
 	WREG32(RS480_NB_MC_INDEX, 0xff);
+	spin_unlock_irqrestore(&rdev->mc_idx_lock, flags);
 }
 
 #if defined(CONFIG_DEBUG_FS)
diff --git a/drivers/gpu/drm/radeon/rs600.c b/drivers/gpu/drm/radeon/rs600.c
index 670b555..6acba80 100644
--- a/drivers/gpu/drm/radeon/rs600.c
+++ b/drivers/gpu/drm/radeon/rs600.c
@@ -847,16 +847,26 @@
 
 uint32_t rs600_mc_rreg(struct radeon_device *rdev, uint32_t reg)
 {
+	unsigned long flags;
+	u32 r;
+
+	spin_lock_irqsave(&rdev->mc_idx_lock, flags);
 	WREG32(R_000070_MC_IND_INDEX, S_000070_MC_IND_ADDR(reg) |
 		S_000070_MC_IND_CITF_ARB0(1));
-	return RREG32(R_000074_MC_IND_DATA);
+	r = RREG32(R_000074_MC_IND_DATA);
+	spin_unlock_irqrestore(&rdev->mc_idx_lock, flags);
+	return r;
 }
 
 void rs600_mc_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v)
 {
+	unsigned long flags;
+
+	spin_lock_irqsave(&rdev->mc_idx_lock, flags);
 	WREG32(R_000070_MC_IND_INDEX, S_000070_MC_IND_ADDR(reg) |
 		S_000070_MC_IND_CITF_ARB0(1) | S_000070_MC_IND_WR_EN(1));
 	WREG32(R_000074_MC_IND_DATA, v);
+	spin_unlock_irqrestore(&rdev->mc_idx_lock, flags);
 }
 
 static void rs600_debugfs(struct radeon_device *rdev)
diff --git a/drivers/gpu/drm/radeon/rs690.c b/drivers/gpu/drm/radeon/rs690.c
index d8ddfb3..1447d79 100644
--- a/drivers/gpu/drm/radeon/rs690.c
+++ b/drivers/gpu/drm/radeon/rs690.c
@@ -631,20 +631,27 @@
 
 uint32_t rs690_mc_rreg(struct radeon_device *rdev, uint32_t reg)
 {
+	unsigned long flags;
 	uint32_t r;
 
+	spin_lock_irqsave(&rdev->mc_idx_lock, flags);
 	WREG32(R_000078_MC_INDEX, S_000078_MC_IND_ADDR(reg));
 	r = RREG32(R_00007C_MC_DATA);
 	WREG32(R_000078_MC_INDEX, ~C_000078_MC_IND_ADDR);
+	spin_unlock_irqrestore(&rdev->mc_idx_lock, flags);
 	return r;
 }
 
 void rs690_mc_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v)
 {
+	unsigned long flags;
+
+	spin_lock_irqsave(&rdev->mc_idx_lock, flags);
 	WREG32(R_000078_MC_INDEX, S_000078_MC_IND_ADDR(reg) |
 		S_000078_MC_IND_WR_EN(1));
 	WREG32(R_00007C_MC_DATA, v);
 	WREG32(R_000078_MC_INDEX, 0x7F);
+	spin_unlock_irqrestore(&rdev->mc_idx_lock, flags);
 }
 
 static void rs690_mc_program(struct radeon_device *rdev)
diff --git a/drivers/gpu/drm/radeon/rs780_dpm.c b/drivers/gpu/drm/radeon/rs780_dpm.c
index d1a1ce7..6af8505 100644
--- a/drivers/gpu/drm/radeon/rs780_dpm.c
+++ b/drivers/gpu/drm/radeon/rs780_dpm.c
@@ -62,9 +62,7 @@
 			radeon_crtc = to_radeon_crtc(crtc);
 			pi->crtc_id = radeon_crtc->crtc_id;
 			if (crtc->mode.htotal && crtc->mode.vtotal)
-				pi->refresh_rate =
-					(crtc->mode.clock * 1000) /
-					(crtc->mode.htotal * crtc->mode.vtotal);
+				pi->refresh_rate = drm_mode_vrefresh(&crtc->mode);
 			break;
 		}
 	}
@@ -376,9 +374,8 @@
 	WREG32_P(CG_INTGFX_MISC, 0, ~0xFFF00000);
 }
 
-static void rs780_force_voltage_to_high(struct radeon_device *rdev)
+static void rs780_force_voltage(struct radeon_device *rdev, u16 voltage)
 {
-	struct igp_power_info *pi = rs780_get_pi(rdev);
 	struct igp_ps *current_state = rs780_get_ps(rdev->pm.dpm.current_ps);
 
 	if ((current_state->max_voltage == RS780_VDDC_LEVEL_HIGH) &&
@@ -390,7 +387,7 @@
 	udelay(1);
 
 	WREG32_P(FVTHROT_PWM_CTRL_REG0,
-		 STARTING_PWM_HIGHTIME(pi->max_voltage),
+		 STARTING_PWM_HIGHTIME(voltage),
 		 ~STARTING_PWM_HIGHTIME_MASK);
 
 	WREG32_P(FVTHROT_PWM_CTRL_REG0,
@@ -404,6 +401,26 @@
 	WREG32_P(GFX_MACRO_BYPASS_CNTL, 0, ~SPLL_BYPASS_CNTL);
 }
 
+static void rs780_force_fbdiv(struct radeon_device *rdev, u32 fb_div)
+{
+	struct igp_ps *current_state = rs780_get_ps(rdev->pm.dpm.current_ps);
+
+	if (current_state->sclk_low == current_state->sclk_high)
+		return;
+
+	WREG32_P(GFX_MACRO_BYPASS_CNTL, SPLL_BYPASS_CNTL, ~SPLL_BYPASS_CNTL);
+
+	WREG32_P(FVTHROT_FBDIV_REG2, FORCED_FEEDBACK_DIV(fb_div),
+		 ~FORCED_FEEDBACK_DIV_MASK);
+	WREG32_P(FVTHROT_FBDIV_REG1, STARTING_FEEDBACK_DIV(fb_div),
+		 ~STARTING_FEEDBACK_DIV_MASK);
+	WREG32_P(FVTHROT_FBDIV_REG1, FORCE_FEEDBACK_DIV, ~FORCE_FEEDBACK_DIV);
+
+	udelay(100);
+
+	WREG32_P(GFX_MACRO_BYPASS_CNTL, 0, ~SPLL_BYPASS_CNTL);
+}
+
 static int rs780_set_engine_clock_scaling(struct radeon_device *rdev,
 					  struct radeon_ps *new_ps,
 					  struct radeon_ps *old_ps)
@@ -432,17 +449,13 @@
 	if (ret)
 		return ret;
 
-	WREG32_P(GFX_MACRO_BYPASS_CNTL, SPLL_BYPASS_CNTL, ~SPLL_BYPASS_CNTL);
+	if ((min_dividers.ref_div != max_dividers.ref_div) ||
+	    (min_dividers.post_div != max_dividers.post_div) ||
+	    (max_dividers.ref_div != current_max_dividers.ref_div) ||
+	    (max_dividers.post_div != current_max_dividers.post_div))
+		return -EINVAL;
 
-	WREG32_P(FVTHROT_FBDIV_REG2, FORCED_FEEDBACK_DIV(max_dividers.fb_div),
-		 ~FORCED_FEEDBACK_DIV_MASK);
-	WREG32_P(FVTHROT_FBDIV_REG1, STARTING_FEEDBACK_DIV(max_dividers.fb_div),
-		 ~STARTING_FEEDBACK_DIV_MASK);
-	WREG32_P(FVTHROT_FBDIV_REG1, FORCE_FEEDBACK_DIV, ~FORCE_FEEDBACK_DIV);
-
-	udelay(100);
-
-	WREG32_P(GFX_MACRO_BYPASS_CNTL, 0, ~SPLL_BYPASS_CNTL);
+	rs780_force_fbdiv(rdev, max_dividers.fb_div);
 
 	if (max_dividers.fb_div > min_dividers.fb_div) {
 		WREG32_P(FVTHROT_FBDIV_REG0,
@@ -486,6 +499,9 @@
 	    (new_state->sclk_low == old_state->sclk_low))
 		return;
 
+	if (new_state->sclk_high == new_state->sclk_low)
+		return;
+
 	rs780_clk_scaling_enable(rdev, true);
 }
 
@@ -649,7 +665,7 @@
 	rs780_set_uvd_clock_before_set_eng_clock(rdev, new_ps, old_ps);
 
 	if (pi->voltage_control) {
-		rs780_force_voltage_to_high(rdev);
+		rs780_force_voltage(rdev, pi->max_voltage);
 		mdelay(5);
 	}
 
@@ -717,14 +733,18 @@
 	if (ATOM_PPLIB_NONCLOCKINFO_VER1 < table_rev) {
 		rps->vclk = le32_to_cpu(non_clock_info->ulVCLK);
 		rps->dclk = le32_to_cpu(non_clock_info->ulDCLK);
-	} else if (r600_is_uvd_state(rps->class, rps->class2)) {
-		rps->vclk = RS780_DEFAULT_VCLK_FREQ;
-		rps->dclk = RS780_DEFAULT_DCLK_FREQ;
 	} else {
 		rps->vclk = 0;
 		rps->dclk = 0;
 	}
 
+	if (r600_is_uvd_state(rps->class, rps->class2)) {
+		if ((rps->vclk == 0) || (rps->dclk == 0)) {
+			rps->vclk = RS780_DEFAULT_VCLK_FREQ;
+			rps->dclk = RS780_DEFAULT_DCLK_FREQ;
+		}
+	}
+
 	if (rps->class & ATOM_PPLIB_CLASSIFICATION_BOOT)
 		rdev->pm.dpm.boot_ps = rps;
 	if (rps->class & ATOM_PPLIB_CLASSIFICATION_UVDSTATE)
@@ -986,3 +1006,55 @@
 		seq_printf(m, "power level 1    sclk: %u vddc_index: %d\n",
 			   ps->sclk_high, ps->max_voltage);
 }
+
+int rs780_dpm_force_performance_level(struct radeon_device *rdev,
+				      enum radeon_dpm_forced_level level)
+{
+	struct igp_power_info *pi = rs780_get_pi(rdev);
+	struct radeon_ps *rps = rdev->pm.dpm.current_ps;
+	struct igp_ps *ps = rs780_get_ps(rps);
+	struct atom_clock_dividers dividers;
+	int ret;
+
+	rs780_clk_scaling_enable(rdev, false);
+	rs780_voltage_scaling_enable(rdev, false);
+
+	if (level == RADEON_DPM_FORCED_LEVEL_HIGH) {
+		if (pi->voltage_control)
+			rs780_force_voltage(rdev, pi->max_voltage);
+
+		ret = radeon_atom_get_clock_dividers(rdev, COMPUTE_ENGINE_PLL_PARAM,
+						     ps->sclk_high, false, &dividers);
+		if (ret)
+			return ret;
+
+		rs780_force_fbdiv(rdev, dividers.fb_div);
+	} else if (level == RADEON_DPM_FORCED_LEVEL_LOW) {
+		ret = radeon_atom_get_clock_dividers(rdev, COMPUTE_ENGINE_PLL_PARAM,
+						     ps->sclk_low, false, &dividers);
+		if (ret)
+			return ret;
+
+		rs780_force_fbdiv(rdev, dividers.fb_div);
+
+		if (pi->voltage_control)
+			rs780_force_voltage(rdev, pi->min_voltage);
+	} else {
+		if (pi->voltage_control)
+			rs780_force_voltage(rdev, pi->max_voltage);
+
+		if (ps->sclk_high != ps->sclk_low) {
+			WREG32_P(FVTHROT_FBDIV_REG1, 0, ~FORCE_FEEDBACK_DIV);
+			rs780_clk_scaling_enable(rdev, true);
+		}
+
+		if (pi->voltage_control) {
+			rs780_voltage_scaling_enable(rdev, true);
+			rs780_enable_voltage_scaling(rdev, rps);
+		}
+	}
+
+	rdev->pm.dpm.forced_level = level;
+
+	return 0;
+}
diff --git a/drivers/gpu/drm/radeon/rv515.c b/drivers/gpu/drm/radeon/rv515.c
index 8ea1573..873eb4b 100644
--- a/drivers/gpu/drm/radeon/rv515.c
+++ b/drivers/gpu/drm/radeon/rv515.c
@@ -209,19 +209,27 @@
 
 uint32_t rv515_mc_rreg(struct radeon_device *rdev, uint32_t reg)
 {
+	unsigned long flags;
 	uint32_t r;
 
+	spin_lock_irqsave(&rdev->mc_idx_lock, flags);
 	WREG32(MC_IND_INDEX, 0x7f0000 | (reg & 0xffff));
 	r = RREG32(MC_IND_DATA);
 	WREG32(MC_IND_INDEX, 0);
+	spin_unlock_irqrestore(&rdev->mc_idx_lock, flags);
+
 	return r;
 }
 
 void rv515_mc_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v)
 {
+	unsigned long flags;
+
+	spin_lock_irqsave(&rdev->mc_idx_lock, flags);
 	WREG32(MC_IND_INDEX, 0xff0000 | ((reg) & 0xffff));
 	WREG32(MC_IND_DATA, (v));
 	WREG32(MC_IND_INDEX, 0);
+	spin_unlock_irqrestore(&rdev->mc_idx_lock, flags);
 }
 
 #if defined(CONFIG_DEBUG_FS)
diff --git a/drivers/gpu/drm/radeon/rv6xx_dpm.c b/drivers/gpu/drm/radeon/rv6xx_dpm.c
index ab1f201..5811d27 100644
--- a/drivers/gpu/drm/radeon/rv6xx_dpm.c
+++ b/drivers/gpu/drm/radeon/rv6xx_dpm.c
@@ -1758,8 +1758,6 @@
 
 	rv6xx_set_uvd_clock_after_set_eng_clock(rdev, new_ps, old_ps);
 
-	rdev->pm.dpm.forced_level = RADEON_DPM_FORCED_LEVEL_AUTO;
-
 	return 0;
 }
 
diff --git a/drivers/gpu/drm/radeon/rv770_dpm.c b/drivers/gpu/drm/radeon/rv770_dpm.c
index 8cbb85d..913b025 100644
--- a/drivers/gpu/drm/radeon/rv770_dpm.c
+++ b/drivers/gpu/drm/radeon/rv770_dpm.c
@@ -2064,12 +2064,6 @@
 		rv770_program_dcodt_after_state_switch(rdev, new_ps, old_ps);
 	rv770_set_uvd_clock_after_set_eng_clock(rdev, new_ps, old_ps);
 
-	ret = rv770_dpm_force_performance_level(rdev, RADEON_DPM_FORCED_LEVEL_AUTO);
-	if (ret) {
-		DRM_ERROR("rv770_dpm_force_performance_level failed\n");
-		return ret;
-	}
-
 	return 0;
 }
 
@@ -2147,14 +2141,18 @@
 	if (ATOM_PPLIB_NONCLOCKINFO_VER1 < table_rev) {
 		rps->vclk = le32_to_cpu(non_clock_info->ulVCLK);
 		rps->dclk = le32_to_cpu(non_clock_info->ulDCLK);
-	} else if (r600_is_uvd_state(rps->class, rps->class2)) {
-		rps->vclk = RV770_DEFAULT_VCLK_FREQ;
-		rps->dclk = RV770_DEFAULT_DCLK_FREQ;
 	} else {
 		rps->vclk = 0;
 		rps->dclk = 0;
 	}
 
+	if (r600_is_uvd_state(rps->class, rps->class2)) {
+		if ((rps->vclk == 0) || (rps->dclk == 0)) {
+			rps->vclk = RV770_DEFAULT_VCLK_FREQ;
+			rps->dclk = RV770_DEFAULT_DCLK_FREQ;
+		}
+	}
+
 	if (rps->class & ATOM_PPLIB_CLASSIFICATION_BOOT)
 		rdev->pm.dpm.boot_ps = rps;
 	if (rps->class & ATOM_PPLIB_CLASSIFICATION_UVDSTATE)
diff --git a/drivers/gpu/drm/radeon/rv770_smc.c b/drivers/gpu/drm/radeon/rv770_smc.c
index ab95da5..b2a2244 100644
--- a/drivers/gpu/drm/radeon/rv770_smc.c
+++ b/drivers/gpu/drm/radeon/rv770_smc.c
@@ -274,8 +274,8 @@
 	0x08, 0x72, 0x08, 0x72
 };
 
-int rv770_set_smc_sram_address(struct radeon_device *rdev,
-			       u16 smc_address, u16 limit)
+static int rv770_set_smc_sram_address(struct radeon_device *rdev,
+				      u16 smc_address, u16 limit)
 {
 	u32 addr;
 
@@ -296,9 +296,10 @@
 			    u16 smc_start_address, const u8 *src,
 			    u16 byte_count, u16 limit)
 {
+	unsigned long flags;
 	u32 data, original_data, extra_shift;
 	u16 addr;
-	int ret;
+	int ret = 0;
 
 	if (smc_start_address & 3)
 		return -EINVAL;
@@ -307,13 +308,14 @@
 
 	addr = smc_start_address;
 
+	spin_lock_irqsave(&rdev->smc_idx_lock, flags);
 	while (byte_count >= 4) {
 		/* SMC address space is BE */
 		data = (src[0] << 24) | (src[1] << 16) | (src[2] << 8) | src[3];
 
 		ret = rv770_set_smc_sram_address(rdev, addr, limit);
 		if (ret)
-			return ret;
+			goto done;
 
 		WREG32(SMC_SRAM_DATA, data);
 
@@ -328,7 +330,7 @@
 
 		ret = rv770_set_smc_sram_address(rdev, addr, limit);
 		if (ret)
-			return ret;
+			goto done;
 
 		original_data = RREG32(SMC_SRAM_DATA);
 
@@ -346,12 +348,15 @@
 
 		ret = rv770_set_smc_sram_address(rdev, addr, limit);
 		if (ret)
-			return ret;
+			goto done;
 
 		WREG32(SMC_SRAM_DATA, data);
 	}
 
-	return 0;
+done:
+	spin_unlock_irqrestore(&rdev->smc_idx_lock, flags);
+
+	return ret;
 }
 
 static int rv770_program_interrupt_vectors(struct radeon_device *rdev,
@@ -461,12 +466,15 @@
 
 static void rv770_clear_smc_sram(struct radeon_device *rdev, u16 limit)
 {
+	unsigned long flags;
 	u16 i;
 
+	spin_lock_irqsave(&rdev->smc_idx_lock, flags);
 	for (i = 0;  i < limit; i += 4) {
 		rv770_set_smc_sram_address(rdev, i, limit);
 		WREG32(SMC_SRAM_DATA, 0);
 	}
+	spin_unlock_irqrestore(&rdev->smc_idx_lock, flags);
 }
 
 int rv770_load_smc_ucode(struct radeon_device *rdev,
@@ -595,27 +603,29 @@
 int rv770_read_smc_sram_dword(struct radeon_device *rdev,
 			      u16 smc_address, u32 *value, u16 limit)
 {
+	unsigned long flags;
 	int ret;
 
+	spin_lock_irqsave(&rdev->smc_idx_lock, flags);
 	ret = rv770_set_smc_sram_address(rdev, smc_address, limit);
-	if (ret)
-		return ret;
+	if (ret == 0)
+		*value = RREG32(SMC_SRAM_DATA);
+	spin_unlock_irqrestore(&rdev->smc_idx_lock, flags);
 
-	*value = RREG32(SMC_SRAM_DATA);
-
-	return 0;
+	return ret;
 }
 
 int rv770_write_smc_sram_dword(struct radeon_device *rdev,
 			       u16 smc_address, u32 value, u16 limit)
 {
+	unsigned long flags;
 	int ret;
 
+	spin_lock_irqsave(&rdev->smc_idx_lock, flags);
 	ret = rv770_set_smc_sram_address(rdev, smc_address, limit);
-	if (ret)
-		return ret;
+	if (ret == 0)
+		WREG32(SMC_SRAM_DATA, value);
+	spin_unlock_irqrestore(&rdev->smc_idx_lock, flags);
 
-	WREG32(SMC_SRAM_DATA, value);
-
-	return 0;
+	return ret;
 }
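
Editor's note: two things happen to the SMC accessors in this file. rv770_set_smc_sram_address() becomes static (its header export is dropped below), so it is only reachable from callers that already hold rdev->smc_idx_lock, and every early return inside the copy loop turns into a goto so the lock is released exactly once. A sketch of that single-unlock error-unwind shape, with placeholder names standing in for the radeon ones:

/* Illustrative only: all early exits jump to one "done" label so the
 * index lock is dropped exactly once on every path.  my_set_sram_address
 * and the register offsets are placeholders.
 */
static int my_copy_words(struct my_device *dev, u32 addr,
			 const u32 *src, unsigned int count, u32 limit)
{
	unsigned long flags;
	unsigned int i;
	int ret = 0;			/* success unless an address is rejected */

	spin_lock_irqsave(&dev->smc_idx_lock, flags);
	for (i = 0; i < count; i++) {
		ret = my_set_sram_address(dev, addr + 4 * i, limit);
		if (ret)
			goto done;	/* unlock before propagating the error */
		writel(src[i], dev->mmio + MY_SRAM_DATA);
	}
done:
	spin_unlock_irqrestore(&dev->smc_idx_lock, flags);
	return ret;
}
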
diff --git a/drivers/gpu/drm/radeon/rv770_smc.h b/drivers/gpu/drm/radeon/rv770_smc.h
index f78d92a..3b2c963 100644
--- a/drivers/gpu/drm/radeon/rv770_smc.h
+++ b/drivers/gpu/drm/radeon/rv770_smc.h
@@ -187,8 +187,6 @@
 #define RV770_SMC_SOFT_REGISTER_uvd_enabled             0x9C
 #define RV770_SMC_SOFT_REGISTER_is_asic_lombok          0xA0
 
-int rv770_set_smc_sram_address(struct radeon_device *rdev,
-			       u16 smc_address, u16 limit);
 int rv770_copy_bytes_to_smc(struct radeon_device *rdev,
 			    u16 smc_start_address, const u8 *src,
 			    u16 byte_count, u16 limit);
diff --git a/drivers/gpu/drm/radeon/rv770d.h b/drivers/gpu/drm/radeon/rv770d.h
index 9fe60e5..1ae2771 100644
--- a/drivers/gpu/drm/radeon/rv770d.h
+++ b/drivers/gpu/drm/radeon/rv770d.h
@@ -852,7 +852,7 @@
 #define AFMT_VBI_PACKET_CONTROL              0x7608
 #       define AFMT_GENERIC0_UPDATE          (1 << 2)
 #define AFMT_INFOFRAME_CONTROL0              0x760c
-#       define AFMT_AUDIO_INFO_SOURCE        (1 << 6) /* 0 - sound block; 1 - hmdi regs */
+#       define AFMT_AUDIO_INFO_SOURCE        (1 << 6) /* 0 - sound block; 1 - hdmi regs */
 #       define AFMT_AUDIO_INFO_UPDATE        (1 << 7)
 #       define AFMT_MPEG_INFO_UPDATE         (1 << 10)
 #define AFMT_GENERIC0_7                      0x7610
diff --git a/drivers/gpu/drm/radeon/si.c b/drivers/gpu/drm/radeon/si.c
index 3e23b75..c354c10 100644
--- a/drivers/gpu/drm/radeon/si.c
+++ b/drivers/gpu/drm/radeon/si.c
@@ -83,6 +83,8 @@
 			       uint64_t pe,
 			       uint64_t addr, unsigned count,
 			       uint32_t incr, uint32_t flags);
+static void si_enable_gui_idle_interrupt(struct radeon_device *rdev,
+					 bool enable);
 
 static const u32 verde_rlc_save_restore_register_list[] =
 {
@@ -3386,6 +3388,8 @@
 	u32 rb_bufsz;
 	int r;
 
+	si_enable_gui_idle_interrupt(rdev, false);
+
 	WREG32(CP_SEM_WAIT_TIMER, 0x0);
 	WREG32(CP_SEM_INCOMPLETE_TIMER_CNTL, 0x0);
 
@@ -3501,6 +3505,8 @@
 		rdev->ring[CAYMAN_RING_TYPE_CP2_INDEX].ready = false;
 	}
 
+	si_enable_gui_idle_interrupt(rdev, true);
+
 	return 0;
 }
 
@@ -4888,7 +4894,7 @@
 {
 	u32 tmp;
 
-	if (enable && (rdev->pg_flags & RADEON_PG_SUPPORT_GFX_CG)) {
+	if (enable && (rdev->pg_flags & RADEON_PG_SUPPORT_GFX_PG)) {
 		tmp = RLC_PUD(0x10) | RLC_PDD(0x10) | RLC_TTPD(0x10) | RLC_MSD(0x10);
 		WREG32(RLC_TTOP_D, tmp);
 
@@ -5250,6 +5256,7 @@
 		  u32 block, bool enable)
 {
 	if (block & RADEON_CG_BLOCK_GFX) {
+		si_enable_gui_idle_interrupt(rdev, false);
 		/* order matters! */
 		if (enable) {
 			si_enable_mgcg(rdev, true);
@@ -5258,6 +5265,7 @@
 			si_enable_cgcg(rdev, false);
 			si_enable_mgcg(rdev, false);
 		}
+		si_enable_gui_idle_interrupt(rdev, true);
 	}
 
 	if (block & RADEON_CG_BLOCK_MC) {
@@ -5408,7 +5416,7 @@
 			si_init_dma_pg(rdev);
 		}
 		si_init_ao_cu_mask(rdev);
-		if (rdev->pg_flags & RADEON_PG_SUPPORT_GFX_CG) {
+		if (rdev->pg_flags & RADEON_PG_SUPPORT_GFX_PG) {
 			si_init_gfx_cgpg(rdev);
 		}
 		si_enable_dma_pg(rdev, true);
@@ -5560,7 +5568,9 @@
 {
 	u32 tmp;
 
-	WREG32(CP_INT_CNTL_RING0, CNTX_BUSY_INT_ENABLE | CNTX_EMPTY_INT_ENABLE);
+	tmp = RREG32(CP_INT_CNTL_RING0) &
+		(CNTX_BUSY_INT_ENABLE | CNTX_EMPTY_INT_ENABLE);
+	WREG32(CP_INT_CNTL_RING0, tmp);
 	WREG32(CP_INT_CNTL_RING1, 0);
 	WREG32(CP_INT_CNTL_RING2, 0);
 	tmp = RREG32(DMA_CNTL + DMA0_REGISTER_OFFSET) & ~TRAP_ENABLE;
@@ -5685,7 +5695,7 @@
 
 int si_irq_set(struct radeon_device *rdev)
 {
-	u32 cp_int_cntl = CNTX_BUSY_INT_ENABLE | CNTX_EMPTY_INT_ENABLE;
+	u32 cp_int_cntl;
 	u32 cp_int_cntl1 = 0, cp_int_cntl2 = 0;
 	u32 crtc1 = 0, crtc2 = 0, crtc3 = 0, crtc4 = 0, crtc5 = 0, crtc6 = 0;
 	u32 hpd1 = 0, hpd2 = 0, hpd3 = 0, hpd4 = 0, hpd5 = 0, hpd6 = 0;
@@ -5706,6 +5716,9 @@
 		return 0;
 	}
 
+	cp_int_cntl = RREG32(CP_INT_CNTL_RING0) &
+		(CNTX_BUSY_INT_ENABLE | CNTX_EMPTY_INT_ENABLE);
+
 	if (!ASIC_IS_NODCE(rdev)) {
 		hpd1 = RREG32(DC_HPD1_INT_CONTROL) & ~DC_HPDx_INT_EN;
 		hpd2 = RREG32(DC_HPD2_INT_CONTROL) & ~DC_HPDx_INT_EN;
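
Editor's note: the interrupt-control changes follow from the new GUI-idle interrupt handling. Instead of unconditionally writing CNTX_BUSY_INT_ENABLE | CNTX_EMPTY_INT_ENABLE into CP_INT_CNTL_RING0, both si_disable_interrupt_state() and si_irq_set() now read the register first and preserve only those two bits, so whatever si_enable_gui_idle_interrupt() programmed elsewhere in the register is not clobbered. The general read-modify-write shape, with illustrative names:

/* Keep bits owned by another code path intact while reprogramming the
 * rest of a control register.  CNTL_REG, OTHER_OWNED_BITS and my_bits
 * are illustrative, not the real SI register layout.
 */
static void my_program_cntl(struct my_device *dev, u32 my_bits)
{
	u32 tmp;

	tmp = readl(dev->mmio + CNTL_REG) & OTHER_OWNED_BITS;	/* preserve */
	tmp |= my_bits;						/* add ours */
	writel(tmp, dev->mmio + CNTL_REG);
}
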
diff --git a/drivers/gpu/drm/radeon/si_dpm.c b/drivers/gpu/drm/radeon/si_dpm.c
index 5be9b4e..cfe5d4d 100644
--- a/drivers/gpu/drm/radeon/si_dpm.c
+++ b/drivers/gpu/drm/radeon/si_dpm.c
@@ -6075,12 +6075,6 @@
 		return ret;
 	}
 
-	ret = si_dpm_force_performance_level(rdev, RADEON_DPM_FORCED_LEVEL_AUTO);
-	if (ret) {
-		DRM_ERROR("si_dpm_force_performance_level failed\n");
-		return ret;
-	}
-
 	si_update_cg(rdev, (RADEON_CG_BLOCK_GFX |
 			    RADEON_CG_BLOCK_MC |
 			    RADEON_CG_BLOCK_SDMA |
diff --git a/drivers/gpu/drm/radeon/si_smc.c b/drivers/gpu/drm/radeon/si_smc.c
index 5f524c0..d422a1c 100644
--- a/drivers/gpu/drm/radeon/si_smc.c
+++ b/drivers/gpu/drm/radeon/si_smc.c
@@ -29,8 +29,8 @@
 #include "ppsmc.h"
 #include "radeon_ucode.h"
 
-int si_set_smc_sram_address(struct radeon_device *rdev,
-			    u32 smc_address, u32 limit)
+static int si_set_smc_sram_address(struct radeon_device *rdev,
+				   u32 smc_address, u32 limit)
 {
 	if (smc_address & 3)
 		return -EINVAL;
@@ -47,7 +47,8 @@
 			 u32 smc_start_address,
 			 const u8 *src, u32 byte_count, u32 limit)
 {
-	int ret;
+	unsigned long flags;
+	int ret = 0;
 	u32 data, original_data, addr, extra_shift;
 
 	if (smc_start_address & 3)
@@ -57,13 +58,14 @@
 
 	addr = smc_start_address;
 
+	spin_lock_irqsave(&rdev->smc_idx_lock, flags);
 	while (byte_count >= 4) {
 		/* SMC address space is BE */
 		data = (src[0] << 24) | (src[1] << 16) | (src[2] << 8) | src[3];
 
 		ret = si_set_smc_sram_address(rdev, addr, limit);
 		if (ret)
-			return ret;
+			goto done;
 
 		WREG32(SMC_IND_DATA_0, data);
 
@@ -78,7 +80,7 @@
 
 		ret = si_set_smc_sram_address(rdev, addr, limit);
 		if (ret)
-			return ret;
+			goto done;
 
 		original_data = RREG32(SMC_IND_DATA_0);
 
@@ -96,11 +98,15 @@
 
 		ret = si_set_smc_sram_address(rdev, addr, limit);
 		if (ret)
-			return ret;
+			goto done;
 
 		WREG32(SMC_IND_DATA_0, data);
 	}
-	return 0;
+
+done:
+	spin_unlock_irqrestore(&rdev->smc_idx_lock, flags);
+
+	return ret;
 }
 
 void si_start_smc(struct radeon_device *rdev)
@@ -203,6 +209,7 @@
 
 int si_load_smc_ucode(struct radeon_device *rdev, u32 limit)
 {
+	unsigned long flags;
 	u32 ucode_start_address;
 	u32 ucode_size;
 	const u8 *src;
@@ -241,6 +248,7 @@
 		return -EINVAL;
 
 	src = (const u8 *)rdev->smc_fw->data;
+	spin_lock_irqsave(&rdev->smc_idx_lock, flags);
 	WREG32(SMC_IND_INDEX_0, ucode_start_address);
 	WREG32_P(SMC_IND_ACCESS_CNTL, AUTO_INCREMENT_IND_0, ~AUTO_INCREMENT_IND_0);
 	while (ucode_size >= 4) {
@@ -253,6 +261,7 @@
 		ucode_size -= 4;
 	}
 	WREG32_P(SMC_IND_ACCESS_CNTL, 0, ~AUTO_INCREMENT_IND_0);
+	spin_unlock_irqrestore(&rdev->smc_idx_lock, flags);
 
 	return 0;
 }
@@ -260,25 +269,29 @@
 int si_read_smc_sram_dword(struct radeon_device *rdev, u32 smc_address,
 			   u32 *value, u32 limit)
 {
+	unsigned long flags;
 	int ret;
 
+	spin_lock_irqsave(&rdev->smc_idx_lock, flags);
 	ret = si_set_smc_sram_address(rdev, smc_address, limit);
-	if (ret)
-		return ret;
+	if (ret == 0)
+		*value = RREG32(SMC_IND_DATA_0);
+	spin_unlock_irqrestore(&rdev->smc_idx_lock, flags);
 
-	*value = RREG32(SMC_IND_DATA_0);
-	return 0;
+	return ret;
 }
 
 int si_write_smc_sram_dword(struct radeon_device *rdev, u32 smc_address,
 			    u32 value, u32 limit)
 {
+	unsigned long flags;
 	int ret;
 
+	spin_lock_irqsave(&rdev->smc_idx_lock, flags);
 	ret = si_set_smc_sram_address(rdev, smc_address, limit);
-	if (ret)
-		return ret;
+	if (ret == 0)
+		WREG32(SMC_IND_DATA_0, value);
+	spin_unlock_irqrestore(&rdev->smc_idx_lock, flags);
 
-	WREG32(SMC_IND_DATA_0, value);
-	return 0;
+	return ret;
 }
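
Editor's note: si_load_smc_ucode() now holds smc_idx_lock for the whole firmware upload as well: the index register is written once, auto-increment is switched on, and the data words are streamed, so any concurrent indexed access in the middle would corrupt the transfer. A hedged sketch of that auto-increment upload, again with stand-in names:

#include <asm/unaligned.h>

/* Placeholder register names; the real SMC offsets and the AUTO_INC bit
 * differ.  The lock must cover index setup, the data stream and turning
 * auto-increment back off.
 */
static void my_upload_fw(struct my_device *dev, u32 start, const u8 *src, u32 size)
{
	unsigned long flags;

	spin_lock_irqsave(&dev->smc_idx_lock, flags);
	writel(start, dev->mmio + MY_IND_INDEX);	/* one-time index write */
	writel(AUTO_INC, dev->mmio + MY_IND_ACCESS);	/* hw advances the index */
	while (size >= 4) {
		/* SMC address space is big-endian, as the radeon code notes */
		writel(get_unaligned_be32(src), dev->mmio + MY_IND_DATA);
		src += 4;
		size -= 4;
	}
	writel(0, dev->mmio + MY_IND_ACCESS);		/* auto-increment off */
	spin_unlock_irqrestore(&dev->smc_idx_lock, flags);
}
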
diff --git a/drivers/gpu/drm/radeon/sumo_dpm.c b/drivers/gpu/drm/radeon/sumo_dpm.c
index 864761c..96ea6db8 100644
--- a/drivers/gpu/drm/radeon/sumo_dpm.c
+++ b/drivers/gpu/drm/radeon/sumo_dpm.c
@@ -1319,8 +1319,6 @@
 	if (pi->enable_dpm)
 		sumo_set_uvd_clock_after_set_eng_clock(rdev, new_ps, old_ps);
 
-	rdev->pm.dpm.forced_level = RADEON_DPM_FORCED_LEVEL_AUTO;
-
 	return 0;
 }
 
diff --git a/drivers/gpu/drm/radeon/trinity_dpm.c b/drivers/gpu/drm/radeon/trinity_dpm.c
index b07b7b8..7f998bf 100644
--- a/drivers/gpu/drm/radeon/trinity_dpm.c
+++ b/drivers/gpu/drm/radeon/trinity_dpm.c
@@ -1068,6 +1068,17 @@
 	pi->requested_rps.ps_priv = &pi->requested_ps;
 }
 
+void trinity_dpm_enable_bapm(struct radeon_device *rdev, bool enable)
+{
+	struct trinity_power_info *pi = trinity_get_pi(rdev);
+
+	if (pi->enable_bapm) {
+		trinity_acquire_mutex(rdev);
+		trinity_dpm_bapm_enable(rdev, enable);
+		trinity_release_mutex(rdev);
+	}
+}
+
 int trinity_dpm_enable(struct radeon_device *rdev)
 {
 	struct trinity_power_info *pi = trinity_get_pi(rdev);
@@ -1091,6 +1102,7 @@
 	trinity_program_sclk_dpm(rdev);
 	trinity_start_dpm(rdev);
 	trinity_wait_for_dpm_enabled(rdev);
+	trinity_dpm_bapm_enable(rdev, false);
 	trinity_release_mutex(rdev);
 
 	if (rdev->irq.installed &&
@@ -1116,6 +1128,7 @@
 		trinity_release_mutex(rdev);
 		return;
 	}
+	trinity_dpm_bapm_enable(rdev, false);
 	trinity_disable_clock_power_gating(rdev);
 	sumo_clear_vc(rdev);
 	trinity_wait_for_level_0(rdev);
@@ -1212,6 +1225,8 @@
 
 	trinity_acquire_mutex(rdev);
 	if (pi->enable_dpm) {
+		if (pi->enable_bapm)
+			trinity_dpm_bapm_enable(rdev, rdev->pm.dpm.ac_power);
 		trinity_set_uvd_clock_before_set_eng_clock(rdev, new_ps, old_ps);
 		trinity_enable_power_level_0(rdev);
 		trinity_force_level_0(rdev);
@@ -1221,7 +1236,6 @@
 		trinity_force_level_0(rdev);
 		trinity_unforce_levels(rdev);
 		trinity_set_uvd_clock_after_set_eng_clock(rdev, new_ps, old_ps);
-		rdev->pm.dpm.forced_level = RADEON_DPM_FORCED_LEVEL_AUTO;
 	}
 	trinity_release_mutex(rdev);
 
@@ -1854,6 +1868,7 @@
 	for (i = 0; i < SUMO_MAX_HARDWARE_POWERLEVELS; i++)
 		pi->at[i] = TRINITY_AT_DFLT;
 
+	pi->enable_bapm = true;
 	pi->enable_nbps_policy = true;
 	pi->enable_sclk_ds = true;
 	pi->enable_gfx_power_gating = true;
diff --git a/drivers/gpu/drm/radeon/trinity_dpm.h b/drivers/gpu/drm/radeon/trinity_dpm.h
index e82df07..c261657 100644
--- a/drivers/gpu/drm/radeon/trinity_dpm.h
+++ b/drivers/gpu/drm/radeon/trinity_dpm.h
@@ -108,6 +108,7 @@
 	bool enable_auto_thermal_throttling;
 	bool enable_dpm;
 	bool enable_sclk_ds;
+	bool enable_bapm;
 	bool uvd_dpm;
 	struct radeon_ps current_rps;
 	struct trinity_ps current_ps;
@@ -118,6 +119,7 @@
 #define TRINITY_AT_DFLT            30
 
 /* trinity_smc.c */
+int trinity_dpm_bapm_enable(struct radeon_device *rdev, bool enable);
 int trinity_dpm_config(struct radeon_device *rdev, bool enable);
 int trinity_uvd_dpm_config(struct radeon_device *rdev);
 int trinity_dpm_force_state(struct radeon_device *rdev, u32 n);
diff --git a/drivers/gpu/drm/radeon/trinity_smc.c b/drivers/gpu/drm/radeon/trinity_smc.c
index a42d89f..9672bcb 100644
--- a/drivers/gpu/drm/radeon/trinity_smc.c
+++ b/drivers/gpu/drm/radeon/trinity_smc.c
@@ -56,6 +56,14 @@
 	return 0;
 }
 
+int trinity_dpm_bapm_enable(struct radeon_device *rdev, bool enable)
+{
+	if (enable)
+		return trinity_notify_message_to_smu(rdev, PPSMC_MSG_EnableBAPM);
+	else
+		return trinity_notify_message_to_smu(rdev, PPSMC_MSG_DisableBAPM);
+}
+
 int trinity_dpm_config(struct radeon_device *rdev, bool enable)
 {
 	if (enable)
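
Editor's note: trinity_dpm_bapm_enable() (declared in trinity_dpm.h above) wraps this SMU message with the SMC mutex and honors pi->enable_bapm, and the DPM code only leaves BAPM on while running from AC power. A hypothetical handler for a power-source change, just to show how the helper is meant to be called; the notifier wiring itself is not part of this patch:

/* Hypothetical example: react to an AC/DC transition.  Only the call to
 * trinity_dpm_enable_bapm() comes from this patch; the handler and its
 * hookup are made up for illustration.
 */
static void my_ac_power_changed(struct radeon_device *rdev, bool ac_power)
{
	rdev->pm.dpm.ac_power = ac_power;

	/* Takes the SMC mutex internally and is a no-op when the
	 * trinity_power_info enable_bapm flag is clear. */
	trinity_dpm_enable_bapm(rdev, ac_power);
}
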
diff --git a/drivers/gpu/drm/udl/udl_gem.c b/drivers/gpu/drm/udl/udl_gem.c
index 8dbe9d0..8bf6461 100644
--- a/drivers/gpu/drm/udl/udl_gem.c
+++ b/drivers/gpu/drm/udl/udl_gem.c
@@ -97,7 +97,6 @@
 	ret = vm_insert_page(vma, (unsigned long)vmf->virtual_address, page);
 	switch (ret) {
 	case -EAGAIN:
-		set_need_resched();
 	case 0:
 	case -ERESTARTSYS:
 		return VM_FAULT_NOPAGE;
diff --git a/drivers/gpu/vga/vgaarb.c b/drivers/gpu/vga/vgaarb.c
index e893f6e..af02597 100644
--- a/drivers/gpu/vga/vgaarb.c
+++ b/drivers/gpu/vga/vgaarb.c
@@ -257,9 +257,9 @@
 		if (!conflict->bridge_has_one_vga) {
 			vga_irq_set_state(conflict, false);
 			flags |= PCI_VGA_STATE_CHANGE_DECODES;
-			if (lwants & (VGA_RSRC_LEGACY_MEM|VGA_RSRC_NORMAL_MEM))
+			if (match & (VGA_RSRC_LEGACY_MEM|VGA_RSRC_NORMAL_MEM))
 				pci_bits |= PCI_COMMAND_MEMORY;
-			if (lwants & (VGA_RSRC_LEGACY_IO|VGA_RSRC_NORMAL_IO))
+			if (match & (VGA_RSRC_LEGACY_IO|VGA_RSRC_NORMAL_IO))
 				pci_bits |= PCI_COMMAND_IO;
 		}
 
@@ -267,11 +267,11 @@
 			flags |= PCI_VGA_STATE_CHANGE_BRIDGE;
 
 		pci_set_vga_state(conflict->pdev, false, pci_bits, flags);
-		conflict->owns &= ~lwants;
+		conflict->owns &= ~match;
 		/* If he also owned non-legacy, that is no longer the case */
-		if (lwants & VGA_RSRC_LEGACY_MEM)
+		if (match & VGA_RSRC_LEGACY_MEM)
 			conflict->owns &= ~VGA_RSRC_NORMAL_MEM;
-		if (lwants & VGA_RSRC_LEGACY_IO)
+		if (match & VGA_RSRC_LEGACY_IO)
 			conflict->owns &= ~VGA_RSRC_NORMAL_IO;
 	}
 
@@ -644,10 +644,12 @@
 static inline void vga_update_device_decodes(struct vga_device *vgadev,
 					     int new_decodes)
 {
-	int old_decodes;
-	struct vga_device *new_vgadev, *conflict;
+	int old_decodes, decodes_removed, decodes_unlocked;
 
 	old_decodes = vgadev->decodes;
+	decodes_removed = ~new_decodes & old_decodes;
+	decodes_unlocked = vgadev->locks & decodes_removed;
+	vgadev->owns &= ~decodes_removed;
 	vgadev->decodes = new_decodes;
 
 	pr_info("vgaarb: device changed decodes: PCI:%s,olddecodes=%s,decodes=%s:owns=%s\n",
@@ -656,31 +658,22 @@
 		vga_iostate_to_str(vgadev->decodes),
 		vga_iostate_to_str(vgadev->owns));
 
-
-	/* if we own the decodes we should move them along to
-	   another card */
-	if ((vgadev->owns & old_decodes) && (vga_count > 1)) {
-		/* set us to own nothing */
-		vgadev->owns &= ~old_decodes;
-		list_for_each_entry(new_vgadev, &vga_list, list) {
-			if ((new_vgadev != vgadev) &&
-			    (new_vgadev->decodes & VGA_RSRC_LEGACY_MASK)) {
-				pr_info("vgaarb: transferring owner from PCI:%s to PCI:%s\n", pci_name(vgadev->pdev), pci_name(new_vgadev->pdev));
-				conflict = __vga_tryget(new_vgadev, VGA_RSRC_LEGACY_MASK);
-				if (!conflict)
-					__vga_put(new_vgadev, VGA_RSRC_LEGACY_MASK);
-				break;
-			}
-		}
+	/* if we removed locked decodes, lock count goes to zero, and release */
+	if (decodes_unlocked) {
+		if (decodes_unlocked & VGA_RSRC_LEGACY_IO)
+			vgadev->io_lock_cnt = 0;
+		if (decodes_unlocked & VGA_RSRC_LEGACY_MEM)
+			vgadev->mem_lock_cnt = 0;
+		__vga_put(vgadev, decodes_unlocked);
 	}
 
 	/* change decodes counter */
-	if (old_decodes != new_decodes) {
-		if (new_decodes & (VGA_RSRC_LEGACY_IO | VGA_RSRC_LEGACY_MEM))
-			vga_decode_count++;
-		else
-			vga_decode_count--;
-	}
+	if (old_decodes & VGA_RSRC_LEGACY_MASK &&
+	    !(new_decodes & VGA_RSRC_LEGACY_MASK))
+		vga_decode_count--;
+	if (!(old_decodes & VGA_RSRC_LEGACY_MASK) &&
+	    new_decodes & VGA_RSRC_LEGACY_MASK)
+		vga_decode_count++;
 	pr_debug("vgaarb: decoding count now is: %d\n", vga_decode_count);
 }
 
diff --git a/include/drm/drm_pciids.h b/include/drm/drm_pciids.h
index fd54a14..3d79e51 100644
--- a/include/drm/drm_pciids.h
+++ b/include/drm/drm_pciids.h
@@ -12,11 +12,14 @@
 	{0x1002, 0x130F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
 	{0x1002, 0x1310, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
 	{0x1002, 0x1311, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
+	{0x1002, 0x1312, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
 	{0x1002, 0x1313, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
 	{0x1002, 0x1315, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
 	{0x1002, 0x1316, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
+	{0x1002, 0x1317, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
 	{0x1002, 0x131B, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
 	{0x1002, 0x131C, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
+	{0x1002, 0x131D, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
 	{0x1002, 0x3150, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV380|RADEON_IS_MOBILITY}, \
 	{0x1002, 0x3151, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV380|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
 	{0x1002, 0x3152, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV380|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
diff --git a/include/drm/exynos_drm.h b/include/drm/exynos_drm.h
index d6aeaf3..cb65fa1 100644
--- a/include/drm/exynos_drm.h
+++ b/include/drm/exynos_drm.h
@@ -15,6 +15,7 @@
 #define _EXYNOS_DRM_H_
 
 #include <uapi/drm/exynos_drm.h>
+#include <video/videomode.h>
 
 /**
  * A structure for lcd panel information.
@@ -24,7 +25,7 @@
  * @height_mm: physical size of lcd height.
  */
 struct exynos_drm_panel_info {
-	struct fb_videomode timing;
+	struct videomode vm;
 	u32 width_mm;
 	u32 height_mm;
 };
diff --git a/include/drm/i915_drm.h b/include/drm/i915_drm.h
index 63d609d..3abfa6e 100644
--- a/include/drm/i915_drm.h
+++ b/include/drm/i915_drm.h
@@ -26,6 +26,7 @@
 #ifndef _I915_DRM_H_
 #define _I915_DRM_H_
 
+#include <drm/i915_pciids.h>
 #include <uapi/drm/i915_drm.h>
 
 /* For use by IPS driver */
@@ -34,4 +35,37 @@
 extern bool i915_gpu_lower(void);
 extern bool i915_gpu_busy(void);
 extern bool i915_gpu_turbo_disable(void);
+
+/*
+ * The Bridge device's PCI config space has information about the
+ * fb aperture size and the amount of pre-reserved memory.
+ * This is all handled in the intel-gtt.ko module. i915.ko only
+ * cares about the vga bit for the vga arbiter.
+ */
+#define INTEL_GMCH_CTRL		0x52
+#define INTEL_GMCH_VGA_DISABLE  (1 << 1)
+#define SNB_GMCH_CTRL		0x50
+#define    SNB_GMCH_GGMS_SHIFT	8 /* GTT Graphics Memory Size */
+#define    SNB_GMCH_GGMS_MASK	0x3
+#define    SNB_GMCH_GMS_SHIFT   3 /* Graphics Mode Select */
+#define    SNB_GMCH_GMS_MASK    0x1f
+
+#define I830_GMCH_CTRL			0x52
+
+#define I855_GMCH_GMS_MASK		0xF0
+#define I855_GMCH_GMS_STOLEN_0M		0x0
+#define I855_GMCH_GMS_STOLEN_1M		(0x1 << 4)
+#define I855_GMCH_GMS_STOLEN_4M		(0x2 << 4)
+#define I855_GMCH_GMS_STOLEN_8M		(0x3 << 4)
+#define I855_GMCH_GMS_STOLEN_16M	(0x4 << 4)
+#define I855_GMCH_GMS_STOLEN_32M	(0x5 << 4)
+#define I915_GMCH_GMS_STOLEN_48M	(0x6 << 4)
+#define I915_GMCH_GMS_STOLEN_64M	(0x7 << 4)
+#define G33_GMCH_GMS_STOLEN_128M	(0x8 << 4)
+#define G33_GMCH_GMS_STOLEN_256M	(0x9 << 4)
+#define INTEL_GMCH_GMS_STOLEN_96M	(0xa << 4)
+#define INTEL_GMCH_GMS_STOLEN_160M	(0xb << 4)
+#define INTEL_GMCH_GMS_STOLEN_224M	(0xc << 4)
+#define INTEL_GMCH_GMS_STOLEN_352M	(0xd << 4)
+
 #endif				/* _I915_DRM_H_ */
diff --git a/include/drm/i915_pciids.h b/include/drm/i915_pciids.h
new file mode 100644
index 0000000..8a10f5c
--- /dev/null
+++ b/include/drm/i915_pciids.h
@@ -0,0 +1,211 @@
+/*
+ * Copyright 2013 Intel Corporation
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+#ifndef _I915_PCIIDS_H
+#define _I915_PCIIDS_H
+
+/*
+ * A pci_device_id struct {
+ *	__u32 vendor, device;
+ *      __u32 subvendor, subdevice;
+ *	__u32 class, class_mask;
+ *	kernel_ulong_t driver_data;
+ * };
+ * Don't use C99 designated initializers here because "class" is a
+ * reserved keyword in C++ and we want to give userspace flexibility.
+ */
+#define INTEL_VGA_DEVICE(id, info) {		\
+	0x8086,	id,				\
+	~0, ~0,					\
+	0x030000, 0xff0000,			\
+	(unsigned long) info }
+
+#define INTEL_QUANTA_VGA_DEVICE(info) {		\
+	0x8086,	0x16a,				\
+	0x152d,	0x8990,				\
+	0x030000, 0xff0000,			\
+	(unsigned long) info }
+
+#define INTEL_I830_IDS(info)				\
+	INTEL_VGA_DEVICE(0x3577, info)
+
+#define INTEL_I845G_IDS(info)				\
+	INTEL_VGA_DEVICE(0x2562, info)
+
+#define INTEL_I85X_IDS(info)				\
+	INTEL_VGA_DEVICE(0x3582, info), /* I855_GM */ \
+	INTEL_VGA_DEVICE(0x358e, info)
+
+#define INTEL_I865G_IDS(info)				\
+	INTEL_VGA_DEVICE(0x2572, info) /* I865_G */
+
+#define INTEL_I915G_IDS(info)				\
+	INTEL_VGA_DEVICE(0x2582, info), /* I915_G */ \
+	INTEL_VGA_DEVICE(0x258a, info)  /* E7221_G */
+
+#define INTEL_I915GM_IDS(info)				\
+	INTEL_VGA_DEVICE(0x2592, info) /* I915_GM */
+
+#define INTEL_I945G_IDS(info)				\
+	INTEL_VGA_DEVICE(0x2772, info) /* I945_G */
+
+#define INTEL_I945GM_IDS(info)				\
+	INTEL_VGA_DEVICE(0x27a2, info), /* I945_GM */ \
+	INTEL_VGA_DEVICE(0x27ae, info)  /* I945_GME */
+
+#define INTEL_I965G_IDS(info)				\
+	INTEL_VGA_DEVICE(0x2972, info), /* I946_GZ */	\
+	INTEL_VGA_DEVICE(0x2982, info),	/* G35_G */	\
+	INTEL_VGA_DEVICE(0x2992, info),	/* I965_Q */	\
+	INTEL_VGA_DEVICE(0x29a2, info)	/* I965_G */
+
+#define INTEL_G33_IDS(info)				\
+	INTEL_VGA_DEVICE(0x29b2, info), /* Q35_G */ \
+	INTEL_VGA_DEVICE(0x29c2, info),	/* G33_G */ \
+	INTEL_VGA_DEVICE(0x29d2, info)	/* Q33_G */
+
+#define INTEL_I965GM_IDS(info)				\
+	INTEL_VGA_DEVICE(0x2a02, info),	/* I965_GM */ \
+	INTEL_VGA_DEVICE(0x2a12, info)  /* I965_GME */
+
+#define INTEL_GM45_IDS(info)				\
+	INTEL_VGA_DEVICE(0x2a42, info) /* GM45_G */
+
+#define INTEL_G45_IDS(info)				\
+	INTEL_VGA_DEVICE(0x2e02, info), /* IGD_E_G */ \
+	INTEL_VGA_DEVICE(0x2e12, info), /* Q45_G */ \
+	INTEL_VGA_DEVICE(0x2e22, info), /* G45_G */ \
+	INTEL_VGA_DEVICE(0x2e32, info), /* G41_G */ \
+	INTEL_VGA_DEVICE(0x2e42, info), /* B43_G */ \
+	INTEL_VGA_DEVICE(0x2e92, info)	/* B43_G.1 */
+
+#define INTEL_PINEVIEW_IDS(info)			\
+	INTEL_VGA_DEVICE(0xa001, info),			\
+	INTEL_VGA_DEVICE(0xa011, info)
+
+#define INTEL_IRONLAKE_D_IDS(info) \
+	INTEL_VGA_DEVICE(0x0042, info)
+
+#define INTEL_IRONLAKE_M_IDS(info) \
+	INTEL_VGA_DEVICE(0x0046, info)
+
+#define INTEL_SNB_D_IDS(info) \
+	INTEL_VGA_DEVICE(0x0102, info), \
+	INTEL_VGA_DEVICE(0x0112, info), \
+	INTEL_VGA_DEVICE(0x0122, info), \
+	INTEL_VGA_DEVICE(0x010A, info)
+
+#define INTEL_SNB_M_IDS(info) \
+	INTEL_VGA_DEVICE(0x0106, info), \
+	INTEL_VGA_DEVICE(0x0116, info), \
+	INTEL_VGA_DEVICE(0x0126, info)
+
+#define INTEL_IVB_M_IDS(info) \
+	INTEL_VGA_DEVICE(0x0156, info), /* GT1 mobile */ \
+	INTEL_VGA_DEVICE(0x0166, info)  /* GT2 mobile */
+
+#define INTEL_IVB_D_IDS(info) \
+	INTEL_VGA_DEVICE(0x0152, info), /* GT1 desktop */ \
+	INTEL_VGA_DEVICE(0x0162, info), /* GT2 desktop */ \
+	INTEL_VGA_DEVICE(0x015a, info), /* GT1 server */ \
+	INTEL_VGA_DEVICE(0x016a, info)  /* GT2 server */
+
+#define INTEL_IVB_Q_IDS(info) \
+	INTEL_QUANTA_VGA_DEVICE(info) /* Quanta transcode */
+
+#define INTEL_HSW_D_IDS(info) \
+	INTEL_VGA_DEVICE(0x0402, info), /* GT1 desktop */ \
+	INTEL_VGA_DEVICE(0x0412, info), /* GT2 desktop */ \
+	INTEL_VGA_DEVICE(0x0422, info), /* GT3 desktop */ \
+	INTEL_VGA_DEVICE(0x040a, info), /* GT1 server */ \
+	INTEL_VGA_DEVICE(0x041a, info), /* GT2 server */ \
+	INTEL_VGA_DEVICE(0x042a, info), /* GT3 server */ \
+	INTEL_VGA_DEVICE(0x040B, info), /* GT1 reserved */ \
+	INTEL_VGA_DEVICE(0x041B, info), /* GT2 reserved */ \
+	INTEL_VGA_DEVICE(0x042B, info), /* GT3 reserved */ \
+	INTEL_VGA_DEVICE(0x040E, info), /* GT1 reserved */ \
+	INTEL_VGA_DEVICE(0x041E, info), /* GT2 reserved */ \
+	INTEL_VGA_DEVICE(0x042E, info), /* GT3 reserved */ \
+	INTEL_VGA_DEVICE(0x0C02, info), /* SDV GT1 desktop */ \
+	INTEL_VGA_DEVICE(0x0C12, info), /* SDV GT2 desktop */ \
+	INTEL_VGA_DEVICE(0x0C22, info), /* SDV GT3 desktop */ \
+	INTEL_VGA_DEVICE(0x0C0A, info), /* SDV GT1 server */ \
+	INTEL_VGA_DEVICE(0x0C1A, info), /* SDV GT2 server */ \
+	INTEL_VGA_DEVICE(0x0C2A, info), /* SDV GT3 server */ \
+	INTEL_VGA_DEVICE(0x0C0B, info), /* SDV GT1 reserved */ \
+	INTEL_VGA_DEVICE(0x0C1B, info), /* SDV GT2 reserved */ \
+	INTEL_VGA_DEVICE(0x0C2B, info), /* SDV GT3 reserved */ \
+	INTEL_VGA_DEVICE(0x0C0E, info), /* SDV GT1 reserved */ \
+	INTEL_VGA_DEVICE(0x0C1E, info), /* SDV GT2 reserved */ \
+	INTEL_VGA_DEVICE(0x0C2E, info), /* SDV GT3 reserved */ \
+	INTEL_VGA_DEVICE(0x0A02, info), /* ULT GT1 desktop */ \
+	INTEL_VGA_DEVICE(0x0A12, info), /* ULT GT2 desktop */ \
+	INTEL_VGA_DEVICE(0x0A22, info), /* ULT GT3 desktop */ \
+	INTEL_VGA_DEVICE(0x0A0A, info), /* ULT GT1 server */ \
+	INTEL_VGA_DEVICE(0x0A1A, info), /* ULT GT2 server */ \
+	INTEL_VGA_DEVICE(0x0A2A, info), /* ULT GT3 server */ \
+	INTEL_VGA_DEVICE(0x0A0B, info), /* ULT GT1 reserved */ \
+	INTEL_VGA_DEVICE(0x0A1B, info), /* ULT GT2 reserved */ \
+	INTEL_VGA_DEVICE(0x0A2B, info), /* ULT GT3 reserved */ \
+	INTEL_VGA_DEVICE(0x0D02, info), /* CRW GT1 desktop */ \
+	INTEL_VGA_DEVICE(0x0D12, info), /* CRW GT2 desktop */ \
+	INTEL_VGA_DEVICE(0x0D22, info), /* CRW GT3 desktop */ \
+	INTEL_VGA_DEVICE(0x0D0A, info), /* CRW GT1 server */ \
+	INTEL_VGA_DEVICE(0x0D1A, info), /* CRW GT2 server */ \
+	INTEL_VGA_DEVICE(0x0D2A, info), /* CRW GT3 server */ \
+	INTEL_VGA_DEVICE(0x0D0B, info), /* CRW GT1 reserved */ \
+	INTEL_VGA_DEVICE(0x0D1B, info), /* CRW GT2 reserved */ \
+	INTEL_VGA_DEVICE(0x0D2B, info), /* CRW GT3 reserved */ \
+	INTEL_VGA_DEVICE(0x0D0E, info), /* CRW GT1 reserved */ \
+	INTEL_VGA_DEVICE(0x0D1E, info), /* CRW GT2 reserved */ \
+	INTEL_VGA_DEVICE(0x0D2E, info)  /* CRW GT3 reserved */ \
+
+#define INTEL_HSW_M_IDS(info) \
+	INTEL_VGA_DEVICE(0x0406, info), /* GT1 mobile */ \
+	INTEL_VGA_DEVICE(0x0416, info), /* GT2 mobile */ \
+	INTEL_VGA_DEVICE(0x0426, info), /* GT2 mobile */ \
+	INTEL_VGA_DEVICE(0x0C06, info), /* SDV GT1 mobile */ \
+	INTEL_VGA_DEVICE(0x0C16, info), /* SDV GT2 mobile */ \
+	INTEL_VGA_DEVICE(0x0C26, info), /* SDV GT3 mobile */ \
+	INTEL_VGA_DEVICE(0x0A06, info), /* ULT GT1 mobile */ \
+	INTEL_VGA_DEVICE(0x0A16, info), /* ULT GT2 mobile */ \
+	INTEL_VGA_DEVICE(0x0A26, info), /* ULT GT3 mobile */ \
+	INTEL_VGA_DEVICE(0x0A0E, info), /* ULT GT1 reserved */ \
+	INTEL_VGA_DEVICE(0x0A1E, info), /* ULT GT2 reserved */ \
+	INTEL_VGA_DEVICE(0x0A2E, info), /* ULT GT3 reserved */ \
+	INTEL_VGA_DEVICE(0x0D06, info), /* CRW GT1 mobile */ \
+	INTEL_VGA_DEVICE(0x0D16, info), /* CRW GT2 mobile */ \
+	INTEL_VGA_DEVICE(0x0D26, info)  /* CRW GT3 mobile */
+
+#define INTEL_VLV_M_IDS(info) \
+	INTEL_VGA_DEVICE(0x0f30, info), \
+	INTEL_VGA_DEVICE(0x0f31, info), \
+	INTEL_VGA_DEVICE(0x0f32, info), \
+	INTEL_VGA_DEVICE(0x0f33, info), \
+	INTEL_VGA_DEVICE(0x0157, info)
+
+#define INTEL_VLV_D_IDS(info) \
+	INTEL_VGA_DEVICE(0x0155, info)
+
+#endif /* _I915_PCIIDS_H */
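
Editor's note: moving the ID macros into include/drm/i915_pciids.h lets code outside the i915 driver build device lists from the same single source as the driver's PCI match table. A hedged sketch of how a pci_device_id table might be assembled from them; the info structures and table name here are invented for the example:

#include <linux/module.h>
#include <linux/pci.h>
#include <drm/i915_pciids.h>

/* Driver-private per-generation data; contents are irrelevant here. */
struct my_info { int gen; };

static const struct my_info snb_d_info = { .gen = 6 };
static const struct my_info snb_m_info = { .gen = 6 };

/* Each INTEL_*_IDS() macro expands to one INTEL_VGA_DEVICE() entry per
 * PCI ID, stashing the info pointer in driver_data. */
static const struct pci_device_id my_pciidlist[] = {
	INTEL_SNB_D_IDS(&snb_d_info),
	INTEL_SNB_M_IDS(&snb_m_info),
	{ 0, 0, 0 }
};
MODULE_DEVICE_TABLE(pci, my_pciidlist);
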
diff --git a/include/linux/vgaarb.h b/include/linux/vgaarb.h
index 2c02f3a..80cf817 100644
--- a/include/linux/vgaarb.h
+++ b/include/linux/vgaarb.h
@@ -65,8 +65,15 @@
  *     out of the arbitration process (and it is safe to take
  *     interrupts at any time).
  */
+#if defined(CONFIG_VGA_ARB)
 extern void vga_set_legacy_decoding(struct pci_dev *pdev,
 				    unsigned int decodes);
+#else
+static inline void vga_set_legacy_decoding(struct pci_dev *pdev,
+					   unsigned int decodes)
+{
+}
+#endif
 
 /**
  *     vga_get         - acquire & locks VGA resources