From 651b07c647c66c57b7d6acca2d58b3dd21069b2a Mon Sep 17 00:00:00 2001
From: Dave Jones <davej@redhat.com>
Date: Tue, 26 Oct 2010 13:43:53 -0400
Subject: [PATCH 01/56] rpmlint fluff.

kernel.src:386: W: macro-in-comment %nobuildarches
kernel.src:432: W: macro-in-comment %post
kernel.src:1002: W: macro-in-comment %{vanillaversion}
kernel.src:1003: W: macro-in-comment %{kversion}
kernel.src:1005: W: macro-in-comment %{kversion}
kernel.src:1883: W: macro-in-comment %{image_install_path}

(Packagers should avoid using macros in comments, but this is a minor error.
The easy fix is to escape any macros in comments with %%, to prevent
unexpected and random behavior.)
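
As a quick illustration (with %{_bindir} standing in for any macro here),
rpm expands macros even inside comments, while %% keeps them literal:

  $ rpm --eval '# install into %{_bindir}'     =>  # install into /usr/bin
  $ rpm --eval '# install into %%{_bindir}'    =>  # install into %{_bindir}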
---
 kernel.spec | 12 ++++++------
 1 file changed, 6 insertions(+), 6 deletions(-)

diff --git a/kernel.spec b/kernel.spec
index 3cfabdc10..56c8e5349 100644
--- a/kernel.spec
+++ b/kernel.spec
@@ -383,7 +383,7 @@ Summary: The Linux kernel
 %endif
 
 # To temporarily exclude an architecture from being built, add it to
-# %nobuildarches. Do _NOT_ use the ExclusiveArch: line, because if we
+# %%nobuildarches. Do _NOT_ use the ExclusiveArch: line, because if we
 # don't build kernel-headers then the new build system will no longer let
 # us use the previous build of that package -- it'll just be completely AWOL.
 # Which is a BadThing(tm).
@@ -429,7 +429,7 @@ Summary: The Linux kernel
 %define kernel_headers_conflicts libdrm-devel < 2.4.0-0.15
 
 #
-# Packages that need to be installed before the kernel is, because the %post
+# Packages that need to be installed before the kernel is, because the %%post
 # scripts use them.
 #
 %define kernel_prereq  fileutils, module-init-tools, initscripts >= 8.11.1-1, grubby >= 7.0.10-1
@@ -999,10 +999,10 @@ ApplyOptionalPatch()
 %endif
 %endif
 
-# %{vanillaversion} : the full version name, e.g. 2.6.35-rc6-git3
-# %{kversion}       : the base version, e.g. 2.6.34
+# %%{vanillaversion} : the full version name, e.g. 2.6.35-rc6-git3
+# %%{kversion}       : the base version, e.g. 2.6.34
 
-# Use kernel-%{kversion}%{?dist} as the top-level directory name
+# Use kernel-%%{kversion}%%{?dist} as the top-level directory name
 # so we can prep different trees within a single git directory.
 
 # Build a list of the other top-level kernel tree directories.
@@ -1880,7 +1880,7 @@ fi
 %{_mandir}/man[1-8]/*
 %endif
 
-# This is %{image_install_path} on an arch where that includes ELF files,
+# This is %%{image_install_path} on an arch where that includes ELF files,
 # or empty otherwise.
 %define elf_image_install_path %{?kernel_image_elf:%{image_install_path}}
 

From 66121abff426afb0ec26f7ec9f4d0b697b842e0b Mon Sep 17 00:00:00 2001
From: Dave Jones <davej@redhat.com>
Date: Tue, 26 Oct 2010 13:46:03 -0400
Subject: [PATCH 02/56] These haven't been shipped in some time, so we can
 just remove them.

This also has the effect of silencing the rpmlint warning about
an unversioned obsolete.
---
 kernel.spec | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)

diff --git a/kernel.spec b/kernel.spec
index 56c8e5349..3bccd39c7 100644
--- a/kernel.spec
+++ b/kernel.spec
@@ -481,9 +481,9 @@ ExclusiveArch: noarch %{all_x86} x86_64 ppc ppc64 ia64 sparc sparc64 s390 s390x
 ExclusiveOS: Linux
 
 %kernel_reqprovconf
-%ifarch x86_64 sparc64
-Obsoletes: kernel-smp
-%endif
+
+
+
 
 
 #

From a4246dab4426932bdb452ecfacf430fc74e860ff Mon Sep 17 00:00:00 2001
From: Dave Jones <davej@redhat.com>
Date: Tue, 26 Oct 2010 13:47:32 -0400
Subject: [PATCH 03/56] Fix another rpmlint obsoletes warning

kernel.src:757: W: unversioned-explicit-obsoletes glibc-kernheaders
kernel-headers.x86_64: W: self-obsoletion glibc-kernheaders obsoletes
glibc-kernheaders = 3.0-46
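
(The self-obsoletion comes from the headers package carrying both tags at
once:

  Obsoletes: glibc-kernheaders
  Provides: glibc-kernheaders = 3.0-46

An unversioned Obsoletes matches the package's own provide; bounding it with
"< 3.0-46" only obsoletes the real glibc-kernheaders packages and leaves the
provide alone.)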
---
 kernel.spec | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/kernel.spec b/kernel.spec
index 3bccd39c7..a124ff5ac 100644
--- a/kernel.spec
+++ b/kernel.spec
@@ -754,7 +754,7 @@ options that can be passed to Linux kernel modules at load time.
 %package headers
 Summary: Header files for the Linux kernel for use by glibc
 Group: Development/System
-Obsoletes: glibc-kernheaders
+Obsoletes: glibc-kernheaders < 3.0-46
 Provides: glibc-kernheaders = 3.0-46
 %description headers
 Kernel-headers includes the C header files that specify the interface

From b428b84658a878cb7b37a2eb8dfa2efa58ad9585 Mon Sep 17 00:00:00 2001
From: Dave Jones <davej@redhat.com>
Date: Tue, 26 Oct 2010 13:49:21 -0400
Subject: [PATCH 04/56] stop using the obsolete forms of grep shortcuts.
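
grep -E and grep -F are the preferred spellings of the old egrep/fgrep
aliases, e.g. (patterns taken from the hunks below):

  egrep '^CONFIG_' ...  ->  grep -E '^CONFIG_' ...
  fgrep /drivers/ ...   ->  grep -F /drivers/ ...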

---
 kernel.spec | 8 ++++----
 1 file changed, 4 insertions(+), 4 deletions(-)

diff --git a/kernel.spec b/kernel.spec
index a124ff5ac..4ebbfaeba 100644
--- a/kernel.spec
+++ b/kernel.spec
@@ -941,7 +941,7 @@ ApplyPatch()
     exit 1
   fi
 %if !%{using_upstream_branch}
-  if ! egrep "^Patch[0-9]+: $patch\$" %{_specdir}/${RPM_PACKAGE_NAME%%%%%{?variant}}.spec ; then
+  if ! grep -E "^Patch[0-9]+: $patch\$" %{_specdir}/${RPM_PACKAGE_NAME%%%%%{?variant}}.spec ; then
     if [ "${patch:0:10}" != "patch-2.6." ] ; then
       echo "ERROR: Patch  $patch  not listed as a source patch in specfile"
       exit 1
@@ -1376,7 +1376,7 @@ for i in *.config
 do
   mv $i .config
   Arch=`head -1 .config | cut -b 3-`
-  make ARCH=$Arch listnewconfig | egrep '^CONFIG_' >.newoptions || true
+  make ARCH=$Arch listnewconfig | grep -E '^CONFIG_' >.newoptions || true
 %if %{listnewconfig_fail}
   if [ -s .newoptions ]; then
     cat .newoptions
@@ -1573,7 +1573,7 @@ BuildKernel() {
 
     # Generate a list of modules for block and networking.
 
-    fgrep /drivers/ modnames | xargs --no-run-if-empty nm -upA |
+    grep -F /drivers/ modnames | xargs --no-run-if-empty nm -upA |
     sed -n 's,^.*/\([^/]*\.ko\):  *U \(.*\)$,\1 \2,p' > drivers.undef
 
     collect_modules_list()
@@ -1599,7 +1599,7 @@ BuildKernel() {
       /sbin/modinfo -l $i >> modinfo
     done < modnames
 
-    egrep -v \
+    grep -E -v \
     	  'GPL( v2)?$|Dual BSD/GPL$|Dual MPL/GPL$|GPL and additional rights$' \
 	  modinfo && exit 1
 

From 67d708bd25d4465c24a165c9cf4b0cb0242fcdd9 Mon Sep 17 00:00:00 2001
From: Dave Jones <davej@redhat.com>
Date: Tue, 26 Oct 2010 13:50:26 -0400
Subject: [PATCH 05/56] more rpmlint fixes.

kernel.src:2188: W: macro-in-%changelog %released_kernel
kernel.src:2232: W: macro-in-%changelog %verify
kernel.src:2411: W: macro-in-%changelog %{expand

(Packagers should avoid using macros in changelog entries, but this is a minor
error. The easy fix is to escape any macros in changelog entries with %%, to
prevent unexpected and random behavior.)
---
 kernel.spec | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)

diff --git a/kernel.spec b/kernel.spec
index 4ebbfaeba..66f343fdb 100644
--- a/kernel.spec
+++ b/kernel.spec
@@ -2185,7 +2185,7 @@ fi
   numbers.
 
 * Wed Aug 04 2010 Kyle McMartin <kyle@redhat.com>
-- Disable %released_kernel.
+- Disable %%released_kernel.
 - This is properly 2.6.36-0.git1, unlike the last commit. Had to make
   a mistake sometime, I'm glad it was early.
 
@@ -2229,7 +2229,7 @@ fi
 - Linux 2.6.35-rc5-git7
 
 * Wed Jul 21 2010 Dave Jones <davej@redhat.com>
-- Remove the %verify (no mtime) on kernel-devel's files.
+- Remove the %%verify (no mtime) on kernel-devel's files.
   If they got modified, they should fail rpm verify.
 
 * Wed Jul 21 2010 Dave Jones <davej@redhat.com>
@@ -2408,7 +2408,7 @@ fi
   having trouble finding anyone who actually uses it.
 
 * Tue Jun 15 2010 Kyle McMartin <kyle@redhat.com> 2.6.34-38
-- Fix build by nuking superfluous "%{expand" which was missing a
+- Fix build by nuking superfluous "%%{expand" which was missing a
   trailing '}'. You may now reward me with an array of alcoholic
   beverages, I so richly deserve for spending roughly a full
   day staring at the diff of the spec.

From 50dccdcd821c7e000cc7a78b281b7b1b2555b9e2 Mon Sep 17 00:00:00 2001
From: Dave Jones <davej@redhat.com>
Date: Tue, 26 Oct 2010 13:51:00 -0400
Subject: [PATCH 06/56] whitespace cleanup

---
 kernel.spec | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/kernel.spec b/kernel.spec
index 66f343fdb..26545a0ef 100644
--- a/kernel.spec
+++ b/kernel.spec
@@ -144,7 +144,7 @@ Summary: The Linux kernel
 %define with_dbgonly   %{?_with_dbgonly:      1} %{?!_with_dbgonly:      0}
 
 # should we do C=1 builds with sparse
-%define with_sparse	%{?_with_sparse:      1} %{?!_with_sparse:      0}
+%define with_sparse    %{?_with_sparse:       1} %{?!_with_sparse:       0}
 
 # Set debugbuildsenabled to 1 for production (build separate debug kernels)
 #  and 0 for rawhide (all kernels are debug kernels).

From 3405f5fbff9f3829c854e0c2e60f93a0f3d7b471 Mon Sep 17 00:00:00 2001
From: Dave Jones <davej@redhat.com>
Date: Tue, 26 Oct 2010 13:52:03 -0400
Subject: [PATCH 07/56] more rpmlint: E: useless-provides kernel

(This comes from line 445: Provides: kernel = %{rpmversion}-%{pkg_release}\.
I don't see any reason for this line, as rpm automatically provides
kernel = %{version}-%{release}, and %{version} is set to %{rpmversion}
while %{release} is set to %{pkg_release}.)
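
The automatic provide can be confirmed against any installed build, e.g.
(purely as an illustration):

  $ rpm -q --provides kernel | grep '^kernel ='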
---
 kernel.spec | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/kernel.spec b/kernel.spec
index 26545a0ef..e308537d1 100644
--- a/kernel.spec
+++ b/kernel.spec
@@ -442,7 +442,7 @@ Summary: The Linux kernel
 # macros defined above.
 #
 %define kernel_reqprovconf \
-Provides: kernel = %{rpmversion}-%{pkg_release}\
+\
 Provides: kernel-%{_target_cpu} = %{rpmversion}-%{pkg_release}%{?1:.%{1}}\
 Provides: kernel-drm = 4.3.0\
 Provides: kernel-drm-nouveau = 16\

From c6761a0576d0de7c9427fd9a968b682386fb51eb Mon Sep 17 00:00:00 2001
From: Dave Jones <davej@redhat.com>
Date: Tue, 26 Oct 2010 13:56:20 -0400
Subject: [PATCH 08/56] remove .gitignores from the source tree, so they don't
 end up getting packaged.

---
 kernel.spec | 3 +++
 1 file changed, 3 insertions(+)

diff --git a/kernel.spec b/kernel.spec
index e308537d1..8ed80f06a 100644
--- a/kernel.spec
+++ b/kernel.spec
@@ -1394,6 +1394,9 @@ done
 # get rid of unwanted files resulting from patch fuzz
 find . \( -name "*.orig" -o -name "*~" \) -exec rm -f {} \; >/dev/null
 
+# remove unnecessary SCM files
+find . -name .gitignore -exec rm -f {} \; >/dev/null
+
 cd ..
 
 ###

From c1bdfc8be92de39f3a97155a37c1a138d24709b1 Mon Sep 17 00:00:00 2001
From: Dave Jones <davej@redhat.com>
Date: Tue, 26 Oct 2010 13:59:09 -0400
Subject: [PATCH 09/56] remove build cruft that shouldn't be packaged in
 kernel-devel

---
 kernel.spec | 2 ++
 1 file changed, 2 insertions(+)

diff --git a/kernel.spec b/kernel.spec
index 8ed80f06a..830edb803 100644
--- a/kernel.spec
+++ b/kernel.spec
@@ -1736,6 +1736,8 @@ find $RPM_BUILD_ROOT/usr/include \
      \( -name .install -o -name .check -o \
      	-name ..install.cmd -o -name ..check.cmd \) | xargs rm -f
 
+find $RPM_BUILD_ROOT/usr/src/kernels -name .*.cmd -exec rm -f {} \;
+
 # glibc provides scsi headers for itself, for now
 rm -rf $RPM_BUILD_ROOT/usr/include/scsi
 rm -f $RPM_BUILD_ROOT/usr/include/asm*/atomic.h

From 556dd8ab05b082e3aaddb1cf641506f49689e2a9 Mon Sep 17 00:00:00 2001
From: Dave Jones <davej@redhat.com>
Date: Tue, 26 Oct 2010 14:12:21 -0400
Subject: [PATCH 10/56] remove some temporary stuff I did to keep the line
 numbers the same during the rpmlint review

---
 kernel.spec | 5 -----
 1 file changed, 5 deletions(-)

diff --git a/kernel.spec b/kernel.spec
index 830edb803..05fa05295 100644
--- a/kernel.spec
+++ b/kernel.spec
@@ -442,7 +442,6 @@ Summary: The Linux kernel
 # macros defined above.
 #
 %define kernel_reqprovconf \
-\
 Provides: kernel-%{_target_cpu} = %{rpmversion}-%{pkg_release}%{?1:.%{1}}\
 Provides: kernel-drm = 4.3.0\
 Provides: kernel-drm-nouveau = 16\
@@ -482,10 +481,6 @@ ExclusiveOS: Linux
 
 %kernel_reqprovconf
 
-
-
-
-
 #
 # List the packages used during the kernel build
 #

From 917df9870301f8b995fca97c8fe2a775eb650e91 Mon Sep 17 00:00:00 2001
From: Dave Jones <davej@redhat.com>
Date: Tue, 26 Oct 2010 15:04:04 -0400
Subject: [PATCH 11/56] Put back the provides: kernel

It's necessary for kernel-smp, which ppc32 still uses.
---
 kernel.spec | 1 +
 1 file changed, 1 insertion(+)

diff --git a/kernel.spec b/kernel.spec
index 05fa05295..a7377aa7d 100644
--- a/kernel.spec
+++ b/kernel.spec
@@ -442,6 +442,7 @@ Summary: The Linux kernel
 # macros defined above.
 #
 %define kernel_reqprovconf \
+Provides: kernel = %{rpmversion}-%{pkg_release}\
 Provides: kernel-%{_target_cpu} = %{rpmversion}-%{pkg_release}%{?1:.%{1}}\
 Provides: kernel-drm = 4.3.0\
 Provides: kernel-drm-nouveau = 16\

From 8b813d3c547a30e99036746ca7d0db92bef272be Mon Sep 17 00:00:00 2001
From: Michael Young <m.a.young@durham.ac.uk>
Date: Sat, 30 Oct 2010 23:02:42 +0100
Subject: [PATCH 12/56] Quote wildcard argument in "find" command
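
If a file matching .*.cmd happens to exist in the current directory, the
unquoted pattern is glob-expanded by the shell before find ever sees it;
quoting passes the pattern through to find intact:

  find $RPM_BUILD_ROOT/usr/src/kernels -name .*.cmd      # fragile
  find $RPM_BUILD_ROOT/usr/src/kernels -name ".*.cmd"    # pattern reaches find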

---
 kernel.spec | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/kernel.spec b/kernel.spec
index a7377aa7d..7c6c4e75c 100644
--- a/kernel.spec
+++ b/kernel.spec
@@ -1732,7 +1732,7 @@ find $RPM_BUILD_ROOT/usr/include \
      \( -name .install -o -name .check -o \
      	-name ..install.cmd -o -name ..check.cmd \) | xargs rm -f
 
-find $RPM_BUILD_ROOT/usr/src/kernels -name .*.cmd -exec rm -f {} \;
+find $RPM_BUILD_ROOT/usr/src/kernels -name ".*.cmd" -exec rm -f {} \;
 
 # glibc provides scsi headers for itself, for now
 rm -rf $RPM_BUILD_ROOT/usr/include/scsi

From 4cd6a7f0577c367964fcaf08a1049b08c7c4efef Mon Sep 17 00:00:00 2001
From: Kyle McMartin <kyle@mcmartin.ca>
Date: Mon, 15 Nov 2010 20:13:18 -0500
Subject: [PATCH 13/56] fix intel_ips driver

---
 ...power-monitoring-registers-on-resume.patch | 49 +++++++++++++++++++
 kernel.spec                                   |  8 ++-
 2 files changed, 56 insertions(+), 1 deletion(-)
 create mode 100644 drm-i915-reprogram-power-monitoring-registers-on-resume.patch

diff --git a/drm-i915-reprogram-power-monitoring-registers-on-resume.patch b/drm-i915-reprogram-power-monitoring-registers-on-resume.patch
new file mode 100644
index 000000000..a37442607
--- /dev/null
+++ b/drm-i915-reprogram-power-monitoring-registers-on-resume.patch
@@ -0,0 +1,49 @@
+From 48fcfc888b48ad49dd83faa107264bbfb0089cad Mon Sep 17 00:00:00 2001
+From: Kyle McMartin <kyle@redhat.com>
+Date: Wed, 3 Nov 2010 16:27:57 -0400
+Subject: [PATCH] i915: reprogram power monitoring registers on resume
+
+Fixes issue where i915_gfx_val was reporting values several
+orders of magnitude higher than physically possible (without
+leaving scorch marks on my thighs at least.)
+
+Signed-off-by: Kyle McMartin <kyle@redhat.com>
+Reviewed-by: Jesse Barnes <jbarnes@virtuousgeek.org>
+Cc: stable@kernel.org
+Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
+---
+ drivers/gpu/drm/i915/i915_suspend.c |    4 +++-
+ drivers/gpu/drm/i915/intel_drv.h    |    1 +
+ 2 files changed, 4 insertions(+), 1 deletions(-)
+
+diff --git a/drivers/gpu/drm/i915/i915_suspend.c b/drivers/gpu/drm/i915/i915_suspend.c
+index 989c19d..454c064 100644
+--- a/drivers/gpu/drm/i915/i915_suspend.c
++++ b/drivers/gpu/drm/i915/i915_suspend.c
+@@ -862,8 +862,10 @@ int i915_restore_state(struct drm_device *dev)
+ 	/* Clock gating state */
+ 	intel_init_clock_gating(dev);
+ 
+-	if (HAS_PCH_SPLIT(dev))
++	if (HAS_PCH_SPLIT(dev)) {
+ 		ironlake_enable_drps(dev);
++		intel_init_emon(dev);
++	}
+ 
+ 	/* Cache mode state */
+ 	I915_WRITE (CACHE_MODE_0, dev_priv->saveCACHE_MODE_0 | 0xffff0000);
+diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h
+index 9af9f86..21551fe 100644
+--- a/drivers/gpu/drm/i915/intel_drv.h
++++ b/drivers/gpu/drm/i915/intel_drv.h
+@@ -296,6 +296,7 @@ extern void intel_crtc_fb_gamma_get(struct drm_crtc *crtc, u16 *red, u16 *green,
+ extern void intel_init_clock_gating(struct drm_device *dev);
+ extern void ironlake_enable_drps(struct drm_device *dev);
+ extern void ironlake_disable_drps(struct drm_device *dev);
++extern void intel_init_emon(struct drm_device *dev);
+ 
+ extern int intel_pin_and_fence_fb_obj(struct drm_device *dev,
+ 				      struct drm_gem_object *obj,
+-- 
+1.7.3.2
+
diff --git a/kernel.spec b/kernel.spec
index 7c6c4e75c..ec7ee2bd3 100644
--- a/kernel.spec
+++ b/kernel.spec
@@ -51,7 +51,7 @@ Summary: The Linux kernel
 # For non-released -rc kernels, this will be prepended with "0.", so
 # for example a 3 here will become 0.3
 #
-%global baserelease 1
+%global baserelease 2
 %global fedora_build %{baserelease}
 
 # base_sublevel is the kernel version we're starting with and patching
@@ -654,6 +654,7 @@ Patch1819: drm-intel-big-hammer.patch
 Patch1824: drm-intel-next.patch
 # make sure the lvds comes back on lid open
 Patch1825: drm-intel-make-lvds-work.patch
+Patch1826: drm-i915-reprogram-power-monitoring-registers-on-resume.patch
 Patch1900: linux-2.6-intel-iommu-igfx.patch
 
 # linux1394 git patches
@@ -1270,6 +1271,7 @@ ApplyOptionalPatch drm-nouveau-updates.patch
 ApplyOptionalPatch drm-intel-next.patch
 ApplyPatch drm-intel-big-hammer.patch
 ApplyPatch drm-intel-make-lvds-work.patch
+ApplyPatch drm-i915-reprogram-power-monitoring-registers-on-resume.patch
 ApplyPatch linux-2.6-intel-iommu-igfx.patch
 
 # linux1394 git patches
@@ -1953,6 +1955,10 @@ fi
 #                 ||     ||
 
 %changelog
+* Mon Nov 15 2010 Kyle McMartin <kyle@redhat.com> 2.6.36-2
+- drm-i915-reprogram-power-monitoring-registers-on-resume.patch: fix intel_ips
+  driver.
+
 * Wed Oct 20 2010 Chuck Ebbert <cebbert@redhat.com> 2.6.36-1
 - Linux 2.6.36
 

From a568a0d41b42e78b2d8c05f9300de0e31fcee780 Mon Sep 17 00:00:00 2001
From: Kyle McMartin <kyle@mcmartin.ca>
Date: Mon, 15 Nov 2010 22:35:24 -0500
Subject: [PATCH 14/56] pull in support for MBA3

---
 add-macbookair3-ids.patch | 263 ++++++++++++++++++++++++++++++++++++++
 kernel.spec               |   5 +
 2 files changed, 268 insertions(+)
 create mode 100644 add-macbookair3-ids.patch

diff --git a/add-macbookair3-ids.patch b/add-macbookair3-ids.patch
new file mode 100644
index 000000000..e817c078e
--- /dev/null
+++ b/add-macbookair3-ids.patch
@@ -0,0 +1,263 @@
+diff -uNrp kernel-2.6.35.fc14.orig/drivers/bluetooth/btusb.c kernel-2.6.35.fc14.new/drivers/bluetooth/btusb.c
+--- kernel-2.6.35.fc14.orig/drivers/bluetooth/btusb.c	2010-11-12 12:35:49.390791080 +0100
++++ kernel-2.6.35.fc14.new/drivers/bluetooth/btusb.c	2010-11-12 12:48:22.090611963 +0100
+@@ -68,6 +68,9 @@ static struct usb_device_id btusb_table[
+ 	/* Apple MacBookPro6,2 */
+ 	{ USB_DEVICE(0x05ac, 0x8218) },
+ 
++	/* Apple MacBookAir3,1, MacBookAir3,2 */
++	{ USB_DEVICE(0x05ac, 0x821b) },
++
+ 	/* AVM BlueFRITZ! USB v2.0 */
+ 	{ USB_DEVICE(0x057c, 0x3800) },
+ 
+diff -uNrp kernel-2.6.35.fc14.orig/drivers/hid/hid-apple.c kernel-2.6.35.fc14.new/drivers/hid/hid-apple.c
+--- kernel-2.6.35.fc14.orig/drivers/hid/hid-apple.c	2010-11-12 12:35:49.153805968 +0100
++++ kernel-2.6.35.fc14.new/drivers/hid/hid-apple.c	2010-11-12 12:48:35.689816431 +0100
+@@ -59,6 +59,27 @@ struct apple_key_translation {
+ 	u8 flags;
+ };
+ 
++static const struct apple_key_translation macbookair_fn_keys[] = {
++	{ KEY_BACKSPACE, KEY_DELETE },
++	{ KEY_ENTER,	KEY_INSERT },
++	{ KEY_F1,	KEY_BRIGHTNESSDOWN, APPLE_FLAG_FKEY },
++	{ KEY_F2,	KEY_BRIGHTNESSUP,   APPLE_FLAG_FKEY },
++	{ KEY_F3,	KEY_SCALE,          APPLE_FLAG_FKEY },
++	{ KEY_F4,	KEY_DASHBOARD,      APPLE_FLAG_FKEY },
++	{ KEY_F6,	KEY_PREVIOUSSONG,   APPLE_FLAG_FKEY },
++	{ KEY_F7,	KEY_PLAYPAUSE,      APPLE_FLAG_FKEY },
++	{ KEY_F8,	KEY_NEXTSONG,       APPLE_FLAG_FKEY },
++	{ KEY_F9,	KEY_MUTE,           APPLE_FLAG_FKEY },
++	{ KEY_F10,	KEY_VOLUMEDOWN,     APPLE_FLAG_FKEY },
++	{ KEY_F11,	KEY_VOLUMEUP,       APPLE_FLAG_FKEY },
++	{ KEY_F12,	KEY_EJECTCD,        APPLE_FLAG_FKEY },
++	{ KEY_UP,	KEY_PAGEUP },
++	{ KEY_DOWN,	KEY_PAGEDOWN },
++	{ KEY_LEFT,	KEY_HOME },
++	{ KEY_RIGHT,	KEY_END },
++	{ }
++};
++
+ static const struct apple_key_translation apple_fn_keys[] = {
+ 	{ KEY_BACKSPACE, KEY_DELETE },
+ 	{ KEY_ENTER,	KEY_INSERT },
+@@ -157,10 +178,14 @@ static int hidinput_apple_event(struct h
+ 	if (fnmode) {
+ 		int do_translate;
+ 
+-		trans = apple_find_translation((hid->product < 0x21d ||
++		if(hid->product >= 0x023f && hid->product <= 0x0244 ) {
++			trans = apple_find_translation(macbookair_fn_keys, usage->code);
++		} else {
++			trans = apple_find_translation((hid->product < 0x21d ||
+ 					hid->product >= 0x300) ?
+ 					powerbook_fn_keys : apple_fn_keys,
+ 					usage->code);
++		}
+ 		if (trans) {
+ 			if (test_bit(usage->code, asc->pressed_fn))
+ 				do_translate = 1;
+@@ -435,6 +460,18 @@ static const struct hid_device_id apple_
+ 		.driver_data = APPLE_HAS_FN | APPLE_ISO_KEYBOARD },
+ 	{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING3_JIS),
+ 		.driver_data = APPLE_HAS_FN | APPLE_RDESC_JIS },
++	{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING4_ANSI),
++		.driver_data = APPLE_HAS_FN },
++	{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING4_ISO),
++		.driver_data = APPLE_HAS_FN | APPLE_ISO_KEYBOARD },
++	{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING4_JIS),
++		.driver_data = APPLE_HAS_FN | APPLE_RDESC_JIS },
++	{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING4A_ANSI),
++		.driver_data = APPLE_HAS_FN },
++	{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING4A_ISO),
++		.driver_data = APPLE_HAS_FN | APPLE_ISO_KEYBOARD },
++	{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING4A_JIS),
++		.driver_data = APPLE_HAS_FN | APPLE_RDESC_JIS },
+ 	{ HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_WIRELESS_2009_ANSI),
+ 		.driver_data = APPLE_NUMLOCK_EMULATION | APPLE_HAS_FN },
+ 	{ HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_WIRELESS_2009_ISO),
+diff -uNrp kernel-2.6.35.fc14.orig/drivers/hid/hid-core.c kernel-2.6.35.fc14.new/drivers/hid/hid-core.c
+--- kernel-2.6.35.fc14.orig/drivers/hid/hid-core.c	2010-11-12 12:35:49.153805968 +0100
++++ kernel-2.6.35.fc14.new/drivers/hid/hid-core.c	2010-11-12 12:48:35.690816373 +0100
+@@ -1273,6 +1273,12 @@ static const struct hid_device_id hid_bl
+ 	{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING3_ANSI) },
+ 	{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING3_ISO) },
+ 	{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING3_JIS) },
++	{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING4_ANSI) },
++	{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING4_ISO) },
++	{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING4_JIS) },
++	{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING4A_ANSI) },
++	{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING4A_ISO) },
++	{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING4A_JIS) },
+ 	{ HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_WIRELESS_2009_ANSI) },
+ 	{ HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_WIRELESS_2009_ISO) },
+ 	{ HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_WIRELESS_2009_JIS) },
+@@ -1738,6 +1744,12 @@ static const struct hid_device_id hid_mo
+ 	{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING3_ANSI) },
+ 	{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING3_ISO) },
+ 	{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING3_JIS) },
++	{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING4_ANSI) },
++	{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING4_ISO) },
++	{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING4_JIS) },
++	{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING4A_ANSI) },
++	{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING4A_ISO) },
++	{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING4A_JIS) },
+ 	{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_FOUNTAIN_TP_ONLY) },
+ 	{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_GEYSER1_TP_ONLY) },
+ 	{ }
+diff -uNrp kernel-2.6.35.fc14.orig/drivers/hid/hid-ids.h kernel-2.6.35.fc14.new/drivers/hid/hid-ids.h
+--- kernel-2.6.35.fc14.orig/drivers/hid/hid-ids.h	2010-11-12 12:35:49.153805968 +0100
++++ kernel-2.6.35.fc14.new/drivers/hid/hid-ids.h	2010-11-12 12:48:35.691816314 +0100
+@@ -93,6 +93,12 @@
+ #define USB_DEVICE_ID_APPLE_WELLSPRING3_ANSI	0x0236
+ #define USB_DEVICE_ID_APPLE_WELLSPRING3_ISO	0x0237
+ #define USB_DEVICE_ID_APPLE_WELLSPRING3_JIS	0x0238
++#define USB_DEVICE_ID_APPLE_WELLSPRING4_ANSI	0x023f
++#define USB_DEVICE_ID_APPLE_WELLSPRING4_ISO	0x0240
++#define USB_DEVICE_ID_APPLE_WELLSPRING4_JIS	0x0241
++#define USB_DEVICE_ID_APPLE_WELLSPRING4A_ANSI	0x0242
++#define USB_DEVICE_ID_APPLE_WELLSPRING4A_ISO	0x0243
++#define USB_DEVICE_ID_APPLE_WELLSPRING4A_JIS	0x0244
+ #define USB_DEVICE_ID_APPLE_ALU_WIRELESS_2009_ANSI  0x0239
+ #define USB_DEVICE_ID_APPLE_ALU_WIRELESS_2009_ISO   0x023a
+ #define USB_DEVICE_ID_APPLE_ALU_WIRELESS_2009_JIS   0x023b
+diff -uNrp kernel-2.6.35.fc14.orig/drivers/hwmon/applesmc.c kernel-2.6.35.fc14.new/drivers/hwmon/applesmc.c
+--- kernel-2.6.35.fc14.orig/drivers/hwmon/applesmc.c	2010-11-12 12:35:49.618776754 +0100
++++ kernel-2.6.35.fc14.new/drivers/hwmon/applesmc.c	2010-11-13 12:25:05.810472278 +0100
+@@ -162,6 +162,10 @@ static const char *temperature_sensors_s
+ /* Set 22: MacBook Pro 7,1 */
+ 	{ "TB0T", "TB1T", "TB2T", "TC0D", "TC0P", "TN0D", "TN0P", "TN0S",
+ 	  "TN1D", "TN1F", "TN1G", "TN1S", "Th1H", "Ts0P", "Ts0S", NULL },
++/* Set 23: MacBook Air 3,1 */
++	{ "TB0T", "TB1T", "TB2T", "TC0D", "TC0E", "TC0P", "TC1E", "TCZ3",
++	  "TCZ4", "TCZ5", "TG0E", "TG1E", "TG2E", "TGZ3", "TGZ4", "TGZ5",
++	  "TH0F", "TH0O", "TM0P" },
+ };
+ 
+ /* List of keys used to read/write fan speeds */
+@@ -1524,11 +1528,21 @@ static __initdata struct dmi_match_data 
+ 	{ .accelerometer = 1, .light = 1, .temperature_set = 21 },
+ /* MacBook Pro 7,1: accelerometer, backlight and temperature set 22 */
+ 	{ .accelerometer = 1, .light = 1, .temperature_set = 22 },
++/* MacBook Air 3,1: accelerometer, backlight and temperature set 15 */
++	{ .accelerometer = 0, .light = 0, .temperature_set = 23 },
+ };
+ 
+ /* Note that DMI_MATCH(...,"MacBook") will match "MacBookPro1,1".
+  * So we need to put "Apple MacBook Pro" before "Apple MacBook". */
+ static __initdata struct dmi_system_id applesmc_whitelist[] = {
++	{ applesmc_dmi_match, "Apple MacBook Air 3", {
++	  DMI_MATCH(DMI_BOARD_VENDOR, "Apple"),
++	  DMI_MATCH(DMI_PRODUCT_NAME, "MacBookAir3,2") },
++		&applesmc_dmi_data[23]},
++	{ applesmc_dmi_match, "Apple MacBook Air 3", {
++	  DMI_MATCH(DMI_BOARD_VENDOR, "Apple"),
++	  DMI_MATCH(DMI_PRODUCT_NAME, "MacBookAir3,1") },
++		&applesmc_dmi_data[23]},
+ 	{ applesmc_dmi_match, "Apple MacBook Air 2", {
+ 	  DMI_MATCH(DMI_BOARD_VENDOR, "Apple"),
+ 	  DMI_MATCH(DMI_PRODUCT_NAME, "MacBookAir2") },
+diff -uNrp kernel-2.6.35.fc14.orig/drivers/input/mouse/bcm5974.c kernel-2.6.35.fc14.new/drivers/input/mouse/bcm5974.c
+--- kernel-2.6.35.fc14.orig/drivers/input/mouse/bcm5974.c	2010-11-12 12:35:50.004752503 +0100
++++ kernel-2.6.35.fc14.new/drivers/input/mouse/bcm5974.c	2010-11-12 12:48:13.140136374 +0100
+@@ -55,6 +55,14 @@
+ #define USB_DEVICE_ID_APPLE_WELLSPRING3_ANSI	0x0236
+ #define USB_DEVICE_ID_APPLE_WELLSPRING3_ISO	0x0237
+ #define USB_DEVICE_ID_APPLE_WELLSPRING3_JIS	0x0238
++/* MacbookAir3,2 (unibody), aka wellspring5 */
++#define USB_DEVICE_ID_APPLE_WELLSPRING4_ANSI	0x023f
++#define USB_DEVICE_ID_APPLE_WELLSPRING4_ISO	0x0240
++#define USB_DEVICE_ID_APPLE_WELLSPRING4_JIS	0x0241
++/* MacbookAir3,1 (unibody), aka wellspring4 */
++#define USB_DEVICE_ID_APPLE_WELLSPRING4A_ANSI	0x0242
++#define USB_DEVICE_ID_APPLE_WELLSPRING4A_ISO	0x0243
++#define USB_DEVICE_ID_APPLE_WELLSPRING4A_JIS	0x0244
+ 
+ #define BCM5974_DEVICE(prod) {					\
+ 	.match_flags = (USB_DEVICE_ID_MATCH_DEVICE |		\
+@@ -80,6 +88,14 @@ static const struct usb_device_id bcm597
+ 	BCM5974_DEVICE(USB_DEVICE_ID_APPLE_WELLSPRING3_ANSI),
+ 	BCM5974_DEVICE(USB_DEVICE_ID_APPLE_WELLSPRING3_ISO),
+ 	BCM5974_DEVICE(USB_DEVICE_ID_APPLE_WELLSPRING3_JIS),
++	/* MacbookAir3,2 */
++	BCM5974_DEVICE(USB_DEVICE_ID_APPLE_WELLSPRING4_ANSI),
++	BCM5974_DEVICE(USB_DEVICE_ID_APPLE_WELLSPRING4_ISO),
++	BCM5974_DEVICE(USB_DEVICE_ID_APPLE_WELLSPRING4_JIS),
++	/* MacbookAir3,1 */
++	BCM5974_DEVICE(USB_DEVICE_ID_APPLE_WELLSPRING4A_ANSI),
++	BCM5974_DEVICE(USB_DEVICE_ID_APPLE_WELLSPRING4A_ISO),
++	BCM5974_DEVICE(USB_DEVICE_ID_APPLE_WELLSPRING4A_JIS),
+ 	/* Terminating entry */
+ 	{}
+ };
+@@ -234,6 +250,30 @@ static const struct bcm5974_config bcm59
+ 		{ DIM_X, DIM_X / SN_COORD, -4460, 5166 },
+ 		{ DIM_Y, DIM_Y / SN_COORD, -75, 6700 }
+ 	},
++	{
++		USB_DEVICE_ID_APPLE_WELLSPRING4_ANSI,
++		USB_DEVICE_ID_APPLE_WELLSPRING4_ISO,
++		USB_DEVICE_ID_APPLE_WELLSPRING4_JIS,
++		HAS_INTEGRATED_BUTTON,
++		0x84, sizeof(struct bt_data),
++		0x81, TYPE2, FINGER_TYPE2, FINGER_TYPE2 + SIZEOF_ALL_FINGERS,
++		{ DIM_PRESSURE, DIM_PRESSURE / SN_PRESSURE, 0, 300 },
++		{ DIM_WIDTH, DIM_WIDTH / SN_WIDTH, 0, 2048 },
++		{ DIM_X, DIM_X / SN_COORD, -4460, 5166 },
++		{ DIM_Y, DIM_Y / SN_COORD, -75, 6700 }
++	},
++	{
++		USB_DEVICE_ID_APPLE_WELLSPRING4A_ANSI,
++		USB_DEVICE_ID_APPLE_WELLSPRING4A_ISO,
++		USB_DEVICE_ID_APPLE_WELLSPRING4A_JIS,
++		HAS_INTEGRATED_BUTTON,
++		0x84, sizeof(struct bt_data),
++		0x81, TYPE2, FINGER_TYPE2, FINGER_TYPE2 + SIZEOF_ALL_FINGERS,
++		{ DIM_PRESSURE, DIM_PRESSURE / SN_PRESSURE, 0, 300 },
++		{ DIM_WIDTH, DIM_WIDTH / SN_WIDTH, 0, 2048 },
++		{ DIM_X, DIM_X / SN_COORD, -4460, 5166 },
++		{ DIM_Y, DIM_Y / SN_COORD, -75, 6700 }
++	},
+ 	{}
+ };
+ 
+diff -uNrp kernel-2.6.35.fc14.orig/drivers/video/backlight/mbp_nvidia_bl.c kernel-2.6.35.fc14.new/drivers/video/backlight/mbp_nvidia_bl.c
+--- kernel-2.6.35.fc14.orig/drivers/video/backlight/mbp_nvidia_bl.c	2010-11-12 12:35:49.159805591 +0100
++++ kernel-2.6.35.fc14.new/drivers/video/backlight/mbp_nvidia_bl.c	2010-11-12 12:48:47.412131884 +0100
+@@ -335,6 +335,24 @@ static const struct dmi_system_id __init
+ 		},
+ 		.driver_data	= (void *)&nvidia_chipset_data,
+ 	},
++	{
++		.callback	= mbp_dmi_match,
++		.ident		= "MacBookAir 3,1",
++		.matches	= {
++			DMI_MATCH(DMI_SYS_VENDOR, "Apple Inc."),
++			DMI_MATCH(DMI_PRODUCT_NAME, "MacBookAir3,1"),
++		},
++		.driver_data	= (void *)&nvidia_chipset_data,
++	},
++	{
++		.callback	= mbp_dmi_match,
++		.ident		= "MacBookAir 3,2",
++		.matches	= {
++			DMI_MATCH(DMI_SYS_VENDOR, "Apple Inc."),
++			DMI_MATCH(DMI_PRODUCT_NAME, "MacBookAir3,2"),
++		},
++		.driver_data	= (void *)&nvidia_chipset_data,
++	},
+ 	{ }
+ };
+ 
+diff -uNrp kernel-2.6.35.fc14.orig/sound/pci/hda/patch_cirrus.c kernel-2.6.35.fc14.new/sound/pci/hda/patch_cirrus.c
+--- kernel-2.6.35.fc14.orig/sound/pci/hda/patch_cirrus.c	2010-11-12 12:35:49.005815268 +0100
++++ kernel-2.6.35.fc14.new/sound/pci/hda/patch_cirrus.c	2010-11-12 12:48:40.379542432 +0100
+@@ -1139,6 +1139,7 @@ static const char *cs420x_models[CS420X_
+ static struct snd_pci_quirk cs420x_cfg_tbl[] = {
+ 	SND_PCI_QUIRK(0x10de, 0xcb79, "MacBookPro 5,5", CS420X_MBP55),
+ 	SND_PCI_QUIRK(0x8086, 0x7270, "IMac 27 Inch", CS420X_IMAC27),
++	SND_PCI_QUIRK(0x10de, 0x0d94, "MacBookAir 3,1(2)", CS420X_MBP55),
+ 	{} /* terminator */
+ };
+ 
diff --git a/kernel.spec b/kernel.spec
index ec7ee2bd3..8f9f84fb5 100644
--- a/kernel.spec
+++ b/kernel.spec
@@ -718,6 +718,7 @@ Patch12225: pci-crs-fixes.patch
 
 Patch12300: btusb-macbookpro-7-1.patch
 Patch12301: btusb-macbookpro-6-2.patch
+Patch12304: add-macbookair3-ids.patch
 
 Patch12302: pnpacpi-cope-with-invalid-device-ids.patch
 
@@ -1334,6 +1335,7 @@ ApplyPatch pci-crs-fixes.patch
 
 ApplyPatch btusb-macbookpro-7-1.patch
 ApplyPatch btusb-macbookpro-6-2.patch
+ApplyPatch add-macbookair3-ids.patch
 
 # rhbz#641468
 ApplyPatch pnpacpi-cope-with-invalid-device-ids.patch
@@ -1955,6 +1957,9 @@ fi
 #                 ||     ||
 
 %changelog
+* Mon Nov 15 2010 Kyle McMartin <kyle@redhat.com>
+- rhbz#651019: pull in support for MBA3.
+
 * Mon Nov 15 2010 Kyle McMartin <kyle@redhat.com> 2.6.36-2
 - drm-i915-reprogram-power-monitoring-registers-on-resume.patch: fix intel_ips
   driver.

From 2c69ace4647b7b554d3a9ece98399cb72fe06415 Mon Sep 17 00:00:00 2001
From: kyle <kyle@ihatethathostname.lab.bos.redhat.com>
Date: Tue, 16 Nov 2010 00:19:59 -0500
Subject: [PATCH 15/56] drm-intel rebase, enable kernel-debug

---
 config-generic                 |     8 +-
 config-nodebug                 |    96 +-
 config-x86_64-generic          |     2 +-
 drm-intel-2.6.37-rc2.patch     | 24423 +++++++++++++++++++++++++++++++
 drm-intel-big-hammer.patch     |    16 +-
 drm-intel-make-lvds-work.patch |    19 +-
 kernel.spec                    |    18 +-
 7 files changed, 24504 insertions(+), 78 deletions(-)
 create mode 100644 drm-intel-2.6.37-rc2.patch

diff --git a/config-generic b/config-generic
index f834847ff..67c580f6c 100644
--- a/config-generic
+++ b/config-generic
@@ -1415,11 +1415,11 @@ CONFIG_ATMEL=m
 CONFIG_B43=m
 CONFIG_B43_PCMCIA=y
 CONFIG_B43_SDIO=y
-CONFIG_B43_DEBUG=y
+# CONFIG_B43_DEBUG is not set
 CONFIG_B43_PHY_LP=y
 # CONFIG_B43_FORCE_PIO is not set
 CONFIG_B43LEGACY=m
-CONFIG_B43LEGACY_DEBUG=y
+# CONFIG_B43LEGACY_DEBUG is not set
 CONFIG_B43LEGACY_DMA=y
 CONFIG_B43LEGACY_PIO=y
 CONFIG_B43LEGACY_DMA_AND_PIO_MODE=y
@@ -3768,7 +3768,7 @@ CONFIG_IBMASR=m
 CONFIG_PM_DEBUG=y
 CONFIG_PM_TRACE=y
 # CONFIG_PM_VERBOSE is not set
-CONFIG_PM_TEST_SUSPEND=y
+# CONFIG_PM_TEST_SUSPEND is not set
 CONFIG_PM_RUNTIME=y
 
 ## BEGIN ISA Junk.
@@ -4206,7 +4206,7 @@ CONFIG_USB_ATMEL=m
 # CONFIG_RCU_CPU_STALL_DETECTOR is not set
 # CONFIG_DEBUG_BLOCK_EXT_DEVT is not set
 # CONFIG_FUNCTION_GRAPH_TRACER is not set
-CONFIG_BOOT_TRACER=y
+# CONFIG_BOOT_TRACER is not set
 CONFIG_EARLY_PRINTK_DBGP=y
 
 CONFIG_SECURITYFS=y
diff --git a/config-nodebug b/config-nodebug
index d2288c5e6..b4472f9a5 100644
--- a/config-nodebug
+++ b/config-nodebug
@@ -2,92 +2,92 @@ CONFIG_SND_VERBOSE_PRINTK=y
 CONFIG_SND_DEBUG=y
 CONFIG_SND_PCM_XRUN_DEBUG=y
 
-CONFIG_DEBUG_MUTEXES=y
-CONFIG_DEBUG_RT_MUTEXES=y
-CONFIG_DEBUG_LOCK_ALLOC=y
-CONFIG_PROVE_LOCKING=y
-CONFIG_DEBUG_VM=y
-CONFIG_DEBUG_SPINLOCK=y
-CONFIG_PROVE_RCU=y
+# CONFIG_DEBUG_MUTEXES is not set
+# CONFIG_DEBUG_RT_MUTEXES is not set
+# CONFIG_DEBUG_LOCK_ALLOC is not set
+# CONFIG_PROVE_LOCKING is not set
+# CONFIG_DEBUG_VM is not set
+# CONFIG_DEBUG_SPINLOCK is not set
+# CONFIG_PROVE_RCU is not set
 # CONFIG_PROVE_RCU_REPEATEDLY is not set
-CONFIG_DEBUG_PER_CPU_MAPS=y
+# CONFIG_DEBUG_PER_CPU_MAPS is not set
 CONFIG_CPUMASK_OFFSTACK=y
 
-CONFIG_CPU_NOTIFIER_ERROR_INJECT=m
+# CONFIG_CPU_NOTIFIER_ERROR_INJECT is not set
 
-CONFIG_FAULT_INJECTION=y
-CONFIG_FAILSLAB=y
-CONFIG_FAIL_PAGE_ALLOC=y
-CONFIG_FAIL_MAKE_REQUEST=y
-CONFIG_FAULT_INJECTION_DEBUG_FS=y
-CONFIG_FAULT_INJECTION_STACKTRACE_FILTER=y
-CONFIG_FAIL_IO_TIMEOUT=y
+# CONFIG_FAULT_INJECTION is not set
+# CONFIG_FAILSLAB is not set
+# CONFIG_FAIL_PAGE_ALLOC is not set
+# CONFIG_FAIL_MAKE_REQUEST is not set
+# CONFIG_FAULT_INJECTION_DEBUG_FS is not set
+# CONFIG_FAULT_INJECTION_STACKTRACE_FILTER is not set
+# CONFIG_FAIL_IO_TIMEOUT is not set
 
-CONFIG_SLUB_DEBUG_ON=y
+# CONFIG_SLUB_DEBUG_ON is not set
 
-CONFIG_LOCK_STAT=y
+# CONFIG_LOCK_STAT is not set
 
-CONFIG_DEBUG_STACK_USAGE=y
+# CONFIG_DEBUG_STACK_USAGE is not set
 
-CONFIG_ACPI_DEBUG=y
+# CONFIG_ACPI_DEBUG is not set
 # CONFIG_ACPI_DEBUG_FUNC_TRACE is not set
 
-CONFIG_DEBUG_SG=y
+# CONFIG_DEBUG_SG is not set
 
 # CONFIG_DEBUG_PAGEALLOC is not set
 
-CONFIG_DEBUG_WRITECOUNT=y
-CONFIG_DEBUG_OBJECTS=y
+# CONFIG_DEBUG_WRITECOUNT is not set
+# CONFIG_DEBUG_OBJECTS is not set
 # CONFIG_DEBUG_OBJECTS_SELFTEST is not set
-CONFIG_DEBUG_OBJECTS_FREE=y
-CONFIG_DEBUG_OBJECTS_TIMERS=y
+# CONFIG_DEBUG_OBJECTS_FREE is not set
+# CONFIG_DEBUG_OBJECTS_TIMERS is not set
 CONFIG_DEBUG_OBJECTS_ENABLE_DEFAULT=1
 
-CONFIG_X86_PTDUMP=y
+# CONFIG_X86_PTDUMP is not set
 
-CONFIG_CAN_DEBUG_DEVICES=y
+# CONFIG_CAN_DEBUG_DEVICES is not set
 
-CONFIG_MODULE_FORCE_UNLOAD=y
+# CONFIG_MODULE_FORCE_UNLOAD is not set
 
-CONFIG_SYSCTL_SYSCALL_CHECK=y
+# CONFIG_SYSCTL_SYSCALL_CHECK is not set
 
-CONFIG_DEBUG_NOTIFIERS=y
+# CONFIG_DEBUG_NOTIFIERS is not set
 
-CONFIG_DMA_API_DEBUG=y
+# CONFIG_DMA_API_DEBUG is not set
 
-CONFIG_MMIOTRACE=y
+# CONFIG_MMIOTRACE is not set
 
-CONFIG_DEBUG_CREDENTIALS=y
+# CONFIG_DEBUG_CREDENTIALS is not set
 
 # off in both production debug and nodebug builds,
 #  on in rawhide nodebug builds
-CONFIG_DEBUG_FORCE_WEAK_PER_CPU=y
+# CONFIG_DEBUG_FORCE_WEAK_PER_CPU is not set
 
-CONFIG_EXT4_DEBUG=y
+# CONFIG_EXT4_DEBUG is not set
 
-CONFIG_DEBUG_PERF_USE_VMALLOC=y
+# CONFIG_DEBUG_PERF_USE_VMALLOC is not set
 
-CONFIG_JBD2_DEBUG=y
+# CONFIG_JBD2_DEBUG is not set
 
-CONFIG_DEBUG_CFQ_IOSCHED=y
+# CONFIG_DEBUG_CFQ_IOSCHED is not set
 
-CONFIG_DRBD_FAULT_INJECTION=y
+# CONFIG_DRBD_FAULT_INJECTION is not set
 
-CONFIG_ATH_DEBUG=y
-CONFIG_IWLWIFI_DEVICE_TRACING=y
+# CONFIG_ATH_DEBUG is not set
+# CONFIG_IWLWIFI_DEVICE_TRACING is not set
 
-CONFIG_DEBUG_OBJECTS_WORK=y
-CONFIG_DEBUG_STRICT_USER_COPY_CHECKS=y
+# CONFIG_DEBUG_OBJECTS_WORK is not set
+# CONFIG_DEBUG_STRICT_USER_COPY_CHECKS is not set
 
-CONFIG_DMADEVICES_DEBUG=y
-CONFIG_DMADEVICES_VDEBUG=y
+# CONFIG_DMADEVICES_DEBUG is not set
+# CONFIG_DMADEVICES_VDEBUG is not set
 
 CONFIG_PM_ADVANCED_DEBUG=y
 
-CONFIG_CEPH_FS_PRETTYDEBUG=y
-CONFIG_QUOTA_DEBUG=y
+# CONFIG_CEPH_FS_PRETTYDEBUG is not set
+# CONFIG_QUOTA_DEBUG is not set
 
-# CONFIG_CRYPTO_MANAGER_DISABLE_TESTS is not set
+CONFIG_CRYPTO_MANAGER_DISABLE_TESTS=y
 
 CONFIG_PCI_DEFAULT_USE_CRS=y
 
diff --git a/config-x86_64-generic b/config-x86_64-generic
index 713c7ef17..3d8d67b06 100644
--- a/config-x86_64-generic
+++ b/config-x86_64-generic
@@ -15,7 +15,7 @@ CONFIG_NUMA=y
 CONFIG_K8_NUMA=y
 CONFIG_X86_64_ACPI_NUMA=y
 # CONFIG_NUMA_EMU is not set
-CONFIG_NR_CPUS=512
+CONFIG_NR_CPUS=256
 CONFIG_X86_POWERNOW_K8=m
 CONFIG_X86_P4_CLOCKMOD=m
 CONFIG_IA32_EMULATION=y
diff --git a/drm-intel-2.6.37-rc2.patch b/drm-intel-2.6.37-rc2.patch
new file mode 100644
index 000000000..e2a9d28bc
--- /dev/null
+++ b/drm-intel-2.6.37-rc2.patch
@@ -0,0 +1,24423 @@
+ drivers/char/agp/Makefile               |    1 +
+ drivers/char/agp/intel-agp.c            |  201 +---
+ drivers/char/agp/intel-agp.h            |   43 +-
+ drivers/char/agp/intel-gtt.c            | 1614 ++++++++++-----------
+ drivers/gpu/drm/drm_edid.c              |   92 +-
+ drivers/gpu/drm/i915/Makefile           |    4 +-
+ drivers/gpu/drm/i915/dvo_ch7017.c       |   66 +-
+ drivers/gpu/drm/i915/dvo_ch7xxx.c       |   10 +-
+ drivers/gpu/drm/i915/dvo_ivch.c         |   10 +-
+ drivers/gpu/drm/i915/dvo_sil164.c       |   10 +-
+ drivers/gpu/drm/i915/dvo_tfp410.c       |   10 +-
+ drivers/gpu/drm/i915/i915_debugfs.c     |  337 ++++-
+ drivers/gpu/drm/i915/i915_dma.c         |  360 ++----
+ drivers/gpu/drm/i915/i915_drv.c         |  219 ++-
+ drivers/gpu/drm/i915/i915_drv.h         |  272 +++--
+ drivers/gpu/drm/i915/i915_gem.c         | 2292 +++++++++++++++---------------
+ drivers/gpu/drm/i915/i915_gem_debug.c   |  148 ++-
+ drivers/gpu/drm/i915/i915_gem_evict.c   |   72 +-
+ drivers/gpu/drm/i915/i915_gem_tiling.c  |   54 +-
+ drivers/gpu/drm/i915/i915_irq.c         |  259 ++--
+ drivers/gpu/drm/i915/i915_reg.h         |  335 +++--
+ drivers/gpu/drm/i915/i915_suspend.c     |   32 +-
+ drivers/gpu/drm/i915/intel_acpi.c       |  286 ++++
+ drivers/gpu/drm/i915/intel_bios.c       |  234 +++-
+ drivers/gpu/drm/i915/intel_bios.h       |    6 +-
+ drivers/gpu/drm/i915/intel_crt.c        |  127 +-
+ drivers/gpu/drm/i915/intel_display.c    | 2374 ++++++++++++++++---------------
+ drivers/gpu/drm/i915/intel_dp.c         |  658 ++++++---
+ drivers/gpu/drm/i915/intel_drv.h        |  161 ++-
+ drivers/gpu/drm/i915/intel_dvo.c        |   69 +-
+ drivers/gpu/drm/i915/intel_fb.c         |   29 +-
+ drivers/gpu/drm/i915/intel_hdmi.c       |  193 ++-
+ drivers/gpu/drm/i915/intel_i2c.c        |  484 +++++--
+ drivers/gpu/drm/i915/intel_lvds.c       |  445 +++---
+ drivers/gpu/drm/i915/intel_modes.c      |   16 +-
+ drivers/gpu/drm/i915/intel_opregion.c   |  517 +++++++
+ drivers/gpu/drm/i915/intel_overlay.c    | 1004 +++++++------
+ drivers/gpu/drm/i915/intel_panel.c      |  109 ++
+ drivers/gpu/drm/i915/intel_ringbuffer.c |  580 +++++---
+ drivers/gpu/drm/i915/intel_ringbuffer.h |   84 +-
+ drivers/gpu/drm/i915/intel_sdvo.c       | 1076 +++++++--------
+ drivers/gpu/drm/i915/intel_tv.c         |  165 +--
+ include/drm/drm_crtc.h                  |    1 +
+ include/drm/drm_dp_helper.h             |    3 +
+ include/drm/i915_drm.h                  |    6 +-
+ include/drm/intel-gtt.h                 |   18 +
+ 46 files changed, 8590 insertions(+), 6496 deletions(-)
+
+diff --git a/drivers/char/agp/Makefile b/drivers/char/agp/Makefile
+index 627f542..8eb56e2 100644
+--- a/drivers/char/agp/Makefile
++++ b/drivers/char/agp/Makefile
+@@ -13,6 +13,7 @@ obj-$(CONFIG_AGP_HP_ZX1)	+= hp-agp.o
+ obj-$(CONFIG_AGP_PARISC)	+= parisc-agp.o
+ obj-$(CONFIG_AGP_I460)		+= i460-agp.o
+ obj-$(CONFIG_AGP_INTEL)		+= intel-agp.o
++obj-$(CONFIG_AGP_INTEL)		+= intel-gtt.o
+ obj-$(CONFIG_AGP_NVIDIA)	+= nvidia-agp.o
+ obj-$(CONFIG_AGP_SGI_TIOCA)	+= sgi-agp.o
+ obj-$(CONFIG_AGP_SIS)		+= sis-agp.o
+diff --git a/drivers/char/agp/intel-agp.c b/drivers/char/agp/intel-agp.c
+index cd18493..e72f49d 100644
+--- a/drivers/char/agp/intel-agp.c
++++ b/drivers/char/agp/intel-agp.c
+@@ -12,9 +12,6 @@
+ #include <asm/smp.h>
+ #include "agp.h"
+ #include "intel-agp.h"
+-#include <linux/intel-gtt.h>
+-
+-#include "intel-gtt.c"
+ 
+ int intel_agp_enabled;
+ EXPORT_SYMBOL(intel_agp_enabled);
+@@ -703,179 +700,37 @@ static const struct agp_bridge_driver intel_7505_driver = {
+ 	.agp_type_to_mask_type  = agp_generic_type_to_mask_type,
+ };
+ 
+-static int find_gmch(u16 device)
+-{
+-	struct pci_dev *gmch_device;
+-
+-	gmch_device = pci_get_device(PCI_VENDOR_ID_INTEL, device, NULL);
+-	if (gmch_device && PCI_FUNC(gmch_device->devfn) != 0) {
+-		gmch_device = pci_get_device(PCI_VENDOR_ID_INTEL,
+-					     device, gmch_device);
+-	}
+-
+-	if (!gmch_device)
+-		return 0;
+-
+-	intel_private.pcidev = gmch_device;
+-	return 1;
+-}
+-
+ /* Table to describe Intel GMCH and AGP/PCIE GART drivers.  At least one of
+  * driver and gmch_driver must be non-null, and find_gmch will determine
+  * which one should be used if a gmch_chip_id is present.
+  */
+-static const struct intel_driver_description {
++static const struct intel_agp_driver_description {
+ 	unsigned int chip_id;
+-	unsigned int gmch_chip_id;
+ 	char *name;
+ 	const struct agp_bridge_driver *driver;
+-	const struct agp_bridge_driver *gmch_driver;
+ } intel_agp_chipsets[] = {
+-	{ PCI_DEVICE_ID_INTEL_82443LX_0, 0, "440LX", &intel_generic_driver, NULL },
+-	{ PCI_DEVICE_ID_INTEL_82443BX_0, 0, "440BX", &intel_generic_driver, NULL },
+-	{ PCI_DEVICE_ID_INTEL_82443GX_0, 0, "440GX", &intel_generic_driver, NULL },
+-	{ PCI_DEVICE_ID_INTEL_82810_MC1, PCI_DEVICE_ID_INTEL_82810_IG1, "i810",
+-		NULL, &intel_810_driver },
+-	{ PCI_DEVICE_ID_INTEL_82810_MC3, PCI_DEVICE_ID_INTEL_82810_IG3, "i810",
+-		NULL, &intel_810_driver },
+-	{ PCI_DEVICE_ID_INTEL_82810E_MC, PCI_DEVICE_ID_INTEL_82810E_IG, "i810",
+-		NULL, &intel_810_driver },
+-	{ PCI_DEVICE_ID_INTEL_82815_MC, PCI_DEVICE_ID_INTEL_82815_CGC, "i815",
+-		&intel_815_driver, &intel_810_driver },
+-	{ PCI_DEVICE_ID_INTEL_82820_HB, 0, "i820", &intel_820_driver, NULL },
+-	{ PCI_DEVICE_ID_INTEL_82820_UP_HB, 0, "i820", &intel_820_driver, NULL },
+-	{ PCI_DEVICE_ID_INTEL_82830_HB, PCI_DEVICE_ID_INTEL_82830_CGC, "830M",
+-		&intel_830mp_driver, &intel_830_driver },
+-	{ PCI_DEVICE_ID_INTEL_82840_HB, 0, "i840", &intel_840_driver, NULL },
+-	{ PCI_DEVICE_ID_INTEL_82845_HB, 0, "845G", &intel_845_driver, NULL },
+-	{ PCI_DEVICE_ID_INTEL_82845G_HB, PCI_DEVICE_ID_INTEL_82845G_IG, "830M",
+-		&intel_845_driver, &intel_830_driver },
+-	{ PCI_DEVICE_ID_INTEL_82850_HB, 0, "i850", &intel_850_driver, NULL },
+-	{ PCI_DEVICE_ID_INTEL_82854_HB, PCI_DEVICE_ID_INTEL_82854_IG, "854",
+-		&intel_845_driver, &intel_830_driver },
+-	{ PCI_DEVICE_ID_INTEL_82855PM_HB, 0, "855PM", &intel_845_driver, NULL },
+-	{ PCI_DEVICE_ID_INTEL_82855GM_HB, PCI_DEVICE_ID_INTEL_82855GM_IG, "855GM",
+-		&intel_845_driver, &intel_830_driver },
+-	{ PCI_DEVICE_ID_INTEL_82860_HB, 0, "i860", &intel_860_driver, NULL },
+-	{ PCI_DEVICE_ID_INTEL_82865_HB, PCI_DEVICE_ID_INTEL_82865_IG, "865",
+-		&intel_845_driver, &intel_830_driver },
+-	{ PCI_DEVICE_ID_INTEL_82875_HB, 0, "i875", &intel_845_driver, NULL },
+-	{ PCI_DEVICE_ID_INTEL_E7221_HB, PCI_DEVICE_ID_INTEL_E7221_IG, "E7221 (i915)",
+-		NULL, &intel_915_driver },
+-	{ PCI_DEVICE_ID_INTEL_82915G_HB, PCI_DEVICE_ID_INTEL_82915G_IG, "915G",
+-		NULL, &intel_915_driver },
+-	{ PCI_DEVICE_ID_INTEL_82915GM_HB, PCI_DEVICE_ID_INTEL_82915GM_IG, "915GM",
+-		NULL, &intel_915_driver },
+-	{ PCI_DEVICE_ID_INTEL_82945G_HB, PCI_DEVICE_ID_INTEL_82945G_IG, "945G",
+-		NULL, &intel_915_driver },
+-	{ PCI_DEVICE_ID_INTEL_82945GM_HB, PCI_DEVICE_ID_INTEL_82945GM_IG, "945GM",
+-		NULL, &intel_915_driver },
+-	{ PCI_DEVICE_ID_INTEL_82945GME_HB, PCI_DEVICE_ID_INTEL_82945GME_IG, "945GME",
+-		NULL, &intel_915_driver },
+-	{ PCI_DEVICE_ID_INTEL_82946GZ_HB, PCI_DEVICE_ID_INTEL_82946GZ_IG, "946GZ",
+-		NULL, &intel_i965_driver },
+-	{ PCI_DEVICE_ID_INTEL_82G35_HB, PCI_DEVICE_ID_INTEL_82G35_IG, "G35",
+-		NULL, &intel_i965_driver },
+-	{ PCI_DEVICE_ID_INTEL_82965Q_HB, PCI_DEVICE_ID_INTEL_82965Q_IG, "965Q",
+-		NULL, &intel_i965_driver },
+-	{ PCI_DEVICE_ID_INTEL_82965G_HB, PCI_DEVICE_ID_INTEL_82965G_IG, "965G",
+-		NULL, &intel_i965_driver },
+-	{ PCI_DEVICE_ID_INTEL_82965GM_HB, PCI_DEVICE_ID_INTEL_82965GM_IG, "965GM",
+-		NULL, &intel_i965_driver },
+-	{ PCI_DEVICE_ID_INTEL_82965GME_HB, PCI_DEVICE_ID_INTEL_82965GME_IG, "965GME/GLE",
+-		NULL, &intel_i965_driver },
+-	{ PCI_DEVICE_ID_INTEL_7505_0, 0, "E7505", &intel_7505_driver, NULL },
+-	{ PCI_DEVICE_ID_INTEL_7205_0, 0, "E7205", &intel_7505_driver, NULL },
+-	{ PCI_DEVICE_ID_INTEL_G33_HB, PCI_DEVICE_ID_INTEL_G33_IG, "G33",
+-		NULL, &intel_g33_driver },
+-	{ PCI_DEVICE_ID_INTEL_Q35_HB, PCI_DEVICE_ID_INTEL_Q35_IG, "Q35",
+-		NULL, &intel_g33_driver },
+-	{ PCI_DEVICE_ID_INTEL_Q33_HB, PCI_DEVICE_ID_INTEL_Q33_IG, "Q33",
+-		NULL, &intel_g33_driver },
+-	{ PCI_DEVICE_ID_INTEL_PINEVIEW_M_HB, PCI_DEVICE_ID_INTEL_PINEVIEW_M_IG, "GMA3150",
+-		NULL, &intel_g33_driver },
+-	{ PCI_DEVICE_ID_INTEL_PINEVIEW_HB, PCI_DEVICE_ID_INTEL_PINEVIEW_IG, "GMA3150",
+-		NULL, &intel_g33_driver },
+-	{ PCI_DEVICE_ID_INTEL_GM45_HB, PCI_DEVICE_ID_INTEL_GM45_IG,
+-	    "GM45", NULL, &intel_i965_driver },
+-	{ PCI_DEVICE_ID_INTEL_EAGLELAKE_HB, PCI_DEVICE_ID_INTEL_EAGLELAKE_IG,
+-	    "Eaglelake", NULL, &intel_i965_driver },
+-	{ PCI_DEVICE_ID_INTEL_Q45_HB, PCI_DEVICE_ID_INTEL_Q45_IG,
+-	    "Q45/Q43", NULL, &intel_i965_driver },
+-	{ PCI_DEVICE_ID_INTEL_G45_HB, PCI_DEVICE_ID_INTEL_G45_IG,
+-	    "G45/G43", NULL, &intel_i965_driver },
+-	{ PCI_DEVICE_ID_INTEL_B43_HB, PCI_DEVICE_ID_INTEL_B43_IG,
+-	    "B43", NULL, &intel_i965_driver },
+-	{ PCI_DEVICE_ID_INTEL_B43_1_HB, PCI_DEVICE_ID_INTEL_B43_1_IG,
+-	    "B43", NULL, &intel_i965_driver },
+-	{ PCI_DEVICE_ID_INTEL_G41_HB, PCI_DEVICE_ID_INTEL_G41_IG,
+-	    "G41", NULL, &intel_i965_driver },
+-	{ PCI_DEVICE_ID_INTEL_IRONLAKE_D_HB, PCI_DEVICE_ID_INTEL_IRONLAKE_D_IG,
+-	    "HD Graphics", NULL, &intel_i965_driver },
+-	{ PCI_DEVICE_ID_INTEL_IRONLAKE_M_HB, PCI_DEVICE_ID_INTEL_IRONLAKE_M_IG,
+-	    "HD Graphics", NULL, &intel_i965_driver },
+-	{ PCI_DEVICE_ID_INTEL_IRONLAKE_MA_HB, PCI_DEVICE_ID_INTEL_IRONLAKE_M_IG,
+-	    "HD Graphics", NULL, &intel_i965_driver },
+-	{ PCI_DEVICE_ID_INTEL_IRONLAKE_MC2_HB, PCI_DEVICE_ID_INTEL_IRONLAKE_M_IG,
+-	    "HD Graphics", NULL, &intel_i965_driver },
+-	{ PCI_DEVICE_ID_INTEL_SANDYBRIDGE_HB, PCI_DEVICE_ID_INTEL_SANDYBRIDGE_GT1_IG,
+-	    "Sandybridge", NULL, &intel_gen6_driver },
+-	{ PCI_DEVICE_ID_INTEL_SANDYBRIDGE_HB, PCI_DEVICE_ID_INTEL_SANDYBRIDGE_GT2_IG,
+-	    "Sandybridge", NULL, &intel_gen6_driver },
+-	{ PCI_DEVICE_ID_INTEL_SANDYBRIDGE_HB, PCI_DEVICE_ID_INTEL_SANDYBRIDGE_GT2_PLUS_IG,
+-	    "Sandybridge", NULL, &intel_gen6_driver },
+-	{ PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_HB, PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_GT1_IG,
+-	    "Sandybridge", NULL, &intel_gen6_driver },
+-	{ PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_HB, PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_GT2_IG,
+-	    "Sandybridge", NULL, &intel_gen6_driver },
+-	{ PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_HB, PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_GT2_PLUS_IG,
+-	    "Sandybridge", NULL, &intel_gen6_driver },
+-	{ PCI_DEVICE_ID_INTEL_SANDYBRIDGE_S_HB, PCI_DEVICE_ID_INTEL_SANDYBRIDGE_S_IG,
+-	    "Sandybridge", NULL, &intel_gen6_driver },
+-	{ 0, 0, NULL, NULL, NULL }
++	{ PCI_DEVICE_ID_INTEL_82443LX_0, "440LX", &intel_generic_driver },
++	{ PCI_DEVICE_ID_INTEL_82443BX_0, "440BX", &intel_generic_driver },
++	{ PCI_DEVICE_ID_INTEL_82443GX_0, "440GX", &intel_generic_driver },
++	{ PCI_DEVICE_ID_INTEL_82815_MC, "i815", &intel_815_driver },
++	{ PCI_DEVICE_ID_INTEL_82820_HB, "i820", &intel_820_driver },
++	{ PCI_DEVICE_ID_INTEL_82820_UP_HB, "i820", &intel_820_driver },
++	{ PCI_DEVICE_ID_INTEL_82830_HB, "830M", &intel_830mp_driver },
++	{ PCI_DEVICE_ID_INTEL_82840_HB, "i840", &intel_840_driver },
++	{ PCI_DEVICE_ID_INTEL_82845_HB, "845G", &intel_845_driver },
++	{ PCI_DEVICE_ID_INTEL_82845G_HB, "830M", &intel_845_driver },
++	{ PCI_DEVICE_ID_INTEL_82850_HB, "i850", &intel_850_driver },
++	{ PCI_DEVICE_ID_INTEL_82854_HB, "854", &intel_845_driver },
++	{ PCI_DEVICE_ID_INTEL_82855PM_HB, "855PM", &intel_845_driver },
++	{ PCI_DEVICE_ID_INTEL_82855GM_HB, "855GM", &intel_845_driver },
++	{ PCI_DEVICE_ID_INTEL_82860_HB, "i860", &intel_860_driver },
++	{ PCI_DEVICE_ID_INTEL_82865_HB, "865", &intel_845_driver },
++	{ PCI_DEVICE_ID_INTEL_82875_HB, "i875", &intel_845_driver },
++	{ PCI_DEVICE_ID_INTEL_7505_0, "E7505", &intel_7505_driver },
++	{ PCI_DEVICE_ID_INTEL_7205_0, "E7205", &intel_7505_driver },
++	{ 0, NULL, NULL }
+ };
+ 
+-static int __devinit intel_gmch_probe(struct pci_dev *pdev,
+-				      struct agp_bridge_data *bridge)
+-{
+-	int i, mask;
+-
+-	bridge->driver = NULL;
+-
+-	for (i = 0; intel_agp_chipsets[i].name != NULL; i++) {
+-		if ((intel_agp_chipsets[i].gmch_chip_id != 0) &&
+-			find_gmch(intel_agp_chipsets[i].gmch_chip_id)) {
+-			bridge->driver =
+-				intel_agp_chipsets[i].gmch_driver;
+-			break;
+-		}
+-	}
+-
+-	if (!bridge->driver)
+-		return 0;
+-
+-	bridge->dev_private_data = &intel_private;
+-	bridge->dev = pdev;
+-
+-	dev_info(&pdev->dev, "Intel %s Chipset\n", intel_agp_chipsets[i].name);
+-
+-	if (bridge->driver->mask_memory == intel_gen6_mask_memory)
+-		mask = 40;
+-	else if (bridge->driver->mask_memory == intel_i965_mask_memory)
+-		mask = 36;
+-	else
+-		mask = 32;
+-
+-	if (pci_set_dma_mask(intel_private.pcidev, DMA_BIT_MASK(mask)))
+-		dev_err(&intel_private.pcidev->dev,
+-			"set gfx device dma mask %d-bit failed!\n", mask);
+-	else
+-		pci_set_consistent_dma_mask(intel_private.pcidev,
+-					    DMA_BIT_MASK(mask));
+-
+-	return 1;
+-}
+-
+ static int __devinit agp_intel_probe(struct pci_dev *pdev,
+ 				     const struct pci_device_id *ent)
+ {
+@@ -905,7 +760,7 @@ static int __devinit agp_intel_probe(struct pci_dev *pdev,
+ 		}
+ 	}
+ 
+-	if (intel_agp_chipsets[i].name == NULL) {
++	if (!bridge->driver) {
+ 		if (cap_ptr)
+ 			dev_warn(&pdev->dev, "unsupported Intel chipset [%04x/%04x]\n",
+ 				 pdev->vendor, pdev->device);
+@@ -913,14 +768,6 @@ static int __devinit agp_intel_probe(struct pci_dev *pdev,
+ 		return -ENODEV;
+ 	}
+ 
+-	if (!bridge->driver) {
+-		if (cap_ptr)
+-			dev_warn(&pdev->dev, "can't find bridge device (chip_id: %04x)\n",
+-			    	 intel_agp_chipsets[i].gmch_chip_id);
+-		agp_put_bridge(bridge);
+-		return -ENODEV;
+-	}
+-
+ 	bridge->dev = pdev;
+ 	bridge->dev_private_data = NULL;
+ 
+@@ -972,8 +819,7 @@ static void __devexit agp_intel_remove(struct pci_dev *pdev)
+ 
+ 	agp_remove_bridge(bridge);
+ 
+-	if (intel_private.pcidev)
+-		pci_dev_put(intel_private.pcidev);
++	intel_gmch_remove(pdev);
+ 
+ 	agp_put_bridge(bridge);
+ }
+@@ -1049,6 +895,7 @@ static struct pci_device_id agp_intel_pci_table[] = {
+ 	ID(PCI_DEVICE_ID_INTEL_G45_HB),
+ 	ID(PCI_DEVICE_ID_INTEL_G41_HB),
+ 	ID(PCI_DEVICE_ID_INTEL_B43_HB),
++	ID(PCI_DEVICE_ID_INTEL_B43_1_HB),
+ 	ID(PCI_DEVICE_ID_INTEL_IRONLAKE_D_HB),
+ 	ID(PCI_DEVICE_ID_INTEL_IRONLAKE_M_HB),
+ 	ID(PCI_DEVICE_ID_INTEL_IRONLAKE_MA_HB),
+diff --git a/drivers/char/agp/intel-agp.h b/drivers/char/agp/intel-agp.h
+index d09b1ab..90539df 100644
+--- a/drivers/char/agp/intel-agp.h
++++ b/drivers/char/agp/intel-agp.h
+@@ -215,44 +215,7 @@
+ #define PCI_DEVICE_ID_INTEL_SANDYBRIDGE_S_HB		0x0108  /* Server */
+ #define PCI_DEVICE_ID_INTEL_SANDYBRIDGE_S_IG		0x010A
+ 
+-/* cover 915 and 945 variants */
+-#define IS_I915 (agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_E7221_HB || \
+-		 agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_82915G_HB || \
+-		 agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_82915GM_HB || \
+-		 agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_82945G_HB || \
+-		 agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_82945GM_HB || \
+-		 agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_82945GME_HB)
+-
+-#define IS_I965 (agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_82946GZ_HB || \
+-		 agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_82G35_HB || \
+-		 agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_82965Q_HB || \
+-		 agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_82965G_HB || \
+-		 agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_82965GM_HB || \
+-		 agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_82965GME_HB)
+-
+-#define IS_G33 (agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_G33_HB || \
+-		agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_Q35_HB || \
+-		agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_Q33_HB || \
+-		agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_PINEVIEW_M_HB || \
+-		agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_PINEVIEW_HB)
+-
+-#define IS_PINEVIEW (agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_PINEVIEW_M_HB || \
+-		agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_PINEVIEW_HB)
+-
+-#define IS_SNB (agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_SANDYBRIDGE_HB || \
+-		agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_HB || \
+-		agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_SANDYBRIDGE_S_HB)
+-
+-#define IS_G4X (agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_EAGLELAKE_HB || \
+-		agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_Q45_HB || \
+-		agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_G45_HB || \
+-		agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_GM45_HB || \
+-		agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_G41_HB || \
+-		agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_B43_HB || \
+-		agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_IRONLAKE_D_HB || \
+-		agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_IRONLAKE_M_HB || \
+-		agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_IRONLAKE_MA_HB || \
+-		agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_IRONLAKE_MC2_HB || \
+-		IS_SNB)
+-
++int intel_gmch_probe(struct pci_dev *pdev,
++			       struct agp_bridge_data *bridge);
++void intel_gmch_remove(struct pci_dev *pdev);
+ #endif
+diff --git a/drivers/char/agp/intel-gtt.c b/drivers/char/agp/intel-gtt.c
+index 75e0a34..9272c38 100644
+--- a/drivers/char/agp/intel-gtt.c
++++ b/drivers/char/agp/intel-gtt.c
+@@ -15,6 +15,18 @@
+  * /fairy-tale-mode off
+  */
+ 
++#include <linux/module.h>
++#include <linux/pci.h>
++#include <linux/init.h>
++#include <linux/kernel.h>
++#include <linux/pagemap.h>
++#include <linux/agp_backend.h>
++#include <asm/smp.h>
++#include "agp.h"
++#include "intel-agp.h"
++#include <linux/intel-gtt.h>
++#include <drm/intel-gtt.h>
++
+ /*
+  * If we have Intel graphics, we're not going to have anything other than
+  * an Intel IOMMU. So make the correct use of the PCI DMA API contingent
+@@ -23,11 +35,12 @@
+  */
+ #ifdef CONFIG_DMAR
+ #define USE_PCI_DMA_API 1
++#else
++#define USE_PCI_DMA_API 0
+ #endif
+ 
+ /* Max amount of stolen space, anything above will be returned to Linux */
+ int intel_max_stolen = 32 * 1024 * 1024;
+-EXPORT_SYMBOL(intel_max_stolen);
+ 
+ static const struct aper_size_info_fixed intel_i810_sizes[] =
+ {
+@@ -55,32 +68,36 @@ static struct gatt_mask intel_i810_masks[] =
+ #define INTEL_AGP_CACHED_MEMORY_LLC_MLC        3
+ #define INTEL_AGP_CACHED_MEMORY_LLC_MLC_GFDT   4
+ 
+-static struct gatt_mask intel_gen6_masks[] =
+-{
+-	{.mask = I810_PTE_VALID | GEN6_PTE_UNCACHED,
+-	 .type = INTEL_AGP_UNCACHED_MEMORY },
+-	{.mask = I810_PTE_VALID | GEN6_PTE_LLC,
+-         .type = INTEL_AGP_CACHED_MEMORY_LLC },
+-	{.mask = I810_PTE_VALID | GEN6_PTE_LLC | GEN6_PTE_GFDT,
+-         .type = INTEL_AGP_CACHED_MEMORY_LLC_GFDT },
+-	{.mask = I810_PTE_VALID | GEN6_PTE_LLC_MLC,
+-         .type = INTEL_AGP_CACHED_MEMORY_LLC_MLC },
+-	{.mask = I810_PTE_VALID | GEN6_PTE_LLC_MLC | GEN6_PTE_GFDT,
+-         .type = INTEL_AGP_CACHED_MEMORY_LLC_MLC_GFDT },
++struct intel_gtt_driver {
++	unsigned int gen : 8;
++	unsigned int is_g33 : 1;
++	unsigned int is_pineview : 1;
++	unsigned int is_ironlake : 1;
++	unsigned int dma_mask_size : 8;
++	/* Chipset specific GTT setup */
++	int (*setup)(void);
++	/* This should undo anything done in ->setup() save the unmapping
++	 * of the mmio register file, which is done in the generic code. */
++	void (*cleanup)(void);
++	void (*write_entry)(dma_addr_t addr, unsigned int entry, unsigned int flags);
++	/* Flags is a more or less chipset specific opaque value.
++	 * For chipsets that need to support old ums (non-gem) code, this
++	 * needs to be identical to the various supported agp memory types! */
++	bool (*check_flags)(unsigned int flags);
++	void (*chipset_flush)(void);
+ };
+ 
+ static struct _intel_private {
++	struct intel_gtt base;
++	const struct intel_gtt_driver *driver;
+ 	struct pci_dev *pcidev;	/* device one */
++	struct pci_dev *bridge_dev;
+ 	u8 __iomem *registers;
++	phys_addr_t gtt_bus_addr;
++	phys_addr_t gma_bus_addr;
++	phys_addr_t pte_bus_addr;
+ 	u32 __iomem *gtt;		/* I915G */
+ 	int num_dcache_entries;
+-	/* gtt_entries is the number of gtt entries that are already mapped
+-	 * to stolen memory.  Stolen memory is larger than the memory mapped
+-	 * through gtt_entries, as it includes some reserved space for the BIOS
+-	 * popup and for the GTT.
+-	 */
+-	int gtt_entries;			/* i830+ */
+-	int gtt_total_size;
+ 	union {
+ 		void __iomem *i9xx_flush_page;
+ 		void *i8xx_flush_page;
+@@ -88,23 +105,14 @@ static struct _intel_private {
+ 	struct page *i8xx_page;
+ 	struct resource ifp_resource;
+ 	int resource_valid;
++	struct page *scratch_page;
++	dma_addr_t scratch_page_dma;
+ } intel_private;
+ 
+-#ifdef USE_PCI_DMA_API
+-static int intel_agp_map_page(struct page *page, dma_addr_t *ret)
+-{
+-	*ret = pci_map_page(intel_private.pcidev, page, 0,
+-			    PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
+-	if (pci_dma_mapping_error(intel_private.pcidev, *ret))
+-		return -EINVAL;
+-	return 0;
+-}
+-
+-static void intel_agp_unmap_page(struct page *page, dma_addr_t dma)
+-{
+-	pci_unmap_page(intel_private.pcidev, dma,
+-		       PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
+-}
++#define INTEL_GTT_GEN	intel_private.driver->gen
++#define IS_G33		intel_private.driver->is_g33
++#define IS_PINEVIEW	intel_private.driver->is_pineview
++#define IS_IRONLAKE	intel_private.driver->is_ironlake
+ 
+ static void intel_agp_free_sglist(struct agp_memory *mem)
+ {
+@@ -125,6 +133,9 @@ static int intel_agp_map_memory(struct agp_memory *mem)
+ 	struct scatterlist *sg;
+ 	int i;
+ 
++	if (mem->sg_list)
++		return 0; /* already mapped (for e.g. resume) */
++
+ 	DBG("try mapping %lu pages\n", (unsigned long)mem->page_count);
+ 
+ 	if (sg_alloc_table(&st, mem->page_count, GFP_KERNEL))
+@@ -156,70 +167,17 @@ static void intel_agp_unmap_memory(struct agp_memory *mem)
+ 	intel_agp_free_sglist(mem);
+ }
+ 
+-static void intel_agp_insert_sg_entries(struct agp_memory *mem,
+-					off_t pg_start, int mask_type)
+-{
+-	struct scatterlist *sg;
+-	int i, j;
+-
+-	j = pg_start;
+-
+-	WARN_ON(!mem->num_sg);
+-
+-	if (mem->num_sg == mem->page_count) {
+-		for_each_sg(mem->sg_list, sg, mem->page_count, i) {
+-			writel(agp_bridge->driver->mask_memory(agp_bridge,
+-					sg_dma_address(sg), mask_type),
+-					intel_private.gtt+j);
+-			j++;
+-		}
+-	} else {
+-		/* sg may merge pages, but we have to separate
+-		 * per-page addr for GTT */
+-		unsigned int len, m;
+-
+-		for_each_sg(mem->sg_list, sg, mem->num_sg, i) {
+-			len = sg_dma_len(sg) / PAGE_SIZE;
+-			for (m = 0; m < len; m++) {
+-				writel(agp_bridge->driver->mask_memory(agp_bridge,
+-								       sg_dma_address(sg) + m * PAGE_SIZE,
+-								       mask_type),
+-				       intel_private.gtt+j);
+-				j++;
+-			}
+-		}
+-	}
+-	readl(intel_private.gtt+j-1);
+-}
+-
+-#else
+-
+-static void intel_agp_insert_sg_entries(struct agp_memory *mem,
+-					off_t pg_start, int mask_type)
+-{
+-	int i, j;
+-
+-	for (i = 0, j = pg_start; i < mem->page_count; i++, j++) {
+-		writel(agp_bridge->driver->mask_memory(agp_bridge,
+-				page_to_phys(mem->pages[i]), mask_type),
+-		       intel_private.gtt+j);
+-	}
+-
+-	readl(intel_private.gtt+j-1);
+-}
+-
+-#endif
+-
+ static int intel_i810_fetch_size(void)
+ {
+ 	u32 smram_miscc;
+ 	struct aper_size_info_fixed *values;
+ 
+-	pci_read_config_dword(agp_bridge->dev, I810_SMRAM_MISCC, &smram_miscc);
++	pci_read_config_dword(intel_private.bridge_dev,
++			      I810_SMRAM_MISCC, &smram_miscc);
+ 	values = A_SIZE_FIX(agp_bridge->driver->aperture_sizes);
+ 
+ 	if ((smram_miscc & I810_GMS) == I810_GMS_DISABLE) {
+-		dev_warn(&agp_bridge->dev->dev, "i810 is disabled\n");
++		dev_warn(&intel_private.bridge_dev->dev, "i810 is disabled\n");
+ 		return 0;
+ 	}
+ 	if ((smram_miscc & I810_GFX_MEM_WIN_SIZE) == I810_GFX_MEM_WIN_32M) {
+@@ -284,7 +242,7 @@ static void intel_i810_cleanup(void)
+ 	iounmap(intel_private.registers);
+ }
+ 
+-static void intel_i810_agp_enable(struct agp_bridge_data *bridge, u32 mode)
++static void intel_fake_agp_enable(struct agp_bridge_data *bridge, u32 mode)
+ {
+ 	return;
+ }
+@@ -319,34 +277,6 @@ static void i8xx_destroy_pages(struct page *page)
+ 	atomic_dec(&agp_bridge->current_memory_agp);
+ }
+ 
+-static int intel_i830_type_to_mask_type(struct agp_bridge_data *bridge,
+-					int type)
+-{
+-	if (type < AGP_USER_TYPES)
+-		return type;
+-	else if (type == AGP_USER_CACHED_MEMORY)
+-		return INTEL_AGP_CACHED_MEMORY;
+-	else
+-		return 0;
+-}
+-
+-static int intel_gen6_type_to_mask_type(struct agp_bridge_data *bridge,
+-					int type)
+-{
+-	unsigned int type_mask = type & ~AGP_USER_CACHED_MEMORY_GFDT;
+-	unsigned int gfdt = type & AGP_USER_CACHED_MEMORY_GFDT;
+-
+-	if (type_mask == AGP_USER_UNCACHED_MEMORY)
+-		return INTEL_AGP_UNCACHED_MEMORY;
+-	else if (type_mask == AGP_USER_CACHED_MEMORY_LLC_MLC)
+-		return gfdt ? INTEL_AGP_CACHED_MEMORY_LLC_MLC_GFDT :
+-			      INTEL_AGP_CACHED_MEMORY_LLC_MLC;
+-	else /* set 'normal'/'cached' to LLC by default */
+-		return gfdt ? INTEL_AGP_CACHED_MEMORY_LLC_GFDT :
+-			      INTEL_AGP_CACHED_MEMORY_LLC;
+-}
+-
+-
+ static int intel_i810_insert_entries(struct agp_memory *mem, off_t pg_start,
+ 				int type)
+ {
+@@ -514,8 +444,33 @@ static unsigned long intel_i810_mask_memory(struct agp_bridge_data *bridge,
+ 	return addr | bridge->driver->masks[type].mask;
+ }
+ 
+-static struct aper_size_info_fixed intel_i830_sizes[] =
++static int intel_gtt_setup_scratch_page(void)
+ {
++	struct page *page;
++	dma_addr_t dma_addr;
++
++	page = alloc_page(GFP_KERNEL | GFP_DMA32 | __GFP_ZERO);
++	if (page == NULL)
++		return -ENOMEM;
++	get_page(page);
++	set_pages_uc(page, 1);
++
++	if (USE_PCI_DMA_API && INTEL_GTT_GEN > 2) {
++		dma_addr = pci_map_page(intel_private.pcidev, page, 0,
++				    PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
++		if (pci_dma_mapping_error(intel_private.pcidev, dma_addr))
++			return -EINVAL;
++
++		intel_private.scratch_page_dma = dma_addr;
++	} else
++		intel_private.scratch_page_dma = page_to_phys(page);
++
++	intel_private.scratch_page = page;
++
++	return 0;
++}
++
++static const struct aper_size_info_fixed intel_fake_agp_sizes[] = {
+ 	{128, 32768, 5},
+ 	/* The 64M mode still requires a 128k gatt */
+ 	{64, 16384, 5},
+@@ -523,102 +478,49 @@ static struct aper_size_info_fixed intel_i830_sizes[] =
+ 	{512, 131072, 7},
+ };
+ 
+-static void intel_i830_init_gtt_entries(void)
++static unsigned int intel_gtt_stolen_entries(void)
+ {
+ 	u16 gmch_ctrl;
+-	int gtt_entries = 0;
+ 	u8 rdct;
+ 	int local = 0;
+ 	static const int ddt[4] = { 0, 16, 32, 64 };
+-	int size; /* reserved space (in kb) at the top of stolen memory */
++	unsigned int overhead_entries, stolen_entries;
++	unsigned int stolen_size = 0;
+ 
+-	pci_read_config_word(agp_bridge->dev, I830_GMCH_CTRL, &gmch_ctrl);
++	pci_read_config_word(intel_private.bridge_dev,
++			     I830_GMCH_CTRL, &gmch_ctrl);
+ 
+-	if (IS_I965) {
+-		u32 pgetbl_ctl;
+-		pgetbl_ctl = readl(intel_private.registers+I810_PGETBL_CTL);
++	if (INTEL_GTT_GEN > 4 || IS_PINEVIEW)
++		overhead_entries = 0;
++	else
++		overhead_entries = intel_private.base.gtt_mappable_entries
++			/ 1024;
+ 
+-		/* The 965 has a field telling us the size of the GTT,
+-		 * which may be larger than what is necessary to map the
+-		 * aperture.
+-		 */
+-		switch (pgetbl_ctl & I965_PGETBL_SIZE_MASK) {
+-		case I965_PGETBL_SIZE_128KB:
+-			size = 128;
+-			break;
+-		case I965_PGETBL_SIZE_256KB:
+-			size = 256;
+-			break;
+-		case I965_PGETBL_SIZE_512KB:
+-			size = 512;
+-			break;
+-		case I965_PGETBL_SIZE_1MB:
+-			size = 1024;
+-			break;
+-		case I965_PGETBL_SIZE_2MB:
+-			size = 2048;
+-			break;
+-		case I965_PGETBL_SIZE_1_5MB:
+-			size = 1024 + 512;
+-			break;
+-		default:
+-			dev_info(&intel_private.pcidev->dev,
+-				 "unknown page table size, assuming 512KB\n");
+-			size = 512;
+-		}
+-		size += 4; /* add in BIOS popup space */
+-	} else if (IS_G33 && !IS_PINEVIEW) {
+-	/* G33's GTT size defined in gmch_ctrl */
+-		switch (gmch_ctrl & G33_PGETBL_SIZE_MASK) {
+-		case G33_PGETBL_SIZE_1M:
+-			size = 1024;
+-			break;
+-		case G33_PGETBL_SIZE_2M:
+-			size = 2048;
+-			break;
+-		default:
+-			dev_info(&agp_bridge->dev->dev,
+-				 "unknown page table size 0x%x, assuming 512KB\n",
+-				(gmch_ctrl & G33_PGETBL_SIZE_MASK));
+-			size = 512;
+-		}
+-		size += 4;
+-	} else if (IS_G4X || IS_PINEVIEW) {
+-		/* On 4 series hardware, GTT stolen is separate from graphics
+-		 * stolen, ignore it in stolen gtt entries counting.  However,
+-		 * 4KB of the stolen memory doesn't get mapped to the GTT.
+-		 */
+-		size = 4;
+-	} else {
+-		/* On previous hardware, the GTT size was just what was
+-		 * required to map the aperture.
+-		 */
+-		size = agp_bridge->driver->fetch_size() + 4;
+-	}
++	overhead_entries += 1; /* BIOS popup */
+ 
+-	if (agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_82830_HB ||
+-	    agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_82845G_HB) {
++	if (intel_private.bridge_dev->device == PCI_DEVICE_ID_INTEL_82830_HB ||
++	    intel_private.bridge_dev->device == PCI_DEVICE_ID_INTEL_82845G_HB) {
+ 		switch (gmch_ctrl & I830_GMCH_GMS_MASK) {
+ 		case I830_GMCH_GMS_STOLEN_512:
+-			gtt_entries = KB(512) - KB(size);
++			stolen_size = KB(512);
+ 			break;
+ 		case I830_GMCH_GMS_STOLEN_1024:
+-			gtt_entries = MB(1) - KB(size);
++			stolen_size = MB(1);
+ 			break;
+ 		case I830_GMCH_GMS_STOLEN_8192:
+-			gtt_entries = MB(8) - KB(size);
++			stolen_size = MB(8);
+ 			break;
+ 		case I830_GMCH_GMS_LOCAL:
+ 			rdct = readb(intel_private.registers+I830_RDRAM_CHANNEL_TYPE);
+-			gtt_entries = (I830_RDRAM_ND(rdct) + 1) *
++			stolen_size = (I830_RDRAM_ND(rdct) + 1) *
+ 					MB(ddt[I830_RDRAM_DDT(rdct)]);
+ 			local = 1;
+ 			break;
+ 		default:
+-			gtt_entries = 0;
++			stolen_size = 0;
+ 			break;
+ 		}
+-	} else if (IS_SNB) {
++	} else if (INTEL_GTT_GEN == 6) {
+ 		/*
+ 		 * SandyBridge has new memory control reg at 0x50.w
+ 		 */
+@@ -626,149 +528,292 @@ static void intel_i830_init_gtt_entries(void)
+ 		pci_read_config_word(intel_private.pcidev, SNB_GMCH_CTRL, &snb_gmch_ctl);
+ 		switch (snb_gmch_ctl & SNB_GMCH_GMS_STOLEN_MASK) {
+ 		case SNB_GMCH_GMS_STOLEN_32M:
+-			gtt_entries = MB(32) - KB(size);
++			stolen_size = MB(32);
+ 			break;
+ 		case SNB_GMCH_GMS_STOLEN_64M:
+-			gtt_entries = MB(64) - KB(size);
++			stolen_size = MB(64);
+ 			break;
+ 		case SNB_GMCH_GMS_STOLEN_96M:
+-			gtt_entries = MB(96) - KB(size);
++			stolen_size = MB(96);
+ 			break;
+ 		case SNB_GMCH_GMS_STOLEN_128M:
+-			gtt_entries = MB(128) - KB(size);
++			stolen_size = MB(128);
+ 			break;
+ 		case SNB_GMCH_GMS_STOLEN_160M:
+-			gtt_entries = MB(160) - KB(size);
++			stolen_size = MB(160);
+ 			break;
+ 		case SNB_GMCH_GMS_STOLEN_192M:
+-			gtt_entries = MB(192) - KB(size);
++			stolen_size = MB(192);
+ 			break;
+ 		case SNB_GMCH_GMS_STOLEN_224M:
+-			gtt_entries = MB(224) - KB(size);
++			stolen_size = MB(224);
+ 			break;
+ 		case SNB_GMCH_GMS_STOLEN_256M:
+-			gtt_entries = MB(256) - KB(size);
++			stolen_size = MB(256);
+ 			break;
+ 		case SNB_GMCH_GMS_STOLEN_288M:
+-			gtt_entries = MB(288) - KB(size);
++			stolen_size = MB(288);
+ 			break;
+ 		case SNB_GMCH_GMS_STOLEN_320M:
+-			gtt_entries = MB(320) - KB(size);
++			stolen_size = MB(320);
+ 			break;
+ 		case SNB_GMCH_GMS_STOLEN_352M:
+-			gtt_entries = MB(352) - KB(size);
++			stolen_size = MB(352);
+ 			break;
+ 		case SNB_GMCH_GMS_STOLEN_384M:
+-			gtt_entries = MB(384) - KB(size);
++			stolen_size = MB(384);
+ 			break;
+ 		case SNB_GMCH_GMS_STOLEN_416M:
+-			gtt_entries = MB(416) - KB(size);
++			stolen_size = MB(416);
+ 			break;
+ 		case SNB_GMCH_GMS_STOLEN_448M:
+-			gtt_entries = MB(448) - KB(size);
++			stolen_size = MB(448);
+ 			break;
+ 		case SNB_GMCH_GMS_STOLEN_480M:
+-			gtt_entries = MB(480) - KB(size);
++			stolen_size = MB(480);
+ 			break;
+ 		case SNB_GMCH_GMS_STOLEN_512M:
+-			gtt_entries = MB(512) - KB(size);
++			stolen_size = MB(512);
+ 			break;
+ 		}
+ 	} else {
+ 		switch (gmch_ctrl & I855_GMCH_GMS_MASK) {
+ 		case I855_GMCH_GMS_STOLEN_1M:
+-			gtt_entries = MB(1) - KB(size);
++			stolen_size = MB(1);
+ 			break;
+ 		case I855_GMCH_GMS_STOLEN_4M:
+-			gtt_entries = MB(4) - KB(size);
++			stolen_size = MB(4);
+ 			break;
+ 		case I855_GMCH_GMS_STOLEN_8M:
+-			gtt_entries = MB(8) - KB(size);
++			stolen_size = MB(8);
+ 			break;
+ 		case I855_GMCH_GMS_STOLEN_16M:
+-			gtt_entries = MB(16) - KB(size);
++			stolen_size = MB(16);
+ 			break;
+ 		case I855_GMCH_GMS_STOLEN_32M:
+-			gtt_entries = MB(32) - KB(size);
++			stolen_size = MB(32);
+ 			break;
+ 		case I915_GMCH_GMS_STOLEN_48M:
+-			/* Check it's really I915G */
+-			if (IS_I915 || IS_I965 || IS_G33 || IS_G4X)
+-				gtt_entries = MB(48) - KB(size);
+-			else
+-				gtt_entries = 0;
++			stolen_size = MB(48);
+ 			break;
+ 		case I915_GMCH_GMS_STOLEN_64M:
+-			/* Check it's really I915G */
+-			if (IS_I915 || IS_I965 || IS_G33 || IS_G4X)
+-				gtt_entries = MB(64) - KB(size);
+-			else
+-				gtt_entries = 0;
++			stolen_size = MB(64);
+ 			break;
+ 		case G33_GMCH_GMS_STOLEN_128M:
+-			if (IS_G33 || IS_I965 || IS_G4X)
+-				gtt_entries = MB(128) - KB(size);
+-			else
+-				gtt_entries = 0;
++			stolen_size = MB(128);
+ 			break;
+ 		case G33_GMCH_GMS_STOLEN_256M:
+-			if (IS_G33 || IS_I965 || IS_G4X)
+-				gtt_entries = MB(256) - KB(size);
+-			else
+-				gtt_entries = 0;
++			stolen_size = MB(256);
+ 			break;
+ 		case INTEL_GMCH_GMS_STOLEN_96M:
+-			if (IS_I965 || IS_G4X)
+-				gtt_entries = MB(96) - KB(size);
+-			else
+-				gtt_entries = 0;
++			stolen_size = MB(96);
+ 			break;
+ 		case INTEL_GMCH_GMS_STOLEN_160M:
+-			if (IS_I965 || IS_G4X)
+-				gtt_entries = MB(160) - KB(size);
+-			else
+-				gtt_entries = 0;
++			stolen_size = MB(160);
+ 			break;
+ 		case INTEL_GMCH_GMS_STOLEN_224M:
+-			if (IS_I965 || IS_G4X)
+-				gtt_entries = MB(224) - KB(size);
+-			else
+-				gtt_entries = 0;
++			stolen_size = MB(224);
+ 			break;
+ 		case INTEL_GMCH_GMS_STOLEN_352M:
+-			if (IS_I965 || IS_G4X)
+-				gtt_entries = MB(352) - KB(size);
+-			else
+-				gtt_entries = 0;
++			stolen_size = MB(352);
+ 			break;
+ 		default:
+-			gtt_entries = 0;
++			stolen_size = 0;
+ 			break;
+ 		}
+ 	}
+-	if (!local && gtt_entries > intel_max_stolen) {
+-		dev_info(&agp_bridge->dev->dev,
++
++	if (!local && stolen_size > intel_max_stolen) {
++		dev_info(&intel_private.bridge_dev->dev,
+ 			 "detected %dK stolen memory, trimming to %dK\n",
+-			 gtt_entries / KB(1), intel_max_stolen / KB(1));
+-		gtt_entries = intel_max_stolen / KB(4);
+-	} else if (gtt_entries > 0) {
+-		dev_info(&agp_bridge->dev->dev, "detected %dK %s memory\n",
+-		       gtt_entries / KB(1), local ? "local" : "stolen");
+-		gtt_entries /= KB(4);
++			 stolen_size / KB(1), intel_max_stolen / KB(1));
++		stolen_size = intel_max_stolen;
++	} else if (stolen_size > 0) {
++		dev_info(&intel_private.bridge_dev->dev, "detected %dK %s memory\n",
++		       stolen_size / KB(1), local ? "local" : "stolen");
+ 	} else {
+-		dev_info(&agp_bridge->dev->dev,
++		dev_info(&intel_private.bridge_dev->dev,
+ 		       "no pre-allocated video memory detected\n");
+-		gtt_entries = 0;
++		stolen_size = 0;
++	}
++
++	stolen_entries = stolen_size/KB(4) - overhead_entries;
++
++	return stolen_entries;
++}
++
++static unsigned int intel_gtt_total_entries(void)
++{
++	int size;
++
++	if (IS_G33 || INTEL_GTT_GEN == 4 || INTEL_GTT_GEN == 5) {
++		u32 pgetbl_ctl;
++		pgetbl_ctl = readl(intel_private.registers+I810_PGETBL_CTL);
++
++		switch (pgetbl_ctl & I965_PGETBL_SIZE_MASK) {
++		case I965_PGETBL_SIZE_128KB:
++			size = KB(128);
++			break;
++		case I965_PGETBL_SIZE_256KB:
++			size = KB(256);
++			break;
++		case I965_PGETBL_SIZE_512KB:
++			size = KB(512);
++			break;
++		case I965_PGETBL_SIZE_1MB:
++			size = KB(1024);
++			break;
++		case I965_PGETBL_SIZE_2MB:
++			size = KB(2048);
++			break;
++		case I965_PGETBL_SIZE_1_5MB:
++			size = KB(1024 + 512);
++			break;
++		default:
++			dev_info(&intel_private.pcidev->dev,
++				 "unknown page table size, assuming 512KB\n");
++			size = KB(512);
++		}
++
++		return size/4;
++	} else if (INTEL_GTT_GEN == 6) {
++		u16 snb_gmch_ctl;
++
++		pci_read_config_word(intel_private.pcidev, SNB_GMCH_CTRL, &snb_gmch_ctl);
++		switch (snb_gmch_ctl & SNB_GTT_SIZE_MASK) {
++		default:
++		case SNB_GTT_SIZE_0M:
++			printk(KERN_ERR "Bad GTT size mask: 0x%04x.\n", snb_gmch_ctl);
++			size = MB(0);
++			break;
++		case SNB_GTT_SIZE_1M:
++			size = MB(1);
++			break;
++		case SNB_GTT_SIZE_2M:
++			size = MB(2);
++			break;
++		}
++		return size/4;
++	} else {
++		/* On previous hardware, the GTT size was just what was
++		 * required to map the aperture.
++		 */
++		return intel_private.base.gtt_mappable_entries;
++	}
++}
++
++static unsigned int intel_gtt_mappable_entries(void)
++{
++	unsigned int aperture_size;
++
++	if (INTEL_GTT_GEN == 2) {
++		u16 gmch_ctrl;
++
++		pci_read_config_word(intel_private.bridge_dev,
++				     I830_GMCH_CTRL, &gmch_ctrl);
++
++		if ((gmch_ctrl & I830_GMCH_MEM_MASK) == I830_GMCH_MEM_64M)
++			aperture_size = MB(64);
++		else
++			aperture_size = MB(128);
++	} else {
++		/* 9xx supports large sizes, just look at the length */
++		aperture_size = pci_resource_len(intel_private.pcidev, 2);
++	}
++
++	return aperture_size >> PAGE_SHIFT;
++}
++
++static void intel_gtt_teardown_scratch_page(void)
++{
++	set_pages_wb(intel_private.scratch_page, 1);
++	pci_unmap_page(intel_private.pcidev, intel_private.scratch_page_dma,
++		       PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
++	put_page(intel_private.scratch_page);
++	__free_page(intel_private.scratch_page);
++}
++
++static void intel_gtt_cleanup(void)
++{
++	intel_private.driver->cleanup();
++
++	iounmap(intel_private.gtt);
++	iounmap(intel_private.registers);
++
++	intel_gtt_teardown_scratch_page();
++}
++
++static int intel_gtt_init(void)
++{
++	u32 gtt_map_size;
++	int ret;
++
++	ret = intel_private.driver->setup();
++	if (ret != 0)
++		return ret;
++
++	intel_private.base.gtt_mappable_entries = intel_gtt_mappable_entries();
++	intel_private.base.gtt_total_entries = intel_gtt_total_entries();
++
++	dev_info(&intel_private.bridge_dev->dev,
++			"detected gtt size: %dK total, %dK mappable\n",
++			intel_private.base.gtt_total_entries * 4,
++			intel_private.base.gtt_mappable_entries * 4);
++
++	gtt_map_size = intel_private.base.gtt_total_entries * 4;
++
++	intel_private.gtt = ioremap(intel_private.gtt_bus_addr,
++				    gtt_map_size);
++	if (!intel_private.gtt) {
++		intel_private.driver->cleanup();
++		iounmap(intel_private.registers);
++		return -ENOMEM;
++	}
++
++	global_cache_flush();   /* FIXME: ? */
++
++	/* we have to call this as early as possible after the MMIO base address is known */
++	intel_private.base.gtt_stolen_entries = intel_gtt_stolen_entries();
++	if (intel_private.base.gtt_stolen_entries == 0) {
++		intel_private.driver->cleanup();
++		iounmap(intel_private.registers);
++		iounmap(intel_private.gtt);
++		return -ENOMEM;
++	}
++
++	ret = intel_gtt_setup_scratch_page();
++	if (ret != 0) {
++		intel_gtt_cleanup();
++		return ret;
++	}
++
++	return 0;
++}
++
++static int intel_fake_agp_fetch_size(void)
++{
++	int num_sizes = ARRAY_SIZE(intel_fake_agp_sizes);
++	unsigned int aper_size;
++	int i;
++
++	aper_size = (intel_private.base.gtt_mappable_entries << PAGE_SHIFT)
++		    / MB(1);
++
++	for (i = 0; i < num_sizes; i++) {
++		if (aper_size == intel_fake_agp_sizes[i].size) {
++			agp_bridge->current_size =
++				(void *) (intel_fake_agp_sizes + i);
++			return aper_size;
++		}
+ 	}
+ 
+-	intel_private.gtt_entries = gtt_entries;
++	return 0;
+ }
+ 
+-static void intel_i830_fini_flush(void)
++static void i830_cleanup(void)
+ {
+ 	kunmap(intel_private.i8xx_page);
+ 	intel_private.i8xx_flush_page = NULL;
+-	unmap_page_from_agp(intel_private.i8xx_page);
+ 
+ 	__free_page(intel_private.i8xx_page);
+ 	intel_private.i8xx_page = NULL;
+@@ -780,13 +825,13 @@ static void intel_i830_setup_flush(void)
+ 	if (intel_private.i8xx_page)
+ 		return;
+ 
+-	intel_private.i8xx_page = alloc_page(GFP_KERNEL | __GFP_ZERO | GFP_DMA32);
++	intel_private.i8xx_page = alloc_page(GFP_KERNEL);
+ 	if (!intel_private.i8xx_page)
+ 		return;
+ 
+ 	intel_private.i8xx_flush_page = kmap(intel_private.i8xx_page);
+ 	if (!intel_private.i8xx_flush_page)
+-		intel_i830_fini_flush();
++		i830_cleanup();
+ }
+ 
+ /* The chipset_flush interface needs to get data that has already been
+@@ -799,7 +844,7 @@ static void intel_i830_setup_flush(void)
+  * that buffer out, we just fill 1KB and clflush it out, on the assumption
+  * that it'll push whatever was in there out.  It appears to work.
+  */
+-static void intel_i830_chipset_flush(struct agp_bridge_data *bridge)
++static void i830_chipset_flush(void)
+ {
+ 	unsigned int *pg = intel_private.i8xx_flush_page;
+ 
+@@ -811,169 +856,184 @@ static void intel_i830_chipset_flush(struct agp_bridge_data *bridge)
+ 		printk(KERN_ERR "Timed out waiting for cache flush.\n");
+ }
+ 
+-/* The intel i830 automatically initializes the agp aperture during POST.
+- * Use the memory already set aside for in the GTT.
+- */
+-static int intel_i830_create_gatt_table(struct agp_bridge_data *bridge)
++static void i830_write_entry(dma_addr_t addr, unsigned int entry,
++			     unsigned int flags)
+ {
+-	int page_order;
+-	struct aper_size_info_fixed *size;
+-	int num_entries;
+-	u32 temp;
++	u32 pte_flags = I810_PTE_VALID;
++
++	switch (flags) {
++	case AGP_DCACHE_MEMORY:
++		pte_flags |= I810_PTE_LOCAL;
++		break;
++	case AGP_USER_CACHED_MEMORY:
++		pte_flags |= I830_PTE_SYSTEM_CACHED;
++		break;
++	}
+ 
+-	size = agp_bridge->current_size;
+-	page_order = size->page_order;
+-	num_entries = size->num_entries;
+-	agp_bridge->gatt_table_real = NULL;
++	writel(addr | pte_flags, intel_private.gtt + entry);
++}
+ 
+-	pci_read_config_dword(intel_private.pcidev, I810_MMADDR, &temp);
+-	temp &= 0xfff80000;
++static void intel_enable_gtt(void)
++{
++	u32 gma_addr;
++	u16 gmch_ctrl;
+ 
+-	intel_private.registers = ioremap(temp, 128 * 4096);
+-	if (!intel_private.registers)
+-		return -ENOMEM;
++	if (INTEL_GTT_GEN == 2)
++		pci_read_config_dword(intel_private.pcidev, I810_GMADDR,
++				      &gma_addr);
++	else
++		pci_read_config_dword(intel_private.pcidev, I915_GMADDR,
++				      &gma_addr);
+ 
+-	temp = readl(intel_private.registers+I810_PGETBL_CTL) & 0xfffff000;
+-	global_cache_flush();	/* FIXME: ?? */
++	intel_private.gma_bus_addr = (gma_addr & PCI_BASE_ADDRESS_MEM_MASK);
+ 
+-	/* we have to call this as early as possible after the MMIO base address is known */
+-	intel_i830_init_gtt_entries();
+-	if (intel_private.gtt_entries == 0) {
+-		iounmap(intel_private.registers);
++	pci_read_config_word(intel_private.bridge_dev, I830_GMCH_CTRL, &gmch_ctrl);
++	gmch_ctrl |= I830_GMCH_ENABLED;
++	pci_write_config_word(intel_private.bridge_dev, I830_GMCH_CTRL, gmch_ctrl);
++
++	writel(intel_private.pte_bus_addr|I810_PGETBL_ENABLED,
++	       intel_private.registers+I810_PGETBL_CTL);
++	readl(intel_private.registers+I810_PGETBL_CTL);	/* PCI Posting. */
++}
++
++static int i830_setup(void)
++{
++	u32 reg_addr;
++
++	pci_read_config_dword(intel_private.pcidev, I810_MMADDR, &reg_addr);
++	reg_addr &= 0xfff80000;
++
++	intel_private.registers = ioremap(reg_addr, KB(64));
++	if (!intel_private.registers)
+ 		return -ENOMEM;
+-	}
+ 
+-	agp_bridge->gatt_table = NULL;
++	intel_private.gtt_bus_addr = reg_addr + I810_PTE_BASE;
++	intel_private.pte_bus_addr =
++		readl(intel_private.registers+I810_PGETBL_CTL) & 0xfffff000;
+ 
+-	agp_bridge->gatt_bus_addr = temp;
++	intel_i830_setup_flush();
+ 
+ 	return 0;
+ }
+ 
+-/* Return the gatt table to a sane state. Use the top of stolen
+- * memory for the GTT.
+- */
+-static int intel_i830_free_gatt_table(struct agp_bridge_data *bridge)
++static int intel_fake_agp_create_gatt_table(struct agp_bridge_data *bridge)
+ {
++	agp_bridge->gatt_table_real = NULL;
++	agp_bridge->gatt_table = NULL;
++	agp_bridge->gatt_bus_addr = 0;
++
+ 	return 0;
+ }
+ 
+-static int intel_i830_fetch_size(void)
++static int intel_fake_agp_free_gatt_table(struct agp_bridge_data *bridge)
+ {
+-	u16 gmch_ctrl;
+-	struct aper_size_info_fixed *values;
++	return 0;
++}
+ 
+-	values = A_SIZE_FIX(agp_bridge->driver->aperture_sizes);
++static int intel_fake_agp_configure(void)
++{
++	int i;
+ 
+-	if (agp_bridge->dev->device != PCI_DEVICE_ID_INTEL_82830_HB &&
+-	    agp_bridge->dev->device != PCI_DEVICE_ID_INTEL_82845G_HB) {
+-		/* 855GM/852GM/865G has 128MB aperture size */
+-		agp_bridge->current_size = (void *) values;
+-		agp_bridge->aperture_size_idx = 0;
+-		return values[0].size;
+-	}
++	intel_enable_gtt();
+ 
+-	pci_read_config_word(agp_bridge->dev, I830_GMCH_CTRL, &gmch_ctrl);
++	agp_bridge->gart_bus_addr = intel_private.gma_bus_addr;
+ 
+-	if ((gmch_ctrl & I830_GMCH_MEM_MASK) == I830_GMCH_MEM_128M) {
+-		agp_bridge->current_size = (void *) values;
+-		agp_bridge->aperture_size_idx = 0;
+-		return values[0].size;
+-	} else {
+-		agp_bridge->current_size = (void *) (values + 1);
+-		agp_bridge->aperture_size_idx = 1;
+-		return values[1].size;
++	for (i = intel_private.base.gtt_stolen_entries;
++			i < intel_private.base.gtt_total_entries; i++) {
++		intel_private.driver->write_entry(intel_private.scratch_page_dma,
++						  i, 0);
+ 	}
++	readl(intel_private.gtt+i-1);	/* PCI Posting. */
++
++	global_cache_flush();
+ 
+ 	return 0;
+ }
+ 
+-static int intel_i830_configure(void)
++static bool i830_check_flags(unsigned int flags)
+ {
+-	struct aper_size_info_fixed *current_size;
+-	u32 temp;
+-	u16 gmch_ctrl;
+-	int i;
++	switch (flags) {
++	case 0:
++	case AGP_PHYS_MEMORY:
++	case AGP_USER_CACHED_MEMORY:
++	case AGP_USER_MEMORY:
++		return true;
++	}
+ 
+-	current_size = A_SIZE_FIX(agp_bridge->current_size);
++	return false;
++}
+ 
+-	pci_read_config_dword(intel_private.pcidev, I810_GMADDR, &temp);
+-	agp_bridge->gart_bus_addr = (temp & PCI_BASE_ADDRESS_MEM_MASK);
++static void intel_gtt_insert_sg_entries(struct scatterlist *sg_list,
++					unsigned int sg_len,
++					unsigned int pg_start,
++					unsigned int flags)
++{
++	struct scatterlist *sg;
++	unsigned int len, m;
++	int i, j;
+ 
+-	pci_read_config_word(agp_bridge->dev, I830_GMCH_CTRL, &gmch_ctrl);
+-	gmch_ctrl |= I830_GMCH_ENABLED;
+-	pci_write_config_word(agp_bridge->dev, I830_GMCH_CTRL, gmch_ctrl);
+-
+-	writel(agp_bridge->gatt_bus_addr|I810_PGETBL_ENABLED, intel_private.registers+I810_PGETBL_CTL);
+-	readl(intel_private.registers+I810_PGETBL_CTL);	/* PCI Posting. */
++	j = pg_start;
+ 
+-	if (agp_bridge->driver->needs_scratch_page) {
+-		for (i = intel_private.gtt_entries; i < current_size->num_entries; i++) {
+-			writel(agp_bridge->scratch_page, intel_private.registers+I810_PTE_BASE+(i*4));
++	/* sg may merge pages, but we have to separate
++	 * per-page addr for GTT */
++	for_each_sg(sg_list, sg, sg_len, i) {
++		len = sg_dma_len(sg) >> PAGE_SHIFT;
++		for (m = 0; m < len; m++) {
++			dma_addr_t addr = sg_dma_address(sg) + (m << PAGE_SHIFT);
++			intel_private.driver->write_entry(addr,
++							  j, flags);
++			j++;
+ 		}
+-		readl(intel_private.registers+I810_PTE_BASE+((i-1)*4));	/* PCI Posting. */
+ 	}
+-
+-	global_cache_flush();
+-
+-	intel_i830_setup_flush();
+-	return 0;
+-}
+-
+-static void intel_i830_cleanup(void)
+-{
+-	iounmap(intel_private.registers);
++	readl(intel_private.gtt+j-1);
+ }
+ 
+-static int intel_i830_insert_entries(struct agp_memory *mem, off_t pg_start,
+-				     int type)
++static int intel_fake_agp_insert_entries(struct agp_memory *mem,
++					 off_t pg_start, int type)
+ {
+-	int i, j, num_entries;
+-	void *temp;
++	int i, j;
+ 	int ret = -EINVAL;
+-	int mask_type;
+ 
+ 	if (mem->page_count == 0)
+ 		goto out;
+ 
+-	temp = agp_bridge->current_size;
+-	num_entries = A_SIZE_FIX(temp)->num_entries;
+-
+-	if (pg_start < intel_private.gtt_entries) {
++	if (pg_start < intel_private.base.gtt_stolen_entries) {
+ 		dev_printk(KERN_DEBUG, &intel_private.pcidev->dev,
+-			   "pg_start == 0x%.8lx, intel_private.gtt_entries == 0x%.8x\n",
+-			   pg_start, intel_private.gtt_entries);
++			   "pg_start == 0x%.8lx, gtt_stolen_entries == 0x%.8x\n",
++			   pg_start, intel_private.base.gtt_stolen_entries);
+ 
+ 		dev_info(&intel_private.pcidev->dev,
+ 			 "trying to insert into local/stolen memory\n");
+ 		goto out_err;
+ 	}
+ 
+-	if ((pg_start + mem->page_count) > num_entries)
++	if ((pg_start + mem->page_count) > intel_private.base.gtt_total_entries)
+ 		goto out_err;
+ 
+-	/* The i830 can't check the GTT for entries since its read only,
+-	 * depend on the caller to make the correct offset decisions.
+-	 */
+-
+ 	if (type != mem->type)
+ 		goto out_err;
+ 
+-	mask_type = agp_bridge->driver->agp_type_to_mask_type(agp_bridge, type);
+-
+-	if (mask_type != 0 && mask_type != AGP_PHYS_MEMORY &&
+-	    mask_type != INTEL_AGP_CACHED_MEMORY)
++	if (!intel_private.driver->check_flags(type))
+ 		goto out_err;
+ 
+ 	if (!mem->is_flushed)
+ 		global_cache_flush();
+ 
+-	for (i = 0, j = pg_start; i < mem->page_count; i++, j++) {
+-		writel(agp_bridge->driver->mask_memory(agp_bridge,
+-				page_to_phys(mem->pages[i]), mask_type),
+-		       intel_private.registers+I810_PTE_BASE+(j*4));
++	if (USE_PCI_DMA_API && INTEL_GTT_GEN > 2) {
++		ret = intel_agp_map_memory(mem);
++		if (ret != 0)
++			return ret;
++
++		intel_gtt_insert_sg_entries(mem->sg_list, mem->num_sg,
++					    pg_start, type);
++	} else {
++		for (i = 0, j = pg_start; i < mem->page_count; i++, j++) {
++			dma_addr_t addr = page_to_phys(mem->pages[i]);
++			intel_private.driver->write_entry(addr,
++							  j, type);
++		}
++		readl(intel_private.gtt+j-1);
+ 	}
+-	readl(intel_private.registers+I810_PTE_BASE+((j-1)*4));
+ 
+ out:
+ 	ret = 0;
+@@ -982,29 +1042,39 @@ out_err:
+ 	return ret;
+ }
+ 
+-static int intel_i830_remove_entries(struct agp_memory *mem, off_t pg_start,
+-				     int type)
++static int intel_fake_agp_remove_entries(struct agp_memory *mem,
++					 off_t pg_start, int type)
+ {
+ 	int i;
+ 
+ 	if (mem->page_count == 0)
+ 		return 0;
+ 
+-	if (pg_start < intel_private.gtt_entries) {
++	if (pg_start < intel_private.base.gtt_stolen_entries) {
+ 		dev_info(&intel_private.pcidev->dev,
+ 			 "trying to disable local/stolen memory\n");
+ 		return -EINVAL;
+ 	}
+ 
++	if (USE_PCI_DMA_API && INTEL_GTT_GEN > 2)
++		intel_agp_unmap_memory(mem);
++
+ 	for (i = pg_start; i < (mem->page_count + pg_start); i++) {
+-		writel(agp_bridge->scratch_page, intel_private.registers+I810_PTE_BASE+(i*4));
++		intel_private.driver->write_entry(intel_private.scratch_page_dma,
++						  i, 0);
+ 	}
+-	readl(intel_private.registers+I810_PTE_BASE+((i-1)*4));
++	readl(intel_private.gtt+i-1);
+ 
+ 	return 0;
+ }
+ 
+-static struct agp_memory *intel_i830_alloc_by_type(size_t pg_count, int type)
++static void intel_fake_agp_chipset_flush(struct agp_bridge_data *bridge)
++{
++	intel_private.driver->chipset_flush();
++}
++
++static struct agp_memory *intel_fake_agp_alloc_by_type(size_t pg_count,
++						       int type)
+ {
+ 	if (type == AGP_PHYS_MEMORY)
+ 		return alloc_agpphysmem_i8xx(pg_count, type);
+@@ -1015,9 +1085,9 @@ static struct agp_memory *intel_i830_alloc_by_type(size_t pg_count, int type)
+ static int intel_alloc_chipset_flush_resource(void)
+ {
+ 	int ret;
+-	ret = pci_bus_alloc_resource(agp_bridge->dev->bus, &intel_private.ifp_resource, PAGE_SIZE,
++	ret = pci_bus_alloc_resource(intel_private.bridge_dev->bus, &intel_private.ifp_resource, PAGE_SIZE,
+ 				     PAGE_SIZE, PCIBIOS_MIN_MEM, 0,
+-				     pcibios_align_resource, agp_bridge->dev);
++				     pcibios_align_resource, intel_private.bridge_dev);
+ 
+ 	return ret;
+ }
+@@ -1027,11 +1097,11 @@ static void intel_i915_setup_chipset_flush(void)
+ 	int ret;
+ 	u32 temp;
+ 
+-	pci_read_config_dword(agp_bridge->dev, I915_IFPADDR, &temp);
++	pci_read_config_dword(intel_private.bridge_dev, I915_IFPADDR, &temp);
+ 	if (!(temp & 0x1)) {
+ 		intel_alloc_chipset_flush_resource();
+ 		intel_private.resource_valid = 1;
+-		pci_write_config_dword(agp_bridge->dev, I915_IFPADDR, (intel_private.ifp_resource.start & 0xffffffff) | 0x1);
++		pci_write_config_dword(intel_private.bridge_dev, I915_IFPADDR, (intel_private.ifp_resource.start & 0xffffffff) | 0x1);
+ 	} else {
+ 		temp &= ~1;
+ 
+@@ -1050,17 +1120,17 @@ static void intel_i965_g33_setup_chipset_flush(void)
+ 	u32 temp_hi, temp_lo;
+ 	int ret;
+ 
+-	pci_read_config_dword(agp_bridge->dev, I965_IFPADDR + 4, &temp_hi);
+-	pci_read_config_dword(agp_bridge->dev, I965_IFPADDR, &temp_lo);
++	pci_read_config_dword(intel_private.bridge_dev, I965_IFPADDR + 4, &temp_hi);
++	pci_read_config_dword(intel_private.bridge_dev, I965_IFPADDR, &temp_lo);
+ 
+ 	if (!(temp_lo & 0x1)) {
+ 
+ 		intel_alloc_chipset_flush_resource();
+ 
+ 		intel_private.resource_valid = 1;
+-		pci_write_config_dword(agp_bridge->dev, I965_IFPADDR + 4,
++		pci_write_config_dword(intel_private.bridge_dev, I965_IFPADDR + 4,
+ 			upper_32_bits(intel_private.ifp_resource.start));
+-		pci_write_config_dword(agp_bridge->dev, I965_IFPADDR, (intel_private.ifp_resource.start & 0xffffffff) | 0x1);
++		pci_write_config_dword(intel_private.bridge_dev, I965_IFPADDR, (intel_private.ifp_resource.start & 0xffffffff) | 0x1);
+ 	} else {
+ 		u64 l64;
+ 
+@@ -1083,7 +1153,7 @@ static void intel_i9xx_setup_flush(void)
+ 	if (intel_private.ifp_resource.start)
+ 		return;
+ 
+-	if (IS_SNB)
++	if (INTEL_GTT_GEN == 6)
+ 		return;
+ 
+ 	/* setup a resource for this object */
+@@ -1091,7 +1161,7 @@ static void intel_i9xx_setup_flush(void)
+ 	intel_private.ifp_resource.flags = IORESOURCE_MEM;
+ 
+ 	/* Setup chipset flush for 915 */
+-	if (IS_I965 || IS_G33 || IS_G4X) {
++	if (IS_G33 || INTEL_GTT_GEN >= 4) {
+ 		intel_i965_g33_setup_chipset_flush();
+ 	} else {
+ 		intel_i915_setup_chipset_flush();
+@@ -1104,41 +1174,7 @@ static void intel_i9xx_setup_flush(void)
+ 			"can't ioremap flush page - no chipset flushing\n");
+ }
+ 
+-static int intel_i9xx_configure(void)
+-{
+-	struct aper_size_info_fixed *current_size;
+-	u32 temp;
+-	u16 gmch_ctrl;
+-	int i;
+-
+-	current_size = A_SIZE_FIX(agp_bridge->current_size);
+-
+-	pci_read_config_dword(intel_private.pcidev, I915_GMADDR, &temp);
+-
+-	agp_bridge->gart_bus_addr = (temp & PCI_BASE_ADDRESS_MEM_MASK);
+-
+-	pci_read_config_word(agp_bridge->dev, I830_GMCH_CTRL, &gmch_ctrl);
+-	gmch_ctrl |= I830_GMCH_ENABLED;
+-	pci_write_config_word(agp_bridge->dev, I830_GMCH_CTRL, gmch_ctrl);
+-
+-	writel(agp_bridge->gatt_bus_addr|I810_PGETBL_ENABLED, intel_private.registers+I810_PGETBL_CTL);
+-	readl(intel_private.registers+I810_PGETBL_CTL);	/* PCI Posting. */
+-
+-	if (agp_bridge->driver->needs_scratch_page) {
+-		for (i = intel_private.gtt_entries; i < intel_private.gtt_total_size; i++) {
+-			writel(agp_bridge->scratch_page, intel_private.gtt+i);
+-		}
+-		readl(intel_private.gtt+i-1);	/* PCI Posting. */
+-	}
+-
+-	global_cache_flush();
+-
+-	intel_i9xx_setup_flush();
+-
+-	return 0;
+-}
+-
+-static void intel_i915_cleanup(void)
++static void i9xx_cleanup(void)
+ {
+ 	if (intel_private.i9xx_flush_page)
+ 		iounmap(intel_private.i9xx_flush_page);
+@@ -1146,320 +1182,93 @@ static void intel_i915_cleanup(void)
+ 		release_resource(&intel_private.ifp_resource);
+ 	intel_private.ifp_resource.start = 0;
+ 	intel_private.resource_valid = 0;
+-	iounmap(intel_private.gtt);
+-	iounmap(intel_private.registers);
+ }
+ 
+-static void intel_i915_chipset_flush(struct agp_bridge_data *bridge)
++static void i9xx_chipset_flush(void)
+ {
+ 	if (intel_private.i9xx_flush_page)
+ 		writel(1, intel_private.i9xx_flush_page);
+ }
+ 
+-static int intel_i915_insert_entries(struct agp_memory *mem, off_t pg_start,
+-				     int type)
++static void i965_write_entry(dma_addr_t addr, unsigned int entry,
++			     unsigned int flags)
+ {
+-	int num_entries;
+-	void *temp;
+-	int ret = -EINVAL;
+-	int mask_type;
+-
+-	if (mem->page_count == 0)
+-		goto out;
+-
+-	temp = agp_bridge->current_size;
+-	num_entries = A_SIZE_FIX(temp)->num_entries;
+-
+-	if (pg_start < intel_private.gtt_entries) {
+-		dev_printk(KERN_DEBUG, &intel_private.pcidev->dev,
+-			   "pg_start == 0x%.8lx, intel_private.gtt_entries == 0x%.8x\n",
+-			   pg_start, intel_private.gtt_entries);
+-
+-		dev_info(&intel_private.pcidev->dev,
+-			 "trying to insert into local/stolen memory\n");
+-		goto out_err;
+-	}
+-
+-	if ((pg_start + mem->page_count) > num_entries)
+-		goto out_err;
+-
+-	/* The i915 can't check the GTT for entries since it's read only;
+-	 * depend on the caller to make the correct offset decisions.
+-	 */
+-
+-	if (type != mem->type)
+-		goto out_err;
+-
+-	mask_type = agp_bridge->driver->agp_type_to_mask_type(agp_bridge, type);
+-
+-	if (!IS_SNB && mask_type != 0 && mask_type != AGP_PHYS_MEMORY &&
+-	    mask_type != INTEL_AGP_CACHED_MEMORY)
+-		goto out_err;
+-
+-	if (!mem->is_flushed)
+-		global_cache_flush();
+-
+-	intel_agp_insert_sg_entries(mem, pg_start, mask_type);
+-
+- out:
+-	ret = 0;
+- out_err:
+-	mem->is_flushed = true;
+-	return ret;
++	/* Shift high bits down */
++	addr |= (addr >> 28) & 0xf0;
++	writel(addr | I810_PTE_VALID, intel_private.gtt + entry);
+ }
+ 
+-static int intel_i915_remove_entries(struct agp_memory *mem, off_t pg_start,
+-				     int type)
++static bool gen6_check_flags(unsigned int flags)
+ {
+-	int i;
+-
+-	if (mem->page_count == 0)
+-		return 0;
+-
+-	if (pg_start < intel_private.gtt_entries) {
+-		dev_info(&intel_private.pcidev->dev,
+-			 "trying to disable local/stolen memory\n");
+-		return -EINVAL;
+-	}
+-
+-	for (i = pg_start; i < (mem->page_count + pg_start); i++)
+-		writel(agp_bridge->scratch_page, intel_private.gtt+i);
+-
+-	readl(intel_private.gtt+i-1);
+-
+-	return 0;
++	return true;
+ }
+ 
+-/* Return the aperture size by just checking the resource length.  The effect
+- * described in the spec of the MSAC registers is just changing of the
+- * resource size.
+- */
+-static int intel_i9xx_fetch_size(void)
++static void gen6_write_entry(dma_addr_t addr, unsigned int entry,
++			     unsigned int flags)
+ {
+-	int num_sizes = ARRAY_SIZE(intel_i830_sizes);
+-	int aper_size; /* size in megabytes */
+-	int i;
+-
+-	aper_size = pci_resource_len(intel_private.pcidev, 2) / MB(1);
+-
+-	for (i = 0; i < num_sizes; i++) {
+-		if (aper_size == intel_i830_sizes[i].size) {
+-			agp_bridge->current_size = intel_i830_sizes + i;
+-			return aper_size;
+-		}
++	unsigned int type_mask = flags & ~AGP_USER_CACHED_MEMORY_GFDT;
++	unsigned int gfdt = flags & AGP_USER_CACHED_MEMORY_GFDT;
++	u32 pte_flags;
++
++	if (type_mask == AGP_USER_MEMORY)
++		pte_flags = GEN6_PTE_UNCACHED | I810_PTE_VALID;
++	else if (type_mask == AGP_USER_CACHED_MEMORY_LLC_MLC) {
++		pte_flags = GEN6_PTE_LLC_MLC | I810_PTE_VALID;
++		if (gfdt)
++			pte_flags |= GEN6_PTE_GFDT;
++	} else { /* set 'normal'/'cached' to LLC by default */
++		pte_flags = GEN6_PTE_LLC | I810_PTE_VALID;
++		if (gfdt)
++			pte_flags |= GEN6_PTE_GFDT;
+ 	}
+ 
+-	return 0;
++	/* gen6 has bit11-4 for physical addr bit39-32 */
++	addr |= (addr >> 28) & 0xff0;
++	writel(addr | pte_flags, intel_private.gtt + entry);
+ }
+ 
+-static int intel_i915_get_gtt_size(void)
++static void gen6_cleanup(void)
+ {
+-	int size;
+-
+-	if (IS_G33) {
+-		u16 gmch_ctrl;
+-
+-		/* G33's GTT size defined in gmch_ctrl */
+-		pci_read_config_word(agp_bridge->dev, I830_GMCH_CTRL, &gmch_ctrl);
+-		switch (gmch_ctrl & I830_GMCH_GMS_MASK) {
+-		case I830_GMCH_GMS_STOLEN_512:
+-			size = 512;
+-			break;
+-		case I830_GMCH_GMS_STOLEN_1024:
+-			size = 1024;
+-			break;
+-		case I830_GMCH_GMS_STOLEN_8192:
+-			size = 8*1024;
+-			break;
+-		default:
+-			dev_info(&agp_bridge->dev->dev,
+-				 "unknown page table size 0x%x, assuming 512KB\n",
+-				(gmch_ctrl & I830_GMCH_GMS_MASK));
+-			size = 512;
+-		}
+-	} else {
+-		/* On previous hardware, the GTT size was just what was
+-		 * required to map the aperture.
+-		 */
+-		size = agp_bridge->driver->fetch_size();
+-	}
+-
+-	return KB(size);
+ }
+ 
+-/* The intel i915 automatically initializes the agp aperture during POST.
+- * Use the memory already set aside for in the GTT.
+- */
+-static int intel_i915_create_gatt_table(struct agp_bridge_data *bridge)
++static int i9xx_setup(void)
+ {
+-	int page_order;
+-	struct aper_size_info_fixed *size;
+-	int num_entries;
+-	u32 temp, temp2;
+-	int gtt_map_size;
+-
+-	size = agp_bridge->current_size;
+-	page_order = size->page_order;
+-	num_entries = size->num_entries;
+-	agp_bridge->gatt_table_real = NULL;
+-
+-	pci_read_config_dword(intel_private.pcidev, I915_MMADDR, &temp);
+-	pci_read_config_dword(intel_private.pcidev, I915_PTEADDR, &temp2);
+-
+-	gtt_map_size = intel_i915_get_gtt_size();
++	u32 reg_addr;
+ 
+-	intel_private.gtt = ioremap(temp2, gtt_map_size);
+-	if (!intel_private.gtt)
+-		return -ENOMEM;
+-
+-	intel_private.gtt_total_size = gtt_map_size / 4;
+-
+-	temp &= 0xfff80000;
+-
+-	intel_private.registers = ioremap(temp, 128 * 4096);
+-	if (!intel_private.registers) {
+-		iounmap(intel_private.gtt);
+-		return -ENOMEM;
+-	}
++	pci_read_config_dword(intel_private.pcidev, I915_MMADDR, &reg_addr);
+ 
+-	temp = readl(intel_private.registers+I810_PGETBL_CTL) & 0xfffff000;
+-	global_cache_flush();	/* FIXME: ? */
++	reg_addr &= 0xfff80000;
+ 
+-	/* we have to call this as early as possible after the MMIO base address is known */
+-	intel_i830_init_gtt_entries();
+-	if (intel_private.gtt_entries == 0) {
+-		iounmap(intel_private.gtt);
+-		iounmap(intel_private.registers);
++	intel_private.registers = ioremap(reg_addr, 128 * 4096);
++	if (!intel_private.registers)
+ 		return -ENOMEM;
+-	}
+ 
+-	agp_bridge->gatt_table = NULL;
+-
+-	agp_bridge->gatt_bus_addr = temp;
++	if (INTEL_GTT_GEN == 3) {
++		u32 gtt_addr;
+ 
+-	return 0;
+-}
+-
+-/*
+- * The i965 supports 36-bit physical addresses, but to keep
+- * the format of the GTT the same, the bits that don't fit
+- * in a 32-bit word are shifted down to bits 4..7.
+- *
+- * Gcc is smart enough to notice that "(addr >> 28) & 0xf0"
+- * is always zero on 32-bit architectures, so no need to make
+- * this conditional.
+- */
+-static unsigned long intel_i965_mask_memory(struct agp_bridge_data *bridge,
+-					    dma_addr_t addr, int type)
+-{
+-	/* Shift high bits down */
+-	addr |= (addr >> 28) & 0xf0;
+-
+-	/* Type checking must be done elsewhere */
+-	return addr | bridge->driver->masks[type].mask;
+-}
+-
+-static unsigned long intel_gen6_mask_memory(struct agp_bridge_data *bridge,
+-					    dma_addr_t addr, int type)
+-{
+-	/* gen6 has bit11-4 for physical addr bit39-32 */
+-	addr |= (addr >> 28) & 0xff0;
+-
+-	/* Type checking must be done elsewhere */
+-	return addr | bridge->driver->masks[type].mask;
+-}
+-
+-static void intel_i965_get_gtt_range(int *gtt_offset, int *gtt_size)
+-{
+-	u16 snb_gmch_ctl;
+-
+-	switch (agp_bridge->dev->device) {
+-	case PCI_DEVICE_ID_INTEL_GM45_HB:
+-	case PCI_DEVICE_ID_INTEL_EAGLELAKE_HB:
+-	case PCI_DEVICE_ID_INTEL_Q45_HB:
+-	case PCI_DEVICE_ID_INTEL_G45_HB:
+-	case PCI_DEVICE_ID_INTEL_G41_HB:
+-	case PCI_DEVICE_ID_INTEL_B43_HB:
+-	case PCI_DEVICE_ID_INTEL_IRONLAKE_D_HB:
+-	case PCI_DEVICE_ID_INTEL_IRONLAKE_M_HB:
+-	case PCI_DEVICE_ID_INTEL_IRONLAKE_MA_HB:
+-	case PCI_DEVICE_ID_INTEL_IRONLAKE_MC2_HB:
+-		*gtt_offset = *gtt_size = MB(2);
+-		break;
+-	case PCI_DEVICE_ID_INTEL_SANDYBRIDGE_HB:
+-	case PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_HB:
+-	case PCI_DEVICE_ID_INTEL_SANDYBRIDGE_S_HB:
+-		*gtt_offset = MB(2);
++		pci_read_config_dword(intel_private.pcidev,
++				      I915_PTEADDR, &gtt_addr);
++		intel_private.gtt_bus_addr = gtt_addr;
++	} else {
++		u32 gtt_offset;
+ 
+-		pci_read_config_word(intel_private.pcidev, SNB_GMCH_CTRL, &snb_gmch_ctl);
+-		switch (snb_gmch_ctl & SNB_GTT_SIZE_MASK) {
+-		default:
+-		case SNB_GTT_SIZE_0M:
+-			printk(KERN_ERR "Bad GTT size mask: 0x%04x.\n", snb_gmch_ctl);
+-			*gtt_size = MB(0);
+-			break;
+-		case SNB_GTT_SIZE_1M:
+-			*gtt_size = MB(1);
++		switch (INTEL_GTT_GEN) {
++		case 5:
++		case 6:
++			gtt_offset = MB(2);
+ 			break;
+-		case SNB_GTT_SIZE_2M:
+-			*gtt_size = MB(2);
++		case 4:
++		default:
++			gtt_offset = KB(512);
+ 			break;
+ 		}
+-		break;
+-	default:
+-		*gtt_offset = *gtt_size = KB(512);
+-	}
+-}
+-
+-/* The intel i965 automatically initializes the agp aperture during POST.
+- * Use the memory already set aside for in the GTT.
+- */
+-static int intel_i965_create_gatt_table(struct agp_bridge_data *bridge)
+-{
+-	int page_order;
+-	struct aper_size_info_fixed *size;
+-	int num_entries;
+-	u32 temp;
+-	int gtt_offset, gtt_size;
+-
+-	size = agp_bridge->current_size;
+-	page_order = size->page_order;
+-	num_entries = size->num_entries;
+-	agp_bridge->gatt_table_real = NULL;
+-
+-	pci_read_config_dword(intel_private.pcidev, I915_MMADDR, &temp);
+-
+-	temp &= 0xfff00000;
+-
+-	intel_i965_get_gtt_range(&gtt_offset, &gtt_size);
+-
+-	intel_private.gtt = ioremap((temp + gtt_offset) , gtt_size);
+-
+-	if (!intel_private.gtt)
+-		return -ENOMEM;
+-
+-	intel_private.gtt_total_size = gtt_size / 4;
+-
+-	intel_private.registers = ioremap(temp, 128 * 4096);
+-	if (!intel_private.registers) {
+-		iounmap(intel_private.gtt);
+-		return -ENOMEM;
+-	}
+-
+-	temp = readl(intel_private.registers+I810_PGETBL_CTL) & 0xfffff000;
+-	global_cache_flush();   /* FIXME: ? */
+-
+-	/* we have to call this as early as possible after the MMIO base address is known */
+-	intel_i830_init_gtt_entries();
+-	if (intel_private.gtt_entries == 0) {
+-		iounmap(intel_private.gtt);
+-		iounmap(intel_private.registers);
+-		return -ENOMEM;
++		intel_private.gtt_bus_addr = reg_addr + gtt_offset;
+ 	}
+ 
+-	agp_bridge->gatt_table = NULL;
++	intel_private.pte_bus_addr =
++		readl(intel_private.registers+I810_PGETBL_CTL) & 0xfffff000;
+ 
+-	agp_bridge->gatt_bus_addr = temp;
++	intel_i9xx_setup_flush();
+ 
+ 	return 0;
+ }
+@@ -1475,7 +1284,7 @@ static const struct agp_bridge_driver intel_810_driver = {
+ 	.cleanup		= intel_i810_cleanup,
+ 	.mask_memory		= intel_i810_mask_memory,
+ 	.masks			= intel_i810_masks,
+-	.agp_enable		= intel_i810_agp_enable,
++	.agp_enable		= intel_fake_agp_enable,
+ 	.cache_flush		= global_cache_flush,
+ 	.create_gatt_table	= agp_generic_create_gatt_table,
+ 	.free_gatt_table	= agp_generic_free_gatt_table,
+@@ -1490,161 +1299,282 @@ static const struct agp_bridge_driver intel_810_driver = {
+ 	.agp_type_to_mask_type  = agp_generic_type_to_mask_type,
+ };
+ 
+-static const struct agp_bridge_driver intel_830_driver = {
++static const struct agp_bridge_driver intel_fake_agp_driver = {
+ 	.owner			= THIS_MODULE,
+-	.aperture_sizes		= intel_i830_sizes,
+ 	.size_type		= FIXED_APER_SIZE,
+-	.num_aperture_sizes	= 4,
+-	.needs_scratch_page	= true,
+-	.configure		= intel_i830_configure,
+-	.fetch_size		= intel_i830_fetch_size,
+-	.cleanup		= intel_i830_cleanup,
+-	.mask_memory		= intel_i810_mask_memory,
+-	.masks			= intel_i810_masks,
+-	.agp_enable		= intel_i810_agp_enable,
++	.aperture_sizes		= intel_fake_agp_sizes,
++	.num_aperture_sizes	= ARRAY_SIZE(intel_fake_agp_sizes),
++	.configure		= intel_fake_agp_configure,
++	.fetch_size		= intel_fake_agp_fetch_size,
++	.cleanup		= intel_gtt_cleanup,
++	.agp_enable		= intel_fake_agp_enable,
+ 	.cache_flush		= global_cache_flush,
+-	.create_gatt_table	= intel_i830_create_gatt_table,
+-	.free_gatt_table	= intel_i830_free_gatt_table,
+-	.insert_memory		= intel_i830_insert_entries,
+-	.remove_memory		= intel_i830_remove_entries,
+-	.alloc_by_type		= intel_i830_alloc_by_type,
++	.create_gatt_table	= intel_fake_agp_create_gatt_table,
++	.free_gatt_table	= intel_fake_agp_free_gatt_table,
++	.insert_memory		= intel_fake_agp_insert_entries,
++	.remove_memory		= intel_fake_agp_remove_entries,
++	.alloc_by_type		= intel_fake_agp_alloc_by_type,
+ 	.free_by_type		= intel_i810_free_by_type,
+ 	.agp_alloc_page		= agp_generic_alloc_page,
+ 	.agp_alloc_pages        = agp_generic_alloc_pages,
+ 	.agp_destroy_page	= agp_generic_destroy_page,
+ 	.agp_destroy_pages      = agp_generic_destroy_pages,
+-	.agp_type_to_mask_type  = intel_i830_type_to_mask_type,
+-	.chipset_flush		= intel_i830_chipset_flush,
++	.chipset_flush		= intel_fake_agp_chipset_flush,
+ };
+ 
+-static const struct agp_bridge_driver intel_915_driver = {
+-	.owner			= THIS_MODULE,
+-	.aperture_sizes		= intel_i830_sizes,
+-	.size_type		= FIXED_APER_SIZE,
+-	.num_aperture_sizes	= 4,
+-	.needs_scratch_page	= true,
+-	.configure		= intel_i9xx_configure,
+-	.fetch_size		= intel_i9xx_fetch_size,
+-	.cleanup		= intel_i915_cleanup,
+-	.mask_memory		= intel_i810_mask_memory,
+-	.masks			= intel_i810_masks,
+-	.agp_enable		= intel_i810_agp_enable,
+-	.cache_flush		= global_cache_flush,
+-	.create_gatt_table	= intel_i915_create_gatt_table,
+-	.free_gatt_table	= intel_i830_free_gatt_table,
+-	.insert_memory		= intel_i915_insert_entries,
+-	.remove_memory		= intel_i915_remove_entries,
+-	.alloc_by_type		= intel_i830_alloc_by_type,
+-	.free_by_type		= intel_i810_free_by_type,
+-	.agp_alloc_page		= agp_generic_alloc_page,
+-	.agp_alloc_pages        = agp_generic_alloc_pages,
+-	.agp_destroy_page	= agp_generic_destroy_page,
+-	.agp_destroy_pages      = agp_generic_destroy_pages,
+-	.agp_type_to_mask_type  = intel_i830_type_to_mask_type,
+-	.chipset_flush		= intel_i915_chipset_flush,
+-#ifdef USE_PCI_DMA_API
+-	.agp_map_page		= intel_agp_map_page,
+-	.agp_unmap_page		= intel_agp_unmap_page,
+-	.agp_map_memory		= intel_agp_map_memory,
+-	.agp_unmap_memory	= intel_agp_unmap_memory,
+-#endif
++static const struct intel_gtt_driver i81x_gtt_driver = {
++	.gen = 1,
++	.dma_mask_size = 32,
+ };
+-
+-static const struct agp_bridge_driver intel_i965_driver = {
+-	.owner			= THIS_MODULE,
+-	.aperture_sizes		= intel_i830_sizes,
+-	.size_type		= FIXED_APER_SIZE,
+-	.num_aperture_sizes	= 4,
+-	.needs_scratch_page	= true,
+-	.configure		= intel_i9xx_configure,
+-	.fetch_size		= intel_i9xx_fetch_size,
+-	.cleanup		= intel_i915_cleanup,
+-	.mask_memory		= intel_i965_mask_memory,
+-	.masks			= intel_i810_masks,
+-	.agp_enable		= intel_i810_agp_enable,
+-	.cache_flush		= global_cache_flush,
+-	.create_gatt_table	= intel_i965_create_gatt_table,
+-	.free_gatt_table	= intel_i830_free_gatt_table,
+-	.insert_memory		= intel_i915_insert_entries,
+-	.remove_memory		= intel_i915_remove_entries,
+-	.alloc_by_type		= intel_i830_alloc_by_type,
+-	.free_by_type		= intel_i810_free_by_type,
+-	.agp_alloc_page		= agp_generic_alloc_page,
+-	.agp_alloc_pages        = agp_generic_alloc_pages,
+-	.agp_destroy_page	= agp_generic_destroy_page,
+-	.agp_destroy_pages      = agp_generic_destroy_pages,
+-	.agp_type_to_mask_type	= intel_i830_type_to_mask_type,
+-	.chipset_flush		= intel_i915_chipset_flush,
+-#ifdef USE_PCI_DMA_API
+-	.agp_map_page		= intel_agp_map_page,
+-	.agp_unmap_page		= intel_agp_unmap_page,
+-	.agp_map_memory		= intel_agp_map_memory,
+-	.agp_unmap_memory	= intel_agp_unmap_memory,
+-#endif
++static const struct intel_gtt_driver i8xx_gtt_driver = {
++	.gen = 2,
++	.setup = i830_setup,
++	.cleanup = i830_cleanup,
++	.write_entry = i830_write_entry,
++	.dma_mask_size = 32,
++	.check_flags = i830_check_flags,
++	.chipset_flush = i830_chipset_flush,
+ };
+-
+-static const struct agp_bridge_driver intel_gen6_driver = {
+-	.owner			= THIS_MODULE,
+-	.aperture_sizes		= intel_i830_sizes,
+-	.size_type		= FIXED_APER_SIZE,
+-	.num_aperture_sizes	= 4,
+-	.needs_scratch_page	= true,
+-	.configure		= intel_i9xx_configure,
+-	.fetch_size		= intel_i9xx_fetch_size,
+-	.cleanup		= intel_i915_cleanup,
+-	.mask_memory		= intel_gen6_mask_memory,
+-	.masks			= intel_gen6_masks,
+-	.agp_enable		= intel_i810_agp_enable,
+-	.cache_flush		= global_cache_flush,
+-	.create_gatt_table	= intel_i965_create_gatt_table,
+-	.free_gatt_table	= intel_i830_free_gatt_table,
+-	.insert_memory		= intel_i915_insert_entries,
+-	.remove_memory		= intel_i915_remove_entries,
+-	.alloc_by_type		= intel_i830_alloc_by_type,
+-	.free_by_type		= intel_i810_free_by_type,
+-	.agp_alloc_page		= agp_generic_alloc_page,
+-	.agp_alloc_pages        = agp_generic_alloc_pages,
+-	.agp_destroy_page	= agp_generic_destroy_page,
+-	.agp_destroy_pages      = agp_generic_destroy_pages,
+-	.agp_type_to_mask_type	= intel_gen6_type_to_mask_type,
+-	.chipset_flush		= intel_i915_chipset_flush,
+-#ifdef USE_PCI_DMA_API
+-	.agp_map_page		= intel_agp_map_page,
+-	.agp_unmap_page		= intel_agp_unmap_page,
+-	.agp_map_memory		= intel_agp_map_memory,
+-	.agp_unmap_memory	= intel_agp_unmap_memory,
+-#endif
++static const struct intel_gtt_driver i915_gtt_driver = {
++	.gen = 3,
++	.setup = i9xx_setup,
++	.cleanup = i9xx_cleanup,
++	/* i945 is the last gpu to need phys mem (for overlay and cursors). */
++	.write_entry = i830_write_entry,
++	.dma_mask_size = 32,
++	.check_flags = i830_check_flags,
++	.chipset_flush = i9xx_chipset_flush,
++};
++static const struct intel_gtt_driver g33_gtt_driver = {
++	.gen = 3,
++	.is_g33 = 1,
++	.setup = i9xx_setup,
++	.cleanup = i9xx_cleanup,
++	.write_entry = i965_write_entry,
++	.dma_mask_size = 36,
++	.check_flags = i830_check_flags,
++	.chipset_flush = i9xx_chipset_flush,
++};
++static const struct intel_gtt_driver pineview_gtt_driver = {
++	.gen = 3,
++	.is_pineview = 1, .is_g33 = 1,
++	.setup = i9xx_setup,
++	.cleanup = i9xx_cleanup,
++	.write_entry = i965_write_entry,
++	.dma_mask_size = 36,
++	.check_flags = i830_check_flags,
++	.chipset_flush = i9xx_chipset_flush,
++};
++static const struct intel_gtt_driver i965_gtt_driver = {
++	.gen = 4,
++	.setup = i9xx_setup,
++	.cleanup = i9xx_cleanup,
++	.write_entry = i965_write_entry,
++	.dma_mask_size = 36,
++	.check_flags = i830_check_flags,
++	.chipset_flush = i9xx_chipset_flush,
++};
++static const struct intel_gtt_driver g4x_gtt_driver = {
++	.gen = 5,
++	.setup = i9xx_setup,
++	.cleanup = i9xx_cleanup,
++	.write_entry = i965_write_entry,
++	.dma_mask_size = 36,
++	.check_flags = i830_check_flags,
++	.chipset_flush = i9xx_chipset_flush,
++};
++static const struct intel_gtt_driver ironlake_gtt_driver = {
++	.gen = 5,
++	.is_ironlake = 1,
++	.setup = i9xx_setup,
++	.cleanup = i9xx_cleanup,
++	.write_entry = i965_write_entry,
++	.dma_mask_size = 36,
++	.check_flags = i830_check_flags,
++	.chipset_flush = i9xx_chipset_flush,
++};
++static const struct intel_gtt_driver sandybridge_gtt_driver = {
++	.gen = 6,
++	.setup = i9xx_setup,
++	.cleanup = gen6_cleanup,
++	.write_entry = gen6_write_entry,
++	.dma_mask_size = 40,
++	.check_flags = gen6_check_flags,
++	.chipset_flush = i9xx_chipset_flush,
+ };
+ 
+-static const struct agp_bridge_driver intel_g33_driver = {
+-	.owner			= THIS_MODULE,
+-	.aperture_sizes		= intel_i830_sizes,
+-	.size_type		= FIXED_APER_SIZE,
+-	.num_aperture_sizes	= 4,
+-	.needs_scratch_page	= true,
+-	.configure		= intel_i9xx_configure,
+-	.fetch_size		= intel_i9xx_fetch_size,
+-	.cleanup		= intel_i915_cleanup,
+-	.mask_memory		= intel_i965_mask_memory,
+-	.masks			= intel_i810_masks,
+-	.agp_enable		= intel_i810_agp_enable,
+-	.cache_flush		= global_cache_flush,
+-	.create_gatt_table	= intel_i915_create_gatt_table,
+-	.free_gatt_table	= intel_i830_free_gatt_table,
+-	.insert_memory		= intel_i915_insert_entries,
+-	.remove_memory		= intel_i915_remove_entries,
+-	.alloc_by_type		= intel_i830_alloc_by_type,
+-	.free_by_type		= intel_i810_free_by_type,
+-	.agp_alloc_page		= agp_generic_alloc_page,
+-	.agp_alloc_pages        = agp_generic_alloc_pages,
+-	.agp_destroy_page	= agp_generic_destroy_page,
+-	.agp_destroy_pages      = agp_generic_destroy_pages,
+-	.agp_type_to_mask_type	= intel_i830_type_to_mask_type,
+-	.chipset_flush		= intel_i915_chipset_flush,
+-#ifdef USE_PCI_DMA_API
+-	.agp_map_page		= intel_agp_map_page,
+-	.agp_unmap_page		= intel_agp_unmap_page,
+-	.agp_map_memory		= intel_agp_map_memory,
+-	.agp_unmap_memory	= intel_agp_unmap_memory,
+-#endif
++/* Table to describe Intel GMCH and AGP/PCIE GART drivers.  At least one of
++ * gmch_driver and gtt_driver must be non-null, and find_gmch will determine
++ * which one should be used if a gmch_chip_id is present.
++ */
++static const struct intel_gtt_driver_description {
++	unsigned int gmch_chip_id;
++	char *name;
++	const struct agp_bridge_driver *gmch_driver;
++	const struct intel_gtt_driver *gtt_driver;
++} intel_gtt_chipsets[] = {
++	{ PCI_DEVICE_ID_INTEL_82810_IG1, "i810", &intel_810_driver,
++		&i81x_gtt_driver},
++	{ PCI_DEVICE_ID_INTEL_82810_IG3, "i810", &intel_810_driver,
++		&i81x_gtt_driver},
++	{ PCI_DEVICE_ID_INTEL_82810E_IG, "i810", &intel_810_driver,
++		&i81x_gtt_driver},
++	{ PCI_DEVICE_ID_INTEL_82815_CGC, "i815", &intel_810_driver,
++		&i81x_gtt_driver},
++	{ PCI_DEVICE_ID_INTEL_82830_CGC, "830M",
++		&intel_fake_agp_driver, &i8xx_gtt_driver},
++	{ PCI_DEVICE_ID_INTEL_82845G_IG, "830M",
++		&intel_fake_agp_driver, &i8xx_gtt_driver},
++	{ PCI_DEVICE_ID_INTEL_82854_IG, "854",
++		&intel_fake_agp_driver, &i8xx_gtt_driver},
++	{ PCI_DEVICE_ID_INTEL_82855GM_IG, "855GM",
++		&intel_fake_agp_driver, &i8xx_gtt_driver},
++	{ PCI_DEVICE_ID_INTEL_82865_IG, "865",
++		&intel_fake_agp_driver, &i8xx_gtt_driver},
++	{ PCI_DEVICE_ID_INTEL_E7221_IG, "E7221 (i915)",
++		&intel_fake_agp_driver, &i915_gtt_driver },
++	{ PCI_DEVICE_ID_INTEL_82915G_IG, "915G",
++		&intel_fake_agp_driver, &i915_gtt_driver },
++	{ PCI_DEVICE_ID_INTEL_82915GM_IG, "915GM",
++		&intel_fake_agp_driver, &i915_gtt_driver },
++	{ PCI_DEVICE_ID_INTEL_82945G_IG, "945G",
++		&intel_fake_agp_driver, &i915_gtt_driver },
++	{ PCI_DEVICE_ID_INTEL_82945GM_IG, "945GM",
++		&intel_fake_agp_driver, &i915_gtt_driver },
++	{ PCI_DEVICE_ID_INTEL_82945GME_IG, "945GME",
++		&intel_fake_agp_driver, &i915_gtt_driver },
++	{ PCI_DEVICE_ID_INTEL_82946GZ_IG, "946GZ",
++		&intel_fake_agp_driver, &i965_gtt_driver },
++	{ PCI_DEVICE_ID_INTEL_82G35_IG, "G35",
++		&intel_fake_agp_driver, &i965_gtt_driver },
++	{ PCI_DEVICE_ID_INTEL_82965Q_IG, "965Q",
++		&intel_fake_agp_driver, &i965_gtt_driver },
++	{ PCI_DEVICE_ID_INTEL_82965G_IG, "965G",
++		&intel_fake_agp_driver, &i965_gtt_driver },
++	{ PCI_DEVICE_ID_INTEL_82965GM_IG, "965GM",
++		&intel_fake_agp_driver, &i965_gtt_driver },
++	{ PCI_DEVICE_ID_INTEL_82965GME_IG, "965GME/GLE",
++		&intel_fake_agp_driver, &i965_gtt_driver },
++	{ PCI_DEVICE_ID_INTEL_G33_IG, "G33",
++		&intel_fake_agp_driver, &g33_gtt_driver },
++	{ PCI_DEVICE_ID_INTEL_Q35_IG, "Q35",
++		&intel_fake_agp_driver, &g33_gtt_driver },
++	{ PCI_DEVICE_ID_INTEL_Q33_IG, "Q33",
++		&intel_fake_agp_driver, &g33_gtt_driver },
++	{ PCI_DEVICE_ID_INTEL_PINEVIEW_M_IG, "GMA3150",
++		&intel_fake_agp_driver, &pineview_gtt_driver },
++	{ PCI_DEVICE_ID_INTEL_PINEVIEW_IG, "GMA3150",
++		&intel_fake_agp_driver, &pineview_gtt_driver },
++	{ PCI_DEVICE_ID_INTEL_GM45_IG, "GM45",
++		&intel_fake_agp_driver, &g4x_gtt_driver },
++	{ PCI_DEVICE_ID_INTEL_EAGLELAKE_IG, "Eaglelake",
++		&intel_fake_agp_driver, &g4x_gtt_driver },
++	{ PCI_DEVICE_ID_INTEL_Q45_IG, "Q45/Q43",
++		&intel_fake_agp_driver, &g4x_gtt_driver },
++	{ PCI_DEVICE_ID_INTEL_G45_IG, "G45/G43",
++		&intel_fake_agp_driver, &g4x_gtt_driver },
++	{ PCI_DEVICE_ID_INTEL_B43_IG, "B43",
++		&intel_fake_agp_driver, &g4x_gtt_driver },
++	{ PCI_DEVICE_ID_INTEL_B43_1_IG, "B43",
++		&intel_fake_agp_driver, &g4x_gtt_driver },
++	{ PCI_DEVICE_ID_INTEL_G41_IG, "G41",
++		&intel_fake_agp_driver, &g4x_gtt_driver },
++	{ PCI_DEVICE_ID_INTEL_IRONLAKE_D_IG,
++	    "HD Graphics", &intel_fake_agp_driver, &ironlake_gtt_driver },
++	{ PCI_DEVICE_ID_INTEL_IRONLAKE_M_IG,
++	    "HD Graphics", &intel_fake_agp_driver, &ironlake_gtt_driver },
++	{ PCI_DEVICE_ID_INTEL_SANDYBRIDGE_GT1_IG,
++	    "Sandybridge", &intel_fake_agp_driver, &sandybridge_gtt_driver },
++	{ PCI_DEVICE_ID_INTEL_SANDYBRIDGE_GT2_IG,
++	    "Sandybridge", &intel_fake_agp_driver, &sandybridge_gtt_driver },
++	{ PCI_DEVICE_ID_INTEL_SANDYBRIDGE_GT2_PLUS_IG,
++	    "Sandybridge", &intel_fake_agp_driver, &sandybridge_gtt_driver },
++	{ PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_GT1_IG,
++	    "Sandybridge", &intel_fake_agp_driver, &sandybridge_gtt_driver },
++	{ PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_GT2_IG,
++	    "Sandybridge", &intel_fake_agp_driver, &sandybridge_gtt_driver },
++	{ PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_GT2_PLUS_IG,
++	    "Sandybridge", &intel_fake_agp_driver, &sandybridge_gtt_driver },
++	{ PCI_DEVICE_ID_INTEL_SANDYBRIDGE_S_IG,
++	    "Sandybridge", &intel_fake_agp_driver, &sandybridge_gtt_driver },
++	{ 0, NULL, NULL }
+ };
++
++static int find_gmch(u16 device)
++{
++	struct pci_dev *gmch_device;
++
++	gmch_device = pci_get_device(PCI_VENDOR_ID_INTEL, device, NULL);
++	if (gmch_device && PCI_FUNC(gmch_device->devfn) != 0) {
++		gmch_device = pci_get_device(PCI_VENDOR_ID_INTEL,
++					     device, gmch_device);
++	}
++
++	if (!gmch_device)
++		return 0;
++
++	intel_private.pcidev = gmch_device;
++	return 1;
++}
++
++int intel_gmch_probe(struct pci_dev *pdev,
++				      struct agp_bridge_data *bridge)
++{
++	int i, mask;
++	bridge->driver = NULL;
++
++	for (i = 0; intel_gtt_chipsets[i].name != NULL; i++) {
++		if (find_gmch(intel_gtt_chipsets[i].gmch_chip_id)) {
++			bridge->driver =
++				intel_gtt_chipsets[i].gmch_driver;
++			intel_private.driver =
++				intel_gtt_chipsets[i].gtt_driver;
++			break;
++		}
++	}
++
++	if (!bridge->driver)
++		return 0;
++
++	bridge->dev_private_data = &intel_private;
++	bridge->dev = pdev;
++
++	intel_private.bridge_dev = pci_dev_get(pdev);
++
++	dev_info(&pdev->dev, "Intel %s Chipset\n", intel_gtt_chipsets[i].name);
++
++	mask = intel_private.driver->dma_mask_size;
++	if (pci_set_dma_mask(intel_private.pcidev, DMA_BIT_MASK(mask)))
++		dev_err(&intel_private.pcidev->dev,
++			"set gfx device dma mask %d-bit failed!\n", mask);
++	else
++		pci_set_consistent_dma_mask(intel_private.pcidev,
++					    DMA_BIT_MASK(mask));
++
++	if (bridge->driver == &intel_810_driver)
++		return 1;
++
++	if (intel_gtt_init() != 0)
++		return 0;
++
++	return 1;
++}
++EXPORT_SYMBOL(intel_gmch_probe);
++
++struct intel_gtt *intel_gtt_get(void)
++{
++	return &intel_private.base;
++}
++EXPORT_SYMBOL(intel_gtt_get);
++
++void intel_gmch_remove(struct pci_dev *pdev)
++{
++	if (intel_private.pcidev)
++		pci_dev_put(intel_private.pcidev);
++	if (intel_private.bridge_dev)
++		pci_dev_put(intel_private.bridge_dev);
++}
++EXPORT_SYMBOL(intel_gmch_remove);
++
++MODULE_AUTHOR("Dave Jones <davej@redhat.com>");
++MODULE_LICENSE("GPL and additional rights");
+diff --git a/drivers/gpu/drm/drm_edid.c b/drivers/gpu/drm/drm_edid.c
+index 96e9631..7f356af 100644
+--- a/drivers/gpu/drm/drm_edid.c
++++ b/drivers/gpu/drm/drm_edid.c
+@@ -1268,34 +1268,51 @@ add_detailed_modes(struct drm_connector *connector, struct edid *edid,
+ }
+ 
+ #define HDMI_IDENTIFIER 0x000C03
++#define AUDIO_BLOCK	0x01
+ #define VENDOR_BLOCK    0x03
++#define EDID_BASIC_AUDIO	(1 << 6)
++
+ /**
+- * drm_detect_hdmi_monitor - detect whether monitor is hdmi.
+- * @edid: monitor EDID information
+- *
+- * Parse the CEA extension according to CEA-861-B.
+- * Return true if HDMI, false if not or unknown.
++ * Search EDID for CEA extension block.
+  */
+-bool drm_detect_hdmi_monitor(struct edid *edid)
++static u8 *drm_find_cea_extension(struct edid *edid)
+ {
+-	char *edid_ext = NULL;
+-	int i, hdmi_id;
+-	int start_offset, end_offset;
+-	bool is_hdmi = false;
++	u8 *edid_ext = NULL;
++	int i;
+ 
+ 	/* No EDID or EDID extensions */
+ 	if (edid == NULL || edid->extensions == 0)
+-		goto end;
++		return NULL;
+ 
+ 	/* Find CEA extension */
+ 	for (i = 0; i < edid->extensions; i++) {
+-		edid_ext = (char *)edid + EDID_LENGTH * (i + 1);
+-		/* This block is CEA extension */
+-		if (edid_ext[0] == 0x02)
++		edid_ext = (u8 *)edid + EDID_LENGTH * (i + 1);
++		if (edid_ext[0] == CEA_EXT)
+ 			break;
+ 	}
+ 
+ 	if (i == edid->extensions)
++		return NULL;
++
++	return edid_ext;
++}
++
++/**
++ * drm_detect_hdmi_monitor - detect whether monitor is hdmi.
++ * @edid: monitor EDID information
++ *
++ * Parse the CEA extension according to CEA-861-B.
++ * Return true if HDMI, false if not or unknown.
++ */
++bool drm_detect_hdmi_monitor(struct edid *edid)
++{
++	u8 *edid_ext;
++	int i, hdmi_id;
++	int start_offset, end_offset;
++	bool is_hdmi = false;
++
++	edid_ext = drm_find_cea_extension(edid);
++	if (!edid_ext)
+ 		goto end;
+ 
+ 	/* Data block offset in CEA extension block */
+@@ -1326,6 +1343,53 @@ end:
+ EXPORT_SYMBOL(drm_detect_hdmi_monitor);
+ 
+ /**
++ * drm_detect_monitor_audio - check monitor audio capability
++ *
++ * The monitor should have a CEA extension block. If the monitor has
++ * 'basic audio' but no CEA audio blocks, it supports 'basic audio'
++ * only. If there is any audio extension block with a supported audio
++ * format, assume at least 'basic audio' support, even if 'basic
++ * audio' is not defined in the EDID.
++ *
++ */
++bool drm_detect_monitor_audio(struct edid *edid)
++{
++	u8 *edid_ext;
++	int i, j;
++	bool has_audio = false;
++	int start_offset, end_offset;
++
++	edid_ext = drm_find_cea_extension(edid);
++	if (!edid_ext)
++		goto end;
++
++	has_audio = ((edid_ext[3] & EDID_BASIC_AUDIO) != 0);
++
++	if (has_audio) {
++		DRM_DEBUG_KMS("Monitor has basic audio support\n");
++		goto end;
++	}
++
++	/* Data block offset in CEA extension block */
++	start_offset = 4;
++	end_offset = edid_ext[2];
++
++	for (i = start_offset; i < end_offset;
++			i += ((edid_ext[i] & 0x1f) + 1)) {
++		if ((edid_ext[i] >> 5) == AUDIO_BLOCK) {
++			has_audio = true;
++			for (j = 1; j < (edid_ext[i] & 0x1f); j += 3)
++				DRM_DEBUG_KMS("CEA audio format %d\n",
++					      (edid_ext[i + j] >> 3) & 0xf);
++			goto end;
++		}
++	}
++end:
++	return has_audio;
++}
++EXPORT_SYMBOL(drm_detect_monitor_audio);
++
++/**
+  * drm_add_edid_modes - add modes from EDID data, if available
+  * @connector: connector we're probing
+  * @edid: edid data
+diff --git a/drivers/gpu/drm/i915/Makefile b/drivers/gpu/drm/i915/Makefile
+index 5c8e534..fdc833d 100644
+--- a/drivers/gpu/drm/i915/Makefile
++++ b/drivers/gpu/drm/i915/Makefile
+@@ -26,15 +26,17 @@ i915-y := i915_drv.o i915_dma.o i915_irq.o i915_mem.o \
+ 	  intel_dvo.o \
+ 	  intel_ringbuffer.o \
+ 	  intel_overlay.o \
++	  intel_opregion.o \
+ 	  dvo_ch7xxx.o \
+ 	  dvo_ch7017.o \
+ 	  dvo_ivch.o \
+ 	  dvo_tfp410.o \
+ 	  dvo_sil164.o
+ 
+-i915-$(CONFIG_ACPI)	+= i915_opregion.o
+ i915-$(CONFIG_COMPAT)   += i915_ioc32.o
+ 
++i915-$(CONFIG_ACPI)	+= intel_acpi.o
++
+ obj-$(CONFIG_DRM_I915)  += i915.o
+ 
+ CFLAGS_i915_trace_points.o := -I$(src)
+diff --git a/drivers/gpu/drm/i915/dvo_ch7017.c b/drivers/gpu/drm/i915/dvo_ch7017.c
+index 14d5980..af70337 100644
+--- a/drivers/gpu/drm/i915/dvo_ch7017.c
++++ b/drivers/gpu/drm/i915/dvo_ch7017.c
+@@ -165,67 +165,44 @@ struct ch7017_priv {
+ static void ch7017_dump_regs(struct intel_dvo_device *dvo);
+ static void ch7017_dpms(struct intel_dvo_device *dvo, int mode);
+ 
+-static bool ch7017_read(struct intel_dvo_device *dvo, int addr, uint8_t *val)
++static bool ch7017_read(struct intel_dvo_device *dvo, u8 addr, u8 *val)
+ {
+-	struct i2c_adapter *adapter = dvo->i2c_bus;
+-	struct intel_i2c_chan *i2cbus = container_of(adapter, struct intel_i2c_chan, adapter);
+-	u8 out_buf[2];
+-	u8 in_buf[2];
+-
+ 	struct i2c_msg msgs[] = {
+ 		{
+ 			.addr = dvo->slave_addr,
+ 			.flags = 0,
+ 			.len = 1,
+-			.buf = out_buf,
++			.buf = &addr,
+ 		},
+ 		{
+ 			.addr = dvo->slave_addr,
+ 			.flags = I2C_M_RD,
+ 			.len = 1,
+-			.buf = in_buf,
++			.buf = val,
+ 		}
+ 	};
+-
+-	out_buf[0] = addr;
+-	out_buf[1] = 0;
+-
+-	if (i2c_transfer(&i2cbus->adapter, msgs, 2) == 2) {
+-		*val= in_buf[0];
+-		return true;
+-	};
+-
+-	return false;
++	return i2c_transfer(dvo->i2c_bus, msgs, 2) == 2;
+ }
+ 
+-static bool ch7017_write(struct intel_dvo_device *dvo, int addr, uint8_t val)
++static bool ch7017_write(struct intel_dvo_device *dvo, u8 addr, u8 val)
+ {
+-	struct i2c_adapter *adapter = dvo->i2c_bus;
+-	struct intel_i2c_chan *i2cbus = container_of(adapter, struct intel_i2c_chan, adapter);
+-	uint8_t out_buf[2];
++	uint8_t buf[2] = { addr, val };
+ 	struct i2c_msg msg = {
+ 		.addr = dvo->slave_addr,
+ 		.flags = 0,
+ 		.len = 2,
+-		.buf = out_buf,
++		.buf = buf,
+ 	};
+-
+-	out_buf[0] = addr;
+-	out_buf[1] = val;
+-
+-	if (i2c_transfer(&i2cbus->adapter, &msg, 1) == 1)
+-		return true;
+-
+-	return false;
++	return i2c_transfer(dvo->i2c_bus, &msg, 1) == 1;
+ }
+ 
+ /** Probes for a CH7017 on the given bus and slave address. */
+ static bool ch7017_init(struct intel_dvo_device *dvo,
+ 			struct i2c_adapter *adapter)
+ {
+-	struct intel_i2c_chan *i2cbus = container_of(adapter, struct intel_i2c_chan, adapter);
+ 	struct ch7017_priv *priv;
+-	uint8_t val;
++	const char *str;
++	u8 val;
+ 
+ 	priv = kzalloc(sizeof(struct ch7017_priv), GFP_KERNEL);
+ 	if (priv == NULL)
+@@ -237,16 +214,27 @@ static bool ch7017_init(struct intel_dvo_device *dvo,
+ 	if (!ch7017_read(dvo, CH7017_DEVICE_ID, &val))
+ 		goto fail;
+ 
+-	if (val != CH7017_DEVICE_ID_VALUE &&
+-	    val != CH7018_DEVICE_ID_VALUE &&
+-	    val != CH7019_DEVICE_ID_VALUE) {
++	switch (val) {
++	case CH7017_DEVICE_ID_VALUE:
++		str = "ch7017";
++		break;
++	case CH7018_DEVICE_ID_VALUE:
++		str = "ch7018";
++		break;
++	case CH7019_DEVICE_ID_VALUE:
++		str = "ch7019";
++		break;
++	default:
+ 		DRM_DEBUG_KMS("ch701x not detected, got %d: from %s "
+-				"Slave %d.\n",
+-			  val, i2cbus->adapter.name,dvo->slave_addr);
++			      "slave %d.\n",
++			      val, adapter->name, dvo->slave_addr);
+ 		goto fail;
+ 	}
+ 
++	DRM_DEBUG_KMS("%s detected on %s, addr %d\n",
++		      str, adapter->name, dvo->slave_addr);
+ 	return true;
++
+ fail:
+ 	kfree(priv);
+ 	return false;
+@@ -368,7 +356,7 @@ static void ch7017_dpms(struct intel_dvo_device *dvo, int mode)
+ 	}
+ 
+ 	/* XXX: Should actually wait for update power status somehow */
+-	udelay(20000);
++	msleep(20);
+ }
+ 
+ static void ch7017_dump_regs(struct intel_dvo_device *dvo)
+diff --git a/drivers/gpu/drm/i915/dvo_ch7xxx.c b/drivers/gpu/drm/i915/dvo_ch7xxx.c
+index 6f1944b..7eaa94e 100644
+--- a/drivers/gpu/drm/i915/dvo_ch7xxx.c
++++ b/drivers/gpu/drm/i915/dvo_ch7xxx.c
+@@ -113,7 +113,6 @@ static bool ch7xxx_readb(struct intel_dvo_device *dvo, int addr, uint8_t *ch)
+ {
+ 	struct ch7xxx_priv *ch7xxx= dvo->dev_priv;
+ 	struct i2c_adapter *adapter = dvo->i2c_bus;
+-	struct intel_i2c_chan *i2cbus = container_of(adapter, struct intel_i2c_chan, adapter);
+ 	u8 out_buf[2];
+ 	u8 in_buf[2];
+ 
+@@ -135,14 +134,14 @@ static bool ch7xxx_readb(struct intel_dvo_device *dvo, int addr, uint8_t *ch)
+ 	out_buf[0] = addr;
+ 	out_buf[1] = 0;
+ 
+-	if (i2c_transfer(&i2cbus->adapter, msgs, 2) == 2) {
++	if (i2c_transfer(adapter, msgs, 2) == 2) {
+ 		*ch = in_buf[0];
+ 		return true;
+ 	};
+ 
+ 	if (!ch7xxx->quiet) {
+ 		DRM_DEBUG_KMS("Unable to read register 0x%02x from %s:%02x.\n",
+-			  addr, i2cbus->adapter.name, dvo->slave_addr);
++			  addr, adapter->name, dvo->slave_addr);
+ 	}
+ 	return false;
+ }
+@@ -152,7 +151,6 @@ static bool ch7xxx_writeb(struct intel_dvo_device *dvo, int addr, uint8_t ch)
+ {
+ 	struct ch7xxx_priv *ch7xxx = dvo->dev_priv;
+ 	struct i2c_adapter *adapter = dvo->i2c_bus;
+-	struct intel_i2c_chan *i2cbus = container_of(adapter, struct intel_i2c_chan, adapter);
+ 	uint8_t out_buf[2];
+ 	struct i2c_msg msg = {
+ 		.addr = dvo->slave_addr,
+@@ -164,12 +162,12 @@ static bool ch7xxx_writeb(struct intel_dvo_device *dvo, int addr, uint8_t ch)
+ 	out_buf[0] = addr;
+ 	out_buf[1] = ch;
+ 
+-	if (i2c_transfer(&i2cbus->adapter, &msg, 1) == 1)
++	if (i2c_transfer(adapter, &msg, 1) == 1)
+ 		return true;
+ 
+ 	if (!ch7xxx->quiet) {
+ 		DRM_DEBUG_KMS("Unable to write register 0x%02x to %s:%d.\n",
+-			  addr, i2cbus->adapter.name, dvo->slave_addr);
++			  addr, adapter->name, dvo->slave_addr);
+ 	}
+ 
+ 	return false;
+diff --git a/drivers/gpu/drm/i915/dvo_ivch.c b/drivers/gpu/drm/i915/dvo_ivch.c
+index a2ec3f4..a12ed94 100644
+--- a/drivers/gpu/drm/i915/dvo_ivch.c
++++ b/drivers/gpu/drm/i915/dvo_ivch.c
+@@ -167,7 +167,6 @@ static bool ivch_read(struct intel_dvo_device *dvo, int addr, uint16_t *data)
+ {
+ 	struct ivch_priv *priv = dvo->dev_priv;
+ 	struct i2c_adapter *adapter = dvo->i2c_bus;
+-	struct intel_i2c_chan *i2cbus = container_of(adapter, struct intel_i2c_chan, adapter);
+ 	u8 out_buf[1];
+ 	u8 in_buf[2];
+ 
+@@ -193,7 +192,7 @@ static bool ivch_read(struct intel_dvo_device *dvo, int addr, uint16_t *data)
+ 
+ 	out_buf[0] = addr;
+ 
+-	if (i2c_transfer(&i2cbus->adapter, msgs, 3) == 3) {
++	if (i2c_transfer(adapter, msgs, 3) == 3) {
+ 		*data = (in_buf[1] << 8) | in_buf[0];
+ 		return true;
+ 	};
+@@ -201,7 +200,7 @@ static bool ivch_read(struct intel_dvo_device *dvo, int addr, uint16_t *data)
+ 	if (!priv->quiet) {
+ 		DRM_DEBUG_KMS("Unable to read register 0x%02x from "
+ 				"%s:%02x.\n",
+-			  addr, i2cbus->adapter.name, dvo->slave_addr);
++			  addr, adapter->name, dvo->slave_addr);
+ 	}
+ 	return false;
+ }
+@@ -211,7 +210,6 @@ static bool ivch_write(struct intel_dvo_device *dvo, int addr, uint16_t data)
+ {
+ 	struct ivch_priv *priv = dvo->dev_priv;
+ 	struct i2c_adapter *adapter = dvo->i2c_bus;
+-	struct intel_i2c_chan *i2cbus = container_of(adapter, struct intel_i2c_chan, adapter);
+ 	u8 out_buf[3];
+ 	struct i2c_msg msg = {
+ 		.addr = dvo->slave_addr,
+@@ -224,12 +222,12 @@ static bool ivch_write(struct intel_dvo_device *dvo, int addr, uint16_t data)
+ 	out_buf[1] = data & 0xff;
+ 	out_buf[2] = data >> 8;
+ 
+-	if (i2c_transfer(&i2cbus->adapter, &msg, 1) == 1)
++	if (i2c_transfer(adapter, &msg, 1) == 1)
+ 		return true;
+ 
+ 	if (!priv->quiet) {
+ 		DRM_DEBUG_KMS("Unable to write register 0x%02x to %s:%d.\n",
+-			  addr, i2cbus->adapter.name, dvo->slave_addr);
++			  addr, adapter->name, dvo->slave_addr);
+ 	}
+ 
+ 	return false;
+diff --git a/drivers/gpu/drm/i915/dvo_sil164.c b/drivers/gpu/drm/i915/dvo_sil164.c
+index 9b8e676..e4b4091 100644
+--- a/drivers/gpu/drm/i915/dvo_sil164.c
++++ b/drivers/gpu/drm/i915/dvo_sil164.c
+@@ -69,7 +69,6 @@ static bool sil164_readb(struct intel_dvo_device *dvo, int addr, uint8_t *ch)
+ {
+ 	struct sil164_priv *sil = dvo->dev_priv;
+ 	struct i2c_adapter *adapter = dvo->i2c_bus;
+-	struct intel_i2c_chan *i2cbus = container_of(adapter, struct intel_i2c_chan, adapter);
+ 	u8 out_buf[2];
+ 	u8 in_buf[2];
+ 
+@@ -91,14 +90,14 @@ static bool sil164_readb(struct intel_dvo_device *dvo, int addr, uint8_t *ch)
+ 	out_buf[0] = addr;
+ 	out_buf[1] = 0;
+ 
+-	if (i2c_transfer(&i2cbus->adapter, msgs, 2) == 2) {
++	if (i2c_transfer(adapter, msgs, 2) == 2) {
+ 		*ch = in_buf[0];
+ 		return true;
+ 	};
+ 
+ 	if (!sil->quiet) {
+ 		DRM_DEBUG_KMS("Unable to read register 0x%02x from %s:%02x.\n",
+-			  addr, i2cbus->adapter.name, dvo->slave_addr);
++			  addr, adapter->name, dvo->slave_addr);
+ 	}
+ 	return false;
+ }
+@@ -107,7 +106,6 @@ static bool sil164_writeb(struct intel_dvo_device *dvo, int addr, uint8_t ch)
+ {
+ 	struct sil164_priv *sil= dvo->dev_priv;
+ 	struct i2c_adapter *adapter = dvo->i2c_bus;
+-	struct intel_i2c_chan *i2cbus = container_of(adapter, struct intel_i2c_chan, adapter);
+ 	uint8_t out_buf[2];
+ 	struct i2c_msg msg = {
+ 		.addr = dvo->slave_addr,
+@@ -119,12 +117,12 @@ static bool sil164_writeb(struct intel_dvo_device *dvo, int addr, uint8_t ch)
+ 	out_buf[0] = addr;
+ 	out_buf[1] = ch;
+ 
+-	if (i2c_transfer(&i2cbus->adapter, &msg, 1) == 1)
++	if (i2c_transfer(adapter, &msg, 1) == 1)
+ 		return true;
+ 
+ 	if (!sil->quiet) {
+ 		DRM_DEBUG_KMS("Unable to write register 0x%02x to %s:%d.\n",
+-			  addr, i2cbus->adapter.name, dvo->slave_addr);
++			  addr, adapter->name, dvo->slave_addr);
+ 	}
+ 
+ 	return false;
+diff --git a/drivers/gpu/drm/i915/dvo_tfp410.c b/drivers/gpu/drm/i915/dvo_tfp410.c
+index 56f6642..8ab2855 100644
+--- a/drivers/gpu/drm/i915/dvo_tfp410.c
++++ b/drivers/gpu/drm/i915/dvo_tfp410.c
+@@ -94,7 +94,6 @@ static bool tfp410_readb(struct intel_dvo_device *dvo, int addr, uint8_t *ch)
+ {
+ 	struct tfp410_priv *tfp = dvo->dev_priv;
+ 	struct i2c_adapter *adapter = dvo->i2c_bus;
+-	struct intel_i2c_chan *i2cbus = container_of(adapter, struct intel_i2c_chan, adapter);
+ 	u8 out_buf[2];
+ 	u8 in_buf[2];
+ 
+@@ -116,14 +115,14 @@ static bool tfp410_readb(struct intel_dvo_device *dvo, int addr, uint8_t *ch)
+ 	out_buf[0] = addr;
+ 	out_buf[1] = 0;
+ 
+-	if (i2c_transfer(&i2cbus->adapter, msgs, 2) == 2) {
++	if (i2c_transfer(adapter, msgs, 2) == 2) {
+ 		*ch = in_buf[0];
+ 		return true;
+ 	};
+ 
+ 	if (!tfp->quiet) {
+ 		DRM_DEBUG_KMS("Unable to read register 0x%02x from %s:%02x.\n",
+-			  addr, i2cbus->adapter.name, dvo->slave_addr);
++			  addr, adapter->name, dvo->slave_addr);
+ 	}
+ 	return false;
+ }
+@@ -132,7 +131,6 @@ static bool tfp410_writeb(struct intel_dvo_device *dvo, int addr, uint8_t ch)
+ {
+ 	struct tfp410_priv *tfp = dvo->dev_priv;
+ 	struct i2c_adapter *adapter = dvo->i2c_bus;
+-	struct intel_i2c_chan *i2cbus = container_of(adapter, struct intel_i2c_chan, adapter);
+ 	uint8_t out_buf[2];
+ 	struct i2c_msg msg = {
+ 		.addr = dvo->slave_addr,
+@@ -144,12 +142,12 @@ static bool tfp410_writeb(struct intel_dvo_device *dvo, int addr, uint8_t ch)
+ 	out_buf[0] = addr;
+ 	out_buf[1] = ch;
+ 
+-	if (i2c_transfer(&i2cbus->adapter, &msg, 1) == 1)
++	if (i2c_transfer(adapter, &msg, 1) == 1)
+ 		return true;
+ 
+ 	if (!tfp->quiet) {
+ 		DRM_DEBUG_KMS("Unable to write register 0x%02x to %s:%d.\n",
+-			  addr, i2cbus->adapter.name, dvo->slave_addr);
++			  addr, adapter->name, dvo->slave_addr);
+ 	}
+ 
+ 	return false;
+diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
+index 5e43d70..1f4f3ce 100644
+--- a/drivers/gpu/drm/i915/i915_debugfs.c
++++ b/drivers/gpu/drm/i915/i915_debugfs.c
+@@ -40,9 +40,51 @@
+ 
+ #if defined(CONFIG_DEBUG_FS)
+ 
+-#define ACTIVE_LIST	1
+-#define FLUSHING_LIST	2
+-#define INACTIVE_LIST	3
++enum {
++	ACTIVE_LIST,
++	FLUSHING_LIST,
++	INACTIVE_LIST,
++	PINNED_LIST,
++	DEFERRED_FREE_LIST,
++};
++
++static const char *yesno(int v)
++{
++	return v ? "yes" : "no";
++}
++
++static int i915_capabilities(struct seq_file *m, void *data)
++{
++	struct drm_info_node *node = (struct drm_info_node *) m->private;
++	struct drm_device *dev = node->minor->dev;
++	const struct intel_device_info *info = INTEL_INFO(dev);
++
++	seq_printf(m, "gen: %d\n", info->gen);
++#define B(x) seq_printf(m, #x ": %s\n", yesno(info->x))
++	B(is_mobile);
++	B(is_i85x);
++	B(is_i915g);
++	B(is_i945gm);
++	B(is_g33);
++	B(need_gfx_hws);
++	B(is_g4x);
++	B(is_pineview);
++	B(is_broadwater);
++	B(is_crestline);
++	B(has_fbc);
++	B(has_rc6);
++	B(has_pipe_cxsr);
++	B(has_hotplug);
++	B(cursor_needs_physical);
++	B(has_overlay);
++	B(overlay_needs_physical);
++	B(supports_tv);
++	B(has_bsd_ring);
++	B(has_blt_ring);
++#undef B
++
++	return 0;
++}
+ 
+ static const char *get_pin_flag(struct drm_i915_gem_object *obj_priv)
+ {
+@@ -64,6 +106,29 @@ static const char *get_tiling_flag(struct drm_i915_gem_object *obj_priv)
+     }
+ }
+ 
++static void
++describe_obj(struct seq_file *m, struct drm_i915_gem_object *obj)
++{
++	seq_printf(m, "%p: %s%s %8zd %08x %08x %d%s%s",
++		   &obj->base,
++		   get_pin_flag(obj),
++		   get_tiling_flag(obj),
++		   obj->base.size,
++		   obj->base.read_domains,
++		   obj->base.write_domain,
++		   obj->last_rendering_seqno,
++		   obj->dirty ? " dirty" : "",
++		   obj->madv == I915_MADV_DONTNEED ? " purgeable" : "");
++	if (obj->base.name)
++		seq_printf(m, " (name: %d)", obj->base.name);
++	if (obj->fence_reg != I915_FENCE_REG_NONE)
++		seq_printf(m, " (fence: %d)", obj->fence_reg);
++	if (obj->gtt_space != NULL)
++		seq_printf(m, " (gtt_offset: %08x)", obj->gtt_offset);
++	if (obj->ring != NULL)
++		seq_printf(m, " (%s)", obj->ring->name);
++}
++
+ static int i915_gem_object_list_info(struct seq_file *m, void *data)
+ {
+ 	struct drm_info_node *node = (struct drm_info_node *) m->private;
+@@ -72,56 +137,80 @@ static int i915_gem_object_list_info(struct seq_file *m, void *data)
+ 	struct drm_device *dev = node->minor->dev;
+ 	drm_i915_private_t *dev_priv = dev->dev_private;
+ 	struct drm_i915_gem_object *obj_priv;
+-	spinlock_t *lock = NULL;
++	size_t total_obj_size, total_gtt_size;
++	int count, ret;
++
++	ret = mutex_lock_interruptible(&dev->struct_mutex);
++	if (ret)
++		return ret;
+ 
+ 	switch (list) {
+ 	case ACTIVE_LIST:
+ 		seq_printf(m, "Active:\n");
+-		lock = &dev_priv->mm.active_list_lock;
+-		head = &dev_priv->render_ring.active_list;
++		head = &dev_priv->mm.active_list;
+ 		break;
+ 	case INACTIVE_LIST:
+ 		seq_printf(m, "Inactive:\n");
+ 		head = &dev_priv->mm.inactive_list;
+ 		break;
++	case PINNED_LIST:
++		seq_printf(m, "Pinned:\n");
++		head = &dev_priv->mm.pinned_list;
++		break;
+ 	case FLUSHING_LIST:
+ 		seq_printf(m, "Flushing:\n");
+ 		head = &dev_priv->mm.flushing_list;
+ 		break;
++	case DEFERRED_FREE_LIST:
++		seq_printf(m, "Deferred free:\n");
++		head = &dev_priv->mm.deferred_free_list;
++		break;
+ 	default:
+-		DRM_INFO("Ooops, unexpected list\n");
+-		return 0;
++		mutex_unlock(&dev->struct_mutex);
++		return -EINVAL;
+ 	}
+ 
+-	if (lock)
+-		spin_lock(lock);
+-	list_for_each_entry(obj_priv, head, list)
+-	{
+-		seq_printf(m, "    %p: %s %8zd %08x %08x %d%s%s",
+-			   &obj_priv->base,
+-			   get_pin_flag(obj_priv),
+-			   obj_priv->base.size,
+-			   obj_priv->base.read_domains,
+-			   obj_priv->base.write_domain,
+-			   obj_priv->last_rendering_seqno,
+-			   obj_priv->dirty ? " dirty" : "",
+-			   obj_priv->madv == I915_MADV_DONTNEED ? " purgeable" : "");
+-
+-		if (obj_priv->base.name)
+-			seq_printf(m, " (name: %d)", obj_priv->base.name);
+-		if (obj_priv->fence_reg != I915_FENCE_REG_NONE)
+-			seq_printf(m, " (fence: %d)", obj_priv->fence_reg);
+-		if (obj_priv->gtt_space != NULL)
+-			seq_printf(m, " (gtt_offset: %08x)", obj_priv->gtt_offset);
+-
++	total_obj_size = total_gtt_size = count = 0;
++	list_for_each_entry(obj_priv, head, mm_list) {
++		seq_printf(m, "   ");
++		describe_obj(m, obj_priv);
+ 		seq_printf(m, "\n");
++		total_obj_size += obj_priv->base.size;
++		total_gtt_size += obj_priv->gtt_space->size;
++		count++;
+ 	}
++	mutex_unlock(&dev->struct_mutex);
+ 
+-	if (lock)
+-	    spin_unlock(lock);
++	seq_printf(m, "Total %d objects, %zu bytes, %zu GTT size\n",
++		   count, total_obj_size, total_gtt_size);
+ 	return 0;
+ }
+ 
++static int i915_gem_object_info(struct seq_file *m, void *data)
++{
++	struct drm_info_node *node = (struct drm_info_node *) m->private;
++	struct drm_device *dev = node->minor->dev;
++	struct drm_i915_private *dev_priv = dev->dev_private;
++	int ret;
++
++	ret = mutex_lock_interruptible(&dev->struct_mutex);
++	if (ret)
++		return ret;
++
++	seq_printf(m, "%u objects\n", dev_priv->mm.object_count);
++	seq_printf(m, "%zu object bytes\n", dev_priv->mm.object_memory);
++	seq_printf(m, "%u pinned\n", dev_priv->mm.pin_count);
++	seq_printf(m, "%zu pin bytes\n", dev_priv->mm.pin_memory);
++	seq_printf(m, "%u objects in gtt\n", dev_priv->mm.gtt_count);
++	seq_printf(m, "%zu gtt bytes\n", dev_priv->mm.gtt_memory);
++	seq_printf(m, "%zu gtt total\n", dev_priv->mm.gtt_total);
++
++	mutex_unlock(&dev->struct_mutex);
++
++	return 0;
++}
++
++
+ static int i915_gem_pageflip_info(struct seq_file *m, void *data)
+ {
+ 	struct drm_info_node *node = (struct drm_info_node *) m->private;
+@@ -176,6 +265,11 @@ static int i915_gem_request_info(struct seq_file *m, void *data)
+ 	struct drm_device *dev = node->minor->dev;
+ 	drm_i915_private_t *dev_priv = dev->dev_private;
+ 	struct drm_i915_gem_request *gem_request;
++	int ret;
++
++	ret = mutex_lock_interruptible(&dev->struct_mutex);
++	if (ret)
++		return ret;
+ 
+ 	seq_printf(m, "Request:\n");
+ 	list_for_each_entry(gem_request, &dev_priv->render_ring.request_list,
+@@ -184,6 +278,8 @@ static int i915_gem_request_info(struct seq_file *m, void *data)
+ 			   gem_request->seqno,
+ 			   (int) (jiffies - gem_request->emitted_jiffies));
+ 	}
++	mutex_unlock(&dev->struct_mutex);
++
+ 	return 0;
+ }
+ 
+@@ -192,16 +288,24 @@ static int i915_gem_seqno_info(struct seq_file *m, void *data)
+ 	struct drm_info_node *node = (struct drm_info_node *) m->private;
+ 	struct drm_device *dev = node->minor->dev;
+ 	drm_i915_private_t *dev_priv = dev->dev_private;
++	int ret;
++
++	ret = mutex_lock_interruptible(&dev->struct_mutex);
++	if (ret)
++		return ret;
+ 
+ 	if (dev_priv->render_ring.status_page.page_addr != NULL) {
+ 		seq_printf(m, "Current sequence: %d\n",
+-			   i915_get_gem_seqno(dev,  &dev_priv->render_ring));
++			   dev_priv->render_ring.get_seqno(dev, &dev_priv->render_ring));
+ 	} else {
+ 		seq_printf(m, "Current sequence: hws uninitialized\n");
+ 	}
+ 	seq_printf(m, "Waiter sequence:  %d\n",
+ 			dev_priv->mm.waiting_gem_seqno);
+ 	seq_printf(m, "IRQ sequence:     %d\n", dev_priv->mm.irq_gem_seqno);
++
++	mutex_unlock(&dev->struct_mutex);
++
+ 	return 0;
+ }
+ 
+@@ -211,6 +315,11 @@ static int i915_interrupt_info(struct seq_file *m, void *data)
+ 	struct drm_info_node *node = (struct drm_info_node *) m->private;
+ 	struct drm_device *dev = node->minor->dev;
+ 	drm_i915_private_t *dev_priv = dev->dev_private;
++	int ret;
++
++	ret = mutex_lock_interruptible(&dev->struct_mutex);
++	if (ret)
++		return ret;
+ 
+ 	if (!HAS_PCH_SPLIT(dev)) {
+ 		seq_printf(m, "Interrupt enable:    %08x\n",
+@@ -247,7 +356,7 @@ static int i915_interrupt_info(struct seq_file *m, void *data)
+ 		   atomic_read(&dev_priv->irq_received));
+ 	if (dev_priv->render_ring.status_page.page_addr != NULL) {
+ 		seq_printf(m, "Current sequence:    %d\n",
+-			   i915_get_gem_seqno(dev,  &dev_priv->render_ring));
++			   dev_priv->render_ring.get_seqno(dev, &dev_priv->render_ring));
+ 	} else {
+ 		seq_printf(m, "Current sequence:    hws uninitialized\n");
+ 	}
+@@ -255,6 +364,8 @@ static int i915_interrupt_info(struct seq_file *m, void *data)
+ 		   dev_priv->mm.waiting_gem_seqno);
+ 	seq_printf(m, "IRQ sequence:        %d\n",
+ 		   dev_priv->mm.irq_gem_seqno);
++	mutex_unlock(&dev->struct_mutex);
++
+ 	return 0;
+ }
+ 
+@@ -263,7 +374,11 @@ static int i915_gem_fence_regs_info(struct seq_file *m, void *data)
+ 	struct drm_info_node *node = (struct drm_info_node *) m->private;
+ 	struct drm_device *dev = node->minor->dev;
+ 	drm_i915_private_t *dev_priv = dev->dev_private;
+-	int i;
++	int i, ret;
++
++	ret = mutex_lock_interruptible(&dev->struct_mutex);
++	if (ret)
++		return ret;
+ 
+ 	seq_printf(m, "Reserved fences = %d\n", dev_priv->fence_reg_start);
+ 	seq_printf(m, "Total fences = %d\n", dev_priv->num_fence_regs);
+@@ -289,6 +404,7 @@ static int i915_gem_fence_regs_info(struct seq_file *m, void *data)
+ 			seq_printf(m, "\n");
+ 		}
+ 	}
++	mutex_unlock(&dev->struct_mutex);
+ 
+ 	return 0;
+ }
+@@ -313,16 +429,19 @@ static int i915_hws_info(struct seq_file *m, void *data)
+ 	return 0;
+ }
+ 
+-static void i915_dump_pages(struct seq_file *m, struct page **pages, int page_count)
++static void i915_dump_object(struct seq_file *m,
++			     struct io_mapping *mapping,
++			     struct drm_i915_gem_object *obj_priv)
+ {
+-	int page, i;
+-	uint32_t *mem;
++	int page, page_count, i;
+ 
++	page_count = obj_priv->base.size / PAGE_SIZE;
+ 	for (page = 0; page < page_count; page++) {
+-		mem = kmap_atomic(pages[page], KM_USER0);
++		u32 *mem = io_mapping_map_wc(mapping,
++					     obj_priv->gtt_offset + page * PAGE_SIZE);
+ 		for (i = 0; i < PAGE_SIZE; i += 4)
+ 			seq_printf(m, "%08x :  %08x\n", i, mem[i / 4]);
+-		kunmap_atomic(mem, KM_USER0);
++		io_mapping_unmap(mem);
+ 	}
+ }
+ 
+@@ -335,27 +454,20 @@ static int i915_batchbuffer_info(struct seq_file *m, void *data)
+ 	struct drm_i915_gem_object *obj_priv;
+ 	int ret;
+ 
+-	spin_lock(&dev_priv->mm.active_list_lock);
++	ret = mutex_lock_interruptible(&dev->struct_mutex);
++	if (ret)
++		return ret;
+ 
+-	list_for_each_entry(obj_priv, &dev_priv->render_ring.active_list,
+-			list) {
++	list_for_each_entry(obj_priv, &dev_priv->mm.active_list, mm_list) {
+ 		obj = &obj_priv->base;
+ 		if (obj->read_domains & I915_GEM_DOMAIN_COMMAND) {
+-		    ret = i915_gem_object_get_pages(obj, 0);
+-		    if (ret) {
+-			    DRM_ERROR("Failed to get pages: %d\n", ret);
+-			    spin_unlock(&dev_priv->mm.active_list_lock);
+-			    return ret;
+-		    }
+-
+-		    seq_printf(m, "--- gtt_offset = 0x%08x\n", obj_priv->gtt_offset);
+-		    i915_dump_pages(m, obj_priv->pages, obj->size / PAGE_SIZE);
+-
+-		    i915_gem_object_put_pages(obj);
++		    seq_printf(m, "--- gtt_offset = 0x%08x\n",
++			       obj_priv->gtt_offset);
++		    i915_dump_object(m, dev_priv->mm.gtt_mapping, obj_priv);
+ 		}
+ 	}
+ 
+-	spin_unlock(&dev_priv->mm.active_list_lock);
++	mutex_unlock(&dev->struct_mutex);
+ 
+ 	return 0;
+ }
+@@ -365,20 +477,24 @@ static int i915_ringbuffer_data(struct seq_file *m, void *data)
+ 	struct drm_info_node *node = (struct drm_info_node *) m->private;
+ 	struct drm_device *dev = node->minor->dev;
+ 	drm_i915_private_t *dev_priv = dev->dev_private;
+-	u8 *virt;
+-	uint32_t *ptr, off;
++	int ret;
++
++	ret = mutex_lock_interruptible(&dev->struct_mutex);
++	if (ret)
++		return ret;
+ 
+ 	if (!dev_priv->render_ring.gem_object) {
+ 		seq_printf(m, "No ringbuffer setup\n");
+-		return 0;
+-	}
+-
+-	virt = dev_priv->render_ring.virtual_start;
++	} else {
++		u8 *virt = dev_priv->render_ring.virtual_start;
++		uint32_t off;
+ 
+-	for (off = 0; off < dev_priv->render_ring.size; off += 4) {
+-		ptr = (uint32_t *)(virt + off);
+-		seq_printf(m, "%08x :  %08x\n", off, *ptr);
++		for (off = 0; off < dev_priv->render_ring.size; off += 4) {
++			uint32_t *ptr = (uint32_t *)(virt + off);
++			seq_printf(m, "%08x :  %08x\n", off, *ptr);
++		}
+ 	}
++	mutex_unlock(&dev->struct_mutex);
+ 
+ 	return 0;
+ }
+@@ -396,7 +512,7 @@ static int i915_ringbuffer_info(struct seq_file *m, void *data)
+ 	seq_printf(m, "RingHead :  %08x\n", head);
+ 	seq_printf(m, "RingTail :  %08x\n", tail);
+ 	seq_printf(m, "RingSize :  %08lx\n", dev_priv->render_ring.size);
+-	seq_printf(m, "Acthd :     %08x\n", I915_READ(IS_I965G(dev) ? ACTHD_I965 : ACTHD));
++	seq_printf(m, "Acthd :     %08x\n", I915_READ(INTEL_INFO(dev)->gen >= 4 ? ACTHD_I965 : ACTHD));
+ 
+ 	return 0;
+ }
+@@ -458,7 +574,7 @@ static int i915_error_state(struct seq_file *m, void *unused)
+ 	seq_printf(m, "  IPEHR: 0x%08x\n", error->ipehr);
+ 	seq_printf(m, "  INSTDONE: 0x%08x\n", error->instdone);
+ 	seq_printf(m, "  ACTHD: 0x%08x\n", error->acthd);
+-	if (IS_I965G(dev)) {
++	if (INTEL_INFO(dev)->gen >= 4) {
+ 		seq_printf(m, "  INSTPS: 0x%08x\n", error->instps);
+ 		seq_printf(m, "  INSTDONE1: 0x%08x\n", error->instdone1);
+ 	}
+@@ -642,6 +758,9 @@ static int i915_fbc_status(struct seq_file *m, void *unused)
+ 	} else {
+ 		seq_printf(m, "FBC disabled: ");
+ 		switch (dev_priv->no_fbc_reason) {
++		case FBC_NO_OUTPUT:
++			seq_printf(m, "no outputs");
++			break;
+ 		case FBC_STOLEN_TOO_SMALL:
+ 			seq_printf(m, "not enough stolen memory");
+ 			break;
+@@ -675,15 +794,17 @@ static int i915_sr_status(struct seq_file *m, void *unused)
+ 	drm_i915_private_t *dev_priv = dev->dev_private;
+ 	bool sr_enabled = false;
+ 
+-	if (IS_I965GM(dev) || IS_I945G(dev) || IS_I945GM(dev))
++	if (IS_GEN5(dev))
++		sr_enabled = I915_READ(WM1_LP_ILK) & WM1_LP_SR_EN;
++	else if (IS_CRESTLINE(dev) || IS_I945G(dev) || IS_I945GM(dev))
+ 		sr_enabled = I915_READ(FW_BLC_SELF) & FW_BLC_SELF_EN;
+ 	else if (IS_I915GM(dev))
+ 		sr_enabled = I915_READ(INSTPM) & INSTPM_SELF_EN;
+ 	else if (IS_PINEVIEW(dev))
+ 		sr_enabled = I915_READ(DSPFW3) & PINEVIEW_SELF_REFRESH_EN;
+ 
+-	seq_printf(m, "self-refresh: %s\n", sr_enabled ? "enabled" :
+-		   "disabled");
++	seq_printf(m, "self-refresh: %s\n",
++		   sr_enabled ? "enabled" : "disabled");
+ 
+ 	return 0;
+ }
+@@ -694,10 +815,16 @@ static int i915_emon_status(struct seq_file *m, void *unused)
+ 	struct drm_device *dev = node->minor->dev;
+ 	drm_i915_private_t *dev_priv = dev->dev_private;
+ 	unsigned long temp, chipset, gfx;
++	int ret;
++
++	ret = mutex_lock_interruptible(&dev->struct_mutex);
++	if (ret)
++		return ret;
+ 
+ 	temp = i915_mch_val(dev_priv);
+ 	chipset = i915_chipset_val(dev_priv);
+ 	gfx = i915_gfx_val(dev_priv);
++	mutex_unlock(&dev->struct_mutex);
+ 
+ 	seq_printf(m, "GMCH temp: %ld\n", temp);
+ 	seq_printf(m, "Chipset power: %ld\n", chipset);
+@@ -718,6 +845,68 @@ static int i915_gfxec(struct seq_file *m, void *unused)
+ 	return 0;
+ }
+ 
++static int i915_opregion(struct seq_file *m, void *unused)
++{
++	struct drm_info_node *node = (struct drm_info_node *) m->private;
++	struct drm_device *dev = node->minor->dev;
++	drm_i915_private_t *dev_priv = dev->dev_private;
++	struct intel_opregion *opregion = &dev_priv->opregion;
++	int ret;
++
++	ret = mutex_lock_interruptible(&dev->struct_mutex);
++	if (ret)
++		return ret;
++
++	if (opregion->header)
++		seq_write(m, opregion->header, OPREGION_SIZE);
++
++	mutex_unlock(&dev->struct_mutex);
++
++	return 0;
++}
++
++static int i915_gem_framebuffer_info(struct seq_file *m, void *data)
++{
++	struct drm_info_node *node = (struct drm_info_node *) m->private;
++	struct drm_device *dev = node->minor->dev;
++	drm_i915_private_t *dev_priv = dev->dev_private;
++	struct intel_fbdev *ifbdev;
++	struct intel_framebuffer *fb;
++	int ret;
++
++	ret = mutex_lock_interruptible(&dev->mode_config.mutex);
++	if (ret)
++		return ret;
++
++	ifbdev = dev_priv->fbdev;
++	fb = to_intel_framebuffer(ifbdev->helper.fb);
++
++	seq_printf(m, "fbcon size: %d x %d, depth %d, %d bpp, obj ",
++		   fb->base.width,
++		   fb->base.height,
++		   fb->base.depth,
++		   fb->base.bits_per_pixel);
++	describe_obj(m, to_intel_bo(fb->obj));
++	seq_printf(m, "\n");
++
++	list_for_each_entry(fb, &dev->mode_config.fb_list, base.head) {
++		if (&fb->base == ifbdev->helper.fb)
++			continue;
++
++		seq_printf(m, "user size: %d x %d, depth %d, %d bpp, obj ",
++			   fb->base.width,
++			   fb->base.height,
++			   fb->base.depth,
++			   fb->base.bits_per_pixel);
++		describe_obj(m, to_intel_bo(fb->obj));
++		seq_printf(m, "\n");
++	}
++
++	mutex_unlock(&dev->mode_config.mutex);
++
++	return 0;
++}
++
+ static int
+ i915_wedged_open(struct inode *inode,
+ 		 struct file *filp)
+@@ -741,6 +930,9 @@ i915_wedged_read(struct file *filp,
+ 		       "wedged :  %d\n",
+ 		       atomic_read(&dev_priv->mm.wedged));
+ 
++	if (len > sizeof (buf))
++		len = sizeof (buf);
++
+ 	return simple_read_from_buffer(ubuf, max, ppos, buf, len);
+ }
+ 
+@@ -770,7 +962,7 @@ i915_wedged_write(struct file *filp,
+ 
+ 	atomic_set(&dev_priv->mm.wedged, val);
+ 	if (val) {
+-		DRM_WAKEUP(&dev_priv->irq_queue);
++		wake_up_all(&dev_priv->irq_queue);
+ 		queue_work(dev_priv->wq, &dev_priv->error_work);
+ 	}
+ 
+@@ -782,6 +974,7 @@ static const struct file_operations i915_wedged_fops = {
+ 	.open = i915_wedged_open,
+ 	.read = i915_wedged_read,
+ 	.write = i915_wedged_write,
++	.llseek = default_llseek,
+ };
+ 
+ /* As the drm_debugfs_init() routines are called before dev->dev_private is
+@@ -823,9 +1016,13 @@ static int i915_wedged_create(struct dentry *root, struct drm_minor *minor)
+ }
+ 
+ static struct drm_info_list i915_debugfs_list[] = {
++	{"i915_capabilities", i915_capabilities, 0, 0},
++	{"i915_gem_objects", i915_gem_object_info, 0},
+ 	{"i915_gem_active", i915_gem_object_list_info, 0, (void *) ACTIVE_LIST},
+ 	{"i915_gem_flushing", i915_gem_object_list_info, 0, (void *) FLUSHING_LIST},
+ 	{"i915_gem_inactive", i915_gem_object_list_info, 0, (void *) INACTIVE_LIST},
++	{"i915_gem_pinned", i915_gem_object_list_info, 0, (void *) PINNED_LIST},
++	{"i915_gem_deferred_free", i915_gem_object_list_info, 0, (void *) DEFERRED_FREE_LIST},
+ 	{"i915_gem_pageflip", i915_gem_pageflip_info, 0},
+ 	{"i915_gem_request", i915_gem_request_info, 0},
+ 	{"i915_gem_seqno", i915_gem_seqno_info, 0},
+@@ -845,6 +1042,8 @@ static struct drm_info_list i915_debugfs_list[] = {
+ 	{"i915_gfxec", i915_gfxec, 0},
+ 	{"i915_fbc_status", i915_fbc_status, 0},
+ 	{"i915_sr_status", i915_sr_status, 0},
++	{"i915_opregion", i915_opregion, 0},
++	{"i915_gem_framebuffer", i915_gem_framebuffer_info, 0},
+ };
+ #define I915_DEBUGFS_ENTRIES ARRAY_SIZE(i915_debugfs_list)
+ 
+diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c
+index 2dd2c93..7a26f4dd 100644
+--- a/drivers/gpu/drm/i915/i915_dma.c
++++ b/drivers/gpu/drm/i915/i915_dma.c
+@@ -40,8 +40,7 @@
+ #include <linux/pnp.h>
+ #include <linux/vga_switcheroo.h>
+ #include <linux/slab.h>
+-
+-extern int intel_max_stolen; /* from AGP driver */
++#include <acpi/video.h>
+ 
+ /**
+  * Sets up the hardware status page for devices that need a physical address
+@@ -64,7 +63,7 @@ static int i915_init_phys_hws(struct drm_device *dev)
+ 
+ 	memset(dev_priv->render_ring.status_page.page_addr, 0, PAGE_SIZE);
+ 
+-	if (IS_I965G(dev))
++	if (INTEL_INFO(dev)->gen >= 4)
+ 		dev_priv->dma_status_page |= (dev_priv->dma_status_page >> 28) &
+ 					     0xf0;
+ 
+@@ -133,8 +132,8 @@ static int i915_dma_cleanup(struct drm_device * dev)
+ 
+ 	mutex_lock(&dev->struct_mutex);
+ 	intel_cleanup_ring_buffer(dev, &dev_priv->render_ring);
+-	if (HAS_BSD(dev))
+-		intel_cleanup_ring_buffer(dev, &dev_priv->bsd_ring);
++	intel_cleanup_ring_buffer(dev, &dev_priv->bsd_ring);
++	intel_cleanup_ring_buffer(dev, &dev_priv->blt_ring);
+ 	mutex_unlock(&dev->struct_mutex);
+ 
+ 	/* Clear the HWS virtual address at teardown */
+@@ -222,7 +221,7 @@ static int i915_dma_resume(struct drm_device * dev)
+ 	DRM_DEBUG_DRIVER("hw status page @ %p\n",
+ 				ring->status_page.page_addr);
+ 	if (ring->status_page.gfx_addr != 0)
+-		ring->setup_status_page(dev, ring);
++		intel_ring_setup_status_page(dev, ring);
+ 	else
+ 		I915_WRITE(HWS_PGA, dev_priv->dma_status_page);
+ 
+@@ -377,7 +376,7 @@ i915_emit_box(struct drm_device *dev,
+ 		return -EINVAL;
+ 	}
+ 
+-	if (IS_I965G(dev)) {
++	if (INTEL_INFO(dev)->gen >= 4) {
+ 		BEGIN_LP_RING(4);
+ 		OUT_RING(GFX_OP_DRAWRECT_INFO_I965);
+ 		OUT_RING((box.x1 & 0xffff) | (box.y1 << 16));
+@@ -481,7 +480,7 @@ static int i915_dispatch_batchbuffer(struct drm_device * dev,
+ 
+ 		if (!IS_I830(dev) && !IS_845G(dev)) {
+ 			BEGIN_LP_RING(2);
+-			if (IS_I965G(dev)) {
++			if (INTEL_INFO(dev)->gen >= 4) {
+ 				OUT_RING(MI_BATCH_BUFFER_START | (2 << 6) | MI_BATCH_NON_SECURE_I965);
+ 				OUT_RING(batch->start);
+ 			} else {
+@@ -500,7 +499,7 @@ static int i915_dispatch_batchbuffer(struct drm_device * dev,
+ 	}
+ 
+ 
+-	if (IS_G4X(dev) || IS_IRONLAKE(dev)) {
++	if (IS_G4X(dev) || IS_GEN5(dev)) {
+ 		BEGIN_LP_RING(2);
+ 		OUT_RING(MI_FLUSH | MI_NO_WRITE_FLUSH | MI_INVALIDATE_ISP);
+ 		OUT_RING(MI_NOOP);
+@@ -765,6 +764,9 @@ static int i915_getparam(struct drm_device *dev, void *data,
+ 	case I915_PARAM_HAS_BSD:
+ 		value = HAS_BSD(dev);
+ 		break;
++	case I915_PARAM_HAS_BLT:
++		value = HAS_BLT(dev);
++		break;
+ 	default:
+ 		DRM_DEBUG_DRIVER("Unknown parameter %d\n",
+ 				 param->param);
+@@ -888,12 +890,12 @@ static int
+ intel_alloc_mchbar_resource(struct drm_device *dev)
+ {
+ 	drm_i915_private_t *dev_priv = dev->dev_private;
+-	int reg = IS_I965G(dev) ? MCHBAR_I965 : MCHBAR_I915;
++	int reg = INTEL_INFO(dev)->gen >= 4 ? MCHBAR_I965 : MCHBAR_I915;
+ 	u32 temp_lo, temp_hi = 0;
+ 	u64 mchbar_addr;
+ 	int ret;
+ 
+-	if (IS_I965G(dev))
++	if (INTEL_INFO(dev)->gen >= 4)
+ 		pci_read_config_dword(dev_priv->bridge_dev, reg + 4, &temp_hi);
+ 	pci_read_config_dword(dev_priv->bridge_dev, reg, &temp_lo);
+ 	mchbar_addr = ((u64)temp_hi << 32) | temp_lo;
+@@ -920,7 +922,7 @@ intel_alloc_mchbar_resource(struct drm_device *dev)
+ 		return ret;
+ 	}
+ 
+-	if (IS_I965G(dev))
++	if (INTEL_INFO(dev)->gen >= 4)
+ 		pci_write_config_dword(dev_priv->bridge_dev, reg + 4,
+ 				       upper_32_bits(dev_priv->mch_res.start));
+ 
+@@ -934,7 +936,7 @@ static void
+ intel_setup_mchbar(struct drm_device *dev)
+ {
+ 	drm_i915_private_t *dev_priv = dev->dev_private;
+-	int mchbar_reg = IS_I965G(dev) ? MCHBAR_I965 : MCHBAR_I915;
++	int mchbar_reg = INTEL_INFO(dev)->gen >= 4 ? MCHBAR_I965 : MCHBAR_I915;
+ 	u32 temp;
+ 	bool enabled;
+ 
+@@ -971,7 +973,7 @@ static void
+ intel_teardown_mchbar(struct drm_device *dev)
+ {
+ 	drm_i915_private_t *dev_priv = dev->dev_private;
+-	int mchbar_reg = IS_I965G(dev) ? MCHBAR_I965 : MCHBAR_I915;
++	int mchbar_reg = INTEL_INFO(dev)->gen >= 4 ? MCHBAR_I965 : MCHBAR_I915;
+ 	u32 temp;
+ 
+ 	if (dev_priv->mchbar_need_disable) {
+@@ -990,174 +992,6 @@ intel_teardown_mchbar(struct drm_device *dev)
+ 		release_resource(&dev_priv->mch_res);
+ }
+ 
+-/**
+- * i915_probe_agp - get AGP bootup configuration
+- * @pdev: PCI device
+- * @aperture_size: returns AGP aperture configured size
+- * @preallocated_size: returns size of BIOS preallocated AGP space
+- *
+- * Since Intel integrated graphics are UMA, the BIOS has to set aside
+- * some RAM for the framebuffer at early boot.  This code figures out
+- * how much was set aside so we can use it for our own purposes.
+- */
+-static int i915_probe_agp(struct drm_device *dev, uint32_t *aperture_size,
+-			  uint32_t *preallocated_size,
+-			  uint32_t *start)
+-{
+-	struct drm_i915_private *dev_priv = dev->dev_private;
+-	u16 tmp = 0;
+-	unsigned long overhead;
+-	unsigned long stolen;
+-
+-	/* Get the fb aperture size and "stolen" memory amount. */
+-	pci_read_config_word(dev_priv->bridge_dev, INTEL_GMCH_CTRL, &tmp);
+-
+-	*aperture_size = 1024 * 1024;
+-	*preallocated_size = 1024 * 1024;
+-
+-	switch (dev->pdev->device) {
+-	case PCI_DEVICE_ID_INTEL_82830_CGC:
+-	case PCI_DEVICE_ID_INTEL_82845G_IG:
+-	case PCI_DEVICE_ID_INTEL_82855GM_IG:
+-	case PCI_DEVICE_ID_INTEL_82865_IG:
+-		if ((tmp & INTEL_GMCH_MEM_MASK) == INTEL_GMCH_MEM_64M)
+-			*aperture_size *= 64;
+-		else
+-			*aperture_size *= 128;
+-		break;
+-	default:
+-		/* 9xx supports large sizes, just look at the length */
+-		*aperture_size = pci_resource_len(dev->pdev, 2);
+-		break;
+-	}
+-
+-	/*
+-	 * Some of the preallocated space is taken by the GTT
+-	 * and popup.  GTT is 1K per MB of aperture size, and popup is 4K.
+-	 */
+-	if (IS_G4X(dev) || IS_PINEVIEW(dev) || IS_IRONLAKE(dev) || IS_GEN6(dev))
+-		overhead = 4096;
+-	else
+-		overhead = (*aperture_size / 1024) + 4096;
+-
+-	if (IS_GEN6(dev)) {
+-		/* SNB has memory control reg at 0x50.w */
+-		pci_read_config_word(dev->pdev, SNB_GMCH_CTRL, &tmp);
+-
+-		switch (tmp & SNB_GMCH_GMS_STOLEN_MASK) {
+-		case INTEL_855_GMCH_GMS_DISABLED:
+-			DRM_ERROR("video memory is disabled\n");
+-			return -1;
+-		case SNB_GMCH_GMS_STOLEN_32M:
+-			stolen = 32 * 1024 * 1024;
+-			break;
+-		case SNB_GMCH_GMS_STOLEN_64M:
+-			stolen = 64 * 1024 * 1024;
+-			break;
+-		case SNB_GMCH_GMS_STOLEN_96M:
+-			stolen = 96 * 1024 * 1024;
+-			break;
+-		case SNB_GMCH_GMS_STOLEN_128M:
+-			stolen = 128 * 1024 * 1024;
+-			break;
+-		case SNB_GMCH_GMS_STOLEN_160M:
+-			stolen = 160 * 1024 * 1024;
+-			break;
+-		case SNB_GMCH_GMS_STOLEN_192M:
+-			stolen = 192 * 1024 * 1024;
+-			break;
+-		case SNB_GMCH_GMS_STOLEN_224M:
+-			stolen = 224 * 1024 * 1024;
+-			break;
+-		case SNB_GMCH_GMS_STOLEN_256M:
+-			stolen = 256 * 1024 * 1024;
+-			break;
+-		case SNB_GMCH_GMS_STOLEN_288M:
+-			stolen = 288 * 1024 * 1024;
+-			break;
+-		case SNB_GMCH_GMS_STOLEN_320M:
+-			stolen = 320 * 1024 * 1024;
+-			break;
+-		case SNB_GMCH_GMS_STOLEN_352M:
+-			stolen = 352 * 1024 * 1024;
+-			break;
+-		case SNB_GMCH_GMS_STOLEN_384M:
+-			stolen = 384 * 1024 * 1024;
+-			break;
+-		case SNB_GMCH_GMS_STOLEN_416M:
+-			stolen = 416 * 1024 * 1024;
+-			break;
+-		case SNB_GMCH_GMS_STOLEN_448M:
+-			stolen = 448 * 1024 * 1024;
+-			break;
+-		case SNB_GMCH_GMS_STOLEN_480M:
+-			stolen = 480 * 1024 * 1024;
+-			break;
+-		case SNB_GMCH_GMS_STOLEN_512M:
+-			stolen = 512 * 1024 * 1024;
+-			break;
+-		default:
+-			DRM_ERROR("unexpected GMCH_GMS value: 0x%02x\n",
+-				  tmp & SNB_GMCH_GMS_STOLEN_MASK);
+-			return -1;
+-		}
+-	} else {
+-		switch (tmp & INTEL_GMCH_GMS_MASK) {
+-		case INTEL_855_GMCH_GMS_DISABLED:
+-			DRM_ERROR("video memory is disabled\n");
+-			return -1;
+-		case INTEL_855_GMCH_GMS_STOLEN_1M:
+-			stolen = 1 * 1024 * 1024;
+-			break;
+-		case INTEL_855_GMCH_GMS_STOLEN_4M:
+-			stolen = 4 * 1024 * 1024;
+-			break;
+-		case INTEL_855_GMCH_GMS_STOLEN_8M:
+-			stolen = 8 * 1024 * 1024;
+-			break;
+-		case INTEL_855_GMCH_GMS_STOLEN_16M:
+-			stolen = 16 * 1024 * 1024;
+-			break;
+-		case INTEL_855_GMCH_GMS_STOLEN_32M:
+-			stolen = 32 * 1024 * 1024;
+-			break;
+-		case INTEL_915G_GMCH_GMS_STOLEN_48M:
+-			stolen = 48 * 1024 * 1024;
+-			break;
+-		case INTEL_915G_GMCH_GMS_STOLEN_64M:
+-			stolen = 64 * 1024 * 1024;
+-			break;
+-		case INTEL_GMCH_GMS_STOLEN_128M:
+-			stolen = 128 * 1024 * 1024;
+-			break;
+-		case INTEL_GMCH_GMS_STOLEN_256M:
+-			stolen = 256 * 1024 * 1024;
+-			break;
+-		case INTEL_GMCH_GMS_STOLEN_96M:
+-			stolen = 96 * 1024 * 1024;
+-			break;
+-		case INTEL_GMCH_GMS_STOLEN_160M:
+-			stolen = 160 * 1024 * 1024;
+-			break;
+-		case INTEL_GMCH_GMS_STOLEN_224M:
+-			stolen = 224 * 1024 * 1024;
+-			break;
+-		case INTEL_GMCH_GMS_STOLEN_352M:
+-			stolen = 352 * 1024 * 1024;
+-			break;
+-		default:
+-			DRM_ERROR("unexpected GMCH_GMS value: 0x%02x\n",
+-				  tmp & INTEL_GMCH_GMS_MASK);
+-			return -1;
+-		}
+-	}
+-
+-	*preallocated_size = stolen - overhead;
+-	*start = overhead;
+-
+-	return 0;
+-}
+-
+ #define PTE_ADDRESS_MASK		0xfffff000
+ #define PTE_ADDRESS_MASK_HIGH		0x000000f0 /* i915+ */
+ #define PTE_MAPPING_TYPE_UNCACHED	(0 << 1)
+@@ -1181,11 +1015,11 @@ static unsigned long i915_gtt_to_phys(struct drm_device *dev,
+ {
+ 	unsigned long *gtt;
+ 	unsigned long entry, phys;
+-	int gtt_bar = IS_I9XX(dev) ? 0 : 1;
++	int gtt_bar = IS_GEN2(dev) ? 1 : 0;
+ 	int gtt_offset, gtt_size;
+ 
+-	if (IS_I965G(dev)) {
+-		if (IS_G4X(dev) || IS_IRONLAKE(dev) || IS_GEN6(dev)) {
++	if (INTEL_INFO(dev)->gen >= 4) {
++		if (IS_G4X(dev) || INTEL_INFO(dev)->gen > 4) {
+ 			gtt_offset = 2*1024*1024;
+ 			gtt_size = 2*1024*1024;
+ 		} else {
+@@ -1210,10 +1044,8 @@ static unsigned long i915_gtt_to_phys(struct drm_device *dev,
+ 	DRM_DEBUG_DRIVER("GTT addr: 0x%08lx, PTE: 0x%08lx\n", gtt_addr, entry);
+ 
+ 	/* Mask out these reserved bits on this hardware. */
+-	if (!IS_I9XX(dev) || IS_I915G(dev) || IS_I915GM(dev) ||
+-	    IS_I945G(dev) || IS_I945GM(dev)) {
++	if (INTEL_INFO(dev)->gen < 4 && !IS_G33(dev))
+ 		entry &= ~PTE_ADDRESS_MASK_HIGH;
+-	}
+ 
+ 	/* If it's not a mapping type we know, then bail. */
+ 	if ((entry & PTE_MAPPING_TYPE_MASK) != PTE_MAPPING_TYPE_UNCACHED &&
+@@ -1252,7 +1084,7 @@ static void i915_setup_compression(struct drm_device *dev, int size)
+ 	unsigned long ll_base = 0;
+ 
+ 	/* Leave 1M for line length buffer & misc. */
+-	compressed_fb = drm_mm_search_free(&dev_priv->vram, size, 4096, 0);
++	compressed_fb = drm_mm_search_free(&dev_priv->mm.vram, size, 4096, 0);
+ 	if (!compressed_fb) {
+ 		dev_priv->no_fbc_reason = FBC_STOLEN_TOO_SMALL;
+ 		i915_warn_stolen(dev);
+@@ -1273,7 +1105,7 @@ static void i915_setup_compression(struct drm_device *dev, int size)
+ 	}
+ 
+ 	if (!(IS_GM45(dev) || IS_IRONLAKE_M(dev))) {
+-		compressed_llb = drm_mm_search_free(&dev_priv->vram, 4096,
++		compressed_llb = drm_mm_search_free(&dev_priv->mm.vram, 4096,
+ 						    4096, 0);
+ 		if (!compressed_llb) {
+ 			i915_warn_stolen(dev);
+@@ -1343,10 +1175,8 @@ static void i915_switcheroo_set_state(struct pci_dev *pdev, enum vga_switcheroo_
+ 		/* i915 resume handler doesn't set to D0 */
+ 		pci_set_power_state(dev->pdev, PCI_D0);
+ 		i915_resume(dev);
+-		drm_kms_helper_poll_enable(dev);
+ 	} else {
+ 		printk(KERN_ERR "i915: switched off\n");
+-		drm_kms_helper_poll_disable(dev);
+ 		i915_suspend(dev, pmm);
+ 	}
+ }
+@@ -1363,23 +1193,14 @@ static bool i915_switcheroo_can_switch(struct pci_dev *pdev)
+ }
+ 
+ static int i915_load_modeset_init(struct drm_device *dev,
+-				  unsigned long prealloc_start,
+ 				  unsigned long prealloc_size,
+ 				  unsigned long agp_size)
+ {
+ 	struct drm_i915_private *dev_priv = dev->dev_private;
+-	int fb_bar = IS_I9XX(dev) ? 2 : 0;
+ 	int ret = 0;
+ 
+-	dev->mode_config.fb_base = pci_resource_start(dev->pdev, fb_bar) &
+-		0xff000000;
+-
+-	/* Basic memrange allocator for stolen space (aka vram) */
+-	drm_mm_init(&dev_priv->vram, 0, prealloc_size);
+-	DRM_INFO("set up %ldM of stolen space\n", prealloc_size / (1024*1024));
+-
+-	/* We're off and running w/KMS */
+-	dev_priv->mm.suspended = 0;
++	/* Basic memrange allocator for stolen space (aka mm.vram) */
++	drm_mm_init(&dev_priv->mm.vram, 0, prealloc_size);
+ 
+ 	/* Let GEM Manage from end of prealloc space to end of aperture.
+ 	 *
+@@ -1414,7 +1235,7 @@ static int i915_load_modeset_init(struct drm_device *dev,
+ 	 */
+ 	dev_priv->allow_batchbuffer = 1;
+ 
+-	ret = intel_init_bios(dev);
++	ret = intel_parse_bios(dev);
+ 	if (ret)
+ 		DRM_INFO("failed to find VBIOS tables\n");
+ 
+@@ -1423,6 +1244,8 @@ static int i915_load_modeset_init(struct drm_device *dev,
+ 	if (ret)
+ 		goto cleanup_ringbuffer;
+ 
++	intel_register_dsm_handler();
++
+ 	ret = vga_switcheroo_register_client(dev->pdev,
+ 					     i915_switcheroo_set_state,
+ 					     i915_switcheroo_can_switch);
+@@ -1443,17 +1266,15 @@ static int i915_load_modeset_init(struct drm_device *dev,
+ 	/* FIXME: do pre/post-mode set stuff in core KMS code */
+ 	dev->vblank_disable_allowed = 1;
+ 
+-	/*
+-	 * Initialize the hardware status page IRQ location.
+-	 */
+-
+-	I915_WRITE(INSTPM, (1 << 5) | (1 << 21));
+-
+ 	ret = intel_fbdev_init(dev);
+ 	if (ret)
+ 		goto cleanup_irq;
+ 
+ 	drm_kms_helper_poll_init(dev);
++
++	/* We're off and running w/KMS */
++	dev_priv->mm.suspended = 0;
++
+ 	return 0;
+ 
+ cleanup_irq:
+@@ -1907,7 +1728,7 @@ static struct drm_i915_private *i915_mch_dev;
+  *   - dev_priv->fmax
+  *   - dev_priv->gpu_busy
+  */
+-DEFINE_SPINLOCK(mchdev_lock);
++static DEFINE_SPINLOCK(mchdev_lock);
+ 
+ /**
+  * i915_read_mch_val - return value for IPS use
+@@ -2062,7 +1883,7 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
+ 	struct drm_i915_private *dev_priv;
+ 	resource_size_t base, size;
+ 	int ret = 0, mmio_bar;
+-	uint32_t agp_size, prealloc_size, prealloc_start;
++	uint32_t agp_size, prealloc_size;
+ 	/* i915 has 4 more counters */
+ 	dev->counters += 4;
+ 	dev->types[6] = _DRM_STAT_IRQ;
+@@ -2079,7 +1900,7 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
+ 	dev_priv->info = (struct intel_device_info *) flags;
+ 
+ 	/* Add register map (needed for suspend/resume) */
+-	mmio_bar = IS_I9XX(dev) ? 0 : 1;
++	mmio_bar = IS_GEN2(dev) ? 1 : 0;
+ 	base = pci_resource_start(dev->pdev, mmio_bar);
+ 	size = pci_resource_len(dev->pdev, mmio_bar);
+ 
+@@ -2121,17 +1942,32 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
+ 			 "performance may suffer.\n");
+ 	}
+ 
+-	ret = i915_probe_agp(dev, &agp_size, &prealloc_size, &prealloc_start);
+-	if (ret)
++	dev_priv->mm.gtt = intel_gtt_get();
++	if (!dev_priv->mm.gtt) {
++		DRM_ERROR("Failed to initialize GTT\n");
++		ret = -ENODEV;
+ 		goto out_iomapfree;
+-
+-	if (prealloc_size > intel_max_stolen) {
+-		DRM_INFO("detected %dM stolen memory, trimming to %dM\n",
+-			 prealloc_size >> 20, intel_max_stolen >> 20);
+-		prealloc_size = intel_max_stolen;
+ 	}
+ 
+-	dev_priv->wq = create_singlethread_workqueue("i915");
++	prealloc_size = dev_priv->mm.gtt->gtt_stolen_entries << PAGE_SHIFT;
++	agp_size = dev_priv->mm.gtt->gtt_mappable_entries << PAGE_SHIFT;
++
++	/* The i915 workqueue is primarily used for batched retirement of
++	 * requests (and thus managing bo) once the task has been completed
++	 * by the GPU. i915_gem_retire_requests() is called directly when we
++	 * need high-priority retirement, such as waiting for an explicit
++	 * bo.
++	 *
++	 * It is also used for periodic low-priority events, such as
++	 * idle-timers and hangcheck.
++	 *
++	 * All tasks on the workqueue are expected to acquire the dev mutex
++	 * so there is no point in running more than one instance of the
++	 * workqueue at any time: max_active = 1 and NON_REENTRANT.
++	 */
++	dev_priv->wq = alloc_workqueue("i915",
++				       WQ_UNBOUND | WQ_NON_REENTRANT,
++				       1);
+ 	if (dev_priv->wq == NULL) {
+ 		DRM_ERROR("Failed to create our workqueue.\n");
+ 		ret = -ENOMEM;
+@@ -2159,13 +1995,18 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
+ 
+ 	dev->driver->get_vblank_counter = i915_get_vblank_counter;
+ 	dev->max_vblank_count = 0xffffff; /* only 24 bits of frame count */
+-	if (IS_G4X(dev) || IS_IRONLAKE(dev) || IS_GEN6(dev)) {
++	if (IS_G4X(dev) || IS_GEN5(dev) || IS_GEN6(dev)) {
+ 		dev->max_vblank_count = 0xffffffff; /* full 32 bit counter */
+ 		dev->driver->get_vblank_counter = gm45_get_vblank_counter;
+ 	}
+ 
+ 	/* Try to make sure MCHBAR is enabled before poking at it */
+ 	intel_setup_mchbar(dev);
++	intel_setup_gmbus(dev);
++	intel_opregion_setup(dev);
++
++	/* Make sure the bios did its job and set up vital registers */
++	intel_setup_bios(dev);
+ 
+ 	i915_gem_load(dev);
+ 
+@@ -2178,7 +2019,7 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
+ 
+ 	if (IS_PINEVIEW(dev))
+ 		i915_pineview_get_mem_freq(dev);
+-	else if (IS_IRONLAKE(dev))
++	else if (IS_GEN5(dev))
+ 		i915_ironlake_get_mem_freq(dev);
+ 
+ 	/* On the 945G/GM, the chipset reports the MSI capability on the
+@@ -2212,8 +2053,7 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
+ 	intel_detect_pch(dev);
+ 
+ 	if (drm_core_check_feature(dev, DRIVER_MODESET)) {
+-		ret = i915_load_modeset_init(dev, prealloc_start,
+-					     prealloc_size, agp_size);
++		ret = i915_load_modeset_init(dev, prealloc_size, agp_size);
+ 		if (ret < 0) {
+ 			DRM_ERROR("failed to init modeset\n");
+ 			goto out_workqueue_free;
+@@ -2221,7 +2061,8 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
+ 	}
+ 
+ 	/* Must be done after probing outputs */
+-	intel_opregion_init(dev, 0);
++	intel_opregion_init(dev);
++	acpi_video_register();
+ 
+ 	setup_timer(&dev_priv->hangcheck_timer, i915_hangcheck_elapsed,
+ 		    (unsigned long) dev);
+@@ -2231,9 +2072,6 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
+ 	dev_priv->mchdev_lock = &mchdev_lock;
+ 	spin_unlock(&mchdev_lock);
+ 
+-	/* XXX Prevent module unload due to memory corruption bugs. */
+-	__module_get(THIS_MODULE);
+-
+ 	return 0;
+ 
+ out_workqueue_free:
+@@ -2252,15 +2090,20 @@ free_priv:
+ int i915_driver_unload(struct drm_device *dev)
+ {
+ 	struct drm_i915_private *dev_priv = dev->dev_private;
+-
+-	i915_destroy_error_state(dev);
++	int ret;
+ 
+ 	spin_lock(&mchdev_lock);
+ 	i915_mch_dev = NULL;
+ 	spin_unlock(&mchdev_lock);
+ 
+-	destroy_workqueue(dev_priv->wq);
+-	del_timer_sync(&dev_priv->hangcheck_timer);
++	mutex_lock(&dev->struct_mutex);
++	ret = i915_gpu_idle(dev);
++	if (ret)
++		DRM_ERROR("failed to idle hardware: %d\n", ret);
++	mutex_unlock(&dev->struct_mutex);
++
++	/* Cancel the retire work handler, which should be idle now. */
++	cancel_delayed_work_sync(&dev_priv->mm.retire_work);
+ 
+ 	io_mapping_free(dev_priv->mm.gtt_mapping);
+ 	if (dev_priv->mm.gtt_mtrr >= 0) {
+@@ -2269,7 +2112,10 @@ int i915_driver_unload(struct drm_device *dev)
+ 		dev_priv->mm.gtt_mtrr = -1;
+ 	}
+ 
++	acpi_video_unregister();
++
+ 	if (drm_core_check_feature(dev, DRIVER_MODESET)) {
++		intel_fbdev_fini(dev);
+ 		intel_modeset_cleanup(dev);
+ 
+ 		/*
+@@ -2281,20 +2127,25 @@ int i915_driver_unload(struct drm_device *dev)
+ 			dev_priv->child_dev = NULL;
+ 			dev_priv->child_dev_num = 0;
+ 		}
+-		drm_irq_uninstall(dev);
++
+ 		vga_switcheroo_unregister_client(dev->pdev);
+ 		vga_client_register(dev->pdev, NULL, NULL, NULL);
+ 	}
+ 
++	/* Free error state after interrupts are fully disabled. */
++	del_timer_sync(&dev_priv->hangcheck_timer);
++	cancel_work_sync(&dev_priv->error_work);
++	i915_destroy_error_state(dev);
++
+ 	if (dev->pdev->msi_enabled)
+ 		pci_disable_msi(dev->pdev);
+ 
+-	if (dev_priv->regs != NULL)
+-		iounmap(dev_priv->regs);
+-
+-	intel_opregion_free(dev, 0);
++	intel_opregion_fini(dev);
+ 
+ 	if (drm_core_check_feature(dev, DRIVER_MODESET)) {
++		/* Flush any outstanding unpin_work. */
++		flush_workqueue(dev_priv->wq);
++
+ 		i915_gem_free_all_phys_object(dev);
+ 
+ 		mutex_lock(&dev->struct_mutex);
+@@ -2302,34 +2153,41 @@ int i915_driver_unload(struct drm_device *dev)
+ 		mutex_unlock(&dev->struct_mutex);
+ 		if (I915_HAS_FBC(dev) && i915_powersave)
+ 			i915_cleanup_compression(dev);
+-		drm_mm_takedown(&dev_priv->vram);
+-		i915_gem_lastclose(dev);
++		drm_mm_takedown(&dev_priv->mm.vram);
+ 
+ 		intel_cleanup_overlay(dev);
++
++		if (!I915_NEED_GFX_HWS(dev))
++			i915_free_hws(dev);
+ 	}
+ 
++	if (dev_priv->regs != NULL)
++		iounmap(dev_priv->regs);
++
++	intel_teardown_gmbus(dev);
+ 	intel_teardown_mchbar(dev);
+ 
++	destroy_workqueue(dev_priv->wq);
++
+ 	pci_dev_put(dev_priv->bridge_dev);
+ 	kfree(dev->dev_private);
+ 
+ 	return 0;
+ }
+ 
+-int i915_driver_open(struct drm_device *dev, struct drm_file *file_priv)
++int i915_driver_open(struct drm_device *dev, struct drm_file *file)
+ {
+-	struct drm_i915_file_private *i915_file_priv;
++	struct drm_i915_file_private *file_priv;
+ 
+ 	DRM_DEBUG_DRIVER("\n");
+-	i915_file_priv = (struct drm_i915_file_private *)
+-	    kmalloc(sizeof(*i915_file_priv), GFP_KERNEL);
+-
+-	if (!i915_file_priv)
++	file_priv = kmalloc(sizeof(*file_priv), GFP_KERNEL);
++	if (!file_priv)
+ 		return -ENOMEM;
+ 
+-	file_priv->driver_priv = i915_file_priv;
++	file->driver_priv = file_priv;
+ 
+-	INIT_LIST_HEAD(&i915_file_priv->mm.request_list);
++	spin_lock_init(&file_priv->mm.lock);
++	INIT_LIST_HEAD(&file_priv->mm.request_list);
+ 
+ 	return 0;
+ }
+@@ -2372,11 +2230,11 @@ void i915_driver_preclose(struct drm_device * dev, struct drm_file *file_priv)
+ 		i915_mem_release(dev, file_priv, dev_priv->agp_heap);
+ }
+ 
+-void i915_driver_postclose(struct drm_device *dev, struct drm_file *file_priv)
++void i915_driver_postclose(struct drm_device *dev, struct drm_file *file)
+ {
+-	struct drm_i915_file_private *i915_file_priv = file_priv->driver_priv;
++	struct drm_i915_file_private *file_priv = file->driver_priv;
+ 
+-	kfree(i915_file_priv);
++	kfree(file_priv);
+ }
+ 
+ struct drm_ioctl_desc i915_ioctls[] = {
+diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
+index 6dbe14c..027cbfc 100644
+--- a/drivers/gpu/drm/i915/i915_drv.c
++++ b/drivers/gpu/drm/i915/i915_drv.c
+@@ -32,6 +32,7 @@
+ #include "drm.h"
+ #include "i915_drm.h"
+ #include "i915_drv.h"
++#include "intel_drv.h"
+ 
+ #include <linux/console.h>
+ #include "drm_crtc_helper.h"
+@@ -43,7 +44,7 @@ unsigned int i915_fbpercrtc = 0;
+ module_param_named(fbpercrtc, i915_fbpercrtc, int, 0400);
+ 
+ unsigned int i915_powersave = 1;
+-module_param_named(powersave, i915_powersave, int, 0400);
++module_param_named(powersave, i915_powersave, int, 0600);
+ 
+ unsigned int i915_lvds_downclock = 0;
+ module_param_named(lvds_downclock, i915_lvds_downclock, int, 0400);
+@@ -61,86 +62,110 @@ extern int intel_agp_enabled;
+ 	.driver_data = (unsigned long) info }
+ 
+ static const struct intel_device_info intel_i830_info = {
+-	.gen = 2, .is_i8xx = 1, .is_mobile = 1, .cursor_needs_physical = 1,
++	.gen = 2, .is_mobile = 1, .cursor_needs_physical = 1,
++	.has_overlay = 1, .overlay_needs_physical = 1,
+ };
+ 
+ static const struct intel_device_info intel_845g_info = {
+-	.gen = 2, .is_i8xx = 1,
++	.gen = 2,
++	.has_overlay = 1, .overlay_needs_physical = 1,
+ };
+ 
+ static const struct intel_device_info intel_i85x_info = {
+-	.gen = 2, .is_i8xx = 1, .is_i85x = 1, .is_mobile = 1,
++	.gen = 2, .is_i85x = 1, .is_mobile = 1,
+ 	.cursor_needs_physical = 1,
++	.has_overlay = 1, .overlay_needs_physical = 1,
+ };
+ 
+ static const struct intel_device_info intel_i865g_info = {
+-	.gen = 2, .is_i8xx = 1,
++	.gen = 2,
++	.has_overlay = 1, .overlay_needs_physical = 1,
+ };
+ 
+ static const struct intel_device_info intel_i915g_info = {
+-	.gen = 3, .is_i915g = 1, .is_i9xx = 1, .cursor_needs_physical = 1,
++	.gen = 3, .is_i915g = 1, .cursor_needs_physical = 1,
++	.has_overlay = 1, .overlay_needs_physical = 1,
+ };
+ static const struct intel_device_info intel_i915gm_info = {
+-	.gen = 3, .is_i9xx = 1,  .is_mobile = 1,
++	.gen = 3, .is_mobile = 1,
+ 	.cursor_needs_physical = 1,
++	.has_overlay = 1, .overlay_needs_physical = 1,
++	.supports_tv = 1,
+ };
+ static const struct intel_device_info intel_i945g_info = {
+-	.gen = 3, .is_i9xx = 1, .has_hotplug = 1, .cursor_needs_physical = 1,
++	.gen = 3, .has_hotplug = 1, .cursor_needs_physical = 1,
++	.has_overlay = 1, .overlay_needs_physical = 1,
+ };
+ static const struct intel_device_info intel_i945gm_info = {
+-	.gen = 3, .is_i945gm = 1, .is_i9xx = 1, .is_mobile = 1,
++	.gen = 3, .is_i945gm = 1, .is_mobile = 1,
+ 	.has_hotplug = 1, .cursor_needs_physical = 1,
++	.has_overlay = 1, .overlay_needs_physical = 1,
++	.supports_tv = 1,
+ };
+ 
+ static const struct intel_device_info intel_i965g_info = {
+-	.gen = 4, .is_broadwater = 1, .is_i965g = 1, .is_i9xx = 1,
++	.gen = 4, .is_broadwater = 1,
+ 	.has_hotplug = 1,
++	.has_overlay = 1,
+ };
+ 
+ static const struct intel_device_info intel_i965gm_info = {
+-	.gen = 4, .is_crestline = 1, .is_i965g = 1, .is_i965gm = 1, .is_i9xx = 1,
++	.gen = 4, .is_crestline = 1,
+ 	.is_mobile = 1, .has_fbc = 1, .has_rc6 = 1, .has_hotplug = 1,
++	.has_overlay = 1,
++	.supports_tv = 1,
+ };
+ 
+ static const struct intel_device_info intel_g33_info = {
+-	.gen = 3, .is_g33 = 1, .is_i9xx = 1,
++	.gen = 3, .is_g33 = 1,
+ 	.need_gfx_hws = 1, .has_hotplug = 1,
++	.has_overlay = 1,
+ };
+ 
+ static const struct intel_device_info intel_g45_info = {
+-	.gen = 4, .is_i965g = 1, .is_g4x = 1, .is_i9xx = 1, .need_gfx_hws = 1,
++	.gen = 4, .is_g4x = 1, .need_gfx_hws = 1,
+ 	.has_pipe_cxsr = 1, .has_hotplug = 1,
++	.has_bsd_ring = 1,
+ };
+ 
+ static const struct intel_device_info intel_gm45_info = {
+-	.gen = 4, .is_i965g = 1, .is_g4x = 1, .is_i9xx = 1,
++	.gen = 4, .is_g4x = 1,
+ 	.is_mobile = 1, .need_gfx_hws = 1, .has_fbc = 1, .has_rc6 = 1,
+ 	.has_pipe_cxsr = 1, .has_hotplug = 1,
++	.supports_tv = 1,
++	.has_bsd_ring = 1,
+ };
+ 
+ static const struct intel_device_info intel_pineview_info = {
+-	.gen = 3, .is_g33 = 1, .is_pineview = 1, .is_mobile = 1, .is_i9xx = 1,
++	.gen = 3, .is_g33 = 1, .is_pineview = 1, .is_mobile = 1,
+ 	.need_gfx_hws = 1, .has_hotplug = 1,
++	.has_overlay = 1,
+ };
+ 
+ static const struct intel_device_info intel_ironlake_d_info = {
+-	.gen = 5, .is_ironlake = 1, .is_i965g = 1, .is_i9xx = 1,
++	.gen = 5,
+ 	.need_gfx_hws = 1, .has_pipe_cxsr = 1, .has_hotplug = 1,
++	.has_bsd_ring = 1,
+ };
+ 
+ static const struct intel_device_info intel_ironlake_m_info = {
+-	.gen = 5, .is_ironlake = 1, .is_mobile = 1, .is_i965g = 1, .is_i9xx = 1,
++	.gen = 5, .is_mobile = 1,
+ 	.need_gfx_hws = 1, .has_fbc = 1, .has_rc6 = 1, .has_hotplug = 1,
++	.has_bsd_ring = 1,
+ };
+ 
+ static const struct intel_device_info intel_sandybridge_d_info = {
+-	.gen = 6, .is_i965g = 1, .is_i9xx = 1,
++	.gen = 6,
+ 	.need_gfx_hws = 1, .has_hotplug = 1,
++	.has_bsd_ring = 1,
++	.has_blt_ring = 1,
+ };
+ 
+ static const struct intel_device_info intel_sandybridge_m_info = {
+-	.gen = 6, .is_i965g = 1, .is_mobile = 1, .is_i9xx = 1,
++	.gen = 6, .is_mobile = 1,
+ 	.need_gfx_hws = 1, .has_hotplug = 1,
++	.has_bsd_ring = 1,
++	.has_blt_ring = 1,
+ };
+ 
+ static const struct pci_device_id pciidlist[] = {		/* aka */
+@@ -237,7 +262,7 @@ static int i915_drm_freeze(struct drm_device *dev)
+ 
+ 	i915_save_state(dev);
+ 
+-	intel_opregion_free(dev, 1);
++	intel_opregion_fini(dev);
+ 
+ 	/* Modeset on resume, not lid events */
+ 	dev_priv->modeset_on_lid = 0;
+@@ -258,6 +283,8 @@ int i915_suspend(struct drm_device *dev, pm_message_t state)
+ 	if (state.event == PM_EVENT_PRETHAW)
+ 		return 0;
+ 
++	drm_kms_helper_poll_disable(dev);
++
+ 	error = i915_drm_freeze(dev);
+ 	if (error)
+ 		return error;
+@@ -277,8 +304,7 @@ static int i915_drm_thaw(struct drm_device *dev)
+ 	int error = 0;
+ 
+ 	i915_restore_state(dev);
+-
+-	intel_opregion_init(dev, 1);
++	intel_opregion_setup(dev);
+ 
+ 	/* KMS EnterVT equivalent */
+ 	if (drm_core_check_feature(dev, DRIVER_MODESET)) {
+@@ -294,6 +320,8 @@ static int i915_drm_thaw(struct drm_device *dev)
+ 		drm_helper_resume_force_mode(dev);
+ 	}
+ 
++	intel_opregion_init(dev);
++
+ 	dev_priv->modeset_on_lid = 0;
+ 
+ 	return error;
+@@ -301,12 +329,79 @@ static int i915_drm_thaw(struct drm_device *dev)
+ 
+ int i915_resume(struct drm_device *dev)
+ {
++	int ret;
++
+ 	if (pci_enable_device(dev->pdev))
+ 		return -EIO;
+ 
+ 	pci_set_master(dev->pdev);
+ 
+-	return i915_drm_thaw(dev);
++	ret = i915_drm_thaw(dev);
++	if (ret)
++		return ret;
++
++	drm_kms_helper_poll_enable(dev);
++	return 0;
++}
++
++static int i8xx_do_reset(struct drm_device *dev, u8 flags)
++{
++	struct drm_i915_private *dev_priv = dev->dev_private;
++
++	if (IS_I85X(dev))
++		return -ENODEV;
++
++	I915_WRITE(D_STATE, I915_READ(D_STATE) | DSTATE_GFX_RESET_I830);
++	POSTING_READ(D_STATE);
++
++	if (IS_I830(dev) || IS_845G(dev)) {
++		I915_WRITE(DEBUG_RESET_I830,
++			   DEBUG_RESET_DISPLAY |
++			   DEBUG_RESET_RENDER |
++			   DEBUG_RESET_FULL);
++		POSTING_READ(DEBUG_RESET_I830);
++		msleep(1);
++
++		I915_WRITE(DEBUG_RESET_I830, 0);
++		POSTING_READ(DEBUG_RESET_I830);
++	}
++
++	msleep(1);
++
++	I915_WRITE(D_STATE, I915_READ(D_STATE) & ~DSTATE_GFX_RESET_I830);
++	POSTING_READ(D_STATE);
++
++	return 0;
++}
++
++static int i965_reset_complete(struct drm_device *dev)
++{
++	u8 gdrst;
++	pci_read_config_byte(dev->pdev, I965_GDRST, &gdrst);
++	return gdrst & 0x1;
++}
++
++static int i965_do_reset(struct drm_device *dev, u8 flags)
++{
++	u8 gdrst;
++
++	/*
++	 * Set the domains we want to reset (GRDOM/bits 2 and 3) as
++	 * well as the reset bit (GR/bit 0).  Setting the GR bit
++	 * triggers the reset; when done, the hardware will clear it.
++	 */
++	pci_read_config_byte(dev->pdev, I965_GDRST, &gdrst);
++	pci_write_config_byte(dev->pdev, I965_GDRST, gdrst | flags | 0x1);
++
++	return wait_for(i965_reset_complete(dev), 500);
++}
++
++static int ironlake_do_reset(struct drm_device *dev, u8 flags)
++{
++	struct drm_i915_private *dev_priv = dev->dev_private;
++	u32 gdrst = I915_READ(MCHBAR_MIRROR_BASE + ILK_GDSR);
++	I915_WRITE(MCHBAR_MIRROR_BASE + ILK_GDSR, gdrst | flags | 0x1);
++	return wait_for(I915_READ(MCHBAR_MIRROR_BASE + ILK_GDSR) & 0x1, 500);
+ }
+ 
+ /**
+@@ -325,54 +420,39 @@ int i915_resume(struct drm_device *dev)
+  *   - re-init interrupt state
+  *   - re-init display
+  */
+-int i965_reset(struct drm_device *dev, u8 flags)
++int i915_reset(struct drm_device *dev, u8 flags)
+ {
+ 	drm_i915_private_t *dev_priv = dev->dev_private;
+-	unsigned long timeout;
+-	u8 gdrst;
+ 	/*
+ 	 * We really should only reset the display subsystem if we actually
+ 	 * need to
+ 	 */
+ 	bool need_display = true;
++	int ret;
+ 
+ 	mutex_lock(&dev->struct_mutex);
+ 
+-	/*
+-	 * Clear request list
+-	 */
+-	i915_gem_retire_requests(dev);
+-
+-	if (need_display)
+-		i915_save_display(dev);
+-
+-	if (IS_I965G(dev) || IS_G4X(dev)) {
+-		/*
+-		 * Set the domains we want to reset, then the reset bit (bit 0).
+-		 * Clear the reset bit after a while and wait for hardware status
+-		 * bit (bit 1) to be set
+-		 */
+-		pci_read_config_byte(dev->pdev, GDRST, &gdrst);
+-		pci_write_config_byte(dev->pdev, GDRST, gdrst | flags | ((flags == GDRST_FULL) ? 0x1 : 0x0));
+-		udelay(50);
+-		pci_write_config_byte(dev->pdev, GDRST, gdrst & 0xfe);
+-
+-		/* ...we don't want to loop forever though, 500ms should be plenty */
+-	       timeout = jiffies + msecs_to_jiffies(500);
+-		do {
+-			udelay(100);
+-			pci_read_config_byte(dev->pdev, GDRST, &gdrst);
+-		} while ((gdrst & 0x1) && time_after(timeout, jiffies));
+-
+-		if (gdrst & 0x1) {
+-			WARN(true, "i915: Failed to reset chip\n");
+-			mutex_unlock(&dev->struct_mutex);
+-			return -EIO;
+-		}
+-	} else {
+-		DRM_ERROR("Error occurred. Don't know how to reset this chip.\n");
++	i915_gem_reset(dev);
++
++	ret = -ENODEV;
++	if (get_seconds() - dev_priv->last_gpu_reset < 5) {
++		DRM_ERROR("GPU hanging too fast, declaring wedged!\n");
++	} else switch (INTEL_INFO(dev)->gen) {
++	case 5:
++		ret = ironlake_do_reset(dev, flags);
++		break;
++	case 4:
++		ret = i965_do_reset(dev, flags);
++		break;
++	case 2:
++		ret = i8xx_do_reset(dev, flags);
++		break;
++	}
++	dev_priv->last_gpu_reset = get_seconds();
++	if (ret) {
++		DRM_ERROR("Failed to reset chip.\n");
+ 		mutex_unlock(&dev->struct_mutex);
+-		return -ENODEV;
++		return ret;
+ 	}
+ 
+ 	/* Ok, now get things going again... */
+@@ -400,13 +480,19 @@ int i965_reset(struct drm_device *dev, u8 flags)
+ 		mutex_lock(&dev->struct_mutex);
+ 	}
+ 
++	mutex_unlock(&dev->struct_mutex);
++
+ 	/*
+-	 * Display needs restore too...
++	 * Perform a full modeset as on later generations, e.g. Ironlake, we may
++	 * need to retrain the display link and cannot just restore the register
++	 * values.
+ 	 */
+-	if (need_display)
+-		i915_restore_display(dev);
++	if (need_display) {
++		mutex_lock(&dev->mode_config.mutex);
++		drm_helper_resume_force_mode(dev);
++		mutex_unlock(&dev->mode_config.mutex);
++	}
+ 
+-	mutex_unlock(&dev->struct_mutex);
+ 	return 0;
+ }
+ 
+@@ -422,6 +508,8 @@ i915_pci_remove(struct pci_dev *pdev)
+ {
+ 	struct drm_device *dev = pci_get_drvdata(pdev);
+ 
++	pci_disable_device(pdev); /* core did previous enable */
++
+ 	drm_put_dev(dev);
+ }
+ 
+@@ -524,8 +612,6 @@ static struct drm_driver driver = {
+ 	.irq_uninstall = i915_driver_irq_uninstall,
+ 	.irq_handler = i915_driver_irq_handler,
+ 	.reclaim_buffers = drm_core_reclaim_buffers,
+-	.get_map_ofs = drm_core_get_map_ofs,
+-	.get_reg_ofs = drm_core_get_reg_ofs,
+ 	.master_create = i915_master_create,
+ 	.master_destroy = i915_master_destroy,
+ #if defined(CONFIG_DEBUG_FS)
+@@ -548,6 +634,7 @@ static struct drm_driver driver = {
+ #ifdef CONFIG_COMPAT
+ 		 .compat_ioctl = i915_compat_ioctl,
+ #endif
++		 .llseek = noop_llseek,
+ 	},
+ 
+ 	.pci_driver = {
+diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
+index af4a263..90414ae 100644
+--- a/drivers/gpu/drm/i915/i915_drv.h
++++ b/drivers/gpu/drm/i915/i915_drv.h
+@@ -34,6 +34,8 @@
+ #include "intel_bios.h"
+ #include "intel_ringbuffer.h"
+ #include <linux/io-mapping.h>
++#include <linux/i2c.h>
++#include <drm/intel-gtt.h>
+ 
+ /* General customization:
+  */
+@@ -73,11 +75,9 @@ enum plane {
+ #define DRIVER_PATCHLEVEL	0
+ 
+ #define WATCH_COHERENCY	0
+-#define WATCH_BUF	0
+ #define WATCH_EXEC	0
+-#define WATCH_LRU	0
+ #define WATCH_RELOC	0
+-#define WATCH_INACTIVE	0
++#define WATCH_LISTS	0
+ #define WATCH_PWRITE	0
+ 
+ #define I915_GEM_PHYS_CURSOR_0 1
+@@ -110,8 +110,9 @@ struct intel_opregion {
+ 	struct opregion_acpi *acpi;
+ 	struct opregion_swsci *swsci;
+ 	struct opregion_asle *asle;
+-	int enabled;
++	void *vbt;
+ };
++#define OPREGION_SIZE            (8*1024)
+ 
+ struct intel_overlay;
+ struct intel_overlay_error_state;
+@@ -125,13 +126,16 @@ struct drm_i915_master_private {
+ struct drm_i915_fence_reg {
+ 	struct drm_gem_object *obj;
+ 	struct list_head lru_list;
++	bool gpu;
+ };
+ 
+ struct sdvo_device_mapping {
++	u8 initialized;
+ 	u8 dvo_port;
+ 	u8 slave_addr;
+ 	u8 dvo_wiring;
+-	u8 initialized;
++	u8 i2c_pin;
++	u8 i2c_speed;
+ 	u8 ddc_pin;
+ };
+ 
+@@ -193,28 +197,29 @@ struct drm_i915_display_funcs {
+ struct intel_device_info {
+ 	u8 gen;
+ 	u8 is_mobile : 1;
+-	u8 is_i8xx : 1;
+ 	u8 is_i85x : 1;
+ 	u8 is_i915g : 1;
+-	u8 is_i9xx : 1;
+ 	u8 is_i945gm : 1;
+-	u8 is_i965g : 1;
+-	u8 is_i965gm : 1;
+ 	u8 is_g33 : 1;
+ 	u8 need_gfx_hws : 1;
+ 	u8 is_g4x : 1;
+ 	u8 is_pineview : 1;
+ 	u8 is_broadwater : 1;
+ 	u8 is_crestline : 1;
+-	u8 is_ironlake : 1;
+ 	u8 has_fbc : 1;
+ 	u8 has_rc6 : 1;
+ 	u8 has_pipe_cxsr : 1;
+ 	u8 has_hotplug : 1;
+ 	u8 cursor_needs_physical : 1;
++	u8 has_overlay : 1;
++	u8 overlay_needs_physical : 1;
++	u8 supports_tv : 1;
++	u8 has_bsd_ring : 1;
++	u8 has_blt_ring : 1;
+ };
+ 
+ enum no_fbc_reason {
++	FBC_NO_OUTPUT, /* no outputs enabled to compress */
+ 	FBC_STOLEN_TOO_SMALL, /* not enough space to hold compressed buffers */
+ 	FBC_UNSUPPORTED_MODE, /* interlace or doublescanned mode */
+ 	FBC_MODE_TOO_LARGE, /* mode too large for compression */
+@@ -241,9 +246,16 @@ typedef struct drm_i915_private {
+ 
+ 	void __iomem *regs;
+ 
++	struct intel_gmbus {
++		struct i2c_adapter adapter;
++		struct i2c_adapter *force_bit;
++		u32 reg0;
++	} *gmbus;
++
+ 	struct pci_dev *bridge_dev;
+ 	struct intel_ring_buffer render_ring;
+ 	struct intel_ring_buffer bsd_ring;
++	struct intel_ring_buffer blt_ring;
+ 	uint32_t next_seqno;
+ 
+ 	drm_dma_handle_t *status_page_dmah;
+@@ -263,6 +275,9 @@ typedef struct drm_i915_private {
+ 	int front_offset;
+ 	int current_page;
+ 	int page_flipping;
++#define I915_DEBUG_READ (1<<0)
++#define I915_DEBUG_WRITE (1<<1)
++	unsigned long debug_flags;
+ 
+ 	wait_queue_head_t irq_queue;
+ 	atomic_t irq_received;
+@@ -289,24 +304,21 @@ typedef struct drm_i915_private {
+ 	unsigned int sr01, adpa, ppcr, dvob, dvoc, lvds;
+ 	int vblank_pipe;
+ 	int num_pipe;
+-	u32 flush_rings;
+-#define FLUSH_RENDER_RING	0x1
+-#define FLUSH_BSD_RING		0x2
+ 
+ 	/* For hangcheck timer */
+-#define DRM_I915_HANGCHECK_PERIOD 75 /* in jiffies */
++#define DRM_I915_HANGCHECK_PERIOD 250 /* in ms */
+ 	struct timer_list hangcheck_timer;
+ 	int hangcheck_count;
+ 	uint32_t last_acthd;
+ 	uint32_t last_instdone;
+ 	uint32_t last_instdone1;
+ 
+-	struct drm_mm vram;
+-
+ 	unsigned long cfb_size;
+ 	unsigned long cfb_pitch;
++	unsigned long cfb_offset;
+ 	int cfb_fence;
+ 	int cfb_plane;
++	int cfb_y;
+ 
+ 	int irq_enabled;
+ 
+@@ -316,8 +328,7 @@ typedef struct drm_i915_private {
+ 	struct intel_overlay *overlay;
+ 
+ 	/* LVDS info */
+-	int backlight_duty_cycle;  /* restore backlight to this value */
+-	bool panel_wants_dither;
++	int backlight_level;  /* restore backlight to this value */
+ 	struct drm_display_mode *panel_fixed_mode;
+ 	struct drm_display_mode *lfp_lvds_vbt_mode; /* if any */
+ 	struct drm_display_mode *sdvo_lvds_vbt_mode; /* if any */
+@@ -328,13 +339,23 @@ typedef struct drm_i915_private {
+ 	unsigned int lvds_vbt:1;
+ 	unsigned int int_crt_support:1;
+ 	unsigned int lvds_use_ssc:1;
+-	unsigned int edp_support:1;
+ 	int lvds_ssc_freq;
+-	int edp_bpp;
++	struct {
++		int rate;
++		int lanes;
++		int preemphasis;
++		int vswing;
++
++		bool initialized;
++		bool support;
++		int bpp;
++		struct edp_power_seq pps;
++	} edp;
++	bool no_aux_handshake;
+ 
+ 	struct notifier_block lid_notifier;
+ 
+-	int crt_ddc_bus; /* 0 = unknown, else GPIO to use for CRT DDC */
++	int crt_ddc_pin;
+ 	struct drm_i915_fence_reg fence_regs[16]; /* assume 965 */
+ 	int fence_reg_start; /* 4 if userland hasn't ioctl'd us yet */
+ 	int num_fence_regs; /* 8 on pre-965, 16 otherwise */
+@@ -344,6 +365,7 @@ typedef struct drm_i915_private {
+ 	spinlock_t error_lock;
+ 	struct drm_i915_error_state *first_error;
+ 	struct work_struct error_work;
++	struct completion error_completion;
+ 	struct workqueue_struct *wq;
+ 
+ 	/* Display functions */
+@@ -507,6 +529,11 @@ typedef struct drm_i915_private {
+ 	u32 saveMCHBAR_RENDER_STANDBY;
+ 
+ 	struct {
++		/** Bridge to intel-gtt-ko */
++		struct intel_gtt *gtt;
++		/** Memory allocator for GTT stolen memory */
++		struct drm_mm vram;
++		/** Memory allocator for GTT */
+ 		struct drm_mm gtt_space;
+ 
+ 		struct io_mapping *gtt_mapping;
+@@ -521,7 +548,16 @@ typedef struct drm_i915_private {
+ 		 */
+ 		struct list_head shrink_list;
+ 
+-		spinlock_t active_list_lock;
++		/**
++		 * List of objects currently involved in rendering.
++		 *
++		 * Includes buffers having the contents of their GPU caches
++		 * flushed, not necessarily primitives.  last_rendering_seqno
++		 * represents when the rendering involved will be completed.
++		 *
++		 * A reference is held on the buffer while on this list.
++		 */
++		struct list_head active_list;
+ 
+ 		/**
+ 		 * List of objects which are not in the ringbuffer but which
+@@ -535,15 +571,6 @@ typedef struct drm_i915_private {
+ 		struct list_head flushing_list;
+ 
+ 		/**
+-		 * List of objects currently pending a GPU write flush.
+-		 *
+-		 * All elements on this list will belong to either the
+-		 * active_list or flushing_list, last_rendering_seqno can
+-		 * be used to differentiate between the two elements.
+-		 */
+-		struct list_head gpu_write_list;
+-
+-		/**
+ 		 * LRU list of objects which are not in the ringbuffer and
+ 		 * are ready to unbind, but are still in the GTT.
+ 		 *
+@@ -555,6 +582,12 @@ typedef struct drm_i915_private {
+ 		 */
+ 		struct list_head inactive_list;
+ 
++		/**
++		 * LRU list of objects which are not in the ringbuffer but
++		 * are still pinned in the GTT.
++		 */
++		struct list_head pinned_list;
++
+ 		/** LRU list of objects with fence regs on them. */
+ 		struct list_head fence_list;
+ 
+@@ -611,6 +644,17 @@ typedef struct drm_i915_private {
+ 
+ 		/* storage for physical objects */
+ 		struct drm_i915_gem_phys_object *phys_objs[I915_MAX_PHYS_OBJECT];
++
++		uint32_t flush_rings;
++
++		/* accounting, useful for userland debugging */
++		size_t object_memory;
++		size_t pin_memory;
++		size_t gtt_memory;
++		size_t gtt_total;
++		u32 object_count;
++		u32 pin_count;
++		u32 gtt_count;
+ 	} mm;
+ 	struct sdvo_device_mapping sdvo_mappings[2];
+ 	/* indicate whether the LVDS_BORDER should be enabled or not */
+@@ -626,8 +670,6 @@ typedef struct drm_i915_private {
+ 	/* Reclocking support */
+ 	bool render_reclock_avail;
+ 	bool lvds_downclock_avail;
+-	/* indicate whether the LVDS EDID is OK */
+-	bool lvds_edid_good;
+ 	/* indicates the reduced downclock for LVDS*/
+ 	int lvds_downclock;
+ 	struct work_struct idle_work;
+@@ -661,6 +703,8 @@ typedef struct drm_i915_private {
+ 	struct drm_mm_node *compressed_fb;
+ 	struct drm_mm_node *compressed_llb;
+ 
++	unsigned long last_gpu_reset;
++
+ 	/* list of fbdev register on this device */
+ 	struct intel_fbdev *fbdev;
+ } drm_i915_private_t;
+@@ -673,7 +717,8 @@ struct drm_i915_gem_object {
+ 	struct drm_mm_node *gtt_space;
+ 
+ 	/** This object's place on the active/flushing/inactive lists */
+-	struct list_head list;
++	struct list_head ring_list;
++	struct list_head mm_list;
+ 	/** This object's place on GPU write list */
+ 	struct list_head gpu_write_list;
+ 	/** This object's place on eviction list */
+@@ -816,12 +861,14 @@ struct drm_i915_gem_request {
+ 	/** global list entry for this request */
+ 	struct list_head list;
+ 
++	struct drm_i915_file_private *file_priv;
+ 	/** file_priv list entry for this request */
+ 	struct list_head client_list;
+ };
+ 
+ struct drm_i915_file_private {
+ 	struct {
++		struct spinlock lock;
+ 		struct list_head request_list;
+ 	} mm;
+ };
+@@ -862,7 +909,7 @@ extern long i915_compat_ioctl(struct file *filp, unsigned int cmd,
+ extern int i915_emit_box(struct drm_device *dev,
+ 			 struct drm_clip_rect *boxes,
+ 			 int i, int DR1, int DR4);
+-extern int i965_reset(struct drm_device *dev, u8 flags);
++extern int i915_reset(struct drm_device *dev, u8 flags);
+ extern unsigned long i915_chipset_val(struct drm_i915_private *dev_priv);
+ extern unsigned long i915_mch_val(struct drm_i915_private *dev_priv);
+ extern unsigned long i915_gfx_val(struct drm_i915_private *dev_priv);
+@@ -871,7 +918,6 @@ extern void i915_update_gfx_val(struct drm_i915_private *dev_priv);
+ 
+ /* i915_irq.c */
+ void i915_hangcheck_elapsed(unsigned long data);
+-void i915_destroy_error_state(struct drm_device *dev);
+ extern int i915_irq_emit(struct drm_device *dev, void *data,
+ 			 struct drm_file *file_priv);
+ extern int i915_irq_wait(struct drm_device *dev, void *data,
+@@ -908,6 +954,12 @@ i915_disable_pipestat(drm_i915_private_t *dev_priv, int pipe, u32 mask);
+ 
+ void intel_enable_asle (struct drm_device *dev);
+ 
++#ifdef CONFIG_DEBUG_FS
++extern void i915_destroy_error_state(struct drm_device *dev);
++#else
++#define i915_destroy_error_state(x)
++#endif
++
+ 
+ /* i915_mem.c */
+ extern int i915_mem_alloc(struct drm_device *dev, void *data,
+@@ -922,6 +974,7 @@ extern void i915_mem_takedown(struct mem_block **heap);
+ extern void i915_mem_release(struct drm_device * dev,
+ 			     struct drm_file *file_priv, struct mem_block *heap);
+ /* i915_gem.c */
++int i915_gem_check_is_wedged(struct drm_device *dev);
+ int i915_gem_init_ioctl(struct drm_device *dev, void *data,
+ 			struct drm_file *file_priv);
+ int i915_gem_create_ioctl(struct drm_device *dev, void *data,
+@@ -972,13 +1025,22 @@ void i915_gem_object_unpin(struct drm_gem_object *obj);
+ int i915_gem_object_unbind(struct drm_gem_object *obj);
+ void i915_gem_release_mmap(struct drm_gem_object *obj);
+ void i915_gem_lastclose(struct drm_device *dev);
+-uint32_t i915_get_gem_seqno(struct drm_device *dev,
+-		struct intel_ring_buffer *ring);
+-bool i915_seqno_passed(uint32_t seq1, uint32_t seq2);
+-int i915_gem_object_get_fence_reg(struct drm_gem_object *obj);
+-int i915_gem_object_put_fence_reg(struct drm_gem_object *obj);
++
++/**
++ * Returns true if seq1 is later than seq2.
++ */
++static inline bool
++i915_seqno_passed(uint32_t seq1, uint32_t seq2)
++{
++	return (int32_t)(seq1 - seq2) >= 0;
++}
++
++int i915_gem_object_get_fence_reg(struct drm_gem_object *obj,
++				  bool interruptible);
++int i915_gem_object_put_fence_reg(struct drm_gem_object *obj,
++				  bool interruptible);
+ void i915_gem_retire_requests(struct drm_device *dev);
+-void i915_gem_retire_work_handler(struct work_struct *work);
++void i915_gem_reset(struct drm_device *dev);
+ void i915_gem_clflush_object(struct drm_gem_object *obj);
+ int i915_gem_object_set_domain(struct drm_gem_object *obj,
+ 			       uint32_t read_domains,
+@@ -990,16 +1052,18 @@ int i915_gem_do_init(struct drm_device *dev, unsigned long start,
+ int i915_gpu_idle(struct drm_device *dev);
+ int i915_gem_idle(struct drm_device *dev);
+ uint32_t i915_add_request(struct drm_device *dev,
+-		struct drm_file *file_priv,
+-		uint32_t flush_domains,
+-		struct intel_ring_buffer *ring);
++			  struct drm_file *file_priv,
++			  struct drm_i915_gem_request *request,
++			  struct intel_ring_buffer *ring);
+ int i915_do_wait_request(struct drm_device *dev,
+-		uint32_t seqno, int interruptible,
+-		struct intel_ring_buffer *ring);
++			 uint32_t seqno,
++			 bool interruptible,
++			 struct intel_ring_buffer *ring);
+ int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf);
+ int i915_gem_object_set_to_gtt_domain(struct drm_gem_object *obj,
+ 				      int write);
+-int i915_gem_object_set_to_display_plane(struct drm_gem_object *obj);
++int i915_gem_object_set_to_display_plane(struct drm_gem_object *obj,
++					 bool pipelined);
+ int i915_gem_attach_phys_object(struct drm_device *dev,
+ 				struct drm_gem_object *obj,
+ 				int id,
+@@ -1007,10 +1071,7 @@ int i915_gem_attach_phys_object(struct drm_device *dev,
+ void i915_gem_detach_phys_object(struct drm_device *dev,
+ 				 struct drm_gem_object *obj);
+ void i915_gem_free_all_phys_object(struct drm_device *dev);
+-int i915_gem_object_get_pages(struct drm_gem_object *obj, gfp_t gfpmask);
+-void i915_gem_object_put_pages(struct drm_gem_object *obj);
+ void i915_gem_release(struct drm_device * dev, struct drm_file *file_priv);
+-int i915_gem_object_flush_write_domain(struct drm_gem_object *obj);
+ 
+ void i915_gem_shrinker_init(void);
+ void i915_gem_shrinker_exit(void);
+@@ -1032,15 +1093,14 @@ bool i915_gem_object_fence_offset_ok(struct drm_gem_object *obj,
+ /* i915_gem_debug.c */
+ void i915_gem_dump_object(struct drm_gem_object *obj, int len,
+ 			  const char *where, uint32_t mark);
+-#if WATCH_INACTIVE
+-void i915_verify_inactive(struct drm_device *dev, char *file, int line);
++#if WATCH_LISTS
++int i915_verify_lists(struct drm_device *dev);
+ #else
+-#define i915_verify_inactive(dev, file, line)
++#define i915_verify_lists(dev) 0
+ #endif
+ void i915_gem_object_check_coherency(struct drm_gem_object *obj, int handle);
+ void i915_gem_dump_object(struct drm_gem_object *obj, int len,
+ 			  const char *where, uint32_t mark);
+-void i915_dump_lru(struct drm_device *dev, const char *where);
+ 
+ /* i915_debugfs.c */
+ int i915_debugfs_init(struct drm_minor *minor);
+@@ -1054,21 +1114,42 @@ extern int i915_restore_state(struct drm_device *dev);
+ extern int i915_save_state(struct drm_device *dev);
+ extern int i915_restore_state(struct drm_device *dev);
+ 
++/* intel_i2c.c */
++extern int intel_setup_gmbus(struct drm_device *dev);
++extern void intel_teardown_gmbus(struct drm_device *dev);
++extern void intel_gmbus_set_speed(struct i2c_adapter *adapter, int speed);
++extern void intel_gmbus_force_bit(struct i2c_adapter *adapter, bool force_bit);
++extern inline bool intel_gmbus_is_forced_bit(struct i2c_adapter *adapter)
++{
++	return container_of(adapter, struct intel_gmbus, adapter)->force_bit;
++}
++extern void intel_i2c_reset(struct drm_device *dev);
++
++/* intel_opregion.c */
++extern int intel_opregion_setup(struct drm_device *dev);
+ #ifdef CONFIG_ACPI
+-/* i915_opregion.c */
+-extern int intel_opregion_init(struct drm_device *dev, int resume);
+-extern void intel_opregion_free(struct drm_device *dev, int suspend);
+-extern void opregion_asle_intr(struct drm_device *dev);
+-extern void ironlake_opregion_gse_intr(struct drm_device *dev);
+-extern void opregion_enable_asle(struct drm_device *dev);
++extern void intel_opregion_init(struct drm_device *dev);
++extern void intel_opregion_fini(struct drm_device *dev);
++extern void intel_opregion_asle_intr(struct drm_device *dev);
++extern void intel_opregion_gse_intr(struct drm_device *dev);
++extern void intel_opregion_enable_asle(struct drm_device *dev);
+ #else
+-static inline int intel_opregion_init(struct drm_device *dev, int resume) { return 0; }
+-static inline void intel_opregion_free(struct drm_device *dev, int suspend) { return; }
+-static inline void opregion_asle_intr(struct drm_device *dev) { return; }
+-static inline void ironlake_opregion_gse_intr(struct drm_device *dev) { return; }
+-static inline void opregion_enable_asle(struct drm_device *dev) { return; }
++static inline void intel_opregion_init(struct drm_device *dev) { return; }
++static inline void intel_opregion_fini(struct drm_device *dev) { return; }
++static inline void intel_opregion_asle_intr(struct drm_device *dev) { return; }
++static inline void intel_opregion_gse_intr(struct drm_device *dev) { return; }
++static inline void intel_opregion_enable_asle(struct drm_device *dev) { return; }
+ #endif
+ 
++/* intel_acpi.c */
++#ifdef CONFIG_ACPI
++extern void intel_register_dsm_handler(void);
++extern void intel_unregister_dsm_handler(void);
++#else
++static inline void intel_register_dsm_handler(void) { return; }
++static inline void intel_unregister_dsm_handler(void) { return; }
++#endif /* CONFIG_ACPI */
++
+ /* modesetting */
+ extern void intel_modeset_init(struct drm_device *dev);
+ extern void intel_modeset_cleanup(struct drm_device *dev);
+@@ -1084,8 +1165,10 @@ extern void intel_detect_pch (struct drm_device *dev);
+ extern int intel_trans_dp_port_sel (struct drm_crtc *crtc);
+ 
+ /* overlay */
++#ifdef CONFIG_DEBUG_FS
+ extern struct intel_overlay_error_state *intel_overlay_capture_error_state(struct drm_device *dev);
+ extern void intel_overlay_print_error_state(struct seq_file *m, struct intel_overlay_error_state *error);
++#endif
+ 
+ /**
+  * Lock test for when it's just for synchronization of ring access.
+@@ -1099,8 +1182,26 @@ extern void intel_overlay_print_error_state(struct seq_file *m, struct intel_ove
+ 		LOCK_TEST_WITH_RETURN(dev, file_priv);			\
+ } while (0)
+ 
+-#define I915_READ(reg)          readl(dev_priv->regs + (reg))
+-#define I915_WRITE(reg, val)     writel(val, dev_priv->regs + (reg))
++static inline u32 i915_read(struct drm_i915_private *dev_priv, u32 reg)
++{
++	u32 val;
++
++	val = readl(dev_priv->regs + reg);
++	if (dev_priv->debug_flags & I915_DEBUG_READ)
++		printk(KERN_ERR "read 0x%08x from 0x%08x\n", val, reg);
++	return val;
++}
++
++static inline void i915_write(struct drm_i915_private *dev_priv, u32 reg,
++			      u32 val)
++{
++	writel(val, dev_priv->regs + reg);
++	if (dev_priv->debug_flags & I915_DEBUG_WRITE)
++		printk(KERN_ERR "wrote 0x%08x to 0x%08x\n", val, reg);
++}
++
++#define I915_READ(reg)          i915_read(dev_priv, (reg))
++#define I915_WRITE(reg, val)    i915_write(dev_priv, (reg), (val))
+ #define I915_READ16(reg)	readw(dev_priv->regs + (reg))
+ #define I915_WRITE16(reg, val)	writel(val, dev_priv->regs + (reg))
+ #define I915_READ8(reg)		readb(dev_priv->regs + (reg))
+@@ -1110,6 +1211,11 @@ extern void intel_overlay_print_error_state(struct seq_file *m, struct intel_ove
+ #define POSTING_READ(reg)	(void)I915_READ(reg)
+ #define POSTING_READ16(reg)	(void)I915_READ16(reg)
+ 
++#define I915_DEBUG_ENABLE_IO() (dev_priv->debug_flags |= I915_DEBUG_READ | \
++				I915_DEBUG_WRITE)
++#define I915_DEBUG_DISABLE_IO() (dev_priv->debug_flags &= ~(I915_DEBUG_READ | \
++							    I915_DEBUG_WRITE))
++
+ #define I915_VERBOSE 0
+ 
+ #define BEGIN_LP_RING(n)  do { \
+@@ -1166,8 +1272,6 @@ extern void intel_overlay_print_error_state(struct seq_file *m, struct intel_ove
+ #define IS_I915GM(dev)		((dev)->pci_device == 0x2592)
+ #define IS_I945G(dev)		((dev)->pci_device == 0x2772)
+ #define IS_I945GM(dev)		(INTEL_INFO(dev)->is_i945gm)
+-#define IS_I965G(dev)		(INTEL_INFO(dev)->is_i965g)
+-#define IS_I965GM(dev)		(INTEL_INFO(dev)->is_i965gm)
+ #define IS_BROADWATER(dev)	(INTEL_INFO(dev)->is_broadwater)
+ #define IS_CRESTLINE(dev)	(INTEL_INFO(dev)->is_crestline)
+ #define IS_GM45(dev)		((dev)->pci_device == 0x2A42)
+@@ -1178,8 +1282,6 @@ extern void intel_overlay_print_error_state(struct seq_file *m, struct intel_ove
+ #define IS_G33(dev)		(INTEL_INFO(dev)->is_g33)
+ #define IS_IRONLAKE_D(dev)	((dev)->pci_device == 0x0042)
+ #define IS_IRONLAKE_M(dev)	((dev)->pci_device == 0x0046)
+-#define IS_IRONLAKE(dev)	(INTEL_INFO(dev)->is_ironlake)
+-#define IS_I9XX(dev)		(INTEL_INFO(dev)->is_i9xx)
+ #define IS_MOBILE(dev)		(INTEL_INFO(dev)->is_mobile)
+ 
+ #define IS_GEN2(dev)	(INTEL_INFO(dev)->gen == 2)
+@@ -1188,36 +1290,38 @@ extern void intel_overlay_print_error_state(struct seq_file *m, struct intel_ove
+ #define IS_GEN5(dev)	(INTEL_INFO(dev)->gen == 5)
+ #define IS_GEN6(dev)	(INTEL_INFO(dev)->gen == 6)
+ 
+-#define HAS_BSD(dev)            (IS_IRONLAKE(dev) || IS_G4X(dev))
++#define HAS_BSD(dev)            (INTEL_INFO(dev)->has_bsd_ring)
++#define HAS_BLT(dev)            (INTEL_INFO(dev)->has_blt_ring)
+ #define I915_NEED_GFX_HWS(dev)	(INTEL_INFO(dev)->need_gfx_hws)
+ 
++#define HAS_OVERLAY(dev) 		(INTEL_INFO(dev)->has_overlay)
++#define OVERLAY_NEEDS_PHYSICAL(dev)	(INTEL_INFO(dev)->overlay_needs_physical)
++
+ /* With the 945 and later, Y tiling got adjusted so that it was 32 128-byte
+  * rows, which changed the alignment requirements and fence programming.
+  */
+-#define HAS_128_BYTE_Y_TILING(dev) (IS_I9XX(dev) && !(IS_I915G(dev) || \
++#define HAS_128_BYTE_Y_TILING(dev) (!IS_GEN2(dev) && !(IS_I915G(dev) || \
+ 						      IS_I915GM(dev)))
+-#define SUPPORTS_DIGITAL_OUTPUTS(dev)	(IS_I9XX(dev) && !IS_PINEVIEW(dev))
+-#define SUPPORTS_INTEGRATED_HDMI(dev)	(IS_G4X(dev) || IS_IRONLAKE(dev))
+-#define SUPPORTS_INTEGRATED_DP(dev)	(IS_G4X(dev) || IS_IRONLAKE(dev))
++#define SUPPORTS_DIGITAL_OUTPUTS(dev)	(!IS_GEN2(dev) && !IS_PINEVIEW(dev))
++#define SUPPORTS_INTEGRATED_HDMI(dev)	(IS_G4X(dev) || IS_GEN5(dev))
++#define SUPPORTS_INTEGRATED_DP(dev)	(IS_G4X(dev) || IS_GEN5(dev))
+ #define SUPPORTS_EDP(dev)		(IS_IRONLAKE_M(dev))
+-#define SUPPORTS_TV(dev)		(IS_I9XX(dev) && IS_MOBILE(dev) && \
+-					!IS_IRONLAKE(dev) && !IS_PINEVIEW(dev) && \
+-					!IS_GEN6(dev))
++#define SUPPORTS_TV(dev)		(INTEL_INFO(dev)->supports_tv)
+ #define I915_HAS_HOTPLUG(dev)		 (INTEL_INFO(dev)->has_hotplug)
+ /* dsparb controlled by hw only */
+ #define DSPARB_HWCONTROL(dev) (IS_G4X(dev) || IS_IRONLAKE(dev))
+ 
+-#define HAS_FW_BLC(dev) (IS_I9XX(dev) || IS_G4X(dev) || IS_IRONLAKE(dev))
++#define HAS_FW_BLC(dev) (INTEL_INFO(dev)->gen > 2)
+ #define HAS_PIPE_CXSR(dev) (INTEL_INFO(dev)->has_pipe_cxsr)
+ #define I915_HAS_FBC(dev) (INTEL_INFO(dev)->has_fbc)
+ #define I915_HAS_RC6(dev) (INTEL_INFO(dev)->has_rc6)
+ 
+-#define HAS_PCH_SPLIT(dev) (IS_IRONLAKE(dev) ||	\
+-			    IS_GEN6(dev))
+-#define HAS_PIPE_CONTROL(dev) (IS_IRONLAKE(dev) || IS_GEN6(dev))
++#define HAS_PCH_SPLIT(dev) (IS_GEN5(dev) || IS_GEN6(dev))
++#define HAS_PIPE_CONTROL(dev) (IS_GEN5(dev) || IS_GEN6(dev))
+ 
+ #define INTEL_PCH_TYPE(dev) (((struct drm_i915_private *)(dev)->dev_private)->pch_type)
+ #define HAS_PCH_CPT(dev) (INTEL_PCH_TYPE(dev) == PCH_CPT)
++#define HAS_PCH_IBX(dev) (INTEL_PCH_TYPE(dev) == PCH_IBX)
+ 
+ #define PRIMARY_RINGBUFFER_SIZE         (128*1024)
+ 
+diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
+index 90b1d67..6da2c6d 100644
+--- a/drivers/gpu/drm/i915/i915_gem.c
++++ b/drivers/gpu/drm/i915/i915_gem.c
+@@ -37,7 +37,9 @@
+ #include <linux/intel-gtt.h>
+ 
+ static uint32_t i915_gem_get_gtt_alignment(struct drm_gem_object *obj);
+-static int i915_gem_object_flush_gpu_write_domain(struct drm_gem_object *obj);
++
++static int i915_gem_object_flush_gpu_write_domain(struct drm_gem_object *obj,
++						  bool pipelined);
+ static void i915_gem_object_flush_gtt_write_domain(struct drm_gem_object *obj);
+ static void i915_gem_object_flush_cpu_write_domain(struct drm_gem_object *obj);
+ static int i915_gem_object_set_to_cpu_domain(struct drm_gem_object *obj,
+@@ -46,7 +48,8 @@ static int i915_gem_object_set_cpu_read_domain_range(struct drm_gem_object *obj,
+ 						     uint64_t offset,
+ 						     uint64_t size);
+ static void i915_gem_object_set_to_full_cpu_read_domain(struct drm_gem_object *obj);
+-static int i915_gem_object_wait_rendering(struct drm_gem_object *obj);
++static int i915_gem_object_wait_rendering(struct drm_gem_object *obj,
++					  bool interruptible);
+ static int i915_gem_object_bind_to_gtt(struct drm_gem_object *obj,
+ 					   unsigned alignment);
+ static void i915_gem_clear_fence_reg(struct drm_gem_object *obj);
+@@ -55,9 +58,111 @@ static int i915_gem_phys_pwrite(struct drm_device *dev, struct drm_gem_object *o
+ 				struct drm_file *file_priv);
+ static void i915_gem_free_object_tail(struct drm_gem_object *obj);
+ 
++static int
++i915_gem_object_get_pages(struct drm_gem_object *obj,
++			  gfp_t gfpmask);
++
++static void
++i915_gem_object_put_pages(struct drm_gem_object *obj);
++
+ static LIST_HEAD(shrink_list);
+ static DEFINE_SPINLOCK(shrink_list_lock);
+ 
++/* some bookkeeping */
++static void i915_gem_info_add_obj(struct drm_i915_private *dev_priv,
++				  size_t size)
++{
++	dev_priv->mm.object_count++;
++	dev_priv->mm.object_memory += size;
++}
++
++static void i915_gem_info_remove_obj(struct drm_i915_private *dev_priv,
++				     size_t size)
++{
++	dev_priv->mm.object_count--;
++	dev_priv->mm.object_memory -= size;
++}
++
++static void i915_gem_info_add_gtt(struct drm_i915_private *dev_priv,
++				  size_t size)
++{
++	dev_priv->mm.gtt_count++;
++	dev_priv->mm.gtt_memory += size;
++}
++
++static void i915_gem_info_remove_gtt(struct drm_i915_private *dev_priv,
++				     size_t size)
++{
++	dev_priv->mm.gtt_count--;
++	dev_priv->mm.gtt_memory -= size;
++}
++
++static void i915_gem_info_add_pin(struct drm_i915_private *dev_priv,
++				  size_t size)
++{
++	dev_priv->mm.pin_count++;
++	dev_priv->mm.pin_memory += size;
++}
++
++static void i915_gem_info_remove_pin(struct drm_i915_private *dev_priv,
++				     size_t size)
++{
++	dev_priv->mm.pin_count--;
++	dev_priv->mm.pin_memory -= size;
++}
++
++int
++i915_gem_check_is_wedged(struct drm_device *dev)
++{
++	struct drm_i915_private *dev_priv = dev->dev_private;
++	struct completion *x = &dev_priv->error_completion;
++	unsigned long flags;
++	int ret;
++
++	if (!atomic_read(&dev_priv->mm.wedged))
++		return 0;
++
++	ret = wait_for_completion_interruptible(x);
++	if (ret)
++		return ret;
++
++	/* Success, we reset the GPU! */
++	if (!atomic_read(&dev_priv->mm.wedged))
++		return 0;
++
++	/* GPU is hung, bump the completion count to account for
++	 * the token we just consumed so that we never hit zero and
++	 * end up waiting upon a subsequent completion event that
++	 * will never happen.
++	 */
++	spin_lock_irqsave(&x->wait.lock, flags);
++	x->done++;
++	spin_unlock_irqrestore(&x->wait.lock, flags);
++	return -EIO;
++}
++
++static int i915_mutex_lock_interruptible(struct drm_device *dev)
++{
++	struct drm_i915_private *dev_priv = dev->dev_private;
++	int ret;
++
++	ret = i915_gem_check_is_wedged(dev);
++	if (ret)
++		return ret;
++
++	ret = mutex_lock_interruptible(&dev->struct_mutex);
++	if (ret)
++		return ret;
++
++	if (atomic_read(&dev_priv->mm.wedged)) {
++		mutex_unlock(&dev->struct_mutex);
++		return -EAGAIN;
++	}
++
++	WARN_ON(i915_verify_lists(dev));
++	return 0;
++}
++
+ static inline bool
+ i915_gem_object_is_inactive(struct drm_i915_gem_object *obj_priv)
+ {
+@@ -66,7 +171,8 @@ i915_gem_object_is_inactive(struct drm_i915_gem_object *obj_priv)
+ 		obj_priv->pin_count == 0;
+ }
+ 
+-int i915_gem_do_init(struct drm_device *dev, unsigned long start,
++int i915_gem_do_init(struct drm_device *dev,
++		     unsigned long start,
+ 		     unsigned long end)
+ {
+ 	drm_i915_private_t *dev_priv = dev->dev_private;
+@@ -80,7 +186,7 @@ int i915_gem_do_init(struct drm_device *dev, unsigned long start,
+ 	drm_mm_init(&dev_priv->mm.gtt_space, start,
+ 		    end - start);
+ 
+-	dev->gtt_total = (uint32_t) (end - start);
++	dev_priv->mm.gtt_total = end - start;
+ 
+ 	return 0;
+ }
+@@ -103,14 +209,16 @@ int
+ i915_gem_get_aperture_ioctl(struct drm_device *dev, void *data,
+ 			    struct drm_file *file_priv)
+ {
++	struct drm_i915_private *dev_priv = dev->dev_private;
+ 	struct drm_i915_gem_get_aperture *args = data;
+ 
+ 	if (!(dev->driver->driver_features & DRIVER_GEM))
+ 		return -ENODEV;
+ 
+-	args->aper_size = dev->gtt_total;
+-	args->aper_available_size = (args->aper_size -
+-				     atomic_read(&dev->pin_memory));
++	mutex_lock(&dev->struct_mutex);
++	args->aper_size = dev_priv->mm.gtt_total;
++	args->aper_available_size = args->aper_size - dev_priv->mm.pin_memory;
++	mutex_unlock(&dev->struct_mutex);
+ 
+ 	return 0;
+ }
+@@ -136,12 +244,17 @@ i915_gem_create_ioctl(struct drm_device *dev, void *data,
+ 		return -ENOMEM;
+ 
+ 	ret = drm_gem_handle_create(file_priv, obj, &handle);
+-	/* drop reference from allocate - handle holds it now */
+-	drm_gem_object_unreference_unlocked(obj);
+ 	if (ret) {
++		drm_gem_object_release(obj);
++		i915_gem_info_remove_obj(dev->dev_private, obj->size);
++		kfree(obj);
+ 		return ret;
+ 	}
+ 
++	/* drop reference from allocate - handle holds it now */
++	drm_gem_object_unreference(obj);
++	trace_i915_gem_object_create(obj);
++
+ 	args->handle = handle;
+ 	return 0;
+ }
+@@ -152,19 +265,14 @@ fast_shmem_read(struct page **pages,
+ 		char __user *data,
+ 		int length)
+ {
+-	char __iomem *vaddr;
+-	int unwritten;
++	char *vaddr;
++	int ret;
+ 
+ 	vaddr = kmap_atomic(pages[page_base >> PAGE_SHIFT], KM_USER0);
+-	if (vaddr == NULL)
+-		return -ENOMEM;
+-	unwritten = __copy_to_user_inatomic(data, vaddr + page_offset, length);
++	ret = __copy_to_user_inatomic(data, vaddr + page_offset, length);
+ 	kunmap_atomic(vaddr, KM_USER0);
+ 
+-	if (unwritten)
+-		return -EFAULT;
+-
+-	return 0;
++	return ret;
+ }
+ 
+ static int i915_gem_object_needs_bit17_swizzle(struct drm_gem_object *obj)
+@@ -258,22 +366,10 @@ i915_gem_shmem_pread_fast(struct drm_device *dev, struct drm_gem_object *obj,
+ 	loff_t offset, page_base;
+ 	char __user *user_data;
+ 	int page_offset, page_length;
+-	int ret;
+ 
+ 	user_data = (char __user *) (uintptr_t) args->data_ptr;
+ 	remain = args->size;
+ 
+-	mutex_lock(&dev->struct_mutex);
+-
+-	ret = i915_gem_object_get_pages(obj, 0);
+-	if (ret != 0)
+-		goto fail_unlock;
+-
+-	ret = i915_gem_object_set_cpu_read_domain_range(obj, args->offset,
+-							args->size);
+-	if (ret != 0)
+-		goto fail_put_pages;
+-
+ 	obj_priv = to_intel_bo(obj);
+ 	offset = args->offset;
+ 
+@@ -290,23 +386,17 @@ i915_gem_shmem_pread_fast(struct drm_device *dev, struct drm_gem_object *obj,
+ 		if ((page_offset + remain) > PAGE_SIZE)
+ 			page_length = PAGE_SIZE - page_offset;
+ 
+-		ret = fast_shmem_read(obj_priv->pages,
+-				      page_base, page_offset,
+-				      user_data, page_length);
+-		if (ret)
+-			goto fail_put_pages;
++		if (fast_shmem_read(obj_priv->pages,
++				    page_base, page_offset,
++				    user_data, page_length))
++			return -EFAULT;
+ 
+ 		remain -= page_length;
+ 		user_data += page_length;
+ 		offset += page_length;
+ 	}
+ 
+-fail_put_pages:
+-	i915_gem_object_put_pages(obj);
+-fail_unlock:
+-	mutex_unlock(&dev->struct_mutex);
+-
+-	return ret;
++	return 0;
+ }
+ 
+ static int
+@@ -367,31 +457,28 @@ i915_gem_shmem_pread_slow(struct drm_device *dev, struct drm_gem_object *obj,
+ 	last_data_page = (data_ptr + args->size - 1) / PAGE_SIZE;
+ 	num_pages = last_data_page - first_data_page + 1;
+ 
+-	user_pages = drm_calloc_large(num_pages, sizeof(struct page *));
++	user_pages = drm_malloc_ab(num_pages, sizeof(struct page *));
+ 	if (user_pages == NULL)
+ 		return -ENOMEM;
+ 
++	mutex_unlock(&dev->struct_mutex);
+ 	down_read(&mm->mmap_sem);
+ 	pinned_pages = get_user_pages(current, mm, (uintptr_t)args->data_ptr,
+ 				      num_pages, 1, 0, user_pages, NULL);
+ 	up_read(&mm->mmap_sem);
++	mutex_lock(&dev->struct_mutex);
+ 	if (pinned_pages < num_pages) {
+ 		ret = -EFAULT;
+-		goto fail_put_user_pages;
++		goto out;
+ 	}
+ 
+-	do_bit17_swizzling = i915_gem_object_needs_bit17_swizzle(obj);
+-
+-	mutex_lock(&dev->struct_mutex);
+-
+-	ret = i915_gem_object_get_pages_or_evict(obj);
++	ret = i915_gem_object_set_cpu_read_domain_range(obj,
++							args->offset,
++							args->size);
+ 	if (ret)
+-		goto fail_unlock;
++		goto out;
+ 
+-	ret = i915_gem_object_set_cpu_read_domain_range(obj, args->offset,
+-							args->size);
+-	if (ret != 0)
+-		goto fail_put_pages;
++	do_bit17_swizzling = i915_gem_object_needs_bit17_swizzle(obj);
+ 
+ 	obj_priv = to_intel_bo(obj);
+ 	offset = args->offset;
+@@ -436,11 +523,7 @@ i915_gem_shmem_pread_slow(struct drm_device *dev, struct drm_gem_object *obj,
+ 		offset += page_length;
+ 	}
+ 
+-fail_put_pages:
+-	i915_gem_object_put_pages(obj);
+-fail_unlock:
+-	mutex_unlock(&dev->struct_mutex);
+-fail_put_user_pages:
++out:
+ 	for (i = 0; i < pinned_pages; i++) {
+ 		SetPageDirty(user_pages[i]);
+ 		page_cache_release(user_pages[i]);
+@@ -462,37 +545,64 @@ i915_gem_pread_ioctl(struct drm_device *dev, void *data,
+ 	struct drm_i915_gem_pread *args = data;
+ 	struct drm_gem_object *obj;
+ 	struct drm_i915_gem_object *obj_priv;
+-	int ret;
++	int ret = 0;
++
++	ret = i915_mutex_lock_interruptible(dev);
++	if (ret)
++		return ret;
+ 
+ 	obj = drm_gem_object_lookup(dev, file_priv, args->handle);
+-	if (obj == NULL)
+-		return -ENOENT;
++	if (obj == NULL) {
++		ret = -ENOENT;
++		goto unlock;
++	}
+ 	obj_priv = to_intel_bo(obj);
+ 
+ 	/* Bounds check source.  */
+ 	if (args->offset > obj->size || args->size > obj->size - args->offset) {
+ 		ret = -EINVAL;
+-		goto err;
++		goto out;
+ 	}
+ 
++	if (args->size == 0)
++		goto out;
++
+ 	if (!access_ok(VERIFY_WRITE,
+ 		       (char __user *)(uintptr_t)args->data_ptr,
+ 		       args->size)) {
+ 		ret = -EFAULT;
+-		goto err;
++		goto out;
+ 	}
+ 
+-	if (i915_gem_object_needs_bit17_swizzle(obj)) {
+-		ret = i915_gem_shmem_pread_slow(dev, obj, args, file_priv);
+-	} else {
+-		ret = i915_gem_shmem_pread_fast(dev, obj, args, file_priv);
+-		if (ret != 0)
+-			ret = i915_gem_shmem_pread_slow(dev, obj, args,
+-							file_priv);
++	ret = fault_in_pages_writeable((char __user *)(uintptr_t)args->data_ptr,
++				       args->size);
++	if (ret) {
++		ret = -EFAULT;
++		goto out;
+ 	}
+ 
+-err:
+-	drm_gem_object_unreference_unlocked(obj);
++	ret = i915_gem_object_get_pages_or_evict(obj);
++	if (ret)
++		goto out;
++
++	ret = i915_gem_object_set_cpu_read_domain_range(obj,
++							args->offset,
++							args->size);
++	if (ret)
++		goto out_put;
++
++	ret = -EFAULT;
++	if (!i915_gem_object_needs_bit17_swizzle(obj))
++		ret = i915_gem_shmem_pread_fast(dev, obj, args, file_priv);
++	if (ret == -EFAULT)
++		ret = i915_gem_shmem_pread_slow(dev, obj, args, file_priv);
++
++out_put:
++	i915_gem_object_put_pages(obj);
++out:
++	drm_gem_object_unreference(obj);
++unlock:
++	mutex_unlock(&dev->struct_mutex);
+ 	return ret;
+ }
+ 
+@@ -513,9 +623,7 @@ fast_user_write(struct io_mapping *mapping,
+ 	unwritten = __copy_from_user_inatomic_nocache(vaddr_atomic + page_offset,
+ 						      user_data, length);
+ 	io_mapping_unmap_atomic(vaddr_atomic, KM_USER0);
+-	if (unwritten)
+-		return -EFAULT;
+-	return 0;
++	return unwritten;
+ }
+ 
+ /* Here's the write path which can sleep for
+@@ -548,18 +656,14 @@ fast_shmem_write(struct page **pages,
+ 		 char __user *data,
+ 		 int length)
+ {
+-	char __iomem *vaddr;
+-	unsigned long unwritten;
++	char *vaddr;
++	int ret;
+ 
+ 	vaddr = kmap_atomic(pages[page_base >> PAGE_SHIFT], KM_USER0);
+-	if (vaddr == NULL)
+-		return -ENOMEM;
+-	unwritten = __copy_from_user_inatomic(vaddr + page_offset, data, length);
++	ret = __copy_from_user_inatomic(vaddr + page_offset, data, length);
+ 	kunmap_atomic(vaddr, KM_USER0);
+ 
+-	if (unwritten)
+-		return -EFAULT;
+-	return 0;
++	return ret;
+ }
+ 
+ /**
+@@ -577,22 +681,10 @@ i915_gem_gtt_pwrite_fast(struct drm_device *dev, struct drm_gem_object *obj,
+ 	loff_t offset, page_base;
+ 	char __user *user_data;
+ 	int page_offset, page_length;
+-	int ret;
+ 
+ 	user_data = (char __user *) (uintptr_t) args->data_ptr;
+ 	remain = args->size;
+ 
+-
+-	mutex_lock(&dev->struct_mutex);
+-	ret = i915_gem_object_pin(obj, 0);
+-	if (ret) {
+-		mutex_unlock(&dev->struct_mutex);
+-		return ret;
+-	}
+-	ret = i915_gem_object_set_to_gtt_domain(obj, 1);
+-	if (ret)
+-		goto fail;
+-
+ 	obj_priv = to_intel_bo(obj);
+ 	offset = obj_priv->gtt_offset + args->offset;
+ 
+@@ -609,26 +701,21 @@ i915_gem_gtt_pwrite_fast(struct drm_device *dev, struct drm_gem_object *obj,
+ 		if ((page_offset + remain) > PAGE_SIZE)
+ 			page_length = PAGE_SIZE - page_offset;
+ 
+-		ret = fast_user_write (dev_priv->mm.gtt_mapping, page_base,
+-				       page_offset, user_data, page_length);
+-
+ 		/* If we get a fault while copying data, then (presumably) our
+ 		 * source page isn't available.  Return the error and we'll
+ 		 * retry in the slow path.
+ 		 */
+-		if (ret)
+-			goto fail;
++		if (fast_user_write(dev_priv->mm.gtt_mapping, page_base,
++				    page_offset, user_data, page_length))
++
++			return -EFAULT;
+ 
+ 		remain -= page_length;
+ 		user_data += page_length;
+ 		offset += page_length;
+ 	}
+ 
+-fail:
+-	i915_gem_object_unpin(obj);
+-	mutex_unlock(&dev->struct_mutex);
+-
+-	return ret;
++	return 0;
+ }
+ 
+ /**
+@@ -665,27 +752,24 @@ i915_gem_gtt_pwrite_slow(struct drm_device *dev, struct drm_gem_object *obj,
+ 	last_data_page = (data_ptr + args->size - 1) / PAGE_SIZE;
+ 	num_pages = last_data_page - first_data_page + 1;
+ 
+-	user_pages = drm_calloc_large(num_pages, sizeof(struct page *));
++	user_pages = drm_malloc_ab(num_pages, sizeof(struct page *));
+ 	if (user_pages == NULL)
+ 		return -ENOMEM;
+ 
++	mutex_unlock(&dev->struct_mutex);
+ 	down_read(&mm->mmap_sem);
+ 	pinned_pages = get_user_pages(current, mm, (uintptr_t)args->data_ptr,
+ 				      num_pages, 0, 0, user_pages, NULL);
+ 	up_read(&mm->mmap_sem);
++	mutex_lock(&dev->struct_mutex);
+ 	if (pinned_pages < num_pages) {
+ 		ret = -EFAULT;
+ 		goto out_unpin_pages;
+ 	}
+ 
+-	mutex_lock(&dev->struct_mutex);
+-	ret = i915_gem_object_pin(obj, 0);
+-	if (ret)
+-		goto out_unlock;
+-
+ 	ret = i915_gem_object_set_to_gtt_domain(obj, 1);
+ 	if (ret)
+-		goto out_unpin_object;
++		goto out_unpin_pages;
+ 
+ 	obj_priv = to_intel_bo(obj);
+ 	offset = obj_priv->gtt_offset + args->offset;
+@@ -721,10 +805,6 @@ i915_gem_gtt_pwrite_slow(struct drm_device *dev, struct drm_gem_object *obj,
+ 		data_ptr += page_length;
+ 	}
+ 
+-out_unpin_object:
+-	i915_gem_object_unpin(obj);
+-out_unlock:
+-	mutex_unlock(&dev->struct_mutex);
+ out_unpin_pages:
+ 	for (i = 0; i < pinned_pages; i++)
+ 		page_cache_release(user_pages[i]);
+@@ -747,21 +827,10 @@ i915_gem_shmem_pwrite_fast(struct drm_device *dev, struct drm_gem_object *obj,
+ 	loff_t offset, page_base;
+ 	char __user *user_data;
+ 	int page_offset, page_length;
+-	int ret;
+ 
+ 	user_data = (char __user *) (uintptr_t) args->data_ptr;
+ 	remain = args->size;
+ 
+-	mutex_lock(&dev->struct_mutex);
+-
+-	ret = i915_gem_object_get_pages(obj, 0);
+-	if (ret != 0)
+-		goto fail_unlock;
+-
+-	ret = i915_gem_object_set_to_cpu_domain(obj, 1);
+-	if (ret != 0)
+-		goto fail_put_pages;
+-
+ 	obj_priv = to_intel_bo(obj);
+ 	offset = args->offset;
+ 	obj_priv->dirty = 1;
+@@ -779,23 +848,17 @@ i915_gem_shmem_pwrite_fast(struct drm_device *dev, struct drm_gem_object *obj,
+ 		if ((page_offset + remain) > PAGE_SIZE)
+ 			page_length = PAGE_SIZE - page_offset;
+ 
+-		ret = fast_shmem_write(obj_priv->pages,
++		if (fast_shmem_write(obj_priv->pages,
+ 				       page_base, page_offset,
+-				       user_data, page_length);
+-		if (ret)
+-			goto fail_put_pages;
++				       user_data, page_length))
++			return -EFAULT;
+ 
+ 		remain -= page_length;
+ 		user_data += page_length;
+ 		offset += page_length;
+ 	}
+ 
+-fail_put_pages:
+-	i915_gem_object_put_pages(obj);
+-fail_unlock:
+-	mutex_unlock(&dev->struct_mutex);
+-
+-	return ret;
++	return 0;
+ }
+ 
+ /**
+@@ -833,30 +896,26 @@ i915_gem_shmem_pwrite_slow(struct drm_device *dev, struct drm_gem_object *obj,
+ 	last_data_page = (data_ptr + args->size - 1) / PAGE_SIZE;
+ 	num_pages = last_data_page - first_data_page + 1;
+ 
+-	user_pages = drm_calloc_large(num_pages, sizeof(struct page *));
++	user_pages = drm_malloc_ab(num_pages, sizeof(struct page *));
+ 	if (user_pages == NULL)
+ 		return -ENOMEM;
+ 
++	mutex_unlock(&dev->struct_mutex);
+ 	down_read(&mm->mmap_sem);
+ 	pinned_pages = get_user_pages(current, mm, (uintptr_t)args->data_ptr,
+ 				      num_pages, 0, 0, user_pages, NULL);
+ 	up_read(&mm->mmap_sem);
++	mutex_lock(&dev->struct_mutex);
+ 	if (pinned_pages < num_pages) {
+ 		ret = -EFAULT;
+-		goto fail_put_user_pages;
++		goto out;
+ 	}
+ 
+-	do_bit17_swizzling = i915_gem_object_needs_bit17_swizzle(obj);
+-
+-	mutex_lock(&dev->struct_mutex);
+-
+-	ret = i915_gem_object_get_pages_or_evict(obj);
++	ret = i915_gem_object_set_to_cpu_domain(obj, 1);
+ 	if (ret)
+-		goto fail_unlock;
++		goto out;
+ 
+-	ret = i915_gem_object_set_to_cpu_domain(obj, 1);
+-	if (ret != 0)
+-		goto fail_put_pages;
++	do_bit17_swizzling = i915_gem_object_needs_bit17_swizzle(obj);
+ 
+ 	obj_priv = to_intel_bo(obj);
+ 	offset = args->offset;
+@@ -902,11 +961,7 @@ i915_gem_shmem_pwrite_slow(struct drm_device *dev, struct drm_gem_object *obj,
+ 		offset += page_length;
+ 	}
+ 
+-fail_put_pages:
+-	i915_gem_object_put_pages(obj);
+-fail_unlock:
+-	mutex_unlock(&dev->struct_mutex);
+-fail_put_user_pages:
++out:
+ 	for (i = 0; i < pinned_pages; i++)
+ 		page_cache_release(user_pages[i]);
+ 	drm_free_large(user_pages);
+@@ -921,29 +976,46 @@ fail_put_user_pages:
+  */
+ int
+ i915_gem_pwrite_ioctl(struct drm_device *dev, void *data,
+-		      struct drm_file *file_priv)
++		      struct drm_file *file)
+ {
+ 	struct drm_i915_gem_pwrite *args = data;
+ 	struct drm_gem_object *obj;
+ 	struct drm_i915_gem_object *obj_priv;
+ 	int ret = 0;
+ 
+-	obj = drm_gem_object_lookup(dev, file_priv, args->handle);
+-	if (obj == NULL)
+-		return -ENOENT;
++	ret = i915_mutex_lock_interruptible(dev);
++	if (ret)
++		return ret;
++
++	obj = drm_gem_object_lookup(dev, file, args->handle);
++	if (obj == NULL) {
++		ret = -ENOENT;
++		goto unlock;
++	}
+ 	obj_priv = to_intel_bo(obj);
+ 
++
+ 	/* Bounds check destination. */
+ 	if (args->offset > obj->size || args->size > obj->size - args->offset) {
+ 		ret = -EINVAL;
+-		goto err;
++		goto out;
+ 	}
+ 
++	if (args->size == 0)
++		goto out;
++
+ 	if (!access_ok(VERIFY_READ,
+ 		       (char __user *)(uintptr_t)args->data_ptr,
+ 		       args->size)) {
+ 		ret = -EFAULT;
+-		goto err;
++		goto out;
++	}
++
++	ret = fault_in_pages_readable((char __user *)(uintptr_t)args->data_ptr,
++				      args->size);
++	if (ret) {
++		ret = -EFAULT;
++		goto out;
+ 	}
+ 
+ 	/* We can only do the GTT pwrite on untiled buffers, as otherwise
+@@ -953,32 +1025,47 @@ i915_gem_pwrite_ioctl(struct drm_device *dev, void *data,
+ 	 * perspective, requiring manual detiling by the client.
+ 	 */
+ 	if (obj_priv->phys_obj)
+-		ret = i915_gem_phys_pwrite(dev, obj, args, file_priv);
++		ret = i915_gem_phys_pwrite(dev, obj, args, file);
+ 	else if (obj_priv->tiling_mode == I915_TILING_NONE &&
+-		 dev->gtt_total != 0 &&
++		 obj_priv->gtt_space &&
+ 		 obj->write_domain != I915_GEM_DOMAIN_CPU) {
+-		ret = i915_gem_gtt_pwrite_fast(dev, obj, args, file_priv);
+-		if (ret == -EFAULT) {
+-			ret = i915_gem_gtt_pwrite_slow(dev, obj, args,
+-						       file_priv);
+-		}
+-	} else if (i915_gem_object_needs_bit17_swizzle(obj)) {
+-		ret = i915_gem_shmem_pwrite_slow(dev, obj, args, file_priv);
++		ret = i915_gem_object_pin(obj, 0);
++		if (ret)
++			goto out;
++
++		ret = i915_gem_object_set_to_gtt_domain(obj, 1);
++		if (ret)
++			goto out_unpin;
++
++		ret = i915_gem_gtt_pwrite_fast(dev, obj, args, file);
++		if (ret == -EFAULT)
++			ret = i915_gem_gtt_pwrite_slow(dev, obj, args, file);
++
++out_unpin:
++		i915_gem_object_unpin(obj);
+ 	} else {
+-		ret = i915_gem_shmem_pwrite_fast(dev, obj, args, file_priv);
+-		if (ret == -EFAULT) {
+-			ret = i915_gem_shmem_pwrite_slow(dev, obj, args,
+-							 file_priv);
+-		}
+-	}
++		ret = i915_gem_object_get_pages_or_evict(obj);
++		if (ret)
++			goto out;
+ 
+-#if WATCH_PWRITE
+-	if (ret)
+-		DRM_INFO("pwrite failed %d\n", ret);
+-#endif
++		ret = i915_gem_object_set_to_cpu_domain(obj, 1);
++		if (ret)
++			goto out_put;
+ 
+-err:
+-	drm_gem_object_unreference_unlocked(obj);
++		ret = -EFAULT;
++		if (!i915_gem_object_needs_bit17_swizzle(obj))
++			ret = i915_gem_shmem_pwrite_fast(dev, obj, args, file);
++		if (ret == -EFAULT)
++			ret = i915_gem_shmem_pwrite_slow(dev, obj, args, file);
++
++out_put:
++		i915_gem_object_put_pages(obj);
++	}
++
++out:
++	drm_gem_object_unreference(obj);
++unlock:
++	mutex_unlock(&dev->struct_mutex);
+ 	return ret;
+ }
+ 
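The rewritten ioctl above uses two patterns: every exit funnels through the out/out_unpin/out_put/unlock labels so each failure path unwinds exactly what it acquired, and the fast pwrite path is retried through the slower pinned-pages path whenever it reports -EFAULT. A self-contained toy in plain C showing the same shape (all names invented for the demo):

    #include <errno.h>
    #include <stdio.h>
    
    static int fake_lock(void)    { return 0; }                      /* stands in for the mutex */
    static void fake_unlock(void) { }
    static int write_fast(int n)  { return n < 0 ? -EFAULT : 0; }    /* may "fault"             */
    static int write_slow(int n)  { (void)n; return 0; }             /* pins pages, cannot fault */
    
    static int do_pwrite(int n)
    {
    	int ret;
    
    	ret = fake_lock();
    	if (ret)
    		return ret;
    
    	if (n == 0) {                 /* e.g. zero-length write: nothing to do */
    		ret = 0;
    		goto unlock;
    	}
    
    	ret = write_fast(n);          /* optimistic: no faulting allowed here  */
    	if (ret == -EFAULT)
    		ret = write_slow(n);  /* fall back: pin user pages up front    */
    
    unlock:
    	fake_unlock();                /* single exit point unwinds the lock    */
    	return ret;
    }
    
    int main(void)
    {
    	printf("fast path: %d, fallback path: %d\n", do_pwrite(1), do_pwrite(-1));
    	return 0;
    }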
+@@ -1014,19 +1101,19 @@ i915_gem_set_domain_ioctl(struct drm_device *dev, void *data,
+ 	if (write_domain != 0 && read_domains != write_domain)
+ 		return -EINVAL;
+ 
++	ret = i915_mutex_lock_interruptible(dev);
++	if (ret)
++		return ret;
++
+ 	obj = drm_gem_object_lookup(dev, file_priv, args->handle);
+-	if (obj == NULL)
+-		return -ENOENT;
++	if (obj == NULL) {
++		ret = -ENOENT;
++		goto unlock;
++	}
+ 	obj_priv = to_intel_bo(obj);
+ 
+-	mutex_lock(&dev->struct_mutex);
+-
+ 	intel_mark_busy(dev, obj);
+ 
+-#if WATCH_BUF
+-	DRM_INFO("set_domain_ioctl %p(%zd), %08x %08x\n",
+-		 obj, obj->size, read_domains, write_domain);
+-#endif
+ 	if (read_domains & I915_GEM_DOMAIN_GTT) {
+ 		ret = i915_gem_object_set_to_gtt_domain(obj, write_domain != 0);
+ 
+@@ -1050,12 +1137,12 @@ i915_gem_set_domain_ioctl(struct drm_device *dev, void *data,
+ 		ret = i915_gem_object_set_to_cpu_domain(obj, write_domain != 0);
+ 	}
+ 
+-	
+ 	/* Maintain LRU order of "inactive" objects */
+ 	if (ret == 0 && i915_gem_object_is_inactive(obj_priv))
+-		list_move_tail(&obj_priv->list, &dev_priv->mm.inactive_list);
++		list_move_tail(&obj_priv->mm_list, &dev_priv->mm.inactive_list);
+ 
+ 	drm_gem_object_unreference(obj);
++unlock:
+ 	mutex_unlock(&dev->struct_mutex);
+ 	return ret;
+ }
+@@ -1069,30 +1156,27 @@ i915_gem_sw_finish_ioctl(struct drm_device *dev, void *data,
+ {
+ 	struct drm_i915_gem_sw_finish *args = data;
+ 	struct drm_gem_object *obj;
+-	struct drm_i915_gem_object *obj_priv;
+ 	int ret = 0;
+ 
+ 	if (!(dev->driver->driver_features & DRIVER_GEM))
+ 		return -ENODEV;
+ 
+-	mutex_lock(&dev->struct_mutex);
++	ret = i915_mutex_lock_interruptible(dev);
++	if (ret)
++		return ret;
++
+ 	obj = drm_gem_object_lookup(dev, file_priv, args->handle);
+ 	if (obj == NULL) {
+-		mutex_unlock(&dev->struct_mutex);
+-		return -ENOENT;
++		ret = -ENOENT;
++		goto unlock;
+ 	}
+ 
+-#if WATCH_BUF
+-	DRM_INFO("%s: sw_finish %d (%p %zd)\n",
+-		 __func__, args->handle, obj, obj->size);
+-#endif
+-	obj_priv = to_intel_bo(obj);
+-
+ 	/* Pinned buffers may be scanout, so flush the cache */
+-	if (obj_priv->pin_count)
++	if (to_intel_bo(obj)->pin_count)
+ 		i915_gem_object_flush_cpu_write_domain(obj);
+ 
+ 	drm_gem_object_unreference(obj);
++unlock:
+ 	mutex_unlock(&dev->struct_mutex);
+ 	return ret;
+ }
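Each ioctl conversion above replaces a bare mutex_lock() with i915_mutex_lock_interruptible(), whose definition sits outside these hunks. A plausible minimal form, shown only for orientation (an assumption, not the patch's actual body), is a wedged-GPU check plus an interruptible lock:

    static int i915_mutex_lock_interruptible(struct drm_device *dev)
    {
    	struct drm_i915_private *dev_priv = dev->dev_private;
    	int ret;
    
    	/* Refuse new work while the GPU is wedged awaiting a reset. */
    	if (atomic_read(&dev_priv->mm.wedged))
    		return -EAGAIN;
    
    	/* Let the caller be interrupted by a signal while waiting. */
    	ret = mutex_lock_interruptible(&dev->struct_mutex);
    	if (ret)
    		return ret;
    
    	return 0;
    }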
+@@ -1181,13 +1265,13 @@ int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
+ 
+ 	/* Need a new fence register? */
+ 	if (obj_priv->tiling_mode != I915_TILING_NONE) {
+-		ret = i915_gem_object_get_fence_reg(obj);
++		ret = i915_gem_object_get_fence_reg(obj, true);
+ 		if (ret)
+ 			goto unlock;
+ 	}
+ 
+ 	if (i915_gem_object_is_inactive(obj_priv))
+-		list_move_tail(&obj_priv->list, &dev_priv->mm.inactive_list);
++		list_move_tail(&obj_priv->mm_list, &dev_priv->mm.inactive_list);
+ 
+ 	pfn = ((dev->agp->base + obj_priv->gtt_offset) >> PAGE_SHIFT) +
+ 		page_offset;
+@@ -1246,7 +1330,7 @@ i915_gem_create_mmap_offset(struct drm_gem_object *obj)
+ 						    obj->size / PAGE_SIZE, 0, 0);
+ 	if (!list->file_offset_node) {
+ 		DRM_ERROR("failed to allocate offset for bo %d\n", obj->name);
+-		ret = -ENOMEM;
++		ret = -ENOSPC;
+ 		goto out_free_list;
+ 	}
+ 
+@@ -1258,9 +1342,9 @@ i915_gem_create_mmap_offset(struct drm_gem_object *obj)
+ 	}
+ 
+ 	list->hash.key = list->file_offset_node->start;
+-	if (drm_ht_insert_item(&mm->offset_hash, &list->hash)) {
++	ret = drm_ht_insert_item(&mm->offset_hash, &list->hash);
++	if (ret) {
+ 		DRM_ERROR("failed to add to map hash\n");
+-		ret = -ENOMEM;
+ 		goto out_free_mm;
+ 	}
+ 
+@@ -1345,14 +1429,14 @@ i915_gem_get_gtt_alignment(struct drm_gem_object *obj)
+ 	 * Minimum alignment is 4k (GTT page size), but might be greater
+ 	 * if a fence register is needed for the object.
+ 	 */
+-	if (IS_I965G(dev) || obj_priv->tiling_mode == I915_TILING_NONE)
++	if (INTEL_INFO(dev)->gen >= 4 || obj_priv->tiling_mode == I915_TILING_NONE)
+ 		return 4096;
+ 
+ 	/*
+ 	 * Previous chips need to be aligned to the size of the smallest
+ 	 * fence register that can contain the object.
+ 	 */
+-	if (IS_I9XX(dev))
++	if (INTEL_INFO(dev)->gen == 3)
+ 		start = 1024*1024;
+ 	else
+ 		start = 512*1024;
+@@ -1390,29 +1474,27 @@ i915_gem_mmap_gtt_ioctl(struct drm_device *dev, void *data,
+ 	if (!(dev->driver->driver_features & DRIVER_GEM))
+ 		return -ENODEV;
+ 
+-	obj = drm_gem_object_lookup(dev, file_priv, args->handle);
+-	if (obj == NULL)
+-		return -ENOENT;
+-
+-	mutex_lock(&dev->struct_mutex);
++	ret = i915_mutex_lock_interruptible(dev);
++	if (ret)
++		return ret;
+ 
++	obj = drm_gem_object_lookup(dev, file_priv, args->handle);
++	if (obj == NULL) {
++		ret = -ENOENT;
++		goto unlock;
++	}
+ 	obj_priv = to_intel_bo(obj);
+ 
+ 	if (obj_priv->madv != I915_MADV_WILLNEED) {
+ 		DRM_ERROR("Attempting to mmap a purgeable buffer\n");
+-		drm_gem_object_unreference(obj);
+-		mutex_unlock(&dev->struct_mutex);
+-		return -EINVAL;
++		ret = -EINVAL;
++		goto out;
+ 	}
+ 
+-
+ 	if (!obj_priv->mmap_offset) {
+ 		ret = i915_gem_create_mmap_offset(obj);
+-		if (ret) {
+-			drm_gem_object_unreference(obj);
+-			mutex_unlock(&dev->struct_mutex);
+-			return ret;
+-		}
++		if (ret)
++			goto out;
+ 	}
+ 
+ 	args->offset = obj_priv->mmap_offset;
+@@ -1423,20 +1505,18 @@ i915_gem_mmap_gtt_ioctl(struct drm_device *dev, void *data,
+ 	 */
+ 	if (!obj_priv->agp_mem) {
+ 		ret = i915_gem_object_bind_to_gtt(obj, 0);
+-		if (ret) {
+-			drm_gem_object_unreference(obj);
+-			mutex_unlock(&dev->struct_mutex);
+-			return ret;
+-		}
++		if (ret)
++			goto out;
+ 	}
+ 
++out:
+ 	drm_gem_object_unreference(obj);
++unlock:
+ 	mutex_unlock(&dev->struct_mutex);
+-
+-	return 0;
++	return ret;
+ }
+ 
+-void
++static void
+ i915_gem_object_put_pages(struct drm_gem_object *obj)
+ {
+ 	struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
+@@ -1470,13 +1550,25 @@ i915_gem_object_put_pages(struct drm_gem_object *obj)
+ 	obj_priv->pages = NULL;
+ }
+ 
++static uint32_t
++i915_gem_next_request_seqno(struct drm_device *dev,
++			    struct intel_ring_buffer *ring)
++{
++	drm_i915_private_t *dev_priv = dev->dev_private;
++
++	ring->outstanding_lazy_request = true;
++	return dev_priv->next_seqno;
++}
++
+ static void
+-i915_gem_object_move_to_active(struct drm_gem_object *obj, uint32_t seqno,
++i915_gem_object_move_to_active(struct drm_gem_object *obj,
+ 			       struct intel_ring_buffer *ring)
+ {
+ 	struct drm_device *dev = obj->dev;
+-	drm_i915_private_t *dev_priv = dev->dev_private;
++	struct drm_i915_private *dev_priv = dev->dev_private;
+ 	struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
++	uint32_t seqno = i915_gem_next_request_seqno(dev, ring);
++
+ 	BUG_ON(ring == NULL);
+ 	obj_priv->ring = ring;
+ 
+@@ -1485,10 +1577,10 @@ i915_gem_object_move_to_active(struct drm_gem_object *obj, uint32_t seqno,
+ 		drm_gem_object_reference(obj);
+ 		obj_priv->active = 1;
+ 	}
++
+ 	/* Move from whatever list we were on to the tail of execution. */
+-	spin_lock(&dev_priv->mm.active_list_lock);
+-	list_move_tail(&obj_priv->list, &ring->active_list);
+-	spin_unlock(&dev_priv->mm.active_list_lock);
++	list_move_tail(&obj_priv->mm_list, &dev_priv->mm.active_list);
++	list_move_tail(&obj_priv->ring_list, &ring->active_list);
+ 	obj_priv->last_rendering_seqno = seqno;
+ }
+ 
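i915_gem_object_move_to_active() no longer receives an explicit seqno; i915_gem_next_request_seqno() hands back dev_priv->next_seqno and flags ring->outstanding_lazy_request, so the real request is only emitted once something actually needs to wait on it (see the i915_do_wait_request change further down). A rough sketch of the lazy-request idea, with hypothetical names:

    /* Hypothetical names; only the shape of the lazy-request pattern matters. */
    static uint32_t peek_next_seqno(struct ring_state *ring)
    {
    	ring->outstanding_lazy_request = true;	/* promise to emit a request later */
    	return ring->next_seqno;		/* the seqno that request will carry */
    }
    
    static int wait_for_seqno(struct ring_state *ring, uint32_t seqno)
    {
    	if (ring->outstanding_lazy_request) {
    		/* Emit the promised request now that a waiter exists. */
    		seqno = emit_request(ring);
    		if (seqno == 0)
    			return -ENOMEM;
    	}
    	return wait_until_seqno_passed(ring, seqno);
    }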
+@@ -1500,7 +1592,8 @@ i915_gem_object_move_to_flushing(struct drm_gem_object *obj)
+ 	struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
+ 
+ 	BUG_ON(!obj_priv->active);
+-	list_move_tail(&obj_priv->list, &dev_priv->mm.flushing_list);
++	list_move_tail(&obj_priv->mm_list, &dev_priv->mm.flushing_list);
++	list_del_init(&obj_priv->ring_list);
+ 	obj_priv->last_rendering_seqno = 0;
+ }
+ 
+@@ -1538,11 +1631,11 @@ i915_gem_object_move_to_inactive(struct drm_gem_object *obj)
+ 	drm_i915_private_t *dev_priv = dev->dev_private;
+ 	struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
+ 
+-	i915_verify_inactive(dev, __FILE__, __LINE__);
+ 	if (obj_priv->pin_count != 0)
+-		list_del_init(&obj_priv->list);
++		list_move_tail(&obj_priv->mm_list, &dev_priv->mm.pinned_list);
+ 	else
+-		list_move_tail(&obj_priv->list, &dev_priv->mm.inactive_list);
++		list_move_tail(&obj_priv->mm_list, &dev_priv->mm.inactive_list);
++	list_del_init(&obj_priv->ring_list);
+ 
+ 	BUG_ON(!list_empty(&obj_priv->gpu_write_list));
+ 
+@@ -1552,30 +1645,28 @@ i915_gem_object_move_to_inactive(struct drm_gem_object *obj)
+ 		obj_priv->active = 0;
+ 		drm_gem_object_unreference(obj);
+ 	}
+-	i915_verify_inactive(dev, __FILE__, __LINE__);
++	WARN_ON(i915_verify_lists(dev));
+ }
+ 
+ static void
+ i915_gem_process_flushing_list(struct drm_device *dev,
+-			       uint32_t flush_domains, uint32_t seqno,
++			       uint32_t flush_domains,
+ 			       struct intel_ring_buffer *ring)
+ {
+ 	drm_i915_private_t *dev_priv = dev->dev_private;
+ 	struct drm_i915_gem_object *obj_priv, *next;
+ 
+ 	list_for_each_entry_safe(obj_priv, next,
+-				 &dev_priv->mm.gpu_write_list,
++				 &ring->gpu_write_list,
+ 				 gpu_write_list) {
+ 		struct drm_gem_object *obj = &obj_priv->base;
+ 
+-		if ((obj->write_domain & flush_domains) ==
+-		    obj->write_domain &&
+-		    obj_priv->ring->ring_flag == ring->ring_flag) {
++		if (obj->write_domain & flush_domains) {
+ 			uint32_t old_write_domain = obj->write_domain;
+ 
+ 			obj->write_domain = 0;
+ 			list_del_init(&obj_priv->gpu_write_list);
+-			i915_gem_object_move_to_active(obj, seqno, ring);
++			i915_gem_object_move_to_active(obj, ring);
+ 
+ 			/* update the fence lru list */
+ 			if (obj_priv->fence_reg != I915_FENCE_REG_NONE) {
+@@ -1593,23 +1684,27 @@ i915_gem_process_flushing_list(struct drm_device *dev,
+ }
+ 
+ uint32_t
+-i915_add_request(struct drm_device *dev, struct drm_file *file_priv,
+-		 uint32_t flush_domains, struct intel_ring_buffer *ring)
++i915_add_request(struct drm_device *dev,
++		 struct drm_file *file,
++		 struct drm_i915_gem_request *request,
++		 struct intel_ring_buffer *ring)
+ {
+ 	drm_i915_private_t *dev_priv = dev->dev_private;
+-	struct drm_i915_file_private *i915_file_priv = NULL;
+-	struct drm_i915_gem_request *request;
++	struct drm_i915_file_private *file_priv = NULL;
+ 	uint32_t seqno;
+ 	int was_empty;
+ 
+-	if (file_priv != NULL)
+-		i915_file_priv = file_priv->driver_priv;
++	if (file != NULL)
++		file_priv = file->driver_priv;
+ 
+-	request = kzalloc(sizeof(*request), GFP_KERNEL);
+-	if (request == NULL)
+-		return 0;
++	if (request == NULL) {
++		request = kzalloc(sizeof(*request), GFP_KERNEL);
++		if (request == NULL)
++			return 0;
++	}
+ 
+-	seqno = ring->add_request(dev, ring, file_priv, flush_domains);
++	seqno = ring->add_request(dev, ring, 0);
++	ring->outstanding_lazy_request = false;
+ 
+ 	request->seqno = seqno;
+ 	request->ring = ring;
+@@ -1617,23 +1712,20 @@ i915_add_request(struct drm_device *dev, struct drm_file *file_priv,
+ 	was_empty = list_empty(&ring->request_list);
+ 	list_add_tail(&request->list, &ring->request_list);
+ 
+-	if (i915_file_priv) {
++	if (file_priv) {
++		spin_lock(&file_priv->mm.lock);
++		request->file_priv = file_priv;
+ 		list_add_tail(&request->client_list,
+-			      &i915_file_priv->mm.request_list);
+-	} else {
+-		INIT_LIST_HEAD(&request->client_list);
++			      &file_priv->mm.request_list);
++		spin_unlock(&file_priv->mm.lock);
+ 	}
+ 
+-	/* Associate any objects on the flushing list matching the write
+-	 * domain we're flushing with our flush.
+-	 */
+-	if (flush_domains != 0) 
+-		i915_gem_process_flushing_list(dev, flush_domains, seqno, ring);
+-
+ 	if (!dev_priv->mm.suspended) {
+-		mod_timer(&dev_priv->hangcheck_timer, jiffies + DRM_I915_HANGCHECK_PERIOD);
++		mod_timer(&dev_priv->hangcheck_timer,
++			  jiffies + msecs_to_jiffies(DRM_I915_HANGCHECK_PERIOD));
+ 		if (was_empty)
+-			queue_delayed_work(dev_priv->wq, &dev_priv->mm.retire_work, HZ);
++			queue_delayed_work(dev_priv->wq,
++					   &dev_priv->mm.retire_work, HZ);
+ 	}
+ 	return seqno;
+ }
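i915_add_request() now accepts a caller-supplied request structure, allocating one itself only when the caller passes NULL, so paths that must not fail late can preallocate up front; it also threads the request onto a per-file list under file_priv->mm.lock. A hedged sketch of the caller side (the i915_* names come from this hunk, the surrounding lines are illustrative only):

    	struct drm_i915_gem_request *request;
    
    	/* Preallocate so submission cannot fail after the work is queued. */
    	request = kzalloc(sizeof(*request), GFP_KERNEL);
    	if (request == NULL)
    		return -ENOMEM;
    
    	/* ... emit the GPU commands ... */
    
    	seqno = i915_add_request(dev, file, request, ring);	/* list takes ownership */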
+@@ -1644,91 +1736,105 @@ i915_add_request(struct drm_device *dev, struct drm_file *file_priv,
+  * Ensures that all commands in the ring are finished
+  * before signalling the CPU
+  */
+-static uint32_t
++static void
+ i915_retire_commands(struct drm_device *dev, struct intel_ring_buffer *ring)
+ {
+ 	uint32_t flush_domains = 0;
+ 
+ 	/* The sampler always gets flushed on i965 (sigh) */
+-	if (IS_I965G(dev))
++	if (INTEL_INFO(dev)->gen >= 4)
+ 		flush_domains |= I915_GEM_DOMAIN_SAMPLER;
+ 
+ 	ring->flush(dev, ring,
+ 			I915_GEM_DOMAIN_COMMAND, flush_domains);
+-	return flush_domains;
+ }
+ 
+-/**
+- * Moves buffers associated only with the given active seqno from the active
+- * to inactive list, potentially freeing them.
+- */
+-static void
+-i915_gem_retire_request(struct drm_device *dev,
+-			struct drm_i915_gem_request *request)
++static inline void
++i915_gem_request_remove_from_client(struct drm_i915_gem_request *request)
+ {
+-	drm_i915_private_t *dev_priv = dev->dev_private;
++	struct drm_i915_file_private *file_priv = request->file_priv;
+ 
+-	trace_i915_gem_request_retire(dev, request->seqno);
++	if (!file_priv)
++		return;
+ 
+-	/* Move any buffers on the active list that are no longer referenced
+-	 * by the ringbuffer to the flushing/inactive lists as appropriate.
+-	 */
+-	spin_lock(&dev_priv->mm.active_list_lock);
+-	while (!list_empty(&request->ring->active_list)) {
+-		struct drm_gem_object *obj;
+-		struct drm_i915_gem_object *obj_priv;
++	spin_lock(&file_priv->mm.lock);
++	list_del(&request->client_list);
++	request->file_priv = NULL;
++	spin_unlock(&file_priv->mm.lock);
++}
+ 
+-		obj_priv = list_first_entry(&request->ring->active_list,
+-					    struct drm_i915_gem_object,
+-					    list);
+-		obj = &obj_priv->base;
++static void i915_gem_reset_ring_lists(struct drm_i915_private *dev_priv,
++				      struct intel_ring_buffer *ring)
++{
++	while (!list_empty(&ring->request_list)) {
++		struct drm_i915_gem_request *request;
+ 
+-		/* If the seqno being retired doesn't match the oldest in the
+-		 * list, then the oldest in the list must still be newer than
+-		 * this seqno.
+-		 */
+-		if (obj_priv->last_rendering_seqno != request->seqno)
+-			goto out;
++		request = list_first_entry(&ring->request_list,
++					   struct drm_i915_gem_request,
++					   list);
+ 
+-#if WATCH_LRU
+-		DRM_INFO("%s: retire %d moves to inactive list %p\n",
+-			 __func__, request->seqno, obj);
+-#endif
++		list_del(&request->list);
++		i915_gem_request_remove_from_client(request);
++		kfree(request);
++	}
+ 
+-		if (obj->write_domain != 0)
+-			i915_gem_object_move_to_flushing(obj);
+-		else {
+-			/* Take a reference on the object so it won't be
+-			 * freed while the spinlock is held.  The list
+-			 * protection for this spinlock is safe when breaking
+-			 * the lock like this since the next thing we do
+-			 * is just get the head of the list again.
+-			 */
+-			drm_gem_object_reference(obj);
+-			i915_gem_object_move_to_inactive(obj);
+-			spin_unlock(&dev_priv->mm.active_list_lock);
+-			drm_gem_object_unreference(obj);
+-			spin_lock(&dev_priv->mm.active_list_lock);
+-		}
++	while (!list_empty(&ring->active_list)) {
++		struct drm_i915_gem_object *obj_priv;
++
++		obj_priv = list_first_entry(&ring->active_list,
++					    struct drm_i915_gem_object,
++					    ring_list);
++
++		obj_priv->base.write_domain = 0;
++		list_del_init(&obj_priv->gpu_write_list);
++		i915_gem_object_move_to_inactive(&obj_priv->base);
+ 	}
+-out:
+-	spin_unlock(&dev_priv->mm.active_list_lock);
+ }
+ 
+-/**
+- * Returns true if seq1 is later than seq2.
+- */
+-bool
+-i915_seqno_passed(uint32_t seq1, uint32_t seq2)
++void i915_gem_reset(struct drm_device *dev)
+ {
+-	return (int32_t)(seq1 - seq2) >= 0;
+-}
++	struct drm_i915_private *dev_priv = dev->dev_private;
++	struct drm_i915_gem_object *obj_priv;
++	int i;
+ 
+-uint32_t
+-i915_get_gem_seqno(struct drm_device *dev,
+-		   struct intel_ring_buffer *ring)
+-{
+-	return ring->get_gem_seqno(dev, ring);
++	i915_gem_reset_ring_lists(dev_priv, &dev_priv->render_ring);
++	i915_gem_reset_ring_lists(dev_priv, &dev_priv->bsd_ring);
++	i915_gem_reset_ring_lists(dev_priv, &dev_priv->blt_ring);
++
++	/* Remove anything from the flushing lists. The GPU cache is likely
++	 * to be lost on reset along with the data, so simply move the
++	 * lost bo to the inactive list.
++	 */
++	while (!list_empty(&dev_priv->mm.flushing_list)) {
++		obj_priv = list_first_entry(&dev_priv->mm.flushing_list,
++					    struct drm_i915_gem_object,
++					    mm_list);
++
++		obj_priv->base.write_domain = 0;
++		list_del_init(&obj_priv->gpu_write_list);
++		i915_gem_object_move_to_inactive(&obj_priv->base);
++	}
++
++	/* Move everything out of the GPU domains to ensure we do any
++	 * necessary invalidation upon reuse.
++	 */
++	list_for_each_entry(obj_priv,
++			    &dev_priv->mm.inactive_list,
++			    mm_list)
++	{
++		obj_priv->base.read_domains &= ~I915_GEM_GPU_DOMAINS;
++	}
++
++	/* The fence registers are invalidated so clear them out */
++	for (i = 0; i < 16; i++) {
++		struct drm_i915_fence_reg *reg;
++
++		reg = &dev_priv->fence_regs[i];
++		if (!reg->obj)
++			continue;
++
++		i915_gem_clear_fence_reg(reg->obj);
++	}
+ }
+ 
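The removed helper above, i915_seqno_passed(), is still called in the retire path below, so its definition presumably just moved out of this file. The comparison relies on subtracting two 32-bit sequence numbers and reading the result as signed, which stays correct across wraparound as long as the two values are within 2^31 of each other. A self-contained check:

    #include <assert.h>
    #include <stdint.h>
    #include <stdio.h>
    
    /* True if seq1 is the same as or later than seq2, tolerant of u32 wraparound. */
    static int seqno_passed(uint32_t seq1, uint32_t seq2)
    {
    	return (int32_t)(seq1 - seq2) >= 0;
    }
    
    int main(void)
    {
    	assert(seqno_passed(5, 5));             /* equal                  */
    	assert(seqno_passed(10, 5));            /* plainly later          */
    	assert(!seqno_passed(5, 10));           /* plainly earlier        */
    	assert(seqno_passed(3, 0xfffffffeu));   /* later, wrapped past 0  */
    	assert(!seqno_passed(0xfffffffeu, 3));  /* earlier, across wrap   */
    	printf("seqno comparison handles wraparound\n");
    	return 0;
    }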
+ /**
+@@ -1741,38 +1847,58 @@ i915_gem_retire_requests_ring(struct drm_device *dev,
+ 	drm_i915_private_t *dev_priv = dev->dev_private;
+ 	uint32_t seqno;
+ 
+-	if (!ring->status_page.page_addr
+-			|| list_empty(&ring->request_list))
++	if (!ring->status_page.page_addr ||
++	    list_empty(&ring->request_list))
+ 		return;
+ 
+-	seqno = i915_get_gem_seqno(dev, ring);
++	WARN_ON(i915_verify_lists(dev));
+ 
++	seqno = ring->get_seqno(dev, ring);
+ 	while (!list_empty(&ring->request_list)) {
+ 		struct drm_i915_gem_request *request;
+-		uint32_t retiring_seqno;
+ 
+ 		request = list_first_entry(&ring->request_list,
+ 					   struct drm_i915_gem_request,
+ 					   list);
+-		retiring_seqno = request->seqno;
+ 
+-		if (i915_seqno_passed(seqno, retiring_seqno) ||
+-		    atomic_read(&dev_priv->mm.wedged)) {
+-			i915_gem_retire_request(dev, request);
++		if (!i915_seqno_passed(seqno, request->seqno))
++			break;
++
++		trace_i915_gem_request_retire(dev, request->seqno);
++
++		list_del(&request->list);
++		i915_gem_request_remove_from_client(request);
++		kfree(request);
++	}
++
++	/* Move any buffers on the active list that are no longer referenced
++	 * by the ringbuffer to the flushing/inactive lists as appropriate.
++	 */
++	while (!list_empty(&ring->active_list)) {
++		struct drm_gem_object *obj;
++		struct drm_i915_gem_object *obj_priv;
++
++		obj_priv = list_first_entry(&ring->active_list,
++					    struct drm_i915_gem_object,
++					    ring_list);
+ 
+-			list_del(&request->list);
+-			list_del(&request->client_list);
+-			kfree(request);
+-		} else
++		if (!i915_seqno_passed(seqno, obj_priv->last_rendering_seqno))
+ 			break;
++
++		obj = &obj_priv->base;
++		if (obj->write_domain != 0)
++			i915_gem_object_move_to_flushing(obj);
++		else
++			i915_gem_object_move_to_inactive(obj);
+ 	}
+ 
+ 	if (unlikely (dev_priv->trace_irq_seqno &&
+ 		      i915_seqno_passed(dev_priv->trace_irq_seqno, seqno))) {
+-
+ 		ring->user_irq_put(dev, ring);
+ 		dev_priv->trace_irq_seqno = 0;
+ 	}
++
++	WARN_ON(i915_verify_lists(dev));
+ }
+ 
+ void
+@@ -1790,16 +1916,16 @@ i915_gem_retire_requests(struct drm_device *dev)
+ 	     */
+ 	    list_for_each_entry_safe(obj_priv, tmp,
+ 				     &dev_priv->mm.deferred_free_list,
+-				     list)
++				     mm_list)
+ 		    i915_gem_free_object_tail(&obj_priv->base);
+ 	}
+ 
+ 	i915_gem_retire_requests_ring(dev, &dev_priv->render_ring);
+-	if (HAS_BSD(dev))
+-		i915_gem_retire_requests_ring(dev, &dev_priv->bsd_ring);
++	i915_gem_retire_requests_ring(dev, &dev_priv->bsd_ring);
++	i915_gem_retire_requests_ring(dev, &dev_priv->blt_ring);
+ }
+ 
+-void
++static void
+ i915_gem_retire_work_handler(struct work_struct *work)
+ {
+ 	drm_i915_private_t *dev_priv;
+@@ -1809,20 +1935,25 @@ i915_gem_retire_work_handler(struct work_struct *work)
+ 				mm.retire_work.work);
+ 	dev = dev_priv->dev;
+ 
+-	mutex_lock(&dev->struct_mutex);
++	/* Come back later if the device is busy... */
++	if (!mutex_trylock(&dev->struct_mutex)) {
++		queue_delayed_work(dev_priv->wq, &dev_priv->mm.retire_work, HZ);
++		return;
++	}
++
+ 	i915_gem_retire_requests(dev);
+ 
+ 	if (!dev_priv->mm.suspended &&
+ 		(!list_empty(&dev_priv->render_ring.request_list) ||
+-			(HAS_BSD(dev) &&
+-			 !list_empty(&dev_priv->bsd_ring.request_list))))
++		 !list_empty(&dev_priv->bsd_ring.request_list) ||
++		 !list_empty(&dev_priv->blt_ring.request_list)))
+ 		queue_delayed_work(dev_priv->wq, &dev_priv->mm.retire_work, HZ);
+ 	mutex_unlock(&dev->struct_mutex);
+ }
+ 
+ int
+ i915_do_wait_request(struct drm_device *dev, uint32_t seqno,
+-		int interruptible, struct intel_ring_buffer *ring)
++		     bool interruptible, struct intel_ring_buffer *ring)
+ {
+ 	drm_i915_private_t *dev_priv = dev->dev_private;
+ 	u32 ier;
+@@ -1831,9 +1962,16 @@ i915_do_wait_request(struct drm_device *dev, uint32_t seqno,
+ 	BUG_ON(seqno == 0);
+ 
+ 	if (atomic_read(&dev_priv->mm.wedged))
+-		return -EIO;
++		return -EAGAIN;
+ 
+-	if (!i915_seqno_passed(ring->get_gem_seqno(dev, ring), seqno)) {
++	if (ring->outstanding_lazy_request) {
++		seqno = i915_add_request(dev, NULL, NULL, ring);
++		if (seqno == 0)
++			return -ENOMEM;
++	}
++	BUG_ON(seqno == dev_priv->next_seqno);
++
++	if (!i915_seqno_passed(ring->get_seqno(dev, ring), seqno)) {
+ 		if (HAS_PCH_SPLIT(dev))
+ 			ier = I915_READ(DEIER) | I915_READ(GTIER);
+ 		else
+@@ -1852,12 +1990,12 @@ i915_do_wait_request(struct drm_device *dev, uint32_t seqno,
+ 		if (interruptible)
+ 			ret = wait_event_interruptible(ring->irq_queue,
+ 				i915_seqno_passed(
+-					ring->get_gem_seqno(dev, ring), seqno)
++					ring->get_seqno(dev, ring), seqno)
+ 				|| atomic_read(&dev_priv->mm.wedged));
+ 		else
+ 			wait_event(ring->irq_queue,
+ 				i915_seqno_passed(
+-					ring->get_gem_seqno(dev, ring), seqno)
++					ring->get_seqno(dev, ring), seqno)
+ 				|| atomic_read(&dev_priv->mm.wedged));
+ 
+ 		ring->user_irq_put(dev, ring);
+@@ -1866,11 +2004,12 @@ i915_do_wait_request(struct drm_device *dev, uint32_t seqno,
+ 		trace_i915_gem_request_wait_end(dev, seqno);
+ 	}
+ 	if (atomic_read(&dev_priv->mm.wedged))
+-		ret = -EIO;
++		ret = -EAGAIN;
+ 
+ 	if (ret && ret != -ERESTARTSYS)
+-		DRM_ERROR("%s returns %d (awaiting %d at %d)\n",
+-			  __func__, ret, seqno, ring->get_gem_seqno(dev, ring));
++		DRM_ERROR("%s returns %d (awaiting %d at %d, next %d)\n",
++			  __func__, ret, seqno, ring->get_seqno(dev, ring),
++			  dev_priv->next_seqno);
+ 
+ 	/* Directly dispatch request retiring.  While we have the work queue
+ 	 * to handle this, the waiter on a request often wants an associated
+@@ -1889,27 +2028,48 @@ i915_do_wait_request(struct drm_device *dev, uint32_t seqno,
+  */
+ static int
+ i915_wait_request(struct drm_device *dev, uint32_t seqno,
+-		struct intel_ring_buffer *ring)
++		  struct intel_ring_buffer *ring)
+ {
+ 	return i915_do_wait_request(dev, seqno, 1, ring);
+ }
+ 
+ static void
++i915_gem_flush_ring(struct drm_device *dev,
++		    struct drm_file *file_priv,
++		    struct intel_ring_buffer *ring,
++		    uint32_t invalidate_domains,
++		    uint32_t flush_domains)
++{
++	ring->flush(dev, ring, invalidate_domains, flush_domains);
++	i915_gem_process_flushing_list(dev, flush_domains, ring);
++}
++
++static void
+ i915_gem_flush(struct drm_device *dev,
++	       struct drm_file *file_priv,
+ 	       uint32_t invalidate_domains,
+-	       uint32_t flush_domains)
++	       uint32_t flush_domains,
++	       uint32_t flush_rings)
+ {
+ 	drm_i915_private_t *dev_priv = dev->dev_private;
++
+ 	if (flush_domains & I915_GEM_DOMAIN_CPU)
+ 		drm_agp_chipset_flush(dev);
+-	dev_priv->render_ring.flush(dev, &dev_priv->render_ring,
+-			invalidate_domains,
+-			flush_domains);
+-
+-	if (HAS_BSD(dev))
+-		dev_priv->bsd_ring.flush(dev, &dev_priv->bsd_ring,
+-				invalidate_domains,
+-				flush_domains);
++
++	if ((flush_domains | invalidate_domains) & I915_GEM_GPU_DOMAINS) {
++		if (flush_rings & RING_RENDER)
++			i915_gem_flush_ring(dev, file_priv,
++					    &dev_priv->render_ring,
++					    invalidate_domains, flush_domains);
++		if (flush_rings & RING_BSD)
++			i915_gem_flush_ring(dev, file_priv,
++					    &dev_priv->bsd_ring,
++					    invalidate_domains, flush_domains);
++		if (flush_rings & RING_BLT)
++			i915_gem_flush_ring(dev, file_priv,
++					    &dev_priv->blt_ring,
++					    invalidate_domains, flush_domains);
++	}
+ }
+ 
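i915_gem_flush() now takes a flush_rings bitmask and forwards the flush only to the rings named in it, pairing each ring flush with its own process_flushing_list() pass. The selection is plain bit tests; a tiny standalone example of that dispatch shape (the DEMO_RING_* values are invented for the demo):

    #include <stdio.h>
    
    enum { DEMO_RING_RENDER = 0x1, DEMO_RING_BSD = 0x2, DEMO_RING_BLT = 0x4 };
    
    static void flush_one(const char *name) { printf("flush %s ring\n", name); }
    
    static void flush_rings(unsigned int mask)
    {
    	if (mask & DEMO_RING_RENDER)
    		flush_one("render");
    	if (mask & DEMO_RING_BSD)
    		flush_one("bsd");
    	if (mask & DEMO_RING_BLT)
    		flush_one("blt");
    }
    
    int main(void)
    {
    	/* Flush only the rings that actually own dirty write domains. */
    	flush_rings(DEMO_RING_RENDER | DEMO_RING_BLT);
    	return 0;
    }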
+ /**
+@@ -1917,7 +2077,8 @@ i915_gem_flush(struct drm_device *dev,
+  * safe to unbind from the GTT or access from the CPU.
+  */
+ static int
+-i915_gem_object_wait_rendering(struct drm_gem_object *obj)
++i915_gem_object_wait_rendering(struct drm_gem_object *obj,
++			       bool interruptible)
+ {
+ 	struct drm_device *dev = obj->dev;
+ 	struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
+@@ -1932,13 +2093,11 @@ i915_gem_object_wait_rendering(struct drm_gem_object *obj)
+ 	 * it.
+ 	 */
+ 	if (obj_priv->active) {
+-#if WATCH_BUF
+-		DRM_INFO("%s: object %p wait for seqno %08x\n",
+-			  __func__, obj, obj_priv->last_rendering_seqno);
+-#endif
+-		ret = i915_wait_request(dev,
+-				obj_priv->last_rendering_seqno, obj_priv->ring);
+-		if (ret != 0)
++		ret = i915_do_wait_request(dev,
++					   obj_priv->last_rendering_seqno,
++					   interruptible,
++					   obj_priv->ring);
++		if (ret)
+ 			return ret;
+ 	}
+ 
+@@ -1952,14 +2111,10 @@ int
+ i915_gem_object_unbind(struct drm_gem_object *obj)
+ {
+ 	struct drm_device *dev = obj->dev;
+-	drm_i915_private_t *dev_priv = dev->dev_private;
++	struct drm_i915_private *dev_priv = dev->dev_private;
+ 	struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
+ 	int ret = 0;
+ 
+-#if WATCH_BUF
+-	DRM_INFO("%s:%d %p\n", __func__, __LINE__, obj);
+-	DRM_INFO("gtt_space %p\n", obj_priv->gtt_space);
+-#endif
+ 	if (obj_priv->gtt_space == NULL)
+ 		return 0;
+ 
+@@ -1984,33 +2139,27 @@ i915_gem_object_unbind(struct drm_gem_object *obj)
+ 	 * should be safe and we need to cleanup or else we might
+ 	 * cause memory corruption through use-after-free.
+ 	 */
++	if (ret) {
++		i915_gem_clflush_object(obj);
++		obj->read_domains = obj->write_domain = I915_GEM_DOMAIN_CPU;
++	}
+ 
+ 	/* release the fence reg _after_ flushing */
+ 	if (obj_priv->fence_reg != I915_FENCE_REG_NONE)
+ 		i915_gem_clear_fence_reg(obj);
+ 
+-	if (obj_priv->agp_mem != NULL) {
+-		drm_unbind_agp(obj_priv->agp_mem);
+-		drm_free_agp(obj_priv->agp_mem, obj->size / PAGE_SIZE);
+-		obj_priv->agp_mem = NULL;
+-	}
++	drm_unbind_agp(obj_priv->agp_mem);
++	drm_free_agp(obj_priv->agp_mem, obj->size / PAGE_SIZE);
+ 
+ 	i915_gem_object_put_pages(obj);
+ 	BUG_ON(obj_priv->pages_refcount);
+ 
+-	if (obj_priv->gtt_space) {
+-		atomic_dec(&dev->gtt_count);
+-		atomic_sub(obj->size, &dev->gtt_memory);
+-
+-		drm_mm_put_block(obj_priv->gtt_space);
+-		obj_priv->gtt_space = NULL;
+-	}
++	i915_gem_info_remove_gtt(dev_priv, obj->size);
++	list_del_init(&obj_priv->mm_list);
+ 
+-	/* Remove ourselves from the LRU list if present. */
+-	spin_lock(&dev_priv->mm.active_list_lock);
+-	if (!list_empty(&obj_priv->list))
+-		list_del_init(&obj_priv->list);
+-	spin_unlock(&dev_priv->mm.active_list_lock);
++	drm_mm_put_block(obj_priv->gtt_space);
++	obj_priv->gtt_space = NULL;
++	obj_priv->gtt_offset = 0;
+ 
+ 	if (i915_gem_object_is_purgeable(obj_priv))
+ 		i915_gem_object_truncate(obj);
+@@ -2020,48 +2169,48 @@ i915_gem_object_unbind(struct drm_gem_object *obj)
+ 	return ret;
+ }
+ 
++static int i915_ring_idle(struct drm_device *dev,
++			  struct intel_ring_buffer *ring)
++{
++	if (list_empty(&ring->gpu_write_list) && list_empty(&ring->active_list))
++		return 0;
++
++	i915_gem_flush_ring(dev, NULL, ring,
++			    I915_GEM_GPU_DOMAINS, I915_GEM_GPU_DOMAINS);
++	return i915_wait_request(dev,
++				 i915_gem_next_request_seqno(dev, ring),
++				 ring);
++}
++
+ int
+ i915_gpu_idle(struct drm_device *dev)
+ {
+ 	drm_i915_private_t *dev_priv = dev->dev_private;
+ 	bool lists_empty;
+-	uint32_t seqno1, seqno2;
+ 	int ret;
+ 
+-	spin_lock(&dev_priv->mm.active_list_lock);
+ 	lists_empty = (list_empty(&dev_priv->mm.flushing_list) &&
+-		       list_empty(&dev_priv->render_ring.active_list) &&
+-		       (!HAS_BSD(dev) ||
+-			list_empty(&dev_priv->bsd_ring.active_list)));
+-	spin_unlock(&dev_priv->mm.active_list_lock);
+-
++		       list_empty(&dev_priv->mm.active_list));
+ 	if (lists_empty)
+ 		return 0;
+ 
+ 	/* Flush everything onto the inactive list. */
+-	i915_gem_flush(dev, I915_GEM_GPU_DOMAINS, I915_GEM_GPU_DOMAINS);
+-	seqno1 = i915_add_request(dev, NULL, I915_GEM_GPU_DOMAINS,
+-			&dev_priv->render_ring);
+-	if (seqno1 == 0)
+-		return -ENOMEM;
+-	ret = i915_wait_request(dev, seqno1, &dev_priv->render_ring);
+-
+-	if (HAS_BSD(dev)) {
+-		seqno2 = i915_add_request(dev, NULL, I915_GEM_GPU_DOMAINS,
+-				&dev_priv->bsd_ring);
+-		if (seqno2 == 0)
+-			return -ENOMEM;
++	ret = i915_ring_idle(dev, &dev_priv->render_ring);
++	if (ret)
++		return ret;
+ 
+-		ret = i915_wait_request(dev, seqno2, &dev_priv->bsd_ring);
+-		if (ret)
+-			return ret;
+-	}
++	ret = i915_ring_idle(dev, &dev_priv->bsd_ring);
++	if (ret)
++		return ret;
+ 
++	ret = i915_ring_idle(dev, &dev_priv->blt_ring);
++	if (ret)
++		return ret;
+ 
+-	return ret;
++	return 0;
+ }
+ 
+-int
++static int
+ i915_gem_object_get_pages(struct drm_gem_object *obj,
+ 			  gfp_t gfpmask)
+ {
+@@ -2241,7 +2390,8 @@ static void i830_write_fence_reg(struct drm_i915_fence_reg *reg)
+ 	I915_WRITE(FENCE_REG_830_0 + (regnum * 4), val);
+ }
+ 
+-static int i915_find_fence_reg(struct drm_device *dev)
++static int i915_find_fence_reg(struct drm_device *dev,
++			       bool interruptible)
+ {
+ 	struct drm_i915_fence_reg *reg = NULL;
+ 	struct drm_i915_gem_object *obj_priv = NULL;
+@@ -2286,7 +2436,7 @@ static int i915_find_fence_reg(struct drm_device *dev)
+ 	 * private reference to obj like the other callers of put_fence_reg
+ 	 * (set_tiling ioctl) do. */
+ 	drm_gem_object_reference(obj);
+-	ret = i915_gem_object_put_fence_reg(obj);
++	ret = i915_gem_object_put_fence_reg(obj, interruptible);
+ 	drm_gem_object_unreference(obj);
+ 	if (ret != 0)
+ 		return ret;
+@@ -2308,7 +2458,8 @@ static int i915_find_fence_reg(struct drm_device *dev)
+  * and tiling format.
+  */
+ int
+-i915_gem_object_get_fence_reg(struct drm_gem_object *obj)
++i915_gem_object_get_fence_reg(struct drm_gem_object *obj,
++			      bool interruptible)
+ {
+ 	struct drm_device *dev = obj->dev;
+ 	struct drm_i915_private *dev_priv = dev->dev_private;
+@@ -2343,7 +2494,7 @@ i915_gem_object_get_fence_reg(struct drm_gem_object *obj)
+ 		break;
+ 	}
+ 
+-	ret = i915_find_fence_reg(dev);
++	ret = i915_find_fence_reg(dev, interruptible);
+ 	if (ret < 0)
+ 		return ret;
+ 
+@@ -2421,15 +2572,19 @@ i915_gem_clear_fence_reg(struct drm_gem_object *obj)
+  * i915_gem_object_put_fence_reg - waits on outstanding fenced access
+  * to the buffer to finish, and then resets the fence register.
+  * @obj: tiled object holding a fence register.
++ * @interruptible: whether the wait upon the fence is interruptible
+  *
+  * Zeroes out the fence register itself and clears out the associated
+  * data structures in dev_priv and obj_priv.
+  */
+ int
+-i915_gem_object_put_fence_reg(struct drm_gem_object *obj)
++i915_gem_object_put_fence_reg(struct drm_gem_object *obj,
++			      bool interruptible)
+ {
+ 	struct drm_device *dev = obj->dev;
++	struct drm_i915_private *dev_priv = dev->dev_private;
+ 	struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
++	struct drm_i915_fence_reg *reg;
+ 
+ 	if (obj_priv->fence_reg == I915_FENCE_REG_NONE)
+ 		return 0;
+@@ -2444,20 +2599,23 @@ i915_gem_object_put_fence_reg(struct drm_gem_object *obj)
+ 	 * therefore we must wait for any outstanding access to complete
+ 	 * before clearing the fence.
+ 	 */
+-	if (!IS_I965G(dev)) {
++	reg = &dev_priv->fence_regs[obj_priv->fence_reg];
++	if (reg->gpu) {
+ 		int ret;
+ 
+-		ret = i915_gem_object_flush_gpu_write_domain(obj);
+-		if (ret != 0)
++		ret = i915_gem_object_flush_gpu_write_domain(obj, true);
++		if (ret)
+ 			return ret;
+ 
+-		ret = i915_gem_object_wait_rendering(obj);
+-		if (ret != 0)
++		ret = i915_gem_object_wait_rendering(obj, interruptible);
++		if (ret)
+ 			return ret;
++
++		reg->gpu = false;
+ 	}
+ 
+ 	i915_gem_object_flush_gtt_write_domain(obj);
+-	i915_gem_clear_fence_reg (obj);
++	i915_gem_clear_fence_reg(obj);
+ 
+ 	return 0;
+ }
+@@ -2490,7 +2648,7 @@ i915_gem_object_bind_to_gtt(struct drm_gem_object *obj, unsigned alignment)
+ 	/* If the object is bigger than the entire aperture, reject it early
+ 	 * before evicting everything in a vain attempt to find space.
+ 	 */
+-	if (obj->size > dev->gtt_total) {
++	if (obj->size > dev_priv->mm.gtt_total) {
+ 		DRM_ERROR("Attempting to bind an object larger than the aperture\n");
+ 		return -E2BIG;
+ 	}
+@@ -2498,19 +2656,13 @@ i915_gem_object_bind_to_gtt(struct drm_gem_object *obj, unsigned alignment)
+  search_free:
+ 	free_space = drm_mm_search_free(&dev_priv->mm.gtt_space,
+ 					obj->size, alignment, 0);
+-	if (free_space != NULL) {
++	if (free_space != NULL)
+ 		obj_priv->gtt_space = drm_mm_get_block(free_space, obj->size,
+ 						       alignment);
+-		if (obj_priv->gtt_space != NULL)
+-			obj_priv->gtt_offset = obj_priv->gtt_space->start;
+-	}
+ 	if (obj_priv->gtt_space == NULL) {
+ 		/* If the gtt is empty and we're still having trouble
+ 		 * fitting our object in, we're out of memory.
+ 		 */
+-#if WATCH_LRU
+-		DRM_INFO("%s: GTT full, evicting something\n", __func__);
+-#endif
+ 		ret = i915_gem_evict_something(dev, obj->size, alignment);
+ 		if (ret)
+ 			return ret;
+@@ -2518,10 +2670,6 @@ i915_gem_object_bind_to_gtt(struct drm_gem_object *obj, unsigned alignment)
+ 		goto search_free;
+ 	}
+ 
+-#if WATCH_BUF
+-	DRM_INFO("Binding object of size %zd at 0x%08x\n",
+-		 obj->size, obj_priv->gtt_offset);
+-#endif
+ 	ret = i915_gem_object_get_pages(obj, gfpmask);
+ 	if (ret) {
+ 		drm_mm_put_block(obj_priv->gtt_space);
+@@ -2553,7 +2701,7 @@ i915_gem_object_bind_to_gtt(struct drm_gem_object *obj, unsigned alignment)
+ 	obj_priv->agp_mem = drm_agp_bind_pages(dev,
+ 					       obj_priv->pages,
+ 					       obj->size >> PAGE_SHIFT,
+-					       obj_priv->gtt_offset,
++					       obj_priv->gtt_space->start,
+ 					       obj_priv->agp_type);
+ 	if (obj_priv->agp_mem == NULL) {
+ 		i915_gem_object_put_pages(obj);
+@@ -2566,11 +2714,10 @@ i915_gem_object_bind_to_gtt(struct drm_gem_object *obj, unsigned alignment)
+ 
+ 		goto search_free;
+ 	}
+-	atomic_inc(&dev->gtt_count);
+-	atomic_add(obj->size, &dev->gtt_memory);
+ 
+ 	/* keep track of bounds object by adding it to the inactive list */
+-	list_add_tail(&obj_priv->list, &dev_priv->mm.inactive_list);
++	list_add_tail(&obj_priv->mm_list, &dev_priv->mm.inactive_list);
++	i915_gem_info_add_gtt(dev_priv, obj->size);
+ 
+ 	/* Assert that the object is not currently in any GPU domain. As it
+ 	 * wasn't in the GTT, there shouldn't be any way it could have been in
+@@ -2579,6 +2726,7 @@ i915_gem_object_bind_to_gtt(struct drm_gem_object *obj, unsigned alignment)
+ 	BUG_ON(obj->read_domains & I915_GEM_GPU_DOMAINS);
+ 	BUG_ON(obj->write_domain & I915_GEM_GPU_DOMAINS);
+ 
++	obj_priv->gtt_offset = obj_priv->gtt_space->start;
+ 	trace_i915_gem_object_bind(obj, obj_priv->gtt_offset);
+ 
+ 	return 0;
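i915_gem_object_bind_to_gtt() keeps its allocate-or-evict loop: search the GTT for free space, and if nothing fits, evict something and jump back to search_free. The same retry shape, reduced to a self-contained toy allocator (numbers and helpers invented for the demo):

    #include <stdio.h>
    
    static unsigned long gtt_free = 8;	/* toy aperture: 8 units free          */
    static unsigned long gtt_used = 24;	/* 24 units held by evictable objects  */
    
    static int search_free_space(unsigned long size)
    {
    	return size <= gtt_free;	/* is there a hole big enough?         */
    }
    
    static int evict_something(unsigned long size)
    {
    	unsigned long evicted = size < gtt_used ? size : gtt_used;
    	if (evicted == 0)
    		return -1;		/* nothing left to evict: give up      */
    	gtt_used -= evicted;
    	gtt_free += evicted;
    	return 0;
    }
    
    static int bind(unsigned long size)
    {
    search_free:
    	if (!search_free_space(size)) {
    		if (evict_something(size))
    			return -1;	/* truly out of space                  */
    		goto search_free;	/* retry the search after eviction     */
    	}
    	gtt_free -= size;
    	return 0;
    }
    
    int main(void)
    {
    	printf("bind(16) -> %d (free now %lu)\n", bind(16), gtt_free);
    	return 0;
    }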
+@@ -2603,25 +2751,30 @@ i915_gem_clflush_object(struct drm_gem_object *obj)
+ 
+ /** Flushes any GPU write domain for the object if it's dirty. */
+ static int
+-i915_gem_object_flush_gpu_write_domain(struct drm_gem_object *obj)
++i915_gem_object_flush_gpu_write_domain(struct drm_gem_object *obj,
++				       bool pipelined)
+ {
+ 	struct drm_device *dev = obj->dev;
+ 	uint32_t old_write_domain;
+-	struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
+ 
+ 	if ((obj->write_domain & I915_GEM_GPU_DOMAINS) == 0)
+ 		return 0;
+ 
+ 	/* Queue the GPU write cache flushing we need. */
+ 	old_write_domain = obj->write_domain;
+-	i915_gem_flush(dev, 0, obj->write_domain);
+-	if (i915_add_request(dev, NULL, obj->write_domain, obj_priv->ring) == 0)
+-		return -ENOMEM;
++	i915_gem_flush_ring(dev, NULL,
++			    to_intel_bo(obj)->ring,
++			    0, obj->write_domain);
++	BUG_ON(obj->write_domain);
+ 
+ 	trace_i915_gem_object_change_domain(obj,
+ 					    obj->read_domains,
+ 					    old_write_domain);
+-	return 0;
++
++	if (pipelined)
++		return 0;
++
++	return i915_gem_object_wait_rendering(obj, true);
+ }
+ 
+ /** Flushes the GTT write domain for the object if it's dirty. */
+@@ -2665,26 +2818,6 @@ i915_gem_object_flush_cpu_write_domain(struct drm_gem_object *obj)
+ 					    old_write_domain);
+ }
+ 
+-int
+-i915_gem_object_flush_write_domain(struct drm_gem_object *obj)
+-{
+-	int ret = 0;
+-
+-	switch (obj->write_domain) {
+-	case I915_GEM_DOMAIN_GTT:
+-		i915_gem_object_flush_gtt_write_domain(obj);
+-		break;
+-	case I915_GEM_DOMAIN_CPU:
+-		i915_gem_object_flush_cpu_write_domain(obj);
+-		break;
+-	default:
+-		ret = i915_gem_object_flush_gpu_write_domain(obj);
+-		break;
+-	}
+-
+-	return ret;
+-}
+-
+ /**
+  * Moves a single object to the GTT read, and possibly write domain.
+  *
+@@ -2702,32 +2835,28 @@ i915_gem_object_set_to_gtt_domain(struct drm_gem_object *obj, int write)
+ 	if (obj_priv->gtt_space == NULL)
+ 		return -EINVAL;
+ 
+-	ret = i915_gem_object_flush_gpu_write_domain(obj);
++	ret = i915_gem_object_flush_gpu_write_domain(obj, false);
+ 	if (ret != 0)
+ 		return ret;
+ 
+-	/* Wait on any GPU rendering and flushing to occur. */
+-	ret = i915_gem_object_wait_rendering(obj);
+-	if (ret != 0)
+-		return ret;
++	i915_gem_object_flush_cpu_write_domain(obj);
++
++	if (write) {
++		ret = i915_gem_object_wait_rendering(obj, true);
++		if (ret)
++			return ret;
++	}
+ 
+ 	old_write_domain = obj->write_domain;
+ 	old_read_domains = obj->read_domains;
+ 
+-	/* If we're writing through the GTT domain, then CPU and GPU caches
+-	 * will need to be invalidated at next use.
+-	 */
+-	if (write)
+-		obj->read_domains &= I915_GEM_DOMAIN_GTT;
+-
+-	i915_gem_object_flush_cpu_write_domain(obj);
+-
+ 	/* It should now be out of any other write domains, and we can update
+ 	 * the domain values for our changes.
+ 	 */
+ 	BUG_ON((obj->write_domain & ~I915_GEM_DOMAIN_GTT) != 0);
+ 	obj->read_domains |= I915_GEM_DOMAIN_GTT;
+ 	if (write) {
++		obj->read_domains = I915_GEM_DOMAIN_GTT;
+ 		obj->write_domain = I915_GEM_DOMAIN_GTT;
+ 		obj_priv->dirty = 1;
+ 	}
+@@ -2744,51 +2873,36 @@ i915_gem_object_set_to_gtt_domain(struct drm_gem_object *obj, int write)
+  * wait, as in modesetting process we're not supposed to be interrupted.
+  */
+ int
+-i915_gem_object_set_to_display_plane(struct drm_gem_object *obj)
++i915_gem_object_set_to_display_plane(struct drm_gem_object *obj,
++				     bool pipelined)
+ {
+-	struct drm_device *dev = obj->dev;
+ 	struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
+-	uint32_t old_write_domain, old_read_domains;
++	uint32_t old_read_domains;
+ 	int ret;
+ 
+ 	/* Not valid to be called on unbound objects. */
+ 	if (obj_priv->gtt_space == NULL)
+ 		return -EINVAL;
+ 
+-	ret = i915_gem_object_flush_gpu_write_domain(obj);
++	ret = i915_gem_object_flush_gpu_write_domain(obj, true);
+ 	if (ret)
+ 		return ret;
+ 
+-	/* Wait on any GPU rendering and flushing to occur. */
+-	if (obj_priv->active) {
+-#if WATCH_BUF
+-		DRM_INFO("%s: object %p wait for seqno %08x\n",
+-			  __func__, obj, obj_priv->last_rendering_seqno);
+-#endif
+-		ret = i915_do_wait_request(dev,
+-				obj_priv->last_rendering_seqno,
+-				0,
+-				obj_priv->ring);
+-		if (ret != 0)
++	/* Currently, we are always called from a non-interruptible context. */
++	if (!pipelined) {
++		ret = i915_gem_object_wait_rendering(obj, false);
++		if (ret)
+ 			return ret;
+ 	}
+ 
+ 	i915_gem_object_flush_cpu_write_domain(obj);
+ 
+-	old_write_domain = obj->write_domain;
+ 	old_read_domains = obj->read_domains;
+-
+-	/* It should now be out of any other write domains, and we can update
+-	 * the domain values for our changes.
+-	 */
+-	BUG_ON((obj->write_domain & ~I915_GEM_DOMAIN_GTT) != 0);
+-	obj->read_domains = I915_GEM_DOMAIN_GTT;
+-	obj->write_domain = I915_GEM_DOMAIN_GTT;
+-	obj_priv->dirty = 1;
++	obj->read_domains |= I915_GEM_DOMAIN_GTT;
+ 
+ 	trace_i915_gem_object_change_domain(obj,
+ 					    old_read_domains,
+-					    old_write_domain);
++					    obj->write_domain);
+ 
+ 	return 0;
+ }
+@@ -2805,12 +2919,7 @@ i915_gem_object_set_to_cpu_domain(struct drm_gem_object *obj, int write)
+ 	uint32_t old_write_domain, old_read_domains;
+ 	int ret;
+ 
+-	ret = i915_gem_object_flush_gpu_write_domain(obj);
+-	if (ret)
+-		return ret;
+-
+-	/* Wait on any GPU rendering and flushing to occur. */
+-	ret = i915_gem_object_wait_rendering(obj);
++	ret = i915_gem_object_flush_gpu_write_domain(obj, false);
+ 	if (ret != 0)
+ 		return ret;
+ 
+@@ -2821,6 +2930,12 @@ i915_gem_object_set_to_cpu_domain(struct drm_gem_object *obj, int write)
+ 	 */
+ 	i915_gem_object_set_to_full_cpu_read_domain(obj);
+ 
++	if (write) {
++		ret = i915_gem_object_wait_rendering(obj, true);
++		if (ret)
++			return ret;
++	}
++
+ 	old_write_domain = obj->write_domain;
+ 	old_read_domains = obj->read_domains;
+ 
+@@ -2840,7 +2955,7 @@ i915_gem_object_set_to_cpu_domain(struct drm_gem_object *obj, int write)
+ 	 * need to be invalidated at next use.
+ 	 */
+ 	if (write) {
+-		obj->read_domains &= I915_GEM_DOMAIN_CPU;
++		obj->read_domains = I915_GEM_DOMAIN_CPU;
+ 		obj->write_domain = I915_GEM_DOMAIN_CPU;
+ 	}
+ 
+@@ -2963,26 +3078,18 @@ i915_gem_object_set_to_cpu_domain(struct drm_gem_object *obj, int write)
+  *		drm_agp_chipset_flush
+  */
+ static void
+-i915_gem_object_set_to_gpu_domain(struct drm_gem_object *obj)
++i915_gem_object_set_to_gpu_domain(struct drm_gem_object *obj,
++				  struct intel_ring_buffer *ring)
+ {
+ 	struct drm_device		*dev = obj->dev;
+-	drm_i915_private_t		*dev_priv = dev->dev_private;
++	struct drm_i915_private		*dev_priv = dev->dev_private;
+ 	struct drm_i915_gem_object	*obj_priv = to_intel_bo(obj);
+ 	uint32_t			invalidate_domains = 0;
+ 	uint32_t			flush_domains = 0;
+ 	uint32_t			old_read_domains;
+ 
+-	BUG_ON(obj->pending_read_domains & I915_GEM_DOMAIN_CPU);
+-	BUG_ON(obj->pending_write_domain == I915_GEM_DOMAIN_CPU);
+-
+ 	intel_mark_busy(dev, obj);
+ 
+-#if WATCH_BUF
+-	DRM_INFO("%s: object %p read %08x -> %08x write %08x -> %08x\n",
+-		 __func__, obj,
+-		 obj->read_domains, obj->pending_read_domains,
+-		 obj->write_domain, obj->pending_write_domain);
+-#endif
+ 	/*
+ 	 * If the object isn't moving to a new write domain,
+ 	 * let the object stay in multiple read domains
+@@ -2999,7 +3106,8 @@ i915_gem_object_set_to_gpu_domain(struct drm_gem_object *obj)
+ 	 * write domain
+ 	 */
+ 	if (obj->write_domain &&
+-	    obj->write_domain != obj->pending_read_domains) {
++	    (obj->write_domain != obj->pending_read_domains ||
++	     obj_priv->ring != ring)) {
+ 		flush_domains |= obj->write_domain;
+ 		invalidate_domains |=
+ 			obj->pending_read_domains & ~obj->write_domain;
+@@ -3009,13 +3117,8 @@ i915_gem_object_set_to_gpu_domain(struct drm_gem_object *obj)
+ 	 * stale data. That is, any new read domains.
+ 	 */
+ 	invalidate_domains |= obj->pending_read_domains & ~obj->read_domains;
+-	if ((flush_domains | invalidate_domains) & I915_GEM_DOMAIN_CPU) {
+-#if WATCH_BUF
+-		DRM_INFO("%s: CPU domain flush %08x invalidate %08x\n",
+-			 __func__, flush_domains, invalidate_domains);
+-#endif
++	if ((flush_domains | invalidate_domains) & I915_GEM_DOMAIN_CPU)
+ 		i915_gem_clflush_object(obj);
+-	}
+ 
+ 	old_read_domains = obj->read_domains;
+ 
+@@ -3029,21 +3132,12 @@ i915_gem_object_set_to_gpu_domain(struct drm_gem_object *obj)
+ 		obj->pending_write_domain = obj->write_domain;
+ 	obj->read_domains = obj->pending_read_domains;
+ 
+-	if (flush_domains & I915_GEM_GPU_DOMAINS) {
+-		if (obj_priv->ring == &dev_priv->render_ring)
+-			dev_priv->flush_rings |= FLUSH_RENDER_RING;
+-		else if (obj_priv->ring == &dev_priv->bsd_ring)
+-			dev_priv->flush_rings |= FLUSH_BSD_RING;
+-	}
+-
+ 	dev->invalidate_domains |= invalidate_domains;
+ 	dev->flush_domains |= flush_domains;
+-#if WATCH_BUF
+-	DRM_INFO("%s: read %08x write %08x invalidate %08x flush %08x\n",
+-		 __func__,
+-		 obj->read_domains, obj->write_domain,
+-		 dev->invalidate_domains, dev->flush_domains);
+-#endif
++	if (flush_domains & I915_GEM_GPU_DOMAINS)
++		dev_priv->mm.flush_rings |= obj_priv->ring->id;
++	if (invalidate_domains & I915_GEM_GPU_DOMAINS)
++		dev_priv->mm.flush_rings |= ring->id;
+ 
+ 	trace_i915_gem_object_change_domain(obj,
+ 					    old_read_domains,
+@@ -3106,12 +3200,7 @@ i915_gem_object_set_cpu_read_domain_range(struct drm_gem_object *obj,
+ 	if (offset == 0 && size == obj->size)
+ 		return i915_gem_object_set_to_cpu_domain(obj, 0);
+ 
+-	ret = i915_gem_object_flush_gpu_write_domain(obj);
+-	if (ret)
+-		return ret;
+-
+-	/* Wait on any GPU rendering and flushing to occur. */
+-	ret = i915_gem_object_wait_rendering(obj);
++	ret = i915_gem_object_flush_gpu_write_domain(obj, false);
+ 	if (ret != 0)
+ 		return ret;
+ 	i915_gem_object_flush_gtt_write_domain(obj);
+@@ -3164,66 +3253,42 @@ i915_gem_object_set_cpu_read_domain_range(struct drm_gem_object *obj,
+  * Pin an object to the GTT and evaluate the relocations landing in it.
+  */
+ static int
+-i915_gem_object_pin_and_relocate(struct drm_gem_object *obj,
+-				 struct drm_file *file_priv,
+-				 struct drm_i915_gem_exec_object2 *entry,
+-				 struct drm_i915_gem_relocation_entry *relocs)
++i915_gem_execbuffer_relocate(struct drm_i915_gem_object *obj,
++			     struct drm_file *file_priv,
++			     struct drm_i915_gem_exec_object2 *entry)
+ {
+-	struct drm_device *dev = obj->dev;
++	struct drm_device *dev = obj->base.dev;
+ 	drm_i915_private_t *dev_priv = dev->dev_private;
+-	struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
+-	int i, ret;
+-	void __iomem *reloc_page;
+-	bool need_fence;
+-
+-	need_fence = entry->flags & EXEC_OBJECT_NEEDS_FENCE &&
+-	             obj_priv->tiling_mode != I915_TILING_NONE;
+-
+-	/* Check fence reg constraints and rebind if necessary */
+-	if (need_fence &&
+-	    !i915_gem_object_fence_offset_ok(obj,
+-					     obj_priv->tiling_mode)) {
+-		ret = i915_gem_object_unbind(obj);
+-		if (ret)
+-			return ret;
+-	}
++	struct drm_i915_gem_relocation_entry __user *user_relocs;
++	struct drm_gem_object *target_obj = NULL;
++	uint32_t target_handle = 0;
++	int i, ret = 0;
+ 
+-	/* Choose the GTT offset for our buffer and put it there. */
+-	ret = i915_gem_object_pin(obj, (uint32_t) entry->alignment);
+-	if (ret)
+-		return ret;
++	user_relocs = (void __user *)(uintptr_t)entry->relocs_ptr;
++	for (i = 0; i < entry->relocation_count; i++) {
++		struct drm_i915_gem_relocation_entry reloc;
++		uint32_t target_offset;
+ 
+-	/*
+-	 * Pre-965 chips need a fence register set up in order to
+-	 * properly handle blits to/from tiled surfaces.
+-	 */
+-	if (need_fence) {
+-		ret = i915_gem_object_get_fence_reg(obj);
+-		if (ret != 0) {
+-			i915_gem_object_unpin(obj);
+-			return ret;
++		if (__copy_from_user_inatomic(&reloc,
++					      user_relocs+i,
++					      sizeof(reloc))) {
++			ret = -EFAULT;
++			break;
+ 		}
+-	}
+ 
+-	entry->offset = obj_priv->gtt_offset;
++		if (reloc.target_handle != target_handle) {
++			drm_gem_object_unreference(target_obj);
+ 
+-	/* Apply the relocations, using the GTT aperture to avoid cache
+-	 * flushing requirements.
+-	 */
+-	for (i = 0; i < entry->relocation_count; i++) {
+-		struct drm_i915_gem_relocation_entry *reloc= &relocs[i];
+-		struct drm_gem_object *target_obj;
+-		struct drm_i915_gem_object *target_obj_priv;
+-		uint32_t reloc_val, reloc_offset;
+-		uint32_t __iomem *reloc_entry;
+-
+-		target_obj = drm_gem_object_lookup(obj->dev, file_priv,
+-						   reloc->target_handle);
+-		if (target_obj == NULL) {
+-			i915_gem_object_unpin(obj);
+-			return -ENOENT;
++			target_obj = drm_gem_object_lookup(dev, file_priv,
++							   reloc.target_handle);
++			if (target_obj == NULL) {
++				ret = -ENOENT;
++				break;
++			}
++
++			target_handle = reloc.target_handle;
+ 		}
+-		target_obj_priv = to_intel_bo(target_obj);
++		target_offset = to_intel_bo(target_obj)->gtt_offset;
+ 
+ #if WATCH_RELOC
+ 		DRM_INFO("%s: obj %p offset %08x target %d "
+@@ -3231,268 +3296,313 @@ i915_gem_object_pin_and_relocate(struct drm_gem_object *obj,
+ 			 "presumed %08x delta %08x\n",
+ 			 __func__,
+ 			 obj,
+-			 (int) reloc->offset,
+-			 (int) reloc->target_handle,
+-			 (int) reloc->read_domains,
+-			 (int) reloc->write_domain,
+-			 (int) target_obj_priv->gtt_offset,
+-			 (int) reloc->presumed_offset,
+-			 reloc->delta);
++			 (int) reloc.offset,
++			 (int) reloc.target_handle,
++			 (int) reloc.read_domains,
++			 (int) reloc.write_domain,
++			 (int) target_offset,
++			 (int) reloc.presumed_offset,
++			 reloc.delta);
+ #endif
+ 
+ 		/* The target buffer should have appeared before us in the
+ 		 * exec_object list, so it should have a GTT space bound by now.
+ 		 */
+-		if (target_obj_priv->gtt_space == NULL) {
++		if (target_offset == 0) {
+ 			DRM_ERROR("No GTT space found for object %d\n",
+-				  reloc->target_handle);
+-			drm_gem_object_unreference(target_obj);
+-			i915_gem_object_unpin(obj);
+-			return -EINVAL;
++				  reloc.target_handle);
++			ret = -EINVAL;
++			break;
+ 		}
+ 
+ 		/* Validate that the target is in a valid r/w GPU domain */
+-		if (reloc->write_domain & (reloc->write_domain - 1)) {
++		if (reloc.write_domain & (reloc.write_domain - 1)) {
+ 			DRM_ERROR("reloc with multiple write domains: "
+ 				  "obj %p target %d offset %d "
+ 				  "read %08x write %08x",
+-				  obj, reloc->target_handle,
+-				  (int) reloc->offset,
+-				  reloc->read_domains,
+-				  reloc->write_domain);
+-			drm_gem_object_unreference(target_obj);
+-			i915_gem_object_unpin(obj);
+-			return -EINVAL;
++				  obj, reloc.target_handle,
++				  (int) reloc.offset,
++				  reloc.read_domains,
++				  reloc.write_domain);
++			ret = -EINVAL;
++			break;
+ 		}
+-		if (reloc->write_domain & I915_GEM_DOMAIN_CPU ||
+-		    reloc->read_domains & I915_GEM_DOMAIN_CPU) {
++		if (reloc.write_domain & I915_GEM_DOMAIN_CPU ||
++		    reloc.read_domains & I915_GEM_DOMAIN_CPU) {
+ 			DRM_ERROR("reloc with read/write CPU domains: "
+ 				  "obj %p target %d offset %d "
+ 				  "read %08x write %08x",
+-				  obj, reloc->target_handle,
+-				  (int) reloc->offset,
+-				  reloc->read_domains,
+-				  reloc->write_domain);
+-			drm_gem_object_unreference(target_obj);
+-			i915_gem_object_unpin(obj);
+-			return -EINVAL;
++				  obj, reloc.target_handle,
++				  (int) reloc.offset,
++				  reloc.read_domains,
++				  reloc.write_domain);
++			ret = -EINVAL;
++			break;
+ 		}
+-		if (reloc->write_domain && target_obj->pending_write_domain &&
+-		    reloc->write_domain != target_obj->pending_write_domain) {
++		if (reloc.write_domain && target_obj->pending_write_domain &&
++		    reloc.write_domain != target_obj->pending_write_domain) {
+ 			DRM_ERROR("Write domain conflict: "
+ 				  "obj %p target %d offset %d "
+ 				  "new %08x old %08x\n",
+-				  obj, reloc->target_handle,
+-				  (int) reloc->offset,
+-				  reloc->write_domain,
++				  obj, reloc.target_handle,
++				  (int) reloc.offset,
++				  reloc.write_domain,
+ 				  target_obj->pending_write_domain);
+-			drm_gem_object_unreference(target_obj);
+-			i915_gem_object_unpin(obj);
+-			return -EINVAL;
++			ret = -EINVAL;
++			break;
+ 		}
+ 
+-		target_obj->pending_read_domains |= reloc->read_domains;
+-		target_obj->pending_write_domain |= reloc->write_domain;
++		target_obj->pending_read_domains |= reloc.read_domains;
++		target_obj->pending_write_domain |= reloc.write_domain;
+ 
+ 		/* If the relocation already has the right value in it, no
+ 		 * more work needs to be done.
+ 		 */
+-		if (target_obj_priv->gtt_offset == reloc->presumed_offset) {
+-			drm_gem_object_unreference(target_obj);
++		if (target_offset == reloc.presumed_offset)
+ 			continue;
+-		}
+ 
+ 		/* Check that the relocation address is valid... */
+-		if (reloc->offset > obj->size - 4) {
++		if (reloc.offset > obj->base.size - 4) {
+ 			DRM_ERROR("Relocation beyond object bounds: "
+ 				  "obj %p target %d offset %d size %d.\n",
+-				  obj, reloc->target_handle,
+-				  (int) reloc->offset, (int) obj->size);
+-			drm_gem_object_unreference(target_obj);
+-			i915_gem_object_unpin(obj);
+-			return -EINVAL;
++				  obj, reloc.target_handle,
++				  (int) reloc.offset, (int) obj->base.size);
++			ret = -EINVAL;
++			break;
+ 		}
+-		if (reloc->offset & 3) {
++		if (reloc.offset & 3) {
+ 			DRM_ERROR("Relocation not 4-byte aligned: "
+ 				  "obj %p target %d offset %d.\n",
+-				  obj, reloc->target_handle,
+-				  (int) reloc->offset);
+-			drm_gem_object_unreference(target_obj);
+-			i915_gem_object_unpin(obj);
+-			return -EINVAL;
++				  obj, reloc.target_handle,
++				  (int) reloc.offset);
++			ret = -EINVAL;
++			break;
+ 		}
+ 
+ 		/* and points to somewhere within the target object. */
+-		if (reloc->delta >= target_obj->size) {
++		if (reloc.delta >= target_obj->size) {
+ 			DRM_ERROR("Relocation beyond target object bounds: "
+ 				  "obj %p target %d delta %d size %d.\n",
+-				  obj, reloc->target_handle,
+-				  (int) reloc->delta, (int) target_obj->size);
+-			drm_gem_object_unreference(target_obj);
+-			i915_gem_object_unpin(obj);
+-			return -EINVAL;
++				  obj, reloc.target_handle,
++				  (int) reloc.delta, (int) target_obj->size);
++			ret = -EINVAL;
++			break;
+ 		}
+ 
+-		ret = i915_gem_object_set_to_gtt_domain(obj, 1);
+-		if (ret != 0) {
+-			drm_gem_object_unreference(target_obj);
+-			i915_gem_object_unpin(obj);
+-			return -EINVAL;
+-		}
++		reloc.delta += target_offset;
++		if (obj->base.write_domain == I915_GEM_DOMAIN_CPU) {
++			uint32_t page_offset = reloc.offset & ~PAGE_MASK;
++			char *vaddr;
+ 
+-		/* Map the page containing the relocation we're going to
+-		 * perform.
+-		 */
+-		reloc_offset = obj_priv->gtt_offset + reloc->offset;
+-		reloc_page = io_mapping_map_atomic_wc(dev_priv->mm.gtt_mapping,
+-						      (reloc_offset &
+-						       ~(PAGE_SIZE - 1)),
+-						      KM_USER0);
+-		reloc_entry = (uint32_t __iomem *)(reloc_page +
+-						   (reloc_offset & (PAGE_SIZE - 1)));
+-		reloc_val = target_obj_priv->gtt_offset + reloc->delta;
+-
+-#if WATCH_BUF
+-		DRM_INFO("Applied relocation: %p@0x%08x %08x -> %08x\n",
+-			  obj, (unsigned int) reloc->offset,
+-			  readl(reloc_entry), reloc_val);
+-#endif
+-		writel(reloc_val, reloc_entry);
+-		io_mapping_unmap_atomic(reloc_page, KM_USER0);
++			vaddr = kmap_atomic(obj->pages[reloc.offset >> PAGE_SHIFT], KM_USER0);
++			*(uint32_t *)(vaddr + page_offset) = reloc.delta;
++			kunmap_atomic(vaddr, KM_USER0);
++		} else {
++			uint32_t __iomem *reloc_entry;
++			void __iomem *reloc_page;
+ 
+-		/* The updated presumed offset for this entry will be
+-		 * copied back out to the user.
+-		 */
+-		reloc->presumed_offset = target_obj_priv->gtt_offset;
++			ret = i915_gem_object_set_to_gtt_domain(&obj->base, 1);
++			if (ret)
++				break;
++
++			/* Map the page containing the relocation we're going to perform.  */
++			reloc.offset += obj->gtt_offset;
++			reloc_page = io_mapping_map_atomic_wc(dev_priv->mm.gtt_mapping,
++							      reloc.offset & PAGE_MASK,
++							      KM_USER0);
++			reloc_entry = (uint32_t __iomem *)
++				(reloc_page + (reloc.offset & ~PAGE_MASK));
++			iowrite32(reloc.delta, reloc_entry);
++			io_mapping_unmap_atomic(reloc_page, KM_USER0);
++		}
+ 
+-		drm_gem_object_unreference(target_obj);
++		/* and update the user's relocation entry */
++		reloc.presumed_offset = target_offset;
++		if (__copy_to_user_inatomic(&user_relocs[i].presumed_offset,
++					      &reloc.presumed_offset,
++					      sizeof(reloc.presumed_offset))) {
++		    ret = -EFAULT;
++		    break;
++		}
+ 	}
+ 
+-#if WATCH_BUF
+-	if (0)
+-		i915_gem_dump_object(obj, 128, __func__, ~0);
+-#endif
+-	return 0;
++	drm_gem_object_unreference(target_obj);
++	return ret;
+ }
+ 
+-/* Throttle our rendering by waiting until the ring has completed our requests
+- * emitted over 20 msec ago.
+- *
+- * Note that if we were to use the current jiffies each time around the loop,
+- * we wouldn't escape the function with any frames outstanding if the time to
+- * render a frame was over 20ms.
+- *
+- * This should get us reasonable parallelism between CPU and GPU but also
+- * relatively low latency when blocking on a particular request to finish.
+- */
+ static int
+-i915_gem_ring_throttle(struct drm_device *dev, struct drm_file *file_priv)
++i915_gem_execbuffer_pin(struct drm_device *dev,
++			struct drm_file *file,
++			struct drm_gem_object **object_list,
++			struct drm_i915_gem_exec_object2 *exec_list,
++			int count)
+ {
+-	struct drm_i915_file_private *i915_file_priv = file_priv->driver_priv;
+-	int ret = 0;
+-	unsigned long recent_enough = jiffies - msecs_to_jiffies(20);
++	struct drm_i915_private *dev_priv = dev->dev_private;
++	int ret, i, retry;
+ 
+-	mutex_lock(&dev->struct_mutex);
+-	while (!list_empty(&i915_file_priv->mm.request_list)) {
+-		struct drm_i915_gem_request *request;
++	/* attempt to pin all of the buffers into the GTT */
++	for (retry = 0; retry < 2; retry++) {
++		ret = 0;
++		for (i = 0; i < count; i++) {
++			struct drm_i915_gem_exec_object2 *entry = &exec_list[i];
++			struct drm_i915_gem_object *obj= to_intel_bo(object_list[i]);
++			bool need_fence =
++				entry->flags & EXEC_OBJECT_NEEDS_FENCE &&
++				obj->tiling_mode != I915_TILING_NONE;
++
++			/* Check fence reg constraints and rebind if necessary */
++			if (need_fence &&
++			    !i915_gem_object_fence_offset_ok(&obj->base,
++							     obj->tiling_mode)) {
++				ret = i915_gem_object_unbind(&obj->base);
++				if (ret)
++					break;
++			}
+ 
+-		request = list_first_entry(&i915_file_priv->mm.request_list,
+-					   struct drm_i915_gem_request,
+-					   client_list);
++			ret = i915_gem_object_pin(&obj->base, entry->alignment);
++			if (ret)
++				break;
+ 
+-		if (time_after_eq(request->emitted_jiffies, recent_enough))
+-			break;
++			/*
++			 * Pre-965 chips need a fence register set up in order
++			 * to properly handle blits to/from tiled surfaces.
++			 */
++			if (need_fence) {
++				ret = i915_gem_object_get_fence_reg(&obj->base, true);
++				if (ret) {
++					i915_gem_object_unpin(&obj->base);
++					break;
++				}
++
++				dev_priv->fence_regs[obj->fence_reg].gpu = true;
++			}
++
++			entry->offset = obj->gtt_offset;
++		}
+ 
+-		ret = i915_wait_request(dev, request->seqno, request->ring);
+-		if (ret != 0)
++		while (i--)
++			i915_gem_object_unpin(object_list[i]);
++
++		if (ret == 0)
+ 			break;
++
++		if (ret != -ENOSPC || retry)
++			return ret;
++
++		ret = i915_gem_evict_everything(dev);
++		if (ret)
++			return ret;
+ 	}
+-	mutex_unlock(&dev->struct_mutex);
+ 
+-	return ret;
++	return 0;
+ }
+ 
+ static int
+-i915_gem_get_relocs_from_user(struct drm_i915_gem_exec_object2 *exec_list,
+-			      uint32_t buffer_count,
+-			      struct drm_i915_gem_relocation_entry **relocs)
++i915_gem_execbuffer_move_to_gpu(struct drm_device *dev,
++				struct drm_file *file,
++				struct intel_ring_buffer *ring,
++				struct drm_gem_object **objects,
++				int count)
+ {
+-	uint32_t reloc_count = 0, reloc_index = 0, i;
+-	int ret;
++	struct drm_i915_private *dev_priv = dev->dev_private;
++	int ret, i;
+ 
+-	*relocs = NULL;
+-	for (i = 0; i < buffer_count; i++) {
+-		if (reloc_count + exec_list[i].relocation_count < reloc_count)
+-			return -EINVAL;
+-		reloc_count += exec_list[i].relocation_count;
+-	}
++	/* Zero the global flush/invalidate flags. These
++	 * will be modified as new domains are computed
++	 * for each object
++	 */
++	dev->invalidate_domains = 0;
++	dev->flush_domains = 0;
++	dev_priv->mm.flush_rings = 0;
++	for (i = 0; i < count; i++)
++		i915_gem_object_set_to_gpu_domain(objects[i], ring);
+ 
+-	*relocs = drm_calloc_large(reloc_count, sizeof(**relocs));
+-	if (*relocs == NULL) {
+-		DRM_ERROR("failed to alloc relocs, count %d\n", reloc_count);
+-		return -ENOMEM;
++	if (dev->invalidate_domains | dev->flush_domains) {
++#if WATCH_EXEC
++		DRM_INFO("%s: invalidate_domains %08x flush_domains %08x\n",
++			  __func__,
++			 dev->invalidate_domains,
++			 dev->flush_domains);
++#endif
++		i915_gem_flush(dev, file,
++			       dev->invalidate_domains,
++			       dev->flush_domains,
++			       dev_priv->mm.flush_rings);
+ 	}
+ 
+-	for (i = 0; i < buffer_count; i++) {
+-		struct drm_i915_gem_relocation_entry __user *user_relocs;
+-
+-		user_relocs = (void __user *)(uintptr_t)exec_list[i].relocs_ptr;
+-
+-		ret = copy_from_user(&(*relocs)[reloc_index],
+-				     user_relocs,
+-				     exec_list[i].relocation_count *
+-				     sizeof(**relocs));
+-		if (ret != 0) {
+-			drm_free_large(*relocs);
+-			*relocs = NULL;
+-			return -EFAULT;
++	for (i = 0; i < count; i++) {
++		struct drm_i915_gem_object *obj = to_intel_bo(objects[i]);
++		/* XXX replace with semaphores */
++		if (obj->ring && ring != obj->ring) {
++			ret = i915_gem_object_wait_rendering(&obj->base, true);
++			if (ret)
++				return ret;
+ 		}
+-
+-		reloc_index += exec_list[i].relocation_count;
+ 	}
+ 
+ 	return 0;
+ }
+ 
++/* Throttle our rendering by waiting until the ring has completed our requests
++ * emitted over 20 msec ago.
++ *
++ * Note that if we were to use the current jiffies each time around the loop,
++ * we wouldn't escape the function with any frames outstanding if the time to
++ * render a frame was over 20ms.
++ *
++ * This should get us reasonable parallelism between CPU and GPU but also
++ * relatively low latency when blocking on a particular request to finish.
++ */
+ static int
+-i915_gem_put_relocs_to_user(struct drm_i915_gem_exec_object2 *exec_list,
+-			    uint32_t buffer_count,
+-			    struct drm_i915_gem_relocation_entry *relocs)
++i915_gem_ring_throttle(struct drm_device *dev, struct drm_file *file)
+ {
+-	uint32_t reloc_count = 0, i;
+-	int ret = 0;
+-
+-	if (relocs == NULL)
+-	    return 0;
++	struct drm_i915_private *dev_priv = dev->dev_private;
++	struct drm_i915_file_private *file_priv = file->driver_priv;
++	unsigned long recent_enough = jiffies - msecs_to_jiffies(20);
++	struct drm_i915_gem_request *request;
++	struct intel_ring_buffer *ring = NULL;
++	u32 seqno = 0;
++	int ret;
+ 
+-	for (i = 0; i < buffer_count; i++) {
+-		struct drm_i915_gem_relocation_entry __user *user_relocs;
+-		int unwritten;
++	spin_lock(&file_priv->mm.lock);
++	list_for_each_entry(request, &file_priv->mm.request_list, client_list) {
++		if (time_after_eq(request->emitted_jiffies, recent_enough))
++			break;
+ 
+-		user_relocs = (void __user *)(uintptr_t)exec_list[i].relocs_ptr;
++		ring = request->ring;
++		seqno = request->seqno;
++	}
++	spin_unlock(&file_priv->mm.lock);
+ 
+-		unwritten = copy_to_user(user_relocs,
+-					 &relocs[reloc_count],
+-					 exec_list[i].relocation_count *
+-					 sizeof(*relocs));
++	if (seqno == 0)
++		return 0;
+ 
+-		if (unwritten) {
+-			ret = -EFAULT;
+-			goto err;
+-		}
++	ret = 0;
++	if (!i915_seqno_passed(ring->get_seqno(dev, ring), seqno)) {
++		/* And wait for the seqno passing without holding any locks and
++		 * causing extra latency for others. This is safe as the irq
++		 * generation is designed to be run atomically and so is
++		 * lockless.
++		 */
++		ring->user_irq_get(dev, ring);
++		ret = wait_event_interruptible(ring->irq_queue,
++					       i915_seqno_passed(ring->get_seqno(dev, ring), seqno)
++					       || atomic_read(&dev_priv->mm.wedged));
++		ring->user_irq_put(dev, ring);
+ 
+-		reloc_count += exec_list[i].relocation_count;
++		if (ret == 0 && atomic_read(&dev_priv->mm.wedged))
++			ret = -EIO;
+ 	}
+ 
+-err:
+-	drm_free_large(relocs);
++	if (ret == 0)
++		queue_delayed_work(dev_priv->wq, &dev_priv->mm.retire_work, 0);
+ 
+ 	return ret;
+ }
+ 
+ static int
+-i915_gem_check_execbuffer (struct drm_i915_gem_execbuffer2 *exec,
+-			   uint64_t exec_offset)
++i915_gem_check_execbuffer(struct drm_i915_gem_execbuffer2 *exec,
++			  uint64_t exec_offset)
+ {
+ 	uint32_t exec_start, exec_len;
+ 
+@@ -3509,44 +3619,32 @@ i915_gem_check_execbuffer (struct drm_i915_gem_execbuffer2 *exec,
+ }
+ 
+ static int
+-i915_gem_wait_for_pending_flip(struct drm_device *dev,
+-			       struct drm_gem_object **object_list,
+-			       int count)
+-{
+-	drm_i915_private_t *dev_priv = dev->dev_private;
+-	struct drm_i915_gem_object *obj_priv;
+-	DEFINE_WAIT(wait);
+-	int i, ret = 0;
++validate_exec_list(struct drm_i915_gem_exec_object2 *exec,
++		   int count)
++{
++	int i;
+ 
+-	for (;;) {
+-		prepare_to_wait(&dev_priv->pending_flip_queue,
+-				&wait, TASK_INTERRUPTIBLE);
+-		for (i = 0; i < count; i++) {
+-			obj_priv = to_intel_bo(object_list[i]);
+-			if (atomic_read(&obj_priv->pending_flip) > 0)
+-				break;
+-		}
+-		if (i == count)
+-			break;
++	for (i = 0; i < count; i++) {
++		char __user *ptr = (char __user *)(uintptr_t)exec[i].relocs_ptr;
++		size_t length = exec[i].relocation_count * sizeof(struct drm_i915_gem_relocation_entry);
+ 
+-		if (!signal_pending(current)) {
+-			mutex_unlock(&dev->struct_mutex);
+-			schedule();
+-			mutex_lock(&dev->struct_mutex);
+-			continue;
+-		}
+-		ret = -ERESTARTSYS;
+-		break;
++		if (!access_ok(VERIFY_READ, ptr, length))
++			return -EFAULT;
++
++		/* we may also need to update the presumed offsets */
++		if (!access_ok(VERIFY_WRITE, ptr, length))
++			return -EFAULT;
++
++		if (fault_in_pages_readable(ptr, length))
++			return -EFAULT;
+ 	}
+-	finish_wait(&dev_priv->pending_flip_queue, &wait);
+ 
+-	return ret;
++	return 0;
+ }
+ 
+-
+-int
++static int
+ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
+-		       struct drm_file *file_priv,
++		       struct drm_file *file,
+ 		       struct drm_i915_gem_execbuffer2 *args,
+ 		       struct drm_i915_gem_exec_object2 *exec_list)
+ {
+@@ -3555,26 +3653,47 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
+ 	struct drm_gem_object *batch_obj;
+ 	struct drm_i915_gem_object *obj_priv;
+ 	struct drm_clip_rect *cliprects = NULL;
+-	struct drm_i915_gem_relocation_entry *relocs = NULL;
+-	int ret = 0, ret2, i, pinned = 0;
++	struct drm_i915_gem_request *request = NULL;
++	int ret, i, flips;
+ 	uint64_t exec_offset;
+-	uint32_t seqno, flush_domains, reloc_index;
+-	int pin_tries, flips;
+ 
+ 	struct intel_ring_buffer *ring = NULL;
+ 
++	ret = i915_gem_check_is_wedged(dev);
++	if (ret)
++		return ret;
++
++	ret = validate_exec_list(exec_list, args->buffer_count);
++	if (ret)
++		return ret;
++
+ #if WATCH_EXEC
+ 	DRM_INFO("buffers_ptr %d buffer_count %d len %08x\n",
+ 		  (int) args->buffers_ptr, args->buffer_count, args->batch_len);
+ #endif
+-	if (args->flags & I915_EXEC_BSD) {
++	switch (args->flags & I915_EXEC_RING_MASK) {
++	case I915_EXEC_DEFAULT:
++	case I915_EXEC_RENDER:
++		ring = &dev_priv->render_ring;
++		break;
++	case I915_EXEC_BSD:
+ 		if (!HAS_BSD(dev)) {
+-			DRM_ERROR("execbuf with wrong flag\n");
++			DRM_ERROR("execbuf with invalid ring (BSD)\n");
+ 			return -EINVAL;
+ 		}
+ 		ring = &dev_priv->bsd_ring;
+-	} else {
+-		ring = &dev_priv->render_ring;
++		break;
++	case I915_EXEC_BLT:
++		if (!HAS_BLT(dev)) {
++			DRM_ERROR("execbuf with invalid ring (BLT)\n");
++			return -EINVAL;
++		}
++		ring = &dev_priv->blt_ring;
++		break;
++	default:
++		DRM_ERROR("execbuf with unknown ring: %d\n",
++			  (int)(args->flags & I915_EXEC_RING_MASK));
++		return -EINVAL;
+ 	}
+ 
+ 	if (args->buffer_count < 1) {
+@@ -3609,20 +3728,15 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
+ 		}
+ 	}
+ 
+-	ret = i915_gem_get_relocs_from_user(exec_list, args->buffer_count,
+-					    &relocs);
+-	if (ret != 0)
++	request = kzalloc(sizeof(*request), GFP_KERNEL);
++	if (request == NULL) {
++		ret = -ENOMEM;
+ 		goto pre_mutex_err;
++	}
+ 
+-	mutex_lock(&dev->struct_mutex);
+-
+-	i915_verify_inactive(dev, __FILE__, __LINE__);
+-
+-	if (atomic_read(&dev_priv->mm.wedged)) {
+-		mutex_unlock(&dev->struct_mutex);
+-		ret = -EIO;
++	ret = i915_mutex_lock_interruptible(dev);
++	if (ret)
+ 		goto pre_mutex_err;
+-	}
+ 
+ 	if (dev_priv->mm.suspended) {
+ 		mutex_unlock(&dev->struct_mutex);
+@@ -3631,9 +3745,8 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
+ 	}
+ 
+ 	/* Look up object handles */
+-	flips = 0;
+ 	for (i = 0; i < args->buffer_count; i++) {
+-		object_list[i] = drm_gem_object_lookup(dev, file_priv,
++		object_list[i] = drm_gem_object_lookup(dev, file,
+ 						       exec_list[i].handle);
+ 		if (object_list[i] == NULL) {
+ 			DRM_ERROR("Invalid object handle %d at index %d\n",
+@@ -3654,75 +3767,22 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
+ 			goto err;
+ 		}
+ 		obj_priv->in_execbuffer = true;
+-		flips += atomic_read(&obj_priv->pending_flip);
+-	}
+-
+-	if (flips > 0) {
+-		ret = i915_gem_wait_for_pending_flip(dev, object_list,
+-						     args->buffer_count);
+-		if (ret)
+-			goto err;
+ 	}
+ 
+-	/* Pin and relocate */
+-	for (pin_tries = 0; ; pin_tries++) {
+-		ret = 0;
+-		reloc_index = 0;
+-
+-		for (i = 0; i < args->buffer_count; i++) {
+-			object_list[i]->pending_read_domains = 0;
+-			object_list[i]->pending_write_domain = 0;
+-			ret = i915_gem_object_pin_and_relocate(object_list[i],
+-							       file_priv,
+-							       &exec_list[i],
+-							       &relocs[reloc_index]);
+-			if (ret)
+-				break;
+-			pinned = i + 1;
+-			reloc_index += exec_list[i].relocation_count;
+-		}
+-		/* success */
+-		if (ret == 0)
+-			break;
+-
+-		/* error other than GTT full, or we've already tried again */
+-		if (ret != -ENOSPC || pin_tries >= 1) {
+-			if (ret != -ERESTARTSYS) {
+-				unsigned long long total_size = 0;
+-				int num_fences = 0;
+-				for (i = 0; i < args->buffer_count; i++) {
+-					obj_priv = to_intel_bo(object_list[i]);
+-
+-					total_size += object_list[i]->size;
+-					num_fences +=
+-						exec_list[i].flags & EXEC_OBJECT_NEEDS_FENCE &&
+-						obj_priv->tiling_mode != I915_TILING_NONE;
+-				}
+-				DRM_ERROR("Failed to pin buffer %d of %d, total %llu bytes, %d fences: %d\n",
+-					  pinned+1, args->buffer_count,
+-					  total_size, num_fences,
+-					  ret);
+-				DRM_ERROR("%d objects [%d pinned], "
+-					  "%d object bytes [%d pinned], "
+-					  "%d/%d gtt bytes\n",
+-					  atomic_read(&dev->object_count),
+-					  atomic_read(&dev->pin_count),
+-					  atomic_read(&dev->object_memory),
+-					  atomic_read(&dev->pin_memory),
+-					  atomic_read(&dev->gtt_memory),
+-					  dev->gtt_total);
+-			}
+-			goto err;
+-		}
+-
+-		/* unpin all of our buffers */
+-		for (i = 0; i < pinned; i++)
+-			i915_gem_object_unpin(object_list[i]);
+-		pinned = 0;
++	/* Move the objects en-masse into the GTT, evicting if necessary. */
++	ret = i915_gem_execbuffer_pin(dev, file,
++				      object_list, exec_list,
++				      args->buffer_count);
++	if (ret)
++		goto err;
+ 
+-		/* evict everyone we can from the aperture */
+-		ret = i915_gem_evict_everything(dev);
+-		if (ret && ret != -ENOSPC)
++	/* The objects are in their final locations, apply the relocations. */
++	for (i = 0; i < args->buffer_count; i++) {
++		struct drm_i915_gem_object *obj = to_intel_bo(object_list[i]);
++		obj->base.pending_read_domains = 0;
++		obj->base.pending_write_domain = 0;
++		ret = i915_gem_execbuffer_relocate(obj, file, &exec_list[i]);
++		if (ret)
+ 			goto err;
+ 	}
+ 
+@@ -3735,72 +3795,28 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
+ 	}
+ 	batch_obj->pending_read_domains |= I915_GEM_DOMAIN_COMMAND;
+ 
+-	/* Sanity check the batch buffer, prior to moving objects */
+-	exec_offset = exec_list[args->buffer_count - 1].offset;
+-	ret = i915_gem_check_execbuffer (args, exec_offset);
++	/* Sanity check the batch buffer */
++	exec_offset = to_intel_bo(batch_obj)->gtt_offset;
++	ret = i915_gem_check_execbuffer(args, exec_offset);
+ 	if (ret != 0) {
+ 		DRM_ERROR("execbuf with invalid offset/length\n");
+ 		goto err;
+ 	}
+ 
+-	i915_verify_inactive(dev, __FILE__, __LINE__);
+-
+-	/* Zero the global flush/invalidate flags. These
+-	 * will be modified as new domains are computed
+-	 * for each object
+-	 */
+-	dev->invalidate_domains = 0;
+-	dev->flush_domains = 0;
+-	dev_priv->flush_rings = 0;
+-
+-	for (i = 0; i < args->buffer_count; i++) {
+-		struct drm_gem_object *obj = object_list[i];
+-
+-		/* Compute new gpu domains and update invalidate/flush */
+-		i915_gem_object_set_to_gpu_domain(obj);
+-	}
+-
+-	i915_verify_inactive(dev, __FILE__, __LINE__);
+-
+-	if (dev->invalidate_domains | dev->flush_domains) {
+-#if WATCH_EXEC
+-		DRM_INFO("%s: invalidate_domains %08x flush_domains %08x\n",
+-			  __func__,
+-			 dev->invalidate_domains,
+-			 dev->flush_domains);
+-#endif
+-		i915_gem_flush(dev,
+-			       dev->invalidate_domains,
+-			       dev->flush_domains);
+-		if (dev_priv->flush_rings & FLUSH_RENDER_RING)
+-			(void)i915_add_request(dev, file_priv,
+-					       dev->flush_domains,
+-					       &dev_priv->render_ring);
+-		if (dev_priv->flush_rings & FLUSH_BSD_RING)
+-			(void)i915_add_request(dev, file_priv,
+-					       dev->flush_domains,
+-					       &dev_priv->bsd_ring);
+-	}
++	ret = i915_gem_execbuffer_move_to_gpu(dev, file, ring,
++					      object_list, args->buffer_count);
++	if (ret)
++		goto err;
+ 
+ 	for (i = 0; i < args->buffer_count; i++) {
+ 		struct drm_gem_object *obj = object_list[i];
+-		struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
+ 		uint32_t old_write_domain = obj->write_domain;
+-
+ 		obj->write_domain = obj->pending_write_domain;
+-		if (obj->write_domain)
+-			list_move_tail(&obj_priv->gpu_write_list,
+-				       &dev_priv->mm.gpu_write_list);
+-		else
+-			list_del_init(&obj_priv->gpu_write_list);
+-
+ 		trace_i915_gem_object_change_domain(obj,
+ 						    obj->read_domains,
+ 						    old_write_domain);
+ 	}
+ 
+-	i915_verify_inactive(dev, __FILE__, __LINE__);
+-
+ #if WATCH_COHERENCY
+ 	for (i = 0; i < args->buffer_count; i++) {
+ 		i915_gem_object_check_coherency(object_list[i],
+@@ -3815,9 +3831,38 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
+ 			      ~0);
+ #endif
+ 
++	/* Check for any pending flips. As we only maintain a flip queue depth
++	 * of 1, we can simply insert a WAIT for the next display flip prior
++	 * to executing the batch and avoid stalling the CPU.
++	 */
++	flips = 0;
++	for (i = 0; i < args->buffer_count; i++) {
++		if (object_list[i]->write_domain)
++			flips |= atomic_read(&to_intel_bo(object_list[i])->pending_flip);
++	}
++	if (flips) {
++		int plane, flip_mask;
++
++		for (plane = 0; flips >> plane; plane++) {
++			if (((flips >> plane) & 1) == 0)
++				continue;
++
++			if (plane)
++				flip_mask = MI_WAIT_FOR_PLANE_B_FLIP;
++			else
++				flip_mask = MI_WAIT_FOR_PLANE_A_FLIP;
++
++			intel_ring_begin(dev, ring, 2);
++			intel_ring_emit(dev, ring,
++					MI_WAIT_FOR_EVENT | flip_mask);
++			intel_ring_emit(dev, ring, MI_NOOP);
++			intel_ring_advance(dev, ring);
++		}
++	}
++
+ 	/* Exec the batchbuffer */
+ 	ret = ring->dispatch_gem_execbuffer(dev, ring, args,
+-			cliprects, exec_offset);
++					    cliprects, exec_offset);
+ 	if (ret) {
+ 		DRM_ERROR("dispatch failed %d\n", ret);
+ 		goto err;
+@@ -3827,38 +3872,21 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
+ 	 * Ensure that the commands in the batch buffer are
+ 	 * finished before the interrupt fires
+ 	 */
+-	flush_domains = i915_retire_commands(dev, ring);
+-
+-	i915_verify_inactive(dev, __FILE__, __LINE__);
++	i915_retire_commands(dev, ring);
+ 
+-	/*
+-	 * Get a seqno representing the execution of the current buffer,
+-	 * which we can wait on.  We would like to mitigate these interrupts,
+-	 * likely by only creating seqnos occasionally (so that we have
+-	 * *some* interrupts representing completion of buffers that we can
+-	 * wait on when trying to clear up gtt space).
+-	 */
+-	seqno = i915_add_request(dev, file_priv, flush_domains, ring);
+-	BUG_ON(seqno == 0);
+ 	for (i = 0; i < args->buffer_count; i++) {
+ 		struct drm_gem_object *obj = object_list[i];
+-		obj_priv = to_intel_bo(obj);
+ 
+-		i915_gem_object_move_to_active(obj, seqno, ring);
+-#if WATCH_LRU
+-		DRM_INFO("%s: move to exec list %p\n", __func__, obj);
+-#endif
++		i915_gem_object_move_to_active(obj, ring);
++		if (obj->write_domain)
++			list_move_tail(&to_intel_bo(obj)->gpu_write_list,
++				       &ring->gpu_write_list);
+ 	}
+-#if WATCH_LRU
+-	i915_dump_lru(dev, __func__);
+-#endif
+ 
+-	i915_verify_inactive(dev, __FILE__, __LINE__);
++	i915_add_request(dev, file, request, ring);
++	request = NULL;
+ 
+ err:
+-	for (i = 0; i < pinned; i++)
+-		i915_gem_object_unpin(object_list[i]);
+-
+ 	for (i = 0; i < args->buffer_count; i++) {
+ 		if (object_list[i]) {
+ 			obj_priv = to_intel_bo(object_list[i]);
+@@ -3870,22 +3898,9 @@ err:
+ 	mutex_unlock(&dev->struct_mutex);
+ 
+ pre_mutex_err:
+-	/* Copy the updated relocations out regardless of current error
+-	 * state.  Failure to update the relocs would mean that the next
+-	 * time userland calls execbuf, it would do so with presumed offset
+-	 * state that didn't match the actual object state.
+-	 */
+-	ret2 = i915_gem_put_relocs_to_user(exec_list, args->buffer_count,
+-					   relocs);
+-	if (ret2 != 0) {
+-		DRM_ERROR("Failed to copy relocations back out: %d\n", ret2);
+-
+-		if (ret == 0)
+-			ret = ret2;
+-	}
+-
+ 	drm_free_large(object_list);
+ 	kfree(cliprects);
++	kfree(request);
+ 
+ 	return ret;
+ }
+@@ -3942,7 +3957,7 @@ i915_gem_execbuffer(struct drm_device *dev, void *data,
+ 		exec2_list[i].relocs_ptr = exec_list[i].relocs_ptr;
+ 		exec2_list[i].alignment = exec_list[i].alignment;
+ 		exec2_list[i].offset = exec_list[i].offset;
+-		if (!IS_I965G(dev))
++		if (INTEL_INFO(dev)->gen < 4)
+ 			exec2_list[i].flags = EXEC_OBJECT_NEEDS_FENCE;
+ 		else
+ 			exec2_list[i].flags = 0;
+@@ -4039,12 +4054,12 @@ int
+ i915_gem_object_pin(struct drm_gem_object *obj, uint32_t alignment)
+ {
+ 	struct drm_device *dev = obj->dev;
++	struct drm_i915_private *dev_priv = dev->dev_private;
+ 	struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
+ 	int ret;
+ 
+ 	BUG_ON(obj_priv->pin_count == DRM_I915_GEM_OBJECT_MAX_PIN_COUNT);
+-
+-	i915_verify_inactive(dev, __FILE__, __LINE__);
++	WARN_ON(i915_verify_lists(dev));
+ 
+ 	if (obj_priv->gtt_space != NULL) {
+ 		if (alignment == 0)
+@@ -4072,14 +4087,13 @@ i915_gem_object_pin(struct drm_gem_object *obj, uint32_t alignment)
+ 	 * remove it from the inactive list
+ 	 */
+ 	if (obj_priv->pin_count == 1) {
+-		atomic_inc(&dev->pin_count);
+-		atomic_add(obj->size, &dev->pin_memory);
+-		if (!obj_priv->active &&
+-		    (obj->write_domain & I915_GEM_GPU_DOMAINS) == 0)
+-			list_del_init(&obj_priv->list);
++		i915_gem_info_add_pin(dev_priv, obj->size);
++		if (!obj_priv->active)
++			list_move_tail(&obj_priv->mm_list,
++				       &dev_priv->mm.pinned_list);
+ 	}
+-	i915_verify_inactive(dev, __FILE__, __LINE__);
+ 
++	WARN_ON(i915_verify_lists(dev));
+ 	return 0;
+ }
+ 
+@@ -4090,7 +4104,7 @@ i915_gem_object_unpin(struct drm_gem_object *obj)
+ 	drm_i915_private_t *dev_priv = dev->dev_private;
+ 	struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
+ 
+-	i915_verify_inactive(dev, __FILE__, __LINE__);
++	WARN_ON(i915_verify_lists(dev));
+ 	obj_priv->pin_count--;
+ 	BUG_ON(obj_priv->pin_count < 0);
+ 	BUG_ON(obj_priv->gtt_space == NULL);
+@@ -4100,14 +4114,12 @@ i915_gem_object_unpin(struct drm_gem_object *obj)
+ 	 * the inactive list
+ 	 */
+ 	if (obj_priv->pin_count == 0) {
+-		if (!obj_priv->active &&
+-		    (obj->write_domain & I915_GEM_GPU_DOMAINS) == 0)
+-			list_move_tail(&obj_priv->list,
++		if (!obj_priv->active)
++			list_move_tail(&obj_priv->mm_list,
+ 				       &dev_priv->mm.inactive_list);
+-		atomic_dec(&dev->pin_count);
+-		atomic_sub(obj->size, &dev->pin_memory);
++		i915_gem_info_remove_pin(dev_priv, obj->size);
+ 	}
+-	i915_verify_inactive(dev, __FILE__, __LINE__);
++	WARN_ON(i915_verify_lists(dev));
+ }
+ 
+ int
+@@ -4119,41 +4131,36 @@ i915_gem_pin_ioctl(struct drm_device *dev, void *data,
+ 	struct drm_i915_gem_object *obj_priv;
+ 	int ret;
+ 
+-	mutex_lock(&dev->struct_mutex);
++	ret = i915_mutex_lock_interruptible(dev);
++	if (ret)
++		return ret;
+ 
+ 	obj = drm_gem_object_lookup(dev, file_priv, args->handle);
+ 	if (obj == NULL) {
+-		DRM_ERROR("Bad handle in i915_gem_pin_ioctl(): %d\n",
+-			  args->handle);
+-		mutex_unlock(&dev->struct_mutex);
+-		return -ENOENT;
++		ret = -ENOENT;
++		goto unlock;
+ 	}
+ 	obj_priv = to_intel_bo(obj);
+ 
+ 	if (obj_priv->madv != I915_MADV_WILLNEED) {
+ 		DRM_ERROR("Attempting to pin a purgeable buffer\n");
+-		drm_gem_object_unreference(obj);
+-		mutex_unlock(&dev->struct_mutex);
+-		return -EINVAL;
++		ret = -EINVAL;
++		goto out;
+ 	}
+ 
+ 	if (obj_priv->pin_filp != NULL && obj_priv->pin_filp != file_priv) {
+ 		DRM_ERROR("Already pinned in i915_gem_pin_ioctl(): %d\n",
+ 			  args->handle);
+-		drm_gem_object_unreference(obj);
+-		mutex_unlock(&dev->struct_mutex);
+-		return -EINVAL;
++		ret = -EINVAL;
++		goto out;
+ 	}
+ 
+ 	obj_priv->user_pin_count++;
+ 	obj_priv->pin_filp = file_priv;
+ 	if (obj_priv->user_pin_count == 1) {
+ 		ret = i915_gem_object_pin(obj, args->alignment);
+-		if (ret != 0) {
+-			drm_gem_object_unreference(obj);
+-			mutex_unlock(&dev->struct_mutex);
+-			return ret;
+-		}
++		if (ret)
++			goto out;
+ 	}
+ 
+ 	/* XXX - flush the CPU caches for pinned objects
+@@ -4161,10 +4168,11 @@ i915_gem_pin_ioctl(struct drm_device *dev, void *data,
+ 	 */
+ 	i915_gem_object_flush_cpu_write_domain(obj);
+ 	args->offset = obj_priv->gtt_offset;
++out:
+ 	drm_gem_object_unreference(obj);
++unlock:
+ 	mutex_unlock(&dev->struct_mutex);
+-
+-	return 0;
++	return ret;
+ }
+ 
+ int
+@@ -4174,24 +4182,24 @@ i915_gem_unpin_ioctl(struct drm_device *dev, void *data,
+ 	struct drm_i915_gem_pin *args = data;
+ 	struct drm_gem_object *obj;
+ 	struct drm_i915_gem_object *obj_priv;
++	int ret;
+ 
+-	mutex_lock(&dev->struct_mutex);
++	ret = i915_mutex_lock_interruptible(dev);
++	if (ret)
++		return ret;
+ 
+ 	obj = drm_gem_object_lookup(dev, file_priv, args->handle);
+ 	if (obj == NULL) {
+-		DRM_ERROR("Bad handle in i915_gem_unpin_ioctl(): %d\n",
+-			  args->handle);
+-		mutex_unlock(&dev->struct_mutex);
+-		return -ENOENT;
++		ret = -ENOENT;
++		goto unlock;
+ 	}
+-
+ 	obj_priv = to_intel_bo(obj);
++
+ 	if (obj_priv->pin_filp != file_priv) {
+ 		DRM_ERROR("Not pinned by caller in i915_gem_pin_ioctl(): %d\n",
+ 			  args->handle);
+-		drm_gem_object_unreference(obj);
+-		mutex_unlock(&dev->struct_mutex);
+-		return -EINVAL;
++		ret = -EINVAL;
++		goto out;
+ 	}
+ 	obj_priv->user_pin_count--;
+ 	if (obj_priv->user_pin_count == 0) {
+@@ -4199,9 +4207,11 @@ i915_gem_unpin_ioctl(struct drm_device *dev, void *data,
+ 		i915_gem_object_unpin(obj);
+ 	}
+ 
++out:
+ 	drm_gem_object_unreference(obj);
++unlock:
+ 	mutex_unlock(&dev->struct_mutex);
+-	return 0;
++	return ret;
+ }
+ 
+ int
+@@ -4211,22 +4221,24 @@ i915_gem_busy_ioctl(struct drm_device *dev, void *data,
+ 	struct drm_i915_gem_busy *args = data;
+ 	struct drm_gem_object *obj;
+ 	struct drm_i915_gem_object *obj_priv;
++	int ret;
++
++	ret = i915_mutex_lock_interruptible(dev);
++	if (ret)
++		return ret;
+ 
+ 	obj = drm_gem_object_lookup(dev, file_priv, args->handle);
+ 	if (obj == NULL) {
+-		DRM_ERROR("Bad handle in i915_gem_busy_ioctl(): %d\n",
+-			  args->handle);
+-		return -ENOENT;
++		ret = -ENOENT;
++		goto unlock;
+ 	}
+-
+-	mutex_lock(&dev->struct_mutex);
++	obj_priv = to_intel_bo(obj);
+ 
+ 	/* Count all active objects as busy, even if they are currently not used
+ 	 * by the gpu. Users of this interface expect objects to eventually
+ 	 * become non-busy without any further actions, therefore emit any
+ 	 * necessary flushes here.
+ 	 */
+-	obj_priv = to_intel_bo(obj);
+ 	args->busy = obj_priv->active;
+ 	if (args->busy) {
+ 		/* Unconditionally flush objects, even when the gpu still uses this
+@@ -4234,10 +4246,10 @@ i915_gem_busy_ioctl(struct drm_device *dev, void *data,
+ 		 * use this buffer rather sooner than later, so issuing the required
+ 		 * flush earlier is beneficial.
+ 		 */
+-		if (obj->write_domain) {
+-			i915_gem_flush(dev, 0, obj->write_domain);
+-			(void)i915_add_request(dev, file_priv, obj->write_domain, obj_priv->ring);
+-		}
++		if (obj->write_domain & I915_GEM_GPU_DOMAINS)
++			i915_gem_flush_ring(dev, file_priv,
++					    obj_priv->ring,
++					    0, obj->write_domain);
+ 
+ 		/* Update the active list for the hardware's current position.
+ 		 * Otherwise this only updates on a delayed timer or when irqs
+@@ -4250,8 +4262,9 @@ i915_gem_busy_ioctl(struct drm_device *dev, void *data,
+ 	}
+ 
+ 	drm_gem_object_unreference(obj);
++unlock:
+ 	mutex_unlock(&dev->struct_mutex);
+-	return 0;
++	return ret;
+ }
+ 
+ int
+@@ -4268,6 +4281,7 @@ i915_gem_madvise_ioctl(struct drm_device *dev, void *data,
+ 	struct drm_i915_gem_madvise *args = data;
+ 	struct drm_gem_object *obj;
+ 	struct drm_i915_gem_object *obj_priv;
++	int ret;
+ 
+ 	switch (args->madv) {
+ 	case I915_MADV_DONTNEED:
+@@ -4277,22 +4291,20 @@ i915_gem_madvise_ioctl(struct drm_device *dev, void *data,
+ 	    return -EINVAL;
+ 	}
+ 
++	ret = i915_mutex_lock_interruptible(dev);
++	if (ret)
++		return ret;
++
+ 	obj = drm_gem_object_lookup(dev, file_priv, args->handle);
+ 	if (obj == NULL) {
+-		DRM_ERROR("Bad handle in i915_gem_madvise_ioctl(): %d\n",
+-			  args->handle);
+-		return -ENOENT;
++		ret = -ENOENT;
++		goto unlock;
+ 	}
+-
+-	mutex_lock(&dev->struct_mutex);
+ 	obj_priv = to_intel_bo(obj);
+ 
+ 	if (obj_priv->pin_count) {
+-		drm_gem_object_unreference(obj);
+-		mutex_unlock(&dev->struct_mutex);
+-
+-		DRM_ERROR("Attempted i915_gem_madvise_ioctl() on a pinned object\n");
+-		return -EINVAL;
++		ret = -EINVAL;
++		goto out;
+ 	}
+ 
+ 	if (obj_priv->madv != __I915_MADV_PURGED)
+@@ -4305,15 +4317,17 @@ i915_gem_madvise_ioctl(struct drm_device *dev, void *data,
+ 
+ 	args->retained = obj_priv->madv != __I915_MADV_PURGED;
+ 
++out:
+ 	drm_gem_object_unreference(obj);
++unlock:
+ 	mutex_unlock(&dev->struct_mutex);
+-
+-	return 0;
++	return ret;
+ }
+ 
+ struct drm_gem_object * i915_gem_alloc_object(struct drm_device *dev,
+ 					      size_t size)
+ {
++	struct drm_i915_private *dev_priv = dev->dev_private;
+ 	struct drm_i915_gem_object *obj;
+ 
+ 	obj = kzalloc(sizeof(*obj), GFP_KERNEL);
+@@ -4325,18 +4339,19 @@ struct drm_gem_object * i915_gem_alloc_object(struct drm_device *dev,
+ 		return NULL;
+ 	}
+ 
++	i915_gem_info_add_obj(dev_priv, size);
++
+ 	obj->base.write_domain = I915_GEM_DOMAIN_CPU;
+ 	obj->base.read_domains = I915_GEM_DOMAIN_CPU;
+ 
+ 	obj->agp_type = AGP_USER_MEMORY;
+ 	obj->base.driver_private = NULL;
+ 	obj->fence_reg = I915_FENCE_REG_NONE;
+-	INIT_LIST_HEAD(&obj->list);
++	INIT_LIST_HEAD(&obj->mm_list);
++	INIT_LIST_HEAD(&obj->ring_list);
+ 	INIT_LIST_HEAD(&obj->gpu_write_list);
+ 	obj->madv = I915_MADV_WILLNEED;
+ 
+-	trace_i915_gem_object_create(&obj->base);
+-
+ 	return &obj->base;
+ }
+ 
+@@ -4356,7 +4371,7 @@ static void i915_gem_free_object_tail(struct drm_gem_object *obj)
+ 
+ 	ret = i915_gem_object_unbind(obj);
+ 	if (ret == -ERESTARTSYS) {
+-		list_move(&obj_priv->list,
++		list_move(&obj_priv->mm_list,
+ 			  &dev_priv->mm.deferred_free_list);
+ 		return;
+ 	}
+@@ -4365,6 +4380,7 @@ static void i915_gem_free_object_tail(struct drm_gem_object *obj)
+ 		i915_gem_free_mmap_offset(obj);
+ 
+ 	drm_gem_object_release(obj);
++	i915_gem_info_remove_obj(dev_priv, obj->size);
+ 
+ 	kfree(obj_priv->page_cpu_valid);
+ 	kfree(obj_priv->bit_17);
+@@ -4395,10 +4411,7 @@ i915_gem_idle(struct drm_device *dev)
+ 
+ 	mutex_lock(&dev->struct_mutex);
+ 
+-	if (dev_priv->mm.suspended ||
+-			(dev_priv->render_ring.gem_object == NULL) ||
+-			(HAS_BSD(dev) &&
+-			 dev_priv->bsd_ring.gem_object == NULL)) {
++	if (dev_priv->mm.suspended) {
+ 		mutex_unlock(&dev->struct_mutex);
+ 		return 0;
+ 	}
+@@ -4423,7 +4436,7 @@ i915_gem_idle(struct drm_device *dev)
+ 	 * And not confound mm.suspended!
+ 	 */
+ 	dev_priv->mm.suspended = 1;
+-	del_timer(&dev_priv->hangcheck_timer);
++	del_timer_sync(&dev_priv->hangcheck_timer);
+ 
+ 	i915_kernel_lost_context(dev);
+ 	i915_gem_cleanup_ringbuffer(dev);
+@@ -4503,36 +4516,34 @@ i915_gem_init_ringbuffer(struct drm_device *dev)
+ 	drm_i915_private_t *dev_priv = dev->dev_private;
+ 	int ret;
+ 
+-	dev_priv->render_ring = render_ring;
+-
+-	if (!I915_NEED_GFX_HWS(dev)) {
+-		dev_priv->render_ring.status_page.page_addr
+-			= dev_priv->status_page_dmah->vaddr;
+-		memset(dev_priv->render_ring.status_page.page_addr,
+-				0, PAGE_SIZE);
+-	}
+-
+ 	if (HAS_PIPE_CONTROL(dev)) {
+ 		ret = i915_gem_init_pipe_control(dev);
+ 		if (ret)
+ 			return ret;
+ 	}
+ 
+-	ret = intel_init_ring_buffer(dev, &dev_priv->render_ring);
++	ret = intel_init_render_ring_buffer(dev);
+ 	if (ret)
+ 		goto cleanup_pipe_control;
+ 
+ 	if (HAS_BSD(dev)) {
+-		dev_priv->bsd_ring = bsd_ring;
+-		ret = intel_init_ring_buffer(dev, &dev_priv->bsd_ring);
++		ret = intel_init_bsd_ring_buffer(dev);
+ 		if (ret)
+ 			goto cleanup_render_ring;
+ 	}
+ 
++	if (HAS_BLT(dev)) {
++		ret = intel_init_blt_ring_buffer(dev);
++		if (ret)
++			goto cleanup_bsd_ring;
++	}
++
+ 	dev_priv->next_seqno = 1;
+ 
+ 	return 0;
+ 
++cleanup_bsd_ring:
++	intel_cleanup_ring_buffer(dev, &dev_priv->bsd_ring);
+ cleanup_render_ring:
+ 	intel_cleanup_ring_buffer(dev, &dev_priv->render_ring);
+ cleanup_pipe_control:
+@@ -4547,8 +4558,8 @@ i915_gem_cleanup_ringbuffer(struct drm_device *dev)
+ 	drm_i915_private_t *dev_priv = dev->dev_private;
+ 
+ 	intel_cleanup_ring_buffer(dev, &dev_priv->render_ring);
+-	if (HAS_BSD(dev))
+-		intel_cleanup_ring_buffer(dev, &dev_priv->bsd_ring);
++	intel_cleanup_ring_buffer(dev, &dev_priv->bsd_ring);
++	intel_cleanup_ring_buffer(dev, &dev_priv->blt_ring);
+ 	if (HAS_PIPE_CONTROL(dev))
+ 		i915_gem_cleanup_pipe_control(dev);
+ }
+@@ -4577,15 +4588,15 @@ i915_gem_entervt_ioctl(struct drm_device *dev, void *data,
+ 		return ret;
+ 	}
+ 
+-	spin_lock(&dev_priv->mm.active_list_lock);
++	BUG_ON(!list_empty(&dev_priv->mm.active_list));
+ 	BUG_ON(!list_empty(&dev_priv->render_ring.active_list));
+-	BUG_ON(HAS_BSD(dev) && !list_empty(&dev_priv->bsd_ring.active_list));
+-	spin_unlock(&dev_priv->mm.active_list_lock);
+-
++	BUG_ON(!list_empty(&dev_priv->bsd_ring.active_list));
++	BUG_ON(!list_empty(&dev_priv->blt_ring.active_list));
+ 	BUG_ON(!list_empty(&dev_priv->mm.flushing_list));
+ 	BUG_ON(!list_empty(&dev_priv->mm.inactive_list));
+ 	BUG_ON(!list_empty(&dev_priv->render_ring.request_list));
+-	BUG_ON(HAS_BSD(dev) && !list_empty(&dev_priv->bsd_ring.request_list));
++	BUG_ON(!list_empty(&dev_priv->bsd_ring.request_list));
++	BUG_ON(!list_empty(&dev_priv->blt_ring.request_list));
+ 	mutex_unlock(&dev->struct_mutex);
+ 
+ 	ret = drm_irq_install(dev);
+@@ -4627,28 +4638,34 @@ i915_gem_lastclose(struct drm_device *dev)
+ 		DRM_ERROR("failed to idle hardware: %d\n", ret);
+ }
+ 
++static void
++init_ring_lists(struct intel_ring_buffer *ring)
++{
++	INIT_LIST_HEAD(&ring->active_list);
++	INIT_LIST_HEAD(&ring->request_list);
++	INIT_LIST_HEAD(&ring->gpu_write_list);
++}
++
+ void
+ i915_gem_load(struct drm_device *dev)
+ {
+ 	int i;
+ 	drm_i915_private_t *dev_priv = dev->dev_private;
+ 
+-	spin_lock_init(&dev_priv->mm.active_list_lock);
++	INIT_LIST_HEAD(&dev_priv->mm.active_list);
+ 	INIT_LIST_HEAD(&dev_priv->mm.flushing_list);
+-	INIT_LIST_HEAD(&dev_priv->mm.gpu_write_list);
+ 	INIT_LIST_HEAD(&dev_priv->mm.inactive_list);
++	INIT_LIST_HEAD(&dev_priv->mm.pinned_list);
+ 	INIT_LIST_HEAD(&dev_priv->mm.fence_list);
+ 	INIT_LIST_HEAD(&dev_priv->mm.deferred_free_list);
+-	INIT_LIST_HEAD(&dev_priv->render_ring.active_list);
+-	INIT_LIST_HEAD(&dev_priv->render_ring.request_list);
+-	if (HAS_BSD(dev)) {
+-		INIT_LIST_HEAD(&dev_priv->bsd_ring.active_list);
+-		INIT_LIST_HEAD(&dev_priv->bsd_ring.request_list);
+-	}
++	init_ring_lists(&dev_priv->render_ring);
++	init_ring_lists(&dev_priv->bsd_ring);
++	init_ring_lists(&dev_priv->blt_ring);
+ 	for (i = 0; i < 16; i++)
+ 		INIT_LIST_HEAD(&dev_priv->fence_regs[i].lru_list);
+ 	INIT_DELAYED_WORK(&dev_priv->mm.retire_work,
+ 			  i915_gem_retire_work_handler);
++	init_completion(&dev_priv->error_completion);
+ 	spin_lock(&shrink_list_lock);
+ 	list_add(&dev_priv->mm.shrink_list, &shrink_list);
+ 	spin_unlock(&shrink_list_lock);
+@@ -4667,21 +4684,30 @@ i915_gem_load(struct drm_device *dev)
+ 	if (!drm_core_check_feature(dev, DRIVER_MODESET))
+ 		dev_priv->fence_reg_start = 3;
+ 
+-	if (IS_I965G(dev) || IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev))
++	if (INTEL_INFO(dev)->gen >= 4 || IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev))
+ 		dev_priv->num_fence_regs = 16;
+ 	else
+ 		dev_priv->num_fence_regs = 8;
+ 
+ 	/* Initialize fence registers to zero */
+-	if (IS_I965G(dev)) {
++	switch (INTEL_INFO(dev)->gen) {
++	case 6:
++		for (i = 0; i < 16; i++)
++			I915_WRITE64(FENCE_REG_SANDYBRIDGE_0 + (i * 8), 0);
++		break;
++	case 5:
++	case 4:
+ 		for (i = 0; i < 16; i++)
+ 			I915_WRITE64(FENCE_REG_965_0 + (i * 8), 0);
+-	} else {
+-		for (i = 0; i < 8; i++)
+-			I915_WRITE(FENCE_REG_830_0 + (i * 4), 0);
++		break;
++	case 3:
+ 		if (IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev))
+ 			for (i = 0; i < 8; i++)
+ 				I915_WRITE(FENCE_REG_945_8 + (i * 4), 0);
++	case 2:
++		for (i = 0; i < 8; i++)
++			I915_WRITE(FENCE_REG_830_0 + (i * 4), 0);
++		break;
+ 	}
+ 	i915_gem_detect_bit_6_swizzle(dev);
+ 	init_waitqueue_head(&dev_priv->pending_flip_queue);
+@@ -4691,8 +4717,8 @@ i915_gem_load(struct drm_device *dev)
+  * Create a physically contiguous memory object for this object
+  * e.g. for cursor + overlay regs
+  */
+-int i915_gem_init_phys_object(struct drm_device *dev,
+-			      int id, int size, int align)
++static int i915_gem_init_phys_object(struct drm_device *dev,
++				     int id, int size, int align)
+ {
+ 	drm_i915_private_t *dev_priv = dev->dev_private;
+ 	struct drm_i915_gem_phys_object *phys_obj;
+@@ -4724,7 +4750,7 @@ kfree_obj:
+ 	return ret;
+ }
+ 
+-void i915_gem_free_phys_object(struct drm_device *dev, int id)
++static void i915_gem_free_phys_object(struct drm_device *dev, int id)
+ {
+ 	drm_i915_private_t *dev_priv = dev->dev_private;
+ 	struct drm_i915_gem_phys_object *phys_obj;
+@@ -4853,34 +4879,48 @@ i915_gem_phys_pwrite(struct drm_device *dev, struct drm_gem_object *obj,
+ 		     struct drm_file *file_priv)
+ {
+ 	struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
+-	void *obj_addr;
+-	int ret;
+-	char __user *user_data;
++	void *vaddr = obj_priv->phys_obj->handle->vaddr + args->offset;
++	char __user *user_data = (char __user *) (uintptr_t) args->data_ptr;
+ 
+-	user_data = (char __user *) (uintptr_t) args->data_ptr;
+-	obj_addr = obj_priv->phys_obj->handle->vaddr + args->offset;
++	DRM_DEBUG_DRIVER("vaddr %p, %lld\n", vaddr, args->size);
+ 
+-	DRM_DEBUG_DRIVER("obj_addr %p, %lld\n", obj_addr, args->size);
+-	ret = copy_from_user(obj_addr, user_data, args->size);
+-	if (ret)
+-		return -EFAULT;
++	if (__copy_from_user_inatomic_nocache(vaddr, user_data, args->size)) {
++		unsigned long unwritten;
++
++		/* The physical object once assigned is fixed for the lifetime
++		 * of the obj, so we can safely drop the lock and continue
++		 * to access vaddr.
++		 */
++		mutex_unlock(&dev->struct_mutex);
++		unwritten = copy_from_user(vaddr, user_data, args->size);
++		mutex_lock(&dev->struct_mutex);
++		if (unwritten)
++			return -EFAULT;
++	}
+ 
+ 	drm_agp_chipset_flush(dev);
+ 	return 0;
+ }
+ 
+-void i915_gem_release(struct drm_device * dev, struct drm_file *file_priv)
++void i915_gem_release(struct drm_device *dev, struct drm_file *file)
+ {
+-	struct drm_i915_file_private *i915_file_priv = file_priv->driver_priv;
++	struct drm_i915_file_private *file_priv = file->driver_priv;
+ 
+ 	/* Clean up our request list when the client is going away, so that
+ 	 * later retire_requests won't dereference our soon-to-be-gone
+ 	 * file_priv.
+ 	 */
+-	mutex_lock(&dev->struct_mutex);
+-	while (!list_empty(&i915_file_priv->mm.request_list))
+-		list_del_init(i915_file_priv->mm.request_list.next);
+-	mutex_unlock(&dev->struct_mutex);
++	spin_lock(&file_priv->mm.lock);
++	while (!list_empty(&file_priv->mm.request_list)) {
++		struct drm_i915_gem_request *request;
++
++		request = list_first_entry(&file_priv->mm.request_list,
++					   struct drm_i915_gem_request,
++					   client_list);
++		list_del(&request->client_list);
++		request->file_priv = NULL;
++	}
++	spin_unlock(&file_priv->mm.lock);
+ }
+ 
+ static int
+@@ -4889,12 +4929,8 @@ i915_gpu_is_active(struct drm_device *dev)
+ 	drm_i915_private_t *dev_priv = dev->dev_private;
+ 	int lists_empty;
+ 
+-	spin_lock(&dev_priv->mm.active_list_lock);
+ 	lists_empty = list_empty(&dev_priv->mm.flushing_list) &&
+-		      list_empty(&dev_priv->render_ring.active_list);
+-	if (HAS_BSD(dev))
+-		lists_empty &= list_empty(&dev_priv->bsd_ring.active_list);
+-	spin_unlock(&dev_priv->mm.active_list_lock);
++		      list_empty(&dev_priv->mm.active_list);
+ 
+ 	return !lists_empty;
+ }
+@@ -4916,7 +4952,7 @@ i915_gem_shrink(struct shrinker *shrink, int nr_to_scan, gfp_t gfp_mask)
+ 			if (mutex_trylock(&dev->struct_mutex)) {
+ 				list_for_each_entry(obj_priv,
+ 						    &dev_priv->mm.inactive_list,
+-						    list)
++						    mm_list)
+ 					cnt++;
+ 				mutex_unlock(&dev->struct_mutex);
+ 			}
+@@ -4942,7 +4978,7 @@ rescan:
+ 
+ 		list_for_each_entry_safe(obj_priv, next_obj,
+ 					 &dev_priv->mm.inactive_list,
+-					 list) {
++					 mm_list) {
+ 			if (i915_gem_object_is_purgeable(obj_priv)) {
+ 				i915_gem_object_unbind(&obj_priv->base);
+ 				if (--nr_to_scan <= 0)
+@@ -4971,7 +5007,7 @@ rescan:
+ 
+ 		list_for_each_entry_safe(obj_priv, next_obj,
+ 					 &dev_priv->mm.inactive_list,
+-					 list) {
++					 mm_list) {
+ 			if (nr_to_scan > 0) {
+ 				i915_gem_object_unbind(&obj_priv->base);
+ 				nr_to_scan--;
+diff --git a/drivers/gpu/drm/i915/i915_gem_debug.c b/drivers/gpu/drm/i915/i915_gem_debug.c
+index 80f380b..48644b8 100644
+--- a/drivers/gpu/drm/i915/i915_gem_debug.c
++++ b/drivers/gpu/drm/i915/i915_gem_debug.c
+@@ -30,29 +30,112 @@
+ #include "i915_drm.h"
+ #include "i915_drv.h"
+ 
+-#if WATCH_INACTIVE
+-void
+-i915_verify_inactive(struct drm_device *dev, char *file, int line)
++#if WATCH_LISTS
++int
++i915_verify_lists(struct drm_device *dev)
+ {
++	static int warned;
+ 	drm_i915_private_t *dev_priv = dev->dev_private;
+-	struct drm_gem_object *obj;
+-	struct drm_i915_gem_object *obj_priv;
+-
+-	list_for_each_entry(obj_priv, &dev_priv->mm.inactive_list, list) {
+-		obj = &obj_priv->base;
+-		if (obj_priv->pin_count || obj_priv->active ||
+-		    (obj->write_domain & ~(I915_GEM_DOMAIN_CPU |
+-					   I915_GEM_DOMAIN_GTT)))
+-			DRM_ERROR("inactive %p (p %d a %d w %x)  %s:%d\n",
++	struct drm_i915_gem_object *obj;
++	int err = 0;
++
++	if (warned)
++		return 0;
++
++	list_for_each_entry(obj, &dev_priv->render_ring.active_list, list) {
++		if (obj->base.dev != dev ||
++		    !atomic_read(&obj->base.refcount.refcount)) {
++			DRM_ERROR("freed render active %p\n", obj);
++			err++;
++			break;
++		} else if (!obj->active ||
++			   (obj->base.read_domains & I915_GEM_GPU_DOMAINS) == 0) {
++			DRM_ERROR("invalid render active %p (a %d r %x)\n",
++				  obj,
++				  obj->active,
++				  obj->base.read_domains);
++			err++;
++		} else if (obj->base.write_domain && list_empty(&obj->gpu_write_list)) {
++			DRM_ERROR("invalid render active %p (w %x, gwl %d)\n",
++				  obj,
++				  obj->base.write_domain,
++				  !list_empty(&obj->gpu_write_list));
++			err++;
++		}
++	}
++
++	list_for_each_entry(obj, &dev_priv->mm.flushing_list, list) {
++		if (obj->base.dev != dev ||
++		    !atomic_read(&obj->base.refcount.refcount)) {
++			DRM_ERROR("freed flushing %p\n", obj);
++			err++;
++			break;
++		} else if (!obj->active ||
++			   (obj->base.write_domain & I915_GEM_GPU_DOMAINS) == 0 ||
++			   list_empty(&obj->gpu_write_list)){
++			DRM_ERROR("invalid flushing %p (a %d w %x gwl %d)\n",
+ 				  obj,
+-				  obj_priv->pin_count, obj_priv->active,
+-				  obj->write_domain, file, line);
++				  obj->active,
++				  obj->base.write_domain,
++				  !list_empty(&obj->gpu_write_list));
++			err++;
++		}
++	}
++
++	list_for_each_entry(obj, &dev_priv->mm.gpu_write_list, gpu_write_list) {
++		if (obj->base.dev != dev ||
++		    !atomic_read(&obj->base.refcount.refcount)) {
++			DRM_ERROR("freed gpu write %p\n", obj);
++			err++;
++			break;
++		} else if (!obj->active ||
++			   (obj->base.write_domain & I915_GEM_GPU_DOMAINS) == 0) {
++			DRM_ERROR("invalid gpu write %p (a %d w %x)\n",
++				  obj,
++				  obj->active,
++				  obj->base.write_domain);
++			err++;
++		}
++	}
++
++	list_for_each_entry(obj, &dev_priv->mm.inactive_list, list) {
++		if (obj->base.dev != dev ||
++		    !atomic_read(&obj->base.refcount.refcount)) {
++			DRM_ERROR("freed inactive %p\n", obj);
++			err++;
++			break;
++		} else if (obj->pin_count || obj->active ||
++			   (obj->base.write_domain & I915_GEM_GPU_DOMAINS)) {
++			DRM_ERROR("invalid inactive %p (p %d a %d w %x)\n",
++				  obj,
++				  obj->pin_count, obj->active,
++				  obj->base.write_domain);
++			err++;
++		}
+ 	}
++
++	list_for_each_entry(obj, &dev_priv->mm.pinned_list, list) {
++		if (obj->base.dev != dev ||
++		    !atomic_read(&obj->base.refcount.refcount)) {
++			DRM_ERROR("freed pinned %p\n", obj);
++			err++;
++			break;
++		} else if (!obj->pin_count || obj->active ||
++			   (obj->base.write_domain & I915_GEM_GPU_DOMAINS)) {
++			DRM_ERROR("invalid pinned %p (p %d a %d w %x)\n",
++				  obj,
++				  obj->pin_count, obj->active,
++				  obj->base.write_domain);
++			err++;
++		}
++	}
++
++	return warned = err;
+ }
+ #endif /* WATCH_INACTIVE */
+ 
+ 
+-#if WATCH_BUF | WATCH_EXEC | WATCH_PWRITE
++#if WATCH_EXEC | WATCH_PWRITE
+ static void
+ i915_gem_dump_page(struct page *page, uint32_t start, uint32_t end,
+ 		   uint32_t bias, uint32_t mark)
+@@ -97,41 +180,6 @@ i915_gem_dump_object(struct drm_gem_object *obj, int len,
+ }
+ #endif
+ 
+-#if WATCH_LRU
+-void
+-i915_dump_lru(struct drm_device *dev, const char *where)
+-{
+-	drm_i915_private_t		*dev_priv = dev->dev_private;
+-	struct drm_i915_gem_object	*obj_priv;
+-
+-	DRM_INFO("active list %s {\n", where);
+-	spin_lock(&dev_priv->mm.active_list_lock);
+-	list_for_each_entry(obj_priv, &dev_priv->mm.active_list,
+-			    list)
+-	{
+-		DRM_INFO("    %p: %08x\n", obj_priv,
+-			 obj_priv->last_rendering_seqno);
+-	}
+-	spin_unlock(&dev_priv->mm.active_list_lock);
+-	DRM_INFO("}\n");
+-	DRM_INFO("flushing list %s {\n", where);
+-	list_for_each_entry(obj_priv, &dev_priv->mm.flushing_list,
+-			    list)
+-	{
+-		DRM_INFO("    %p: %08x\n", obj_priv,
+-			 obj_priv->last_rendering_seqno);
+-	}
+-	DRM_INFO("}\n");
+-	DRM_INFO("inactive %s {\n", where);
+-	list_for_each_entry(obj_priv, &dev_priv->mm.inactive_list, list) {
+-		DRM_INFO("    %p: %08x\n", obj_priv,
+-			 obj_priv->last_rendering_seqno);
+-	}
+-	DRM_INFO("}\n");
+-}
+-#endif
+-
+-
+ #if WATCH_COHERENCY
+ void
+ i915_gem_object_check_coherency(struct drm_gem_object *obj, int handle)
+diff --git a/drivers/gpu/drm/i915/i915_gem_evict.c b/drivers/gpu/drm/i915/i915_gem_evict.c
+index 5c428fa..d8ae7d1 100644
+--- a/drivers/gpu/drm/i915/i915_gem_evict.c
++++ b/drivers/gpu/drm/i915/i915_gem_evict.c
+@@ -31,49 +31,6 @@
+ #include "i915_drv.h"
+ #include "i915_drm.h"
+ 
+-static struct drm_i915_gem_object *
+-i915_gem_next_active_object(struct drm_device *dev,
+-			    struct list_head **render_iter,
+-			    struct list_head **bsd_iter)
+-{
+-	drm_i915_private_t *dev_priv = dev->dev_private;
+-	struct drm_i915_gem_object *render_obj = NULL, *bsd_obj = NULL;
+-
+-	if (*render_iter != &dev_priv->render_ring.active_list)
+-		render_obj = list_entry(*render_iter,
+-					struct drm_i915_gem_object,
+-					list);
+-
+-	if (HAS_BSD(dev)) {
+-		if (*bsd_iter != &dev_priv->bsd_ring.active_list)
+-			bsd_obj = list_entry(*bsd_iter,
+-					     struct drm_i915_gem_object,
+-					     list);
+-
+-		if (render_obj == NULL) {
+-			*bsd_iter = (*bsd_iter)->next;
+-			return bsd_obj;
+-		}
+-
+-		if (bsd_obj == NULL) {
+-			*render_iter = (*render_iter)->next;
+-			return render_obj;
+-		}
+-
+-		/* XXX can we handle seqno wrapping? */
+-		if (render_obj->last_rendering_seqno < bsd_obj->last_rendering_seqno) {
+-			*render_iter = (*render_iter)->next;
+-			return render_obj;
+-		} else {
+-			*bsd_iter = (*bsd_iter)->next;
+-			return bsd_obj;
+-		}
+-	} else {
+-		*render_iter = (*render_iter)->next;
+-		return render_obj;
+-	}
+-}
+-
+ static bool
+ mark_free(struct drm_i915_gem_object *obj_priv,
+ 	   struct list_head *unwind)
+@@ -83,18 +40,12 @@ mark_free(struct drm_i915_gem_object *obj_priv,
+ 	return drm_mm_scan_add_block(obj_priv->gtt_space);
+ }
+ 
+-#define i915_for_each_active_object(OBJ, R, B) \
+-	*(R) = dev_priv->render_ring.active_list.next; \
+-	*(B) = dev_priv->bsd_ring.active_list.next; \
+-	while (((OBJ) = i915_gem_next_active_object(dev, (R), (B))) != NULL)
+-
+ int
+ i915_gem_evict_something(struct drm_device *dev, int min_size, unsigned alignment)
+ {
+ 	drm_i915_private_t *dev_priv = dev->dev_private;
+ 	struct list_head eviction_list, unwind_list;
+ 	struct drm_i915_gem_object *obj_priv;
+-	struct list_head *render_iter, *bsd_iter;
+ 	int ret = 0;
+ 
+ 	i915_gem_retire_requests(dev);
+@@ -131,13 +82,13 @@ i915_gem_evict_something(struct drm_device *dev, int min_size, unsigned alignmen
+ 	drm_mm_init_scan(&dev_priv->mm.gtt_space, min_size, alignment);
+ 
+ 	/* First see if there is a large enough contiguous idle region... */
+-	list_for_each_entry(obj_priv, &dev_priv->mm.inactive_list, list) {
++	list_for_each_entry(obj_priv, &dev_priv->mm.inactive_list, mm_list) {
+ 		if (mark_free(obj_priv, &unwind_list))
+ 			goto found;
+ 	}
+ 
+ 	/* Now merge in the soon-to-be-expired objects... */
+-	i915_for_each_active_object(obj_priv, &render_iter, &bsd_iter) {
++	list_for_each_entry(obj_priv, &dev_priv->mm.active_list, mm_list) {
+ 		/* Does the object require an outstanding flush? */
+ 		if (obj_priv->base.write_domain || obj_priv->pin_count)
+ 			continue;
+@@ -147,14 +98,14 @@ i915_gem_evict_something(struct drm_device *dev, int min_size, unsigned alignmen
+ 	}
+ 
+ 	/* Finally add anything with a pending flush (in order of retirement) */
+-	list_for_each_entry(obj_priv, &dev_priv->mm.flushing_list, list) {
++	list_for_each_entry(obj_priv, &dev_priv->mm.flushing_list, mm_list) {
+ 		if (obj_priv->pin_count)
+ 			continue;
+ 
+ 		if (mark_free(obj_priv, &unwind_list))
+ 			goto found;
+ 	}
+-	i915_for_each_active_object(obj_priv, &render_iter, &bsd_iter) {
++	list_for_each_entry(obj_priv, &dev_priv->mm.active_list, mm_list) {
+ 		if (! obj_priv->base.write_domain || obj_priv->pin_count)
+ 			continue;
+ 
+@@ -212,14 +163,9 @@ i915_gem_evict_everything(struct drm_device *dev)
+ 	int ret;
+ 	bool lists_empty;
+ 
+-	spin_lock(&dev_priv->mm.active_list_lock);
+ 	lists_empty = (list_empty(&dev_priv->mm.inactive_list) &&
+ 		       list_empty(&dev_priv->mm.flushing_list) &&
+-		       list_empty(&dev_priv->render_ring.active_list) &&
+-		       (!HAS_BSD(dev)
+-			|| list_empty(&dev_priv->bsd_ring.active_list)));
+-	spin_unlock(&dev_priv->mm.active_list_lock);
+-
++		       list_empty(&dev_priv->mm.active_list));
+ 	if (lists_empty)
+ 		return -ENOSPC;
+ 
+@@ -234,13 +180,9 @@ i915_gem_evict_everything(struct drm_device *dev)
+ 	if (ret)
+ 		return ret;
+ 
+-	spin_lock(&dev_priv->mm.active_list_lock);
+ 	lists_empty = (list_empty(&dev_priv->mm.inactive_list) &&
+ 		       list_empty(&dev_priv->mm.flushing_list) &&
+-		       list_empty(&dev_priv->render_ring.active_list) &&
+-		       (!HAS_BSD(dev)
+-			|| list_empty(&dev_priv->bsd_ring.active_list)));
+-	spin_unlock(&dev_priv->mm.active_list_lock);
++		       list_empty(&dev_priv->mm.active_list));
+ 	BUG_ON(!lists_empty);
+ 
+ 	return 0;
+@@ -258,7 +200,7 @@ i915_gem_evict_inactive(struct drm_device *dev)
+ 
+ 		obj = &list_first_entry(&dev_priv->mm.inactive_list,
+ 					struct drm_i915_gem_object,
+-					list)->base;
++					mm_list)->base;
+ 
+ 		ret = i915_gem_object_unbind(obj);
+ 		if (ret != 0) {
+diff --git a/drivers/gpu/drm/i915/i915_gem_tiling.c b/drivers/gpu/drm/i915/i915_gem_tiling.c
+index 710eca7..af352de 100644
+--- a/drivers/gpu/drm/i915/i915_gem_tiling.c
++++ b/drivers/gpu/drm/i915/i915_gem_tiling.c
+@@ -92,13 +92,13 @@ i915_gem_detect_bit_6_swizzle(struct drm_device *dev)
+ 	uint32_t swizzle_x = I915_BIT_6_SWIZZLE_UNKNOWN;
+ 	uint32_t swizzle_y = I915_BIT_6_SWIZZLE_UNKNOWN;
+ 
+-	if (IS_IRONLAKE(dev) || IS_GEN6(dev)) {
++	if (IS_GEN5(dev) || IS_GEN6(dev)) {
+ 		/* On Ironlake whatever DRAM config, GPU always do
+ 		 * same swizzling setup.
+ 		 */
+ 		swizzle_x = I915_BIT_6_SWIZZLE_9_10;
+ 		swizzle_y = I915_BIT_6_SWIZZLE_9;
+-	} else if (!IS_I9XX(dev)) {
++	} else if (IS_GEN2(dev)) {
+ 		/* As far as we know, the 865 doesn't have these bit 6
+ 		 * swizzling issues.
+ 		 */
+@@ -190,19 +190,19 @@ i915_tiling_ok(struct drm_device *dev, int stride, int size, int tiling_mode)
+ 	if (tiling_mode == I915_TILING_NONE)
+ 		return true;
+ 
+-	if (!IS_I9XX(dev) ||
++	if (IS_GEN2(dev) ||
+ 	    (tiling_mode == I915_TILING_Y && HAS_128_BYTE_Y_TILING(dev)))
+ 		tile_width = 128;
+ 	else
+ 		tile_width = 512;
+ 
+ 	/* check maximum stride & object size */
+-	if (IS_I965G(dev)) {
++	if (INTEL_INFO(dev)->gen >= 4) {
+ 		/* i965 stores the end address of the gtt mapping in the fence
+ 		 * reg, so dont bother to check the size */
+ 		if (stride / 128 > I965_FENCE_MAX_PITCH_VAL)
+ 			return false;
+-	} else if (IS_GEN3(dev) || IS_GEN2(dev)) {
++	} else {
+ 		if (stride > 8192)
+ 			return false;
+ 
+@@ -216,7 +216,7 @@ i915_tiling_ok(struct drm_device *dev, int stride, int size, int tiling_mode)
+ 	}
+ 
+ 	/* 965+ just needs multiples of tile width */
+-	if (IS_I965G(dev)) {
++	if (INTEL_INFO(dev)->gen >= 4) {
+ 		if (stride & (tile_width - 1))
+ 			return false;
+ 		return true;
+@@ -244,16 +244,18 @@ i915_gem_object_fence_offset_ok(struct drm_gem_object *obj, int tiling_mode)
+ 	if (tiling_mode == I915_TILING_NONE)
+ 		return true;
+ 
+-	if (!IS_I965G(dev)) {
+-		if (obj_priv->gtt_offset & (obj->size - 1))
++	if (INTEL_INFO(dev)->gen >= 4)
++		return true;
++
++	if (obj_priv->gtt_offset & (obj->size - 1))
++		return false;
++
++	if (IS_GEN3(dev)) {
++		if (obj_priv->gtt_offset & ~I915_FENCE_START_MASK)
++			return false;
++	} else {
++		if (obj_priv->gtt_offset & ~I830_FENCE_START_MASK)
+ 			return false;
+-		if (IS_I9XX(dev)) {
+-			if (obj_priv->gtt_offset & ~I915_FENCE_START_MASK)
+-				return false;
+-		} else {
+-			if (obj_priv->gtt_offset & ~I830_FENCE_START_MASK)
+-				return false;
+-		}
+ 	}
+ 
+ 	return true;
+@@ -271,7 +273,11 @@ i915_gem_set_tiling(struct drm_device *dev, void *data,
+ 	drm_i915_private_t *dev_priv = dev->dev_private;
+ 	struct drm_gem_object *obj;
+ 	struct drm_i915_gem_object *obj_priv;
+-	int ret = 0;
++	int ret;
++
++	ret = i915_gem_check_is_wedged(dev);
++	if (ret)
++		return ret;
+ 
+ 	obj = drm_gem_object_lookup(dev, file_priv, args->handle);
+ 	if (obj == NULL)
+@@ -328,7 +334,7 @@ i915_gem_set_tiling(struct drm_device *dev, void *data,
+ 		if (!i915_gem_object_fence_offset_ok(obj, args->tiling_mode))
+ 			ret = i915_gem_object_unbind(obj);
+ 		else if (obj_priv->fence_reg != I915_FENCE_REG_NONE)
+-			ret = i915_gem_object_put_fence_reg(obj);
++			ret = i915_gem_object_put_fence_reg(obj, true);
+ 		else
+ 			i915_gem_release_mmap(obj);
+ 
+@@ -399,16 +405,14 @@ i915_gem_get_tiling(struct drm_device *dev, void *data,
+  * bit 17 of its physical address and therefore being interpreted differently
+  * by the GPU.
+  */
+-static int
++static void
+ i915_gem_swizzle_page(struct page *page)
+ {
++	char temp[64];
+ 	char *vaddr;
+ 	int i;
+-	char temp[64];
+ 
+ 	vaddr = kmap(page);
+-	if (vaddr == NULL)
+-		return -ENOMEM;
+ 
+ 	for (i = 0; i < PAGE_SIZE; i += 128) {
+ 		memcpy(temp, &vaddr[i], 64);
+@@ -417,8 +421,6 @@ i915_gem_swizzle_page(struct page *page)
+ 	}
+ 
+ 	kunmap(page);
+-
+-	return 0;
+ }
+ 
+ void
+@@ -440,11 +442,7 @@ i915_gem_object_do_bit_17_swizzle(struct drm_gem_object *obj)
+ 		char new_bit_17 = page_to_phys(obj_priv->pages[i]) >> 17;
+ 		if ((new_bit_17 & 0x1) !=
+ 		    (test_bit(i, obj_priv->bit_17) != 0)) {
+-			int ret = i915_gem_swizzle_page(obj_priv->pages[i]);
+-			if (ret != 0) {
+-				DRM_ERROR("Failed to swizzle page\n");
+-				return;
+-			}
++			i915_gem_swizzle_page(obj_priv->pages[i]);
+ 			set_page_dirty(obj_priv->pages[i]);
+ 		}
+ 	}
+diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
+index 744225e..237b8bd 100644
+--- a/drivers/gpu/drm/i915/i915_irq.c
++++ b/drivers/gpu/drm/i915/i915_irq.c
+@@ -85,7 +85,7 @@ ironlake_disable_graphics_irq(drm_i915_private_t *dev_priv, u32 mask)
+ }
+ 
+ /* For display hotplug interrupt */
+-void
++static void
+ ironlake_enable_display_irq(drm_i915_private_t *dev_priv, u32 mask)
+ {
+ 	if ((dev_priv->irq_mask_reg & mask) != 0) {
+@@ -172,7 +172,7 @@ void intel_enable_asle (struct drm_device *dev)
+ 	else {
+ 		i915_enable_pipestat(dev_priv, 1,
+ 				     PIPE_LEGACY_BLC_EVENT_ENABLE);
+-		if (IS_I965G(dev))
++		if (INTEL_INFO(dev)->gen >= 4)
+ 			i915_enable_pipestat(dev_priv, 0,
+ 					     PIPE_LEGACY_BLC_EVENT_ENABLE);
+ 	}
+@@ -191,12 +191,7 @@ static int
+ i915_pipe_enabled(struct drm_device *dev, int pipe)
+ {
+ 	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
+-	unsigned long pipeconf = pipe ? PIPEBCONF : PIPEACONF;
+-
+-	if (I915_READ(pipeconf) & PIPEACONF_ENABLE)
+-		return 1;
+-
+-	return 0;
++	return I915_READ(PIPECONF(pipe)) & PIPECONF_ENABLE;
+ }
+ 
+ /* Called from drm generic code, passed a 'crtc', which
+@@ -207,10 +202,7 @@ u32 i915_get_vblank_counter(struct drm_device *dev, int pipe)
+ 	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
+ 	unsigned long high_frame;
+ 	unsigned long low_frame;
+-	u32 high1, high2, low, count;
+-
+-	high_frame = pipe ? PIPEBFRAMEHIGH : PIPEAFRAMEHIGH;
+-	low_frame = pipe ? PIPEBFRAMEPIXEL : PIPEAFRAMEPIXEL;
++	u32 high1, high2, low;
+ 
+ 	if (!i915_pipe_enabled(dev, pipe)) {
+ 		DRM_DEBUG_DRIVER("trying to get vblank count for disabled "
+@@ -218,23 +210,23 @@ u32 i915_get_vblank_counter(struct drm_device *dev, int pipe)
+ 		return 0;
+ 	}
+ 
++	high_frame = pipe ? PIPEBFRAMEHIGH : PIPEAFRAMEHIGH;
++	low_frame = pipe ? PIPEBFRAMEPIXEL : PIPEAFRAMEPIXEL;
++
+ 	/*
+ 	 * High & low register fields aren't synchronized, so make sure
+ 	 * we get a low value that's stable across two reads of the high
+ 	 * register.
+ 	 */
+ 	do {
+-		high1 = ((I915_READ(high_frame) & PIPE_FRAME_HIGH_MASK) >>
+-			 PIPE_FRAME_HIGH_SHIFT);
+-		low =  ((I915_READ(low_frame) & PIPE_FRAME_LOW_MASK) >>
+-			PIPE_FRAME_LOW_SHIFT);
+-		high2 = ((I915_READ(high_frame) & PIPE_FRAME_HIGH_MASK) >>
+-			 PIPE_FRAME_HIGH_SHIFT);
++		high1 = I915_READ(high_frame) & PIPE_FRAME_HIGH_MASK;
++		low   = I915_READ(low_frame)  & PIPE_FRAME_LOW_MASK;
++		high2 = I915_READ(high_frame) & PIPE_FRAME_HIGH_MASK;
+ 	} while (high1 != high2);
+ 
+-	count = (high1 << 8) | low;
+-
+-	return count;
++	high1 >>= PIPE_FRAME_HIGH_SHIFT;
++	low >>= PIPE_FRAME_LOW_SHIFT;
++	return (high1 << 8) | low;
+ }
+ 
+ u32 gm45_get_vblank_counter(struct drm_device *dev, int pipe)
+@@ -260,16 +252,12 @@ static void i915_hotplug_work_func(struct work_struct *work)
+ 						    hotplug_work);
+ 	struct drm_device *dev = dev_priv->dev;
+ 	struct drm_mode_config *mode_config = &dev->mode_config;
+-	struct drm_encoder *encoder;
+-
+-	if (mode_config->num_encoder) {
+-		list_for_each_entry(encoder, &mode_config->encoder_list, head) {
+-			struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder);
+-	
+-			if (intel_encoder->hot_plug)
+-				(*intel_encoder->hot_plug) (intel_encoder);
+-		}
+-	}
++	struct intel_encoder *encoder;
++
++	list_for_each_entry(encoder, &mode_config->encoder_list, base.head)
++		if (encoder->hot_plug)
++			encoder->hot_plug(encoder);
++
+ 	/* Just fire off a uevent and let userspace tell us what to do */
+ 	drm_helper_hpd_irq_event(dev);
+ }
+@@ -305,13 +293,30 @@ static void i915_handle_rps_change(struct drm_device *dev)
+ 	return;
+ }
+ 
+-irqreturn_t ironlake_irq_handler(struct drm_device *dev)
++static void notify_ring(struct drm_device *dev,
++			struct intel_ring_buffer *ring)
++{
++	struct drm_i915_private *dev_priv = dev->dev_private;
++	u32 seqno = ring->get_seqno(dev, ring);
++	ring->irq_gem_seqno = seqno;
++	trace_i915_gem_request_complete(dev, seqno);
++	wake_up_all(&ring->irq_queue);
++	dev_priv->hangcheck_count = 0;
++	mod_timer(&dev_priv->hangcheck_timer,
++		  jiffies + msecs_to_jiffies(DRM_I915_HANGCHECK_PERIOD));
++}
++
++static irqreturn_t ironlake_irq_handler(struct drm_device *dev)
+ {
+ 	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
+ 	int ret = IRQ_NONE;
+ 	u32 de_iir, gt_iir, de_ier, pch_iir;
++	u32 hotplug_mask;
+ 	struct drm_i915_master_private *master_priv;
+-	struct intel_ring_buffer *render_ring = &dev_priv->render_ring;
++	u32 bsd_usr_interrupt = GT_BSD_USER_INTERRUPT;
++
++	if (IS_GEN6(dev))
++		bsd_usr_interrupt = GT_GEN6_BSD_USER_INTERRUPT;
+ 
+ 	/* disable master interrupt before clearing iir  */
+ 	de_ier = I915_READ(DEIER);
+@@ -325,6 +330,11 @@ irqreturn_t ironlake_irq_handler(struct drm_device *dev)
+ 	if (de_iir == 0 && gt_iir == 0 && pch_iir == 0)
+ 		goto done;
+ 
++	if (HAS_PCH_CPT(dev))
++		hotplug_mask = SDE_HOTPLUG_MASK_CPT;
++	else
++		hotplug_mask = SDE_HOTPLUG_MASK;
++
+ 	ret = IRQ_HANDLED;
+ 
+ 	if (dev->primary->master) {
+@@ -334,29 +344,24 @@ irqreturn_t ironlake_irq_handler(struct drm_device *dev)
+ 				READ_BREADCRUMB(dev_priv);
+ 	}
+ 
+-	if (gt_iir & GT_PIPE_NOTIFY) {
+-		u32 seqno = render_ring->get_gem_seqno(dev, render_ring);
+-		render_ring->irq_gem_seqno = seqno;
+-		trace_i915_gem_request_complete(dev, seqno);
+-		DRM_WAKEUP(&dev_priv->render_ring.irq_queue);
+-		dev_priv->hangcheck_count = 0;
+-		mod_timer(&dev_priv->hangcheck_timer, jiffies + DRM_I915_HANGCHECK_PERIOD);
+-	}
+-	if (gt_iir & GT_BSD_USER_INTERRUPT)
+-		DRM_WAKEUP(&dev_priv->bsd_ring.irq_queue);
+-
++	if (gt_iir & GT_PIPE_NOTIFY)
++		notify_ring(dev, &dev_priv->render_ring);
++	if (gt_iir & bsd_usr_interrupt)
++		notify_ring(dev, &dev_priv->bsd_ring);
++	if (HAS_BLT(dev) && gt_iir & GT_BLT_USER_INTERRUPT)
++		notify_ring(dev, &dev_priv->blt_ring);
+ 
+ 	if (de_iir & DE_GSE)
+-		ironlake_opregion_gse_intr(dev);
++		intel_opregion_gse_intr(dev);
+ 
+ 	if (de_iir & DE_PLANEA_FLIP_DONE) {
+ 		intel_prepare_page_flip(dev, 0);
+-		intel_finish_page_flip(dev, 0);
++		intel_finish_page_flip_plane(dev, 0);
+ 	}
+ 
+ 	if (de_iir & DE_PLANEB_FLIP_DONE) {
+ 		intel_prepare_page_flip(dev, 1);
+-		intel_finish_page_flip(dev, 1);
++		intel_finish_page_flip_plane(dev, 1);
+ 	}
+ 
+ 	if (de_iir & DE_PIPEA_VBLANK)
+@@ -366,10 +371,8 @@ irqreturn_t ironlake_irq_handler(struct drm_device *dev)
+ 		drm_handle_vblank(dev, 1);
+ 
+ 	/* check event from PCH */
+-	if ((de_iir & DE_PCH_EVENT) &&
+-	    (pch_iir & SDE_HOTPLUG_MASK)) {
++	if ((de_iir & DE_PCH_EVENT) && (pch_iir & hotplug_mask))
+ 		queue_work(dev_priv->wq, &dev_priv->hotplug_work);
+-	}
+ 
+ 	if (de_iir & DE_PCU_EVENT) {
+ 		I915_WRITE16(MEMINTRSTS, I915_READ(MEMINTRSTS));
+@@ -404,23 +407,20 @@ static void i915_error_work_func(struct work_struct *work)
+ 	char *reset_event[] = { "RESET=1", NULL };
+ 	char *reset_done_event[] = { "ERROR=0", NULL };
+ 
+-	DRM_DEBUG_DRIVER("generating error event\n");
+ 	kobject_uevent_env(&dev->primary->kdev.kobj, KOBJ_CHANGE, error_event);
+ 
+ 	if (atomic_read(&dev_priv->mm.wedged)) {
+-		if (IS_I965G(dev)) {
+-			DRM_DEBUG_DRIVER("resetting chip\n");
+-			kobject_uevent_env(&dev->primary->kdev.kobj, KOBJ_CHANGE, reset_event);
+-			if (!i965_reset(dev, GDRST_RENDER)) {
+-				atomic_set(&dev_priv->mm.wedged, 0);
+-				kobject_uevent_env(&dev->primary->kdev.kobj, KOBJ_CHANGE, reset_done_event);
+-			}
+-		} else {
+-			DRM_DEBUG_DRIVER("reboot required\n");
++		DRM_DEBUG_DRIVER("resetting chip\n");
++		kobject_uevent_env(&dev->primary->kdev.kobj, KOBJ_CHANGE, reset_event);
++		if (!i915_reset(dev, GRDOM_RENDER)) {
++			atomic_set(&dev_priv->mm.wedged, 0);
++			kobject_uevent_env(&dev->primary->kdev.kobj, KOBJ_CHANGE, reset_done_event);
+ 		}
++		complete_all(&dev_priv->error_completion);
+ 	}
+ }
+ 
++#ifdef CONFIG_DEBUG_FS
+ static struct drm_i915_error_object *
+ i915_error_object_create(struct drm_device *dev,
+ 			 struct drm_gem_object *src)
+@@ -511,7 +511,7 @@ i915_get_bbaddr(struct drm_device *dev, u32 *ring)
+ 
+ 	if (IS_I830(dev) || IS_845G(dev))
+ 		cmd = MI_BATCH_BUFFER;
+-	else if (IS_I965G(dev))
++	else if (INTEL_INFO(dev)->gen >= 4)
+ 		cmd = (MI_BATCH_BUFFER_START | (2 << 6) |
+ 		       MI_BATCH_NON_SECURE_I965);
+ 	else
+@@ -584,13 +584,16 @@ static void i915_capture_error_state(struct drm_device *dev)
+ 		return;
+ 	}
+ 
+-	error->seqno = i915_get_gem_seqno(dev, &dev_priv->render_ring);
++	DRM_DEBUG_DRIVER("generating error event\n");
++
++	error->seqno =
++		dev_priv->render_ring.get_seqno(dev, &dev_priv->render_ring);
+ 	error->eir = I915_READ(EIR);
+ 	error->pgtbl_er = I915_READ(PGTBL_ER);
+ 	error->pipeastat = I915_READ(PIPEASTAT);
+ 	error->pipebstat = I915_READ(PIPEBSTAT);
+ 	error->instpm = I915_READ(INSTPM);
+-	if (!IS_I965G(dev)) {
++	if (INTEL_INFO(dev)->gen < 4) {
+ 		error->ipeir = I915_READ(IPEIR);
+ 		error->ipehr = I915_READ(IPEHR);
+ 		error->instdone = I915_READ(INSTDONE);
+@@ -612,9 +615,7 @@ static void i915_capture_error_state(struct drm_device *dev)
+ 	batchbuffer[0] = NULL;
+ 	batchbuffer[1] = NULL;
+ 	count = 0;
+-	list_for_each_entry(obj_priv,
+-			&dev_priv->render_ring.active_list, list) {
+-
++	list_for_each_entry(obj_priv, &dev_priv->mm.active_list, mm_list) {
+ 		struct drm_gem_object *obj = &obj_priv->base;
+ 
+ 		if (batchbuffer[0] == NULL &&
+@@ -631,7 +632,7 @@ static void i915_capture_error_state(struct drm_device *dev)
+ 	}
+ 	/* Scan the other lists for completeness for those bizarre errors. */
+ 	if (batchbuffer[0] == NULL || batchbuffer[1] == NULL) {
+-		list_for_each_entry(obj_priv, &dev_priv->mm.flushing_list, list) {
++		list_for_each_entry(obj_priv, &dev_priv->mm.flushing_list, mm_list) {
+ 			struct drm_gem_object *obj = &obj_priv->base;
+ 
+ 			if (batchbuffer[0] == NULL &&
+@@ -649,7 +650,7 @@ static void i915_capture_error_state(struct drm_device *dev)
+ 		}
+ 	}
+ 	if (batchbuffer[0] == NULL || batchbuffer[1] == NULL) {
+-		list_for_each_entry(obj_priv, &dev_priv->mm.inactive_list, list) {
++		list_for_each_entry(obj_priv, &dev_priv->mm.inactive_list, mm_list) {
+ 			struct drm_gem_object *obj = &obj_priv->base;
+ 
+ 			if (batchbuffer[0] == NULL &&
+@@ -668,7 +669,7 @@ static void i915_capture_error_state(struct drm_device *dev)
+ 	}
+ 
+ 	/* We need to copy these to an anonymous buffer as the simplest
+-	 * method to avoid being overwritten by userpace.
++	 * method to avoid being overwritten by userspace.
+ 	 */
+ 	error->batchbuffer[0] = i915_error_object_create(dev, batchbuffer[0]);
+ 	if (batchbuffer[1] != batchbuffer[0])
+@@ -690,8 +691,7 @@ static void i915_capture_error_state(struct drm_device *dev)
+ 
+ 	if (error->active_bo) {
+ 		int i = 0;
+-		list_for_each_entry(obj_priv,
+-				&dev_priv->render_ring.active_list, list) {
++		list_for_each_entry(obj_priv, &dev_priv->mm.active_list, mm_list) {
+ 			struct drm_gem_object *obj = &obj_priv->base;
+ 
+ 			error->active_bo[i].size = obj->size;
+@@ -744,6 +744,9 @@ void i915_destroy_error_state(struct drm_device *dev)
+ 	if (error)
+ 		i915_error_state_free(dev, error);
+ }
++#else
++#define i915_capture_error_state(x)
++#endif
+ 
+ static void i915_report_and_clear_eir(struct drm_device *dev)
+ {
+@@ -785,7 +788,7 @@ static void i915_report_and_clear_eir(struct drm_device *dev)
+ 		}
+ 	}
+ 
+-	if (IS_I9XX(dev)) {
++	if (!IS_GEN2(dev)) {
+ 		if (eir & I915_ERROR_PAGE_TABLE) {
+ 			u32 pgtbl_err = I915_READ(PGTBL_ER);
+ 			printk(KERN_ERR "page table error\n");
+@@ -811,7 +814,7 @@ static void i915_report_and_clear_eir(struct drm_device *dev)
+ 		printk(KERN_ERR "instruction error\n");
+ 		printk(KERN_ERR "  INSTPM: 0x%08x\n",
+ 		       I915_READ(INSTPM));
+-		if (!IS_I965G(dev)) {
++		if (INTEL_INFO(dev)->gen < 4) {
+ 			u32 ipeir = I915_READ(IPEIR);
+ 
+ 			printk(KERN_ERR "  IPEIR: 0x%08x\n",
+@@ -876,12 +879,17 @@ static void i915_handle_error(struct drm_device *dev, bool wedged)
+ 	i915_report_and_clear_eir(dev);
+ 
+ 	if (wedged) {
++		INIT_COMPLETION(dev_priv->error_completion);
+ 		atomic_set(&dev_priv->mm.wedged, 1);
+ 
+ 		/*
+ 		 * Wakeup waiting processes so they don't hang
+ 		 */
+-		DRM_WAKEUP(&dev_priv->render_ring.irq_queue);
++		wake_up_all(&dev_priv->render_ring.irq_queue);
++		if (HAS_BSD(dev))
++			wake_up_all(&dev_priv->bsd_ring.irq_queue);
++		if (HAS_BLT(dev))
++			wake_up_all(&dev_priv->blt_ring.irq_queue);
+ 	}
+ 
+ 	queue_work(dev_priv->wq, &dev_priv->error_work);
+@@ -912,7 +920,7 @@ static void i915_pageflip_stall_check(struct drm_device *dev, int pipe)
+ 
+ 	/* Potential stall - if we see that the flip has happened, assume a missed interrupt */
+ 	obj_priv = to_intel_bo(work->pending_flip_obj);
+-	if(IS_I965G(dev)) {
++	if (INTEL_INFO(dev)->gen >= 4) {
+ 		int dspsurf = intel_crtc->plane == 0 ? DSPASURF : DSPBSURF;
+ 		stall_detected = I915_READ(dspsurf) == obj_priv->gtt_offset;
+ 	} else {
+@@ -942,7 +950,6 @@ irqreturn_t i915_driver_irq_handler(DRM_IRQ_ARGS)
+ 	unsigned long irqflags;
+ 	int irq_received;
+ 	int ret = IRQ_NONE;
+-	struct intel_ring_buffer *render_ring = &dev_priv->render_ring;
+ 
+ 	atomic_inc(&dev_priv->irq_received);
+ 
+@@ -951,7 +958,7 @@ irqreturn_t i915_driver_irq_handler(DRM_IRQ_ARGS)
+ 
+ 	iir = I915_READ(IIR);
+ 
+-	if (IS_I965G(dev))
++	if (INTEL_INFO(dev)->gen >= 4)
+ 		vblank_status = PIPE_START_VBLANK_INTERRUPT_STATUS;
+ 	else
+ 		vblank_status = PIPE_VBLANK_INTERRUPT_STATUS;
+@@ -1019,18 +1026,10 @@ irqreturn_t i915_driver_irq_handler(DRM_IRQ_ARGS)
+ 					READ_BREADCRUMB(dev_priv);
+ 		}
+ 
+-		if (iir & I915_USER_INTERRUPT) {
+-			u32 seqno =
+-				render_ring->get_gem_seqno(dev, render_ring);
+-			render_ring->irq_gem_seqno = seqno;
+-			trace_i915_gem_request_complete(dev, seqno);
+-			DRM_WAKEUP(&dev_priv->render_ring.irq_queue);
+-			dev_priv->hangcheck_count = 0;
+-			mod_timer(&dev_priv->hangcheck_timer, jiffies + DRM_I915_HANGCHECK_PERIOD);
+-		}
+-
++		if (iir & I915_USER_INTERRUPT)
++			notify_ring(dev, &dev_priv->render_ring);
+ 		if (HAS_BSD(dev) && (iir & I915_BSD_USER_INTERRUPT))
+-			DRM_WAKEUP(&dev_priv->bsd_ring.irq_queue);
++			notify_ring(dev, &dev_priv->bsd_ring);
+ 
+ 		if (iir & I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT) {
+ 			intel_prepare_page_flip(dev, 0);
+@@ -1065,7 +1064,7 @@ irqreturn_t i915_driver_irq_handler(DRM_IRQ_ARGS)
+ 		if ((pipea_stats & PIPE_LEGACY_BLC_EVENT_STATUS) ||
+ 		    (pipeb_stats & PIPE_LEGACY_BLC_EVENT_STATUS) ||
+ 		    (iir & I915_ASLE_INTERRUPT))
+-			opregion_asle_intr(dev);
++			intel_opregion_asle_intr(dev);
+ 
+ 		/* With MSI, interrupts are only generated when iir
+ 		 * transitions from zero to nonzero.  If another bit got
+@@ -1207,18 +1206,15 @@ int i915_enable_vblank(struct drm_device *dev, int pipe)
+ {
+ 	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
+ 	unsigned long irqflags;
+-	int pipeconf_reg = (pipe == 0) ? PIPEACONF : PIPEBCONF;
+-	u32 pipeconf;
+ 
+-	pipeconf = I915_READ(pipeconf_reg);
+-	if (!(pipeconf & PIPEACONF_ENABLE))
++	if (!i915_pipe_enabled(dev, pipe))
+ 		return -EINVAL;
+ 
+ 	spin_lock_irqsave(&dev_priv->user_irq_lock, irqflags);
+ 	if (HAS_PCH_SPLIT(dev))
+ 		ironlake_enable_display_irq(dev_priv, (pipe == 0) ? 
+ 					    DE_PIPEA_VBLANK: DE_PIPEB_VBLANK);
+-	else if (IS_I965G(dev))
++	else if (INTEL_INFO(dev)->gen >= 4)
+ 		i915_enable_pipestat(dev_priv, pipe,
+ 				     PIPE_START_VBLANK_INTERRUPT_ENABLE);
+ 	else
+@@ -1252,7 +1248,7 @@ void i915_enable_interrupt (struct drm_device *dev)
+ 	struct drm_i915_private *dev_priv = dev->dev_private;
+ 
+ 	if (!HAS_PCH_SPLIT(dev))
+-		opregion_enable_asle(dev);
++		intel_opregion_enable_asle(dev);
+ 	dev_priv->irq_enabled = 1;
+ }
+ 
+@@ -1311,7 +1307,7 @@ int i915_vblank_swap(struct drm_device *dev, void *data,
+ 	return -EINVAL;
+ }
+ 
+-struct drm_i915_gem_request *
++static struct drm_i915_gem_request *
+ i915_get_tail_request(struct drm_device *dev)
+ {
+ 	drm_i915_private_t *dev_priv = dev->dev_private;
+@@ -1331,11 +1327,7 @@ void i915_hangcheck_elapsed(unsigned long data)
+ 	drm_i915_private_t *dev_priv = dev->dev_private;
+ 	uint32_t acthd, instdone, instdone1;
+ 
+-	/* No reset support on this chip yet. */
+-	if (IS_GEN6(dev))
+-		return;
+-
+-	if (!IS_I965G(dev)) {
++	if (INTEL_INFO(dev)->gen < 4) {
+ 		acthd = I915_READ(ACTHD);
+ 		instdone = I915_READ(INSTDONE);
+ 		instdone1 = 0;
+@@ -1347,9 +1339,8 @@ void i915_hangcheck_elapsed(unsigned long data)
+ 
+ 	/* If all work is done then ACTHD clearly hasn't advanced. */
+ 	if (list_empty(&dev_priv->render_ring.request_list) ||
+-		i915_seqno_passed(i915_get_gem_seqno(dev,
+-				&dev_priv->render_ring),
+-			i915_get_tail_request(dev)->seqno)) {
++		i915_seqno_passed(dev_priv->render_ring.get_seqno(dev, &dev_priv->render_ring),
++				  i915_get_tail_request(dev)->seqno)) {
+ 		bool missed_wakeup = false;
+ 
+ 		dev_priv->hangcheck_count = 0;
+@@ -1357,13 +1348,19 @@ void i915_hangcheck_elapsed(unsigned long data)
+ 		/* Issue a wake-up to catch stuck h/w. */
+ 		if (dev_priv->render_ring.waiting_gem_seqno &&
+ 		    waitqueue_active(&dev_priv->render_ring.irq_queue)) {
+-			DRM_WAKEUP(&dev_priv->render_ring.irq_queue);
++			wake_up_all(&dev_priv->render_ring.irq_queue);
+ 			missed_wakeup = true;
+ 		}
+ 
+ 		if (dev_priv->bsd_ring.waiting_gem_seqno &&
+ 		    waitqueue_active(&dev_priv->bsd_ring.irq_queue)) {
+-			DRM_WAKEUP(&dev_priv->bsd_ring.irq_queue);
++			wake_up_all(&dev_priv->bsd_ring.irq_queue);
++			missed_wakeup = true;
++		}
++
++		if (dev_priv->blt_ring.waiting_gem_seqno &&
++		    waitqueue_active(&dev_priv->blt_ring.irq_queue)) {
++			wake_up_all(&dev_priv->blt_ring.irq_queue);
+ 			missed_wakeup = true;
+ 		}
+ 
+@@ -1377,6 +1374,21 @@ void i915_hangcheck_elapsed(unsigned long data)
+ 	    dev_priv->last_instdone1 == instdone1) {
+ 		if (dev_priv->hangcheck_count++ > 1) {
+ 			DRM_ERROR("Hangcheck timer elapsed... GPU hung\n");
++
++			if (!IS_GEN2(dev)) {
++				/* Is the chip hanging on a WAIT_FOR_EVENT?
++				 * If so we can simply poke the RB_WAIT bit
++				 * and break the hang. This should work on
++				 * all but the second generation chipsets.
++				 */
++				u32 tmp = I915_READ(PRB0_CTL);
++				if (tmp & RING_WAIT) {
++					I915_WRITE(PRB0_CTL, tmp);
++					POSTING_READ(PRB0_CTL);
++					goto out;
++				}
++			}
++
+ 			i915_handle_error(dev, true);
+ 			return;
+ 		}
+@@ -1388,8 +1400,10 @@ void i915_hangcheck_elapsed(unsigned long data)
+ 		dev_priv->last_instdone1 = instdone1;
+ 	}
+ 
++out:
+ 	/* Reset timer case chip hangs without another request being added */
+-	mod_timer(&dev_priv->hangcheck_timer, jiffies + DRM_I915_HANGCHECK_PERIOD);
++	mod_timer(&dev_priv->hangcheck_timer,
++		  jiffies + msecs_to_jiffies(DRM_I915_HANGCHECK_PERIOD));
+ }
+ 
+ /* drm_dma.h hooks
+@@ -1424,8 +1438,7 @@ static int ironlake_irq_postinstall(struct drm_device *dev)
+ 	u32 display_mask = DE_MASTER_IRQ_CONTROL | DE_GSE | DE_PCH_EVENT |
+ 			   DE_PLANEA_FLIP_DONE | DE_PLANEB_FLIP_DONE;
+ 	u32 render_mask = GT_PIPE_NOTIFY | GT_BSD_USER_INTERRUPT;
+-	u32 hotplug_mask = SDE_CRT_HOTPLUG | SDE_PORTB_HOTPLUG |
+-			   SDE_PORTC_HOTPLUG | SDE_PORTD_HOTPLUG;
++	u32 hotplug_mask;
+ 
+ 	dev_priv->irq_mask_reg = ~display_mask;
+ 	dev_priv->de_irq_enable_reg = display_mask | DE_PIPEA_VBLANK | DE_PIPEB_VBLANK;
+@@ -1436,20 +1449,35 @@ static int ironlake_irq_postinstall(struct drm_device *dev)
+ 	I915_WRITE(DEIER, dev_priv->de_irq_enable_reg);
+ 	(void) I915_READ(DEIER);
+ 
+-	/* Gen6 only needs render pipe_control now */
+-	if (IS_GEN6(dev))
+-		render_mask = GT_PIPE_NOTIFY;
++	if (IS_GEN6(dev)) {
++		render_mask =
++			GT_PIPE_NOTIFY |
++			GT_GEN6_BSD_USER_INTERRUPT |
++			GT_BLT_USER_INTERRUPT;
++	}
+ 
+ 	dev_priv->gt_irq_mask_reg = ~render_mask;
+ 	dev_priv->gt_irq_enable_reg = render_mask;
+ 
+ 	I915_WRITE(GTIIR, I915_READ(GTIIR));
+ 	I915_WRITE(GTIMR, dev_priv->gt_irq_mask_reg);
+-	if (IS_GEN6(dev))
++	if (IS_GEN6(dev)) {
+ 		I915_WRITE(GEN6_RENDER_IMR, ~GEN6_RENDER_PIPE_CONTROL_NOTIFY_INTERRUPT);
++		I915_WRITE(GEN6_BSD_IMR, ~GEN6_BSD_IMR_USER_INTERRUPT);
++		I915_WRITE(GEN6_BLITTER_IMR, ~GEN6_BLITTER_USER_INTERRUPT);
++	}
++
+ 	I915_WRITE(GTIER, dev_priv->gt_irq_enable_reg);
+ 	(void) I915_READ(GTIER);
+ 
++	if (HAS_PCH_CPT(dev)) {
++		hotplug_mask = SDE_CRT_HOTPLUG_CPT | SDE_PORTB_HOTPLUG_CPT  |
++			       SDE_PORTC_HOTPLUG_CPT | SDE_PORTD_HOTPLUG_CPT ;
++	} else {
++		hotplug_mask = SDE_CRT_HOTPLUG | SDE_PORTB_HOTPLUG |
++			       SDE_PORTC_HOTPLUG | SDE_PORTD_HOTPLUG;
++	}
++
+ 	dev_priv->pch_irq_mask_reg = ~hotplug_mask;
+ 	dev_priv->pch_irq_enable_reg = hotplug_mask;
+ 
+@@ -1506,9 +1534,10 @@ int i915_driver_irq_postinstall(struct drm_device *dev)
+ 	u32 error_mask;
+ 
+ 	DRM_INIT_WAITQUEUE(&dev_priv->render_ring.irq_queue);
+-
+ 	if (HAS_BSD(dev))
+ 		DRM_INIT_WAITQUEUE(&dev_priv->bsd_ring.irq_queue);
++	if (HAS_BLT(dev))
++		DRM_INIT_WAITQUEUE(&dev_priv->blt_ring.irq_queue);
+ 
+ 	dev_priv->vblank_pipe = DRM_I915_VBLANK_PIPE_A | DRM_I915_VBLANK_PIPE_B;
+ 
+@@ -1578,7 +1607,7 @@ int i915_driver_irq_postinstall(struct drm_device *dev)
+ 		I915_WRITE(PORT_HOTPLUG_EN, hotplug_en);
+ 	}
+ 
+-	opregion_enable_asle(dev);
++	intel_opregion_enable_asle(dev);
+ 
+ 	return 0;
+ }
+diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
+index 4f5e155..25ed911 100644
+--- a/drivers/gpu/drm/i915/i915_reg.h
++++ b/drivers/gpu/drm/i915/i915_reg.h
+@@ -25,52 +25,16 @@
+ #ifndef _I915_REG_H_
+ #define _I915_REG_H_
+ 
++#define _PIPE(pipe, a, b) ((a) + (pipe)*((b)-(a)))
++
+ /*
+  * The Bridge device's PCI config space has information about the
+  * fb aperture size and the amount of pre-reserved memory.
++ * This is all handled in the intel-gtt.ko module. i915.ko only
++ * cares about the vga bit for the vga arbiter.
+  */
+ #define INTEL_GMCH_CTRL		0x52
+ #define INTEL_GMCH_VGA_DISABLE  (1 << 1)
+-#define INTEL_GMCH_ENABLED	0x4
+-#define INTEL_GMCH_MEM_MASK	0x1
+-#define INTEL_GMCH_MEM_64M	0x1
+-#define INTEL_GMCH_MEM_128M	0
+-
+-#define INTEL_GMCH_GMS_MASK		(0xf << 4)
+-#define INTEL_855_GMCH_GMS_DISABLED	(0x0 << 4)
+-#define INTEL_855_GMCH_GMS_STOLEN_1M	(0x1 << 4)
+-#define INTEL_855_GMCH_GMS_STOLEN_4M	(0x2 << 4)
+-#define INTEL_855_GMCH_GMS_STOLEN_8M	(0x3 << 4)
+-#define INTEL_855_GMCH_GMS_STOLEN_16M	(0x4 << 4)
+-#define INTEL_855_GMCH_GMS_STOLEN_32M	(0x5 << 4)
+-
+-#define INTEL_915G_GMCH_GMS_STOLEN_48M	(0x6 << 4)
+-#define INTEL_915G_GMCH_GMS_STOLEN_64M	(0x7 << 4)
+-#define INTEL_GMCH_GMS_STOLEN_128M	(0x8 << 4)
+-#define INTEL_GMCH_GMS_STOLEN_256M	(0x9 << 4)
+-#define INTEL_GMCH_GMS_STOLEN_96M	(0xa << 4)
+-#define INTEL_GMCH_GMS_STOLEN_160M	(0xb << 4)
+-#define INTEL_GMCH_GMS_STOLEN_224M	(0xc << 4)
+-#define INTEL_GMCH_GMS_STOLEN_352M	(0xd << 4)
+-
+-#define SNB_GMCH_CTRL	0x50
+-#define SNB_GMCH_GMS_STOLEN_MASK	0xF8
+-#define SNB_GMCH_GMS_STOLEN_32M		(1 << 3)
+-#define SNB_GMCH_GMS_STOLEN_64M		(2 << 3)
+-#define SNB_GMCH_GMS_STOLEN_96M		(3 << 3)
+-#define SNB_GMCH_GMS_STOLEN_128M	(4 << 3)
+-#define SNB_GMCH_GMS_STOLEN_160M	(5 << 3)
+-#define SNB_GMCH_GMS_STOLEN_192M	(6 << 3)
+-#define SNB_GMCH_GMS_STOLEN_224M	(7 << 3)
+-#define SNB_GMCH_GMS_STOLEN_256M	(8 << 3)
+-#define SNB_GMCH_GMS_STOLEN_288M	(9 << 3)
+-#define SNB_GMCH_GMS_STOLEN_320M	(0xa << 3)
+-#define SNB_GMCH_GMS_STOLEN_352M	(0xb << 3)
+-#define SNB_GMCH_GMS_STOLEN_384M	(0xc << 3)
+-#define SNB_GMCH_GMS_STOLEN_416M	(0xd << 3)
+-#define SNB_GMCH_GMS_STOLEN_448M	(0xe << 3)
+-#define SNB_GMCH_GMS_STOLEN_480M	(0xf << 3)
+-#define SNB_GMCH_GMS_STOLEN_512M	(0x10 << 3)
+ 
+ /* PCI config space */
+ 
+@@ -106,10 +70,13 @@
+ #define   I915_GC_RENDER_CLOCK_200_MHZ	(1 << 0)
+ #define   I915_GC_RENDER_CLOCK_333_MHZ	(4 << 0)
+ #define LBB	0xf4
+-#define GDRST 0xc0
+-#define  GDRST_FULL	(0<<2)
+-#define  GDRST_RENDER	(1<<2)
+-#define  GDRST_MEDIA	(3<<2)
++
++/* Graphics reset regs */
++#define I965_GDRST 0xc0 /* PCI config register */
++#define ILK_GDSR 0x2ca4 /* MCHBAR offset */
++#define  GRDOM_FULL	(0<<2)
++#define  GRDOM_RENDER	(1<<2)
++#define  GRDOM_MEDIA	(3<<2)
+ 
+ /* VGA stuff */
+ 
+@@ -192,11 +159,11 @@
+ #define MI_STORE_DWORD_INDEX	MI_INSTR(0x21, 1)
+ #define   MI_STORE_DWORD_INDEX_SHIFT 2
+ #define MI_LOAD_REGISTER_IMM	MI_INSTR(0x22, 1)
++#define MI_FLUSH_DW		MI_INSTR(0x26, 2) /* for GEN6 */
+ #define MI_BATCH_BUFFER		MI_INSTR(0x30, 1)
+ #define   MI_BATCH_NON_SECURE	(1)
+ #define   MI_BATCH_NON_SECURE_I965 (1<<8)
+ #define MI_BATCH_BUFFER_START	MI_INSTR(0x31, 0)
+-
+ /*
+  * 3D instructions used by the kernel
+  */
+@@ -249,6 +216,16 @@
+ #define   PIPE_CONTROL_GLOBAL_GTT (1<<2) /* in addr dword */
+ #define   PIPE_CONTROL_STALL_EN	(1<<1) /* in addr word, Ironlake+ only */
+ 
++
++/*
++ * Reset registers
++ */
++#define DEBUG_RESET_I830		0x6070
++#define  DEBUG_RESET_FULL		(1<<7)
++#define  DEBUG_RESET_RENDER		(1<<8)
++#define  DEBUG_RESET_DISPLAY		(1<<9)
++
++
+ /*
+  * Fence registers
+  */
+@@ -283,6 +260,17 @@
+ #define PRB0_HEAD	0x02034
+ #define PRB0_START	0x02038
+ #define PRB0_CTL	0x0203c
++#define RENDER_RING_BASE	0x02000
++#define BSD_RING_BASE		0x04000
++#define GEN6_BSD_RING_BASE	0x12000
++#define BLT_RING_BASE		0x22000
++#define RING_TAIL(base)		((base)+0x30)
++#define RING_HEAD(base)		((base)+0x34)
++#define RING_START(base)	((base)+0x38)
++#define RING_CTL(base)		((base)+0x3c)
++#define RING_HWS_PGA(base)	((base)+0x80)
++#define RING_HWS_PGA_GEN6(base)	((base)+0x2080)
++#define RING_ACTHD(base)	((base)+0x74)
+ #define   TAIL_ADDR		0x001FFFF8
+ #define   HEAD_WRAP_COUNT	0xFFE00000
+ #define   HEAD_WRAP_ONE		0x00200000
+@@ -295,6 +283,8 @@
+ #define   RING_VALID_MASK	0x00000001
+ #define   RING_VALID		0x00000001
+ #define   RING_INVALID		0x00000000
++#define   RING_WAIT_I8XX	(1<<0) /* gen2, PRBx_HEAD */
++#define   RING_WAIT		(1<<11) /* gen3+, PRBx_CTL */
+ #define PRB1_TAIL	0x02040 /* 915+ only */
+ #define PRB1_HEAD	0x02044 /* 915+ only */
+ #define PRB1_START	0x02048 /* 915+ only */
+@@ -306,7 +296,6 @@
+ #define INSTDONE1	0x0207c /* 965+ only */
+ #define ACTHD_I965	0x02074
+ #define HWS_PGA		0x02080
+-#define HWS_PGA_GEN6	0x04080
+ #define HWS_ADDRESS_MASK	0xfffff000
+ #define HWS_START_ADDRESS_SHIFT	4
+ #define PWRCTXA		0x2088 /* 965GM+ only */
+@@ -464,17 +453,17 @@
+ #define   GEN6_BLITTER_COMMAND_PARSER_MASTER_ERROR	(1 << 25)
+ #define   GEN6_BLITTER_SYNC_STATUS			(1 << 24)
+ #define   GEN6_BLITTER_USER_INTERRUPT			(1 << 22)
+-/*
+- * BSD (bit stream decoder instruction and interrupt control register defines
+- * (G4X and Ironlake only)
+- */
+ 
+-#define BSD_RING_TAIL          0x04030
+-#define BSD_RING_HEAD          0x04034
+-#define BSD_RING_START         0x04038
+-#define BSD_RING_CTL           0x0403c
+-#define BSD_RING_ACTHD         0x04074
+-#define BSD_HWS_PGA            0x04080
++#define GEN6_BSD_SLEEP_PSMI_CONTROL	0x12050
++#define   GEN6_BSD_SLEEP_PSMI_CONTROL_RC_ILDL_MESSAGE_MODIFY_MASK	(1 << 16)
++#define   GEN6_BSD_SLEEP_PSMI_CONTROL_RC_ILDL_MESSAGE_DISABLE		(1 << 0)
++#define   GEN6_BSD_SLEEP_PSMI_CONTROL_RC_ILDL_MESSAGE_ENABLE		0
++#define   GEN6_BSD_SLEEP_PSMI_CONTROL_IDLE_INDICATOR			(1 << 3)
++
++#define GEN6_BSD_IMR			0x120a8
++#define   GEN6_BSD_IMR_USER_INTERRUPT	(1 << 12)
++
++#define GEN6_BSD_RNCID			0x12198
+ 
+ /*
+  * Framebuffer compression (915+ only)
+@@ -579,12 +568,51 @@
+ # define GPIO_DATA_VAL_IN		(1 << 12)
+ # define GPIO_DATA_PULLUP_DISABLE	(1 << 13)
+ 
+-#define GMBUS0			0x5100
+-#define GMBUS1			0x5104
+-#define GMBUS2			0x5108
+-#define GMBUS3			0x510c
+-#define GMBUS4			0x5110
+-#define GMBUS5			0x5120
++#define GMBUS0			0x5100 /* clock/port select */
++#define   GMBUS_RATE_100KHZ	(0<<8)
++#define   GMBUS_RATE_50KHZ	(1<<8)
++#define   GMBUS_RATE_400KHZ	(2<<8) /* reserved on Pineview */
++#define   GMBUS_RATE_1MHZ	(3<<8) /* reserved on Pineview */
++#define   GMBUS_HOLD_EXT	(1<<7) /* 300ns hold time, rsvd on Pineview */
++#define   GMBUS_PORT_DISABLED	0
++#define   GMBUS_PORT_SSC	1
++#define   GMBUS_PORT_VGADDC	2
++#define   GMBUS_PORT_PANEL	3
++#define   GMBUS_PORT_DPC	4 /* HDMIC */
++#define   GMBUS_PORT_DPB	5 /* SDVO, HDMIB */
++				  /* 6 reserved */
++#define   GMBUS_PORT_DPD	7 /* HDMID */
++#define   GMBUS_NUM_PORTS       8
++#define GMBUS1			0x5104 /* command/status */
++#define   GMBUS_SW_CLR_INT	(1<<31)
++#define   GMBUS_SW_RDY		(1<<30)
++#define   GMBUS_ENT		(1<<29) /* enable timeout */
++#define   GMBUS_CYCLE_NONE	(0<<25)
++#define   GMBUS_CYCLE_WAIT	(1<<25)
++#define   GMBUS_CYCLE_INDEX	(2<<25)
++#define   GMBUS_CYCLE_STOP	(4<<25)
++#define   GMBUS_BYTE_COUNT_SHIFT 16
++#define   GMBUS_SLAVE_INDEX_SHIFT 8
++#define   GMBUS_SLAVE_ADDR_SHIFT 1
++#define   GMBUS_SLAVE_READ	(1<<0)
++#define   GMBUS_SLAVE_WRITE	(0<<0)
++#define GMBUS2			0x5108 /* status */
++#define   GMBUS_INUSE		(1<<15)
++#define   GMBUS_HW_WAIT_PHASE	(1<<14)
++#define   GMBUS_STALL_TIMEOUT	(1<<13)
++#define   GMBUS_INT		(1<<12)
++#define   GMBUS_HW_RDY		(1<<11)
++#define   GMBUS_SATOER		(1<<10)
++#define   GMBUS_ACTIVE		(1<<9)
++#define GMBUS3			0x510c /* data buffer bytes 3-0 */
++#define GMBUS4			0x5110 /* interrupt mask (Pineview+) */
++#define   GMBUS_SLAVE_TIMEOUT_EN (1<<4)
++#define   GMBUS_NAK_EN		(1<<3)
++#define   GMBUS_IDLE_EN		(1<<2)
++#define   GMBUS_HW_WAIT_EN	(1<<1)
++#define   GMBUS_HW_RDY_EN	(1<<0)
++#define GMBUS5			0x5120 /* byte index */
++#define   GMBUS_2BYTE_INDEX_EN	(1<<31)
+ 
+ /*
+  * Clock control & power management
+@@ -603,6 +631,7 @@
+ #define   VGA1_PD_P1_MASK	(0x1f << 8)
+ #define DPLL_A	0x06014
+ #define DPLL_B	0x06018
++#define DPLL(pipe) _PIPE(pipe, DPLL_A, DPLL_B)
+ #define   DPLL_VCO_ENABLE		(1 << 31)
+ #define   DPLL_DVO_HIGH_SPEED		(1 << 30)
+ #define   DPLL_SYNCLOCK_ENABLE		(1 << 29)
+@@ -633,31 +662,6 @@
+ #define LVDS			0x61180
+ #define LVDS_ON			(1<<31)
+ 
+-#define ADPA			0x61100
+-#define ADPA_DPMS_MASK		(~(3<<10))
+-#define ADPA_DPMS_ON		(0<<10)
+-#define ADPA_DPMS_SUSPEND	(1<<10)
+-#define ADPA_DPMS_STANDBY	(2<<10)
+-#define ADPA_DPMS_OFF		(3<<10)
+-
+-#define RING_TAIL		0x00
+-#define TAIL_ADDR		0x001FFFF8
+-#define RING_HEAD		0x04
+-#define HEAD_WRAP_COUNT		0xFFE00000
+-#define HEAD_WRAP_ONE		0x00200000
+-#define HEAD_ADDR		0x001FFFFC
+-#define RING_START		0x08
+-#define START_ADDR		0xFFFFF000
+-#define RING_LEN		0x0C
+-#define RING_NR_PAGES		0x001FF000
+-#define RING_REPORT_MASK	0x00000006
+-#define RING_REPORT_64K		0x00000002
+-#define RING_REPORT_128K	0x00000004
+-#define RING_NO_REPORT		0x00000000
+-#define RING_VALID_MASK		0x00000001
+-#define RING_VALID		0x00000001
+-#define RING_INVALID		0x00000000
+-
+ /* Scratch pad debug 0 reg:
+  */
+ #define   DPLL_FPA01_P1_POST_DIV_MASK_I830	0x001f0000
+@@ -736,10 +740,13 @@
+ #define   DPLL_MD_VGA_UDI_MULTIPLIER_MASK	0x0000003f
+ #define   DPLL_MD_VGA_UDI_MULTIPLIER_SHIFT	0
+ #define DPLL_B_MD 0x06020 /* 965+ only */
++#define DPLL_MD(pipe) _PIPE(pipe, DPLL_A_MD, DPLL_B_MD)
+ #define FPA0	0x06040
+ #define FPA1	0x06044
+ #define FPB0	0x06048
+ #define FPB1	0x0604c
++#define FP0(pipe) _PIPE(pipe, FPA0, FPB0)
++#define FP1(pipe) _PIPE(pipe, FPA1, FPB1)
+ #define   FP_N_DIV_MASK		0x003f0000
+ #define   FP_N_PINEVIEW_DIV_MASK	0x00ff0000
+ #define   FP_N_DIV_SHIFT		16
+@@ -760,6 +767,7 @@
+ #define   DPLLA_TEST_M_BYPASS		(1 << 2)
+ #define   DPLLA_INPUT_BUFFER_ENABLE	(1 << 0)
+ #define D_STATE		0x6104
++#define  DSTATE_GFX_RESET_I830			(1<<6)
+ #define  DSTATE_PLL_D3_OFF			(1<<3)
+ #define  DSTATE_GFX_CLOCK_GATING		(1<<1)
+ #define  DSTATE_DOT_CLOCK_GATING		(1<<0)
+@@ -926,6 +934,8 @@
+ #define CLKCFG_MEM_800					(3 << 4)
+ #define CLKCFG_MEM_MASK					(7 << 4)
+ 
++#define TSC1			0x11001
++#define   TSE			(1<<0)
+ #define TR1			0x11006
+ #define TSFS			0x11020
+ #define   TSFS_SLOPE_MASK	0x0000ff00
+@@ -1070,6 +1080,8 @@
+ #define   MEMSTAT_SRC_CTL_STDBY 3
+ #define RCPREVBSYTUPAVG		0x113b8
+ #define RCPREVBSYTDNAVG		0x113bc
++#define PMMISC			0x11214
++#define   MCPPCE_EN		(1<<0) /* enable PM_MSG from PCH->MPC */
+ #define SDEW			0x1124c
+ #define CSIEW0			0x11250
+ #define CSIEW1			0x11254
+@@ -1150,6 +1162,15 @@
+ #define PIPEBSRC	0x6101c
+ #define BCLRPAT_B	0x61020
+ 
++#define HTOTAL(pipe) _PIPE(pipe, HTOTAL_A, HTOTAL_B)
++#define HBLANK(pipe) _PIPE(pipe, HBLANK_A, HBLANK_B)
++#define HSYNC(pipe) _PIPE(pipe, HSYNC_A, HSYNC_B)
++#define VTOTAL(pipe) _PIPE(pipe, VTOTAL_A, VTOTAL_B)
++#define VBLANK(pipe) _PIPE(pipe, VBLANK_A, VBLANK_B)
++#define VSYNC(pipe) _PIPE(pipe, VSYNC_A, VSYNC_B)
++#define PIPESRC(pipe) _PIPE(pipe, PIPEASRC, PIPEBSRC)
++#define BCLRPAT(pipe) _PIPE(pipe, BCLRPAT_A, BCLRPAT_B)
++
+ /* VGA port control */
+ #define ADPA			0x61100
+ #define   ADPA_DAC_ENABLE	(1<<31)
+@@ -1173,6 +1194,7 @@
+ #define   ADPA_DPMS_STANDBY	(2<<10)
+ #define   ADPA_DPMS_OFF		(3<<10)
+ 
++
+ /* Hotplug control (945+ only) */
+ #define PORT_HOTPLUG_EN		0x61110
+ #define   HDMIB_HOTPLUG_INT_EN			(1 << 29)
+@@ -1331,6 +1353,22 @@
+ #define   LVDS_B0B3_POWER_DOWN		(0 << 2)
+ #define   LVDS_B0B3_POWER_UP		(3 << 2)
+ 
++/* Video Data Island Packet control */
++#define VIDEO_DIP_DATA		0x61178
++#define VIDEO_DIP_CTL		0x61170
++#define   VIDEO_DIP_ENABLE		(1 << 31)
++#define   VIDEO_DIP_PORT_B		(1 << 29)
++#define   VIDEO_DIP_PORT_C		(2 << 29)
++#define   VIDEO_DIP_ENABLE_AVI		(1 << 21)
++#define   VIDEO_DIP_ENABLE_VENDOR	(2 << 21)
++#define   VIDEO_DIP_ENABLE_SPD		(8 << 21)
++#define   VIDEO_DIP_SELECT_AVI		(0 << 19)
++#define   VIDEO_DIP_SELECT_VENDOR	(1 << 19)
++#define   VIDEO_DIP_SELECT_SPD		(3 << 19)
++#define   VIDEO_DIP_FREQ_ONCE		(0 << 16)
++#define   VIDEO_DIP_FREQ_VSYNC		(1 << 16)
++#define   VIDEO_DIP_FREQ_2VSYNC		(2 << 16)
++
+ /* Panel power sequencing */
+ #define PP_STATUS	0x61200
+ #define   PP_ON		(1 << 31)
+@@ -1346,6 +1384,9 @@
+ #define   PP_SEQUENCE_ON	(1 << 28)
+ #define   PP_SEQUENCE_OFF	(2 << 28)
+ #define   PP_SEQUENCE_MASK	0x30000000
++#define   PP_CYCLE_DELAY_ACTIVE	(1 << 27)
++#define   PP_SEQUENCE_STATE_ON_IDLE (1 << 3)
++#define   PP_SEQUENCE_STATE_MASK 0x0000000f
+ #define PP_CONTROL	0x61204
+ #define   POWER_TARGET_ON	(1 << 0)
+ #define PP_ON_DELAYS	0x61208
+@@ -1481,6 +1522,7 @@
+ # define TV_TEST_MODE_MASK		(7 << 0)
+ 
+ #define TV_DAC			0x68004
++# define TV_DAC_SAVE		0x00ffff00
+ /**
+  * Reports that DAC state change logic has reported change (RO).
+  *
+@@ -2075,29 +2117,35 @@
+ 
+ /* Display & cursor control */
+ 
+-/* dithering flag on Ironlake */
+-#define PIPE_ENABLE_DITHER		(1 << 4)
+-#define PIPE_DITHER_TYPE_MASK		(3 << 2)
+-#define PIPE_DITHER_TYPE_SPATIAL	(0 << 2)
+-#define PIPE_DITHER_TYPE_ST01		(1 << 2)
+ /* Pipe A */
+ #define PIPEADSL		0x70000
+-#define   DSL_LINEMASK	       	0x00000fff
++#define   DSL_LINEMASK		0x00000fff
+ #define PIPEACONF		0x70008
+-#define   PIPEACONF_ENABLE	(1<<31)
+-#define   PIPEACONF_DISABLE	0
+-#define   PIPEACONF_DOUBLE_WIDE	(1<<30)
++#define   PIPECONF_ENABLE	(1<<31)
++#define   PIPECONF_DISABLE	0
++#define   PIPECONF_DOUBLE_WIDE	(1<<30)
+ #define   I965_PIPECONF_ACTIVE	(1<<30)
+-#define   PIPEACONF_SINGLE_WIDE	0
+-#define   PIPEACONF_PIPE_UNLOCKED 0
+-#define   PIPEACONF_PIPE_LOCKED	(1<<25)
+-#define   PIPEACONF_PALETTE	0
+-#define   PIPEACONF_GAMMA		(1<<24)
++#define   PIPECONF_SINGLE_WIDE	0
++#define   PIPECONF_PIPE_UNLOCKED 0
++#define   PIPECONF_PIPE_LOCKED	(1<<25)
++#define   PIPECONF_PALETTE	0
++#define   PIPECONF_GAMMA		(1<<24)
+ #define   PIPECONF_FORCE_BORDER	(1<<25)
+ #define   PIPECONF_PROGRESSIVE	(0 << 21)
+ #define   PIPECONF_INTERLACE_W_FIELD_INDICATION	(6 << 21)
+ #define   PIPECONF_INTERLACE_FIELD_0_ONLY		(7 << 21)
+ #define   PIPECONF_CXSR_DOWNCLOCK	(1<<16)
++#define   PIPECONF_BPP_MASK	(0x000000e0)
++#define   PIPECONF_BPP_8	(0<<5)
++#define   PIPECONF_BPP_10	(1<<5)
++#define   PIPECONF_BPP_6	(2<<5)
++#define   PIPECONF_BPP_12	(3<<5)
++#define   PIPECONF_DITHER_EN	(1<<4)
++#define   PIPECONF_DITHER_TYPE_MASK (0x0000000c)
++#define   PIPECONF_DITHER_TYPE_SP (0<<2)
++#define   PIPECONF_DITHER_TYPE_ST1 (1<<2)
++#define   PIPECONF_DITHER_TYPE_ST2 (2<<2)
++#define   PIPECONF_DITHER_TYPE_TEMP (3<<2)
+ #define PIPEASTAT		0x70024
+ #define   PIPE_FIFO_UNDERRUN_STATUS		(1UL<<31)
+ #define   PIPE_CRC_ERROR_ENABLE			(1UL<<29)
+@@ -2128,12 +2176,15 @@
+ #define   PIPE_START_VBLANK_INTERRUPT_STATUS	(1UL<<2) /* 965 or later */
+ #define   PIPE_VBLANK_INTERRUPT_STATUS		(1UL<<1)
+ #define   PIPE_OVERLAY_UPDATED_STATUS		(1UL<<0)
+-#define   PIPE_BPC_MASK 			(7 << 5) /* Ironlake */
++#define   PIPE_BPC_MASK				(7 << 5) /* Ironlake */
+ #define   PIPE_8BPC				(0 << 5)
+ #define   PIPE_10BPC				(1 << 5)
+ #define   PIPE_6BPC				(2 << 5)
+ #define   PIPE_12BPC				(3 << 5)
+ 
++#define PIPECONF(pipe) _PIPE(pipe, PIPEACONF, PIPEBCONF)
++#define PIPEDSL(pipe)  _PIPE(pipe, PIPEADSL, PIPEBDSL)
++
+ #define DSPARB			0x70030
+ #define   DSPARB_CSTART_MASK	(0x7f << 7)
+ #define   DSPARB_CSTART_SHIFT	7
+@@ -2206,8 +2257,8 @@
+ #define  WM1_LP_SR_EN		(1<<31)
+ #define  WM1_LP_LATENCY_SHIFT	24
+ #define  WM1_LP_LATENCY_MASK	(0x7f<<24)
+-#define  WM1_LP_FBC_LP1_MASK	(0xf<<20)
+-#define  WM1_LP_FBC_LP1_SHIFT	20
++#define  WM1_LP_FBC_MASK	(0xf<<20)
++#define  WM1_LP_FBC_SHIFT	20
+ #define  WM1_LP_SR_MASK		(0x1ff<<8)
+ #define  WM1_LP_SR_SHIFT	8
+ #define  WM1_LP_CURSOR_MASK	(0x3f)
+@@ -2333,6 +2384,14 @@
+ #define DSPASURF		0x7019C /* 965+ only */
+ #define DSPATILEOFF		0x701A4 /* 965+ only */
+ 
++#define DSPCNTR(plane) _PIPE(plane, DSPACNTR, DSPBCNTR)
++#define DSPADDR(plane) _PIPE(plane, DSPAADDR, DSPBADDR)
++#define DSPSTRIDE(plane) _PIPE(plane, DSPASTRIDE, DSPBSTRIDE)
++#define DSPPOS(plane) _PIPE(plane, DSPAPOS, DSPBPOS)
++#define DSPSIZE(plane) _PIPE(plane, DSPASIZE, DSPBSIZE)
++#define DSPSURF(plane) _PIPE(plane, DSPASURF, DSPBSURF)
++#define DSPTILEOFF(plane) _PIPE(plane, DSPATILEOFF, DSPBTILEOFF)
++
+ /* VBIOS flags */
+ #define SWF00			0x71410
+ #define SWF01			0x71414
+@@ -2397,6 +2456,7 @@
+ #define  RR_HW_HIGH_POWER_FRAMES_MASK   0xff00
+ 
+ #define FDI_PLL_BIOS_0  0x46000
++#define  FDI_PLL_FB_CLOCK_MASK  0xff
+ #define FDI_PLL_BIOS_1  0x46004
+ #define FDI_PLL_BIOS_2  0x46008
+ #define DISPLAY_PORT_PLL_BIOS_0         0x4600c
+@@ -2420,46 +2480,47 @@
+ #define PIPEA_DATA_M1           0x60030
+ #define  TU_SIZE(x)             (((x)-1) << 25) /* default size 64 */
+ #define  TU_SIZE_MASK           0x7e000000
+-#define  PIPEA_DATA_M1_OFFSET   0
++#define  PIPE_DATA_M1_OFFSET    0
+ #define PIPEA_DATA_N1           0x60034
+-#define  PIPEA_DATA_N1_OFFSET   0
++#define  PIPE_DATA_N1_OFFSET    0
+ 
+ #define PIPEA_DATA_M2           0x60038
+-#define  PIPEA_DATA_M2_OFFSET   0
++#define  PIPE_DATA_M2_OFFSET    0
+ #define PIPEA_DATA_N2           0x6003c
+-#define  PIPEA_DATA_N2_OFFSET   0
++#define  PIPE_DATA_N2_OFFSET    0
+ 
+ #define PIPEA_LINK_M1           0x60040
+-#define  PIPEA_LINK_M1_OFFSET   0
++#define  PIPE_LINK_M1_OFFSET    0
+ #define PIPEA_LINK_N1           0x60044
+-#define  PIPEA_LINK_N1_OFFSET   0
++#define  PIPE_LINK_N1_OFFSET    0
+ 
+ #define PIPEA_LINK_M2           0x60048
+-#define  PIPEA_LINK_M2_OFFSET   0
++#define  PIPE_LINK_M2_OFFSET    0
+ #define PIPEA_LINK_N2           0x6004c
+-#define  PIPEA_LINK_N2_OFFSET   0
++#define  PIPE_LINK_N2_OFFSET    0
+ 
+ /* PIPEB timing regs are same start from 0x61000 */
+ 
+ #define PIPEB_DATA_M1           0x61030
+-#define  PIPEB_DATA_M1_OFFSET   0
+ #define PIPEB_DATA_N1           0x61034
+-#define  PIPEB_DATA_N1_OFFSET   0
+ 
+ #define PIPEB_DATA_M2           0x61038
+-#define  PIPEB_DATA_M2_OFFSET   0
+ #define PIPEB_DATA_N2           0x6103c
+-#define  PIPEB_DATA_N2_OFFSET   0
+ 
+ #define PIPEB_LINK_M1           0x61040
+-#define  PIPEB_LINK_M1_OFFSET   0
+ #define PIPEB_LINK_N1           0x61044
+-#define  PIPEB_LINK_N1_OFFSET   0
+ 
+ #define PIPEB_LINK_M2           0x61048
+-#define  PIPEB_LINK_M2_OFFSET   0
+ #define PIPEB_LINK_N2           0x6104c
+-#define  PIPEB_LINK_N2_OFFSET   0
++
++#define PIPE_DATA_M1(pipe) _PIPE(pipe, PIPEA_DATA_M1, PIPEB_DATA_M1)
++#define PIPE_DATA_N1(pipe) _PIPE(pipe, PIPEA_DATA_N1, PIPEB_DATA_N1)
++#define PIPE_DATA_M2(pipe) _PIPE(pipe, PIPEA_DATA_M2, PIPEB_DATA_M2)
++#define PIPE_DATA_N2(pipe) _PIPE(pipe, PIPEA_DATA_N2, PIPEB_DATA_N2)
++#define PIPE_LINK_M1(pipe) _PIPE(pipe, PIPEA_LINK_M1, PIPEB_LINK_M1)
++#define PIPE_LINK_N1(pipe) _PIPE(pipe, PIPEA_LINK_N1, PIPEB_LINK_N1)
++#define PIPE_LINK_M2(pipe) _PIPE(pipe, PIPEA_LINK_M2, PIPEB_LINK_M2)
++#define PIPE_LINK_N2(pipe) _PIPE(pipe, PIPEA_LINK_N2, PIPEB_LINK_N2)
+ 
+ /* CPU panel fitter */
+ #define PFA_CTL_1               0x68080
+@@ -2516,7 +2577,8 @@
+ #define GT_SYNC_STATUS          (1 << 2)
+ #define GT_USER_INTERRUPT       (1 << 0)
+ #define GT_BSD_USER_INTERRUPT   (1 << 5)
+-
++#define GT_GEN6_BSD_USER_INTERRUPT	(1 << 12)
++#define GT_BLT_USER_INTERRUPT	(1 << 22)
+ 
+ #define GTISR   0x44010
+ #define GTIMR   0x44014
+@@ -2551,6 +2613,10 @@
+ #define SDE_PORTD_HOTPLUG_CPT	(1 << 23)
+ #define SDE_PORTC_HOTPLUG_CPT	(1 << 22)
+ #define SDE_PORTB_HOTPLUG_CPT	(1 << 21)
++#define SDE_HOTPLUG_MASK_CPT	(SDE_CRT_HOTPLUG_CPT |		\
++				 SDE_PORTD_HOTPLUG_CPT |	\
++				 SDE_PORTC_HOTPLUG_CPT |	\
++				 SDE_PORTB_HOTPLUG_CPT)
+ 
+ #define SDEISR  0xc4000
+ #define SDEIMR  0xc4004
+@@ -2600,11 +2666,14 @@
+ 
+ #define PCH_DPLL_A              0xc6014
+ #define PCH_DPLL_B              0xc6018
++#define PCH_DPLL(pipe) _PIPE(pipe, PCH_DPLL_A, PCH_DPLL_B)
+ 
+ #define PCH_FPA0                0xc6040
+ #define PCH_FPA1                0xc6044
+ #define PCH_FPB0                0xc6048
+ #define PCH_FPB1                0xc604c
++#define PCH_FP0(pipe) _PIPE(pipe, PCH_FPA0, PCH_FPB0)
++#define PCH_FP1(pipe) _PIPE(pipe, PCH_FPA1, PCH_FPB1)
+ 
+ #define PCH_DPLL_TEST           0xc606c
+ 
+@@ -2690,6 +2759,13 @@
+ #define TRANS_VBLANK_B          0xe1010
+ #define TRANS_VSYNC_B           0xe1014
+ 
++#define TRANS_HTOTAL(pipe) _PIPE(pipe, TRANS_HTOTAL_A, TRANS_HTOTAL_B)
++#define TRANS_HBLANK(pipe) _PIPE(pipe, TRANS_HBLANK_A, TRANS_HBLANK_B)
++#define TRANS_HSYNC(pipe) _PIPE(pipe, TRANS_HSYNC_A, TRANS_HSYNC_B)
++#define TRANS_VTOTAL(pipe) _PIPE(pipe, TRANS_VTOTAL_A, TRANS_VTOTAL_B)
++#define TRANS_VBLANK(pipe) _PIPE(pipe, TRANS_VBLANK_A, TRANS_VBLANK_B)
++#define TRANS_VSYNC(pipe) _PIPE(pipe, TRANS_VSYNC_A, TRANS_VSYNC_B)
++
+ #define TRANSB_DATA_M1          0xe1030
+ #define TRANSB_DATA_N1          0xe1034
+ #define TRANSB_DATA_M2          0xe1038
+@@ -2701,6 +2777,7 @@
+ 
+ #define TRANSACONF              0xf0008
+ #define TRANSBCONF              0xf1008
++#define TRANSCONF(plane) _PIPE(plane, TRANSACONF, TRANSBCONF)
+ #define  TRANS_DISABLE          (0<<31)
+ #define  TRANS_ENABLE           (1<<31)
+ #define  TRANS_STATE_MASK       (1<<30)
+@@ -2721,10 +2798,15 @@
+ #define FDI_RXA_CHICKEN         0xc200c
+ #define FDI_RXB_CHICKEN         0xc2010
+ #define  FDI_RX_PHASE_SYNC_POINTER_ENABLE       (1)
++#define FDI_RX_CHICKEN(pipe) _PIPE(pipe, FDI_RXA_CHICKEN, FDI_RXB_CHICKEN)
++
++#define SOUTH_DSPCLK_GATE_D	0xc2020
++#define  PCH_DPLSUNIT_CLOCK_GATE_DISABLE (1<<29)
+ 
+ /* CPU: FDI_TX */
+ #define FDI_TXA_CTL             0x60100
+ #define FDI_TXB_CTL             0x61100
++#define FDI_TX_CTL(pipe) _PIPE(pipe, FDI_TXA_CTL, FDI_TXB_CTL)
+ #define  FDI_TX_DISABLE         (0<<31)
+ #define  FDI_TX_ENABLE          (1<<31)
+ #define  FDI_LINK_TRAIN_PATTERN_1       (0<<28)
+@@ -2766,8 +2848,8 @@
+ /* FDI_RX, FDI_X is hard-wired to Transcoder_X */
+ #define FDI_RXA_CTL             0xf000c
+ #define FDI_RXB_CTL             0xf100c
++#define FDI_RX_CTL(pipe) _PIPE(pipe, FDI_RXA_CTL, FDI_RXB_CTL)
+ #define  FDI_RX_ENABLE          (1<<31)
+-#define  FDI_RX_DISABLE         (0<<31)
+ /* train, dp width same as FDI_TX */
+ #define  FDI_DP_PORT_WIDTH_X8           (7<<19)
+ #define  FDI_8BPC                       (0<<16)
+@@ -2782,8 +2864,7 @@
+ #define  FDI_FS_ERR_REPORT_ENABLE       (1<<9)
+ #define  FDI_FE_ERR_REPORT_ENABLE       (1<<8)
+ #define  FDI_RX_ENHANCE_FRAME_ENABLE    (1<<6)
+-#define  FDI_SEL_RAWCLK                 (0<<4)
+-#define  FDI_SEL_PCDCLK                 (1<<4)
++#define  FDI_PCDCLK	                (1<<4)
+ /* CPT */
+ #define  FDI_AUTO_TRAINING			(1<<10)
+ #define  FDI_LINK_TRAIN_PATTERN_1_CPT		(0<<8)
+@@ -2798,6 +2879,9 @@
+ #define FDI_RXA_TUSIZE2         0xf0038
+ #define FDI_RXB_TUSIZE1         0xf1030
+ #define FDI_RXB_TUSIZE2         0xf1038
++#define FDI_RX_MISC(pipe) _PIPE(pipe, FDI_RXA_MISC, FDI_RXB_MISC)
++#define FDI_RX_TUSIZE1(pipe) _PIPE(pipe, FDI_RXA_TUSIZE1, FDI_RXB_TUSIZE1)
++#define FDI_RX_TUSIZE2(pipe) _PIPE(pipe, FDI_RXA_TUSIZE2, FDI_RXB_TUSIZE2)
+ 
+ /* FDI_RX interrupt register format */
+ #define FDI_RX_INTER_LANE_ALIGN         (1<<10)
+@@ -2816,6 +2900,8 @@
+ #define FDI_RXA_IMR             0xf0018
+ #define FDI_RXB_IIR             0xf1014
+ #define FDI_RXB_IMR             0xf1018
++#define FDI_RX_IIR(pipe) _PIPE(pipe, FDI_RXA_IIR, FDI_RXB_IIR)
++#define FDI_RX_IMR(pipe) _PIPE(pipe, FDI_RXA_IMR, FDI_RXB_IMR)
+ 
+ #define FDI_PLL_CTL_1           0xfe000
+ #define FDI_PLL_CTL_2           0xfe004
+@@ -2935,6 +3021,7 @@
+ #define TRANS_DP_CTL_A		0xe0300
+ #define TRANS_DP_CTL_B		0xe1300
+ #define TRANS_DP_CTL_C		0xe2300
++#define TRANS_DP_CTL(pipe)	(TRANS_DP_CTL_A + (pipe) * 0x01000)
+ #define  TRANS_DP_OUTPUT_ENABLE	(1<<31)
+ #define  TRANS_DP_PORT_SEL_B	(0<<29)
+ #define  TRANS_DP_PORT_SEL_C	(1<<29)
+diff --git a/drivers/gpu/drm/i915/i915_suspend.c b/drivers/gpu/drm/i915/i915_suspend.c
+index 31f0858..454c064 100644
+--- a/drivers/gpu/drm/i915/i915_suspend.c
++++ b/drivers/gpu/drm/i915/i915_suspend.c
+@@ -256,7 +256,7 @@ static void i915_save_modeset_reg(struct drm_device *dev)
+ 		dev_priv->saveFPA1 = I915_READ(FPA1);
+ 		dev_priv->saveDPLL_A = I915_READ(DPLL_A);
+ 	}
+-	if (IS_I965G(dev) && !HAS_PCH_SPLIT(dev))
++	if (INTEL_INFO(dev)->gen >= 4 && !HAS_PCH_SPLIT(dev))
+ 		dev_priv->saveDPLL_A_MD = I915_READ(DPLL_A_MD);
+ 	dev_priv->saveHTOTAL_A = I915_READ(HTOTAL_A);
+ 	dev_priv->saveHBLANK_A = I915_READ(HBLANK_A);
+@@ -294,7 +294,7 @@ static void i915_save_modeset_reg(struct drm_device *dev)
+ 	dev_priv->saveDSPASIZE = I915_READ(DSPASIZE);
+ 	dev_priv->saveDSPAPOS = I915_READ(DSPAPOS);
+ 	dev_priv->saveDSPAADDR = I915_READ(DSPAADDR);
+-	if (IS_I965G(dev)) {
++	if (INTEL_INFO(dev)->gen >= 4) {
+ 		dev_priv->saveDSPASURF = I915_READ(DSPASURF);
+ 		dev_priv->saveDSPATILEOFF = I915_READ(DSPATILEOFF);
+ 	}
+@@ -313,7 +313,7 @@ static void i915_save_modeset_reg(struct drm_device *dev)
+ 		dev_priv->saveFPB1 = I915_READ(FPB1);
+ 		dev_priv->saveDPLL_B = I915_READ(DPLL_B);
+ 	}
+-	if (IS_I965G(dev) && !HAS_PCH_SPLIT(dev))
++	if (INTEL_INFO(dev)->gen >= 4 && !HAS_PCH_SPLIT(dev))
+ 		dev_priv->saveDPLL_B_MD = I915_READ(DPLL_B_MD);
+ 	dev_priv->saveHTOTAL_B = I915_READ(HTOTAL_B);
+ 	dev_priv->saveHBLANK_B = I915_READ(HBLANK_B);
+@@ -351,7 +351,7 @@ static void i915_save_modeset_reg(struct drm_device *dev)
+ 	dev_priv->saveDSPBSIZE = I915_READ(DSPBSIZE);
+ 	dev_priv->saveDSPBPOS = I915_READ(DSPBPOS);
+ 	dev_priv->saveDSPBADDR = I915_READ(DSPBADDR);
+-	if (IS_I965GM(dev) || IS_GM45(dev)) {
++	if (INTEL_INFO(dev)->gen >= 4) {
+ 		dev_priv->saveDSPBSURF = I915_READ(DSPBSURF);
+ 		dev_priv->saveDSPBTILEOFF = I915_READ(DSPBTILEOFF);
+ 	}
+@@ -404,7 +404,7 @@ static void i915_restore_modeset_reg(struct drm_device *dev)
+ 	I915_WRITE(dpll_a_reg, dev_priv->saveDPLL_A);
+ 	POSTING_READ(dpll_a_reg);
+ 	udelay(150);
+-	if (IS_I965G(dev) && !HAS_PCH_SPLIT(dev)) {
++	if (INTEL_INFO(dev)->gen >= 4 && !HAS_PCH_SPLIT(dev)) {
+ 		I915_WRITE(DPLL_A_MD, dev_priv->saveDPLL_A_MD);
+ 		POSTING_READ(DPLL_A_MD);
+ 	}
+@@ -448,7 +448,7 @@ static void i915_restore_modeset_reg(struct drm_device *dev)
+ 	I915_WRITE(PIPEASRC, dev_priv->savePIPEASRC);
+ 	I915_WRITE(DSPAADDR, dev_priv->saveDSPAADDR);
+ 	I915_WRITE(DSPASTRIDE, dev_priv->saveDSPASTRIDE);
+-	if (IS_I965G(dev)) {
++	if (INTEL_INFO(dev)->gen >= 4) {
+ 		I915_WRITE(DSPASURF, dev_priv->saveDSPASURF);
+ 		I915_WRITE(DSPATILEOFF, dev_priv->saveDSPATILEOFF);
+ 	}
+@@ -473,7 +473,7 @@ static void i915_restore_modeset_reg(struct drm_device *dev)
+ 	I915_WRITE(dpll_b_reg, dev_priv->saveDPLL_B);
+ 	POSTING_READ(dpll_b_reg);
+ 	udelay(150);
+-	if (IS_I965G(dev) && !HAS_PCH_SPLIT(dev)) {
++	if (INTEL_INFO(dev)->gen >= 4 && !HAS_PCH_SPLIT(dev)) {
+ 		I915_WRITE(DPLL_B_MD, dev_priv->saveDPLL_B_MD);
+ 		POSTING_READ(DPLL_B_MD);
+ 	}
+@@ -517,7 +517,7 @@ static void i915_restore_modeset_reg(struct drm_device *dev)
+ 	I915_WRITE(PIPEBSRC, dev_priv->savePIPEBSRC);
+ 	I915_WRITE(DSPBADDR, dev_priv->saveDSPBADDR);
+ 	I915_WRITE(DSPBSTRIDE, dev_priv->saveDSPBSTRIDE);
+-	if (IS_I965G(dev)) {
++	if (INTEL_INFO(dev)->gen >= 4) {
+ 		I915_WRITE(DSPBSURF, dev_priv->saveDSPBSURF);
+ 		I915_WRITE(DSPBTILEOFF, dev_priv->saveDSPBTILEOFF);
+ 	}
+@@ -550,7 +550,7 @@ void i915_save_display(struct drm_device *dev)
+ 	dev_priv->saveCURBCNTR = I915_READ(CURBCNTR);
+ 	dev_priv->saveCURBPOS = I915_READ(CURBPOS);
+ 	dev_priv->saveCURBBASE = I915_READ(CURBBASE);
+-	if (!IS_I9XX(dev))
++	if (IS_GEN2(dev))
+ 		dev_priv->saveCURSIZE = I915_READ(CURSIZE);
+ 
+ 	/* CRT state */
+@@ -573,7 +573,7 @@ void i915_save_display(struct drm_device *dev)
+ 		dev_priv->savePFIT_PGM_RATIOS = I915_READ(PFIT_PGM_RATIOS);
+ 		dev_priv->saveBLC_PWM_CTL = I915_READ(BLC_PWM_CTL);
+ 		dev_priv->saveBLC_HIST_CTL = I915_READ(BLC_HIST_CTL);
+-		if (IS_I965G(dev))
++		if (INTEL_INFO(dev)->gen >= 4)
+ 			dev_priv->saveBLC_PWM_CTL2 = I915_READ(BLC_PWM_CTL2);
+ 		if (IS_MOBILE(dev) && !IS_I830(dev))
+ 			dev_priv->saveLVDS = I915_READ(LVDS);
+@@ -664,7 +664,7 @@ void i915_restore_display(struct drm_device *dev)
+ 	I915_WRITE(CURBPOS, dev_priv->saveCURBPOS);
+ 	I915_WRITE(CURBCNTR, dev_priv->saveCURBCNTR);
+ 	I915_WRITE(CURBBASE, dev_priv->saveCURBBASE);
+-	if (!IS_I9XX(dev))
++	if (IS_GEN2(dev))
+ 		I915_WRITE(CURSIZE, dev_priv->saveCURSIZE);
+ 
+ 	/* CRT state */
+@@ -674,7 +674,7 @@ void i915_restore_display(struct drm_device *dev)
+ 		I915_WRITE(ADPA, dev_priv->saveADPA);
+ 
+ 	/* LVDS state */
+-	if (IS_I965G(dev) && !HAS_PCH_SPLIT(dev))
++	if (INTEL_INFO(dev)->gen >= 4 && !HAS_PCH_SPLIT(dev))
+ 		I915_WRITE(BLC_PWM_CTL2, dev_priv->saveBLC_PWM_CTL2);
+ 
+ 	if (HAS_PCH_SPLIT(dev)) {
+@@ -862,8 +862,10 @@ int i915_restore_state(struct drm_device *dev)
+ 	/* Clock gating state */
+ 	intel_init_clock_gating(dev);
+ 
+-	if (HAS_PCH_SPLIT(dev))
++	if (HAS_PCH_SPLIT(dev)) {
+ 		ironlake_enable_drps(dev);
++		intel_init_emon(dev);
++	}
+ 
+ 	/* Cache mode state */
+ 	I915_WRITE (CACHE_MODE_0, dev_priv->saveCACHE_MODE_0 | 0xffff0000);
+@@ -878,9 +880,7 @@ int i915_restore_state(struct drm_device *dev)
+ 	for (i = 0; i < 3; i++)
+ 		I915_WRITE(SWF30 + (i << 2), dev_priv->saveSWF2[i]);
+ 
+-	/* I2C state */
+-	intel_i2c_reset_gmbus(dev);
++	intel_i2c_reset(dev);
+ 
+ 	return 0;
+ }
+-
+diff --git a/drivers/gpu/drm/i915/intel_acpi.c b/drivers/gpu/drm/i915/intel_acpi.c
+new file mode 100644
+index 0000000..65c88f9
+--- /dev/null
++++ b/drivers/gpu/drm/i915/intel_acpi.c
+@@ -0,0 +1,286 @@
++/*
++ * Intel ACPI functions
++ *
++ * _DSM related code stolen from nouveau_acpi.c.
++ */
++#include <linux/pci.h>
++#include <linux/acpi.h>
++#include <linux/vga_switcheroo.h>
++#include <acpi/acpi_drivers.h>
++
++#include "drmP.h"
++
++#define INTEL_DSM_REVISION_ID 1 /* For Calpella anyway... */
++
++#define INTEL_DSM_FN_SUPPORTED_FUNCTIONS 0 /* No args */
++#define INTEL_DSM_FN_PLATFORM_MUX_INFO 1 /* No args */
++
++static struct intel_dsm_priv {
++	acpi_handle dhandle;
++} intel_dsm_priv;
++
++static const u8 intel_dsm_guid[] = {
++	0xd3, 0x73, 0xd8, 0x7e,
++	0xd0, 0xc2,
++	0x4f, 0x4e,
++	0xa8, 0x54,
++	0x0f, 0x13, 0x17, 0xb0, 0x1c, 0x2c
++};
++
++static int intel_dsm(acpi_handle handle, int func, int arg)
++{
++	struct acpi_buffer output = { ACPI_ALLOCATE_BUFFER, NULL };
++	struct acpi_object_list input;
++	union acpi_object params[4];
++	union acpi_object *obj;
++	u32 result;
++	int ret = 0;
++
++	input.count = 4;
++	input.pointer = params;
++	params[0].type = ACPI_TYPE_BUFFER;
++	params[0].buffer.length = sizeof(intel_dsm_guid);
++	params[0].buffer.pointer = (char *)intel_dsm_guid;
++	params[1].type = ACPI_TYPE_INTEGER;
++	params[1].integer.value = INTEL_DSM_REVISION_ID;
++	params[2].type = ACPI_TYPE_INTEGER;
++	params[2].integer.value = func;
++	params[3].type = ACPI_TYPE_INTEGER;
++	params[3].integer.value = arg;
++
++	ret = acpi_evaluate_object(handle, "_DSM", &input, &output);
++	if (ret) {
++		DRM_DEBUG_DRIVER("failed to evaluate _DSM: %d\n", ret);
++		return ret;
++	}
++
++	obj = (union acpi_object *)output.pointer;
++
++	result = 0;
++	switch (obj->type) {
++	case ACPI_TYPE_INTEGER:
++		result = obj->integer.value;
++		break;
++
++	case ACPI_TYPE_BUFFER:
++		if (obj->buffer.length == 4) {
++			result = (obj->buffer.pointer[0] |
++				(obj->buffer.pointer[1] <<  8) |
++				(obj->buffer.pointer[2] << 16) |
++				(obj->buffer.pointer[3] << 24));
++			break;
++		}
++	default:
++		ret = -EINVAL;
++		break;
++	}
++	if (result == 0x80000002)
++		ret = -ENODEV;
++
++	kfree(output.pointer);
++	return ret;
++}
++
++static char *intel_dsm_port_name(u8 id)
++{
++	switch (id) {
++	case 0:
++		return "Reserved";
++	case 1:
++		return "Analog VGA";
++	case 2:
++		return "LVDS";
++	case 3:
++		return "Reserved";
++	case 4:
++		return "HDMI/DVI_B";
++	case 5:
++		return "HDMI/DVI_C";
++	case 6:
++		return "HDMI/DVI_D";
++	case 7:
++		return "DisplayPort_A";
++	case 8:
++		return "DisplayPort_B";
++	case 9:
++		return "DisplayPort_C";
++	case 0xa:
++		return "DisplayPort_D";
++	case 0xb:
++	case 0xc:
++	case 0xd:
++		return "Reserved";
++	case 0xe:
++		return "WiDi";
++	default:
++		return "bad type";
++	}
++}
++
++static char *intel_dsm_mux_type(u8 type)
++{
++	switch (type) {
++	case 0:
++		return "unknown";
++	case 1:
++		return "No MUX, iGPU only";
++	case 2:
++		return "No MUX, dGPU only";
++	case 3:
++		return "MUXed between iGPU and dGPU";
++	default:
++		return "bad type";
++	}
++}
++
++static void intel_dsm_platform_mux_info(void)
++{
++	struct acpi_buffer output = { ACPI_ALLOCATE_BUFFER, NULL };
++	struct acpi_object_list input;
++	union acpi_object params[4];
++	union acpi_object *pkg;
++	int i, ret;
++
++	input.count = 4;
++	input.pointer = params;
++	params[0].type = ACPI_TYPE_BUFFER;
++	params[0].buffer.length = sizeof(intel_dsm_guid);
++	params[0].buffer.pointer = (char *)intel_dsm_guid;
++	params[1].type = ACPI_TYPE_INTEGER;
++	params[1].integer.value = INTEL_DSM_REVISION_ID;
++	params[2].type = ACPI_TYPE_INTEGER;
++	params[2].integer.value = INTEL_DSM_FN_PLATFORM_MUX_INFO;
++	params[3].type = ACPI_TYPE_INTEGER;
++	params[3].integer.value = 0;
++
++	ret = acpi_evaluate_object(intel_dsm_priv.dhandle, "_DSM", &input,
++				   &output);
++	if (ret) {
++		DRM_DEBUG_DRIVER("failed to evaluate _DSM: %d\n", ret);
++		goto out;
++	}
++
++	pkg = (union acpi_object *)output.pointer;
++
++	if (pkg->type == ACPI_TYPE_PACKAGE) {
++		union acpi_object *connector_count = &pkg->package.elements[0];
++		DRM_DEBUG_DRIVER("MUX info connectors: %lld\n",
++			  (unsigned long long)connector_count->integer.value);
++		for (i = 1; i < pkg->package.count; i++) {
++			union acpi_object *obj = &pkg->package.elements[i];
++			union acpi_object *connector_id =
++				&obj->package.elements[0];
++			union acpi_object *info = &obj->package.elements[1];
++			DRM_DEBUG_DRIVER("Connector id: 0x%016llx\n",
++				  (unsigned long long)connector_id->integer.value);
++			DRM_DEBUG_DRIVER("  port id: %s\n",
++			       intel_dsm_port_name(info->buffer.pointer[0]));
++			DRM_DEBUG_DRIVER("  display mux info: %s\n",
++			       intel_dsm_mux_type(info->buffer.pointer[1]));
++			DRM_DEBUG_DRIVER("  aux/dc mux info: %s\n",
++			       intel_dsm_mux_type(info->buffer.pointer[2]));
++			DRM_DEBUG_DRIVER("  hpd mux info: %s\n",
++			       intel_dsm_mux_type(info->buffer.pointer[3]));
++		}
++	} else {
++		DRM_ERROR("MUX INFO call failed\n");
++	}
++
++out:
++	kfree(output.pointer);
++}
++
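++/* vga_switcheroo callbacks: switching and power control are no-ops on the Intel side */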
++static int intel_dsm_switchto(enum vga_switcheroo_client_id id)
++{
++	return 0;
++}
++
++static int intel_dsm_power_state(enum vga_switcheroo_client_id id,
++				 enum vga_switcheroo_state state)
++{
++	return 0;
++}
++
++static int intel_dsm_init(void)
++{
++	return 0;
++}
++
++static int intel_dsm_get_client_id(struct pci_dev *pdev)
++{
++	if (intel_dsm_priv.dhandle == DEVICE_ACPI_HANDLE(&pdev->dev))
++		return VGA_SWITCHEROO_IGD;
++	else
++		return VGA_SWITCHEROO_DIS;
++}
++
++static struct vga_switcheroo_handler intel_dsm_handler = {
++	.switchto = intel_dsm_switchto,
++	.power_state = intel_dsm_power_state,
++	.init = intel_dsm_init,
++	.get_client_id = intel_dsm_get_client_id,
++};
++
++static bool intel_dsm_pci_probe(struct pci_dev *pdev)
++{
++	acpi_handle dhandle, intel_handle;
++	acpi_status status;
++	int ret;
++
++	dhandle = DEVICE_ACPI_HANDLE(&pdev->dev);
++	if (!dhandle)
++		return false;
++
++	status = acpi_get_handle(dhandle, "_DSM", &intel_handle);
++	if (ACPI_FAILURE(status)) {
++		DRM_DEBUG_KMS("no _DSM method for intel device\n");
++		return false;
++	}
++
++	ret = intel_dsm(dhandle, INTEL_DSM_FN_SUPPORTED_FUNCTIONS, 0);
++	if (ret < 0) {
++		DRM_ERROR("failed to get supported _DSM functions\n");
++		return false;
++	}
++
++	intel_dsm_priv.dhandle = dhandle;
++
++	intel_dsm_platform_mux_info();
++	return true;
++}
++
++static bool intel_dsm_detect(void)
++{
++	char acpi_method_name[255] = { 0 };
++	struct acpi_buffer buffer = {sizeof(acpi_method_name), acpi_method_name};
++	struct pci_dev *pdev = NULL;
++	bool has_dsm = false;
++	int vga_count = 0;
++
++	while ((pdev = pci_get_class(PCI_CLASS_DISPLAY_VGA << 8, pdev)) != NULL) {
++		vga_count++;
++		has_dsm |= intel_dsm_pci_probe(pdev);
++	}
++
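++	/* Only a hybrid setup (two VGA devices, one exposing the Intel _DSM) qualifies */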
++	if (vga_count == 2 && has_dsm) {
++		acpi_get_name(intel_dsm_priv.dhandle, ACPI_FULL_PATHNAME, &buffer);
++		DRM_DEBUG_DRIVER("VGA switcheroo: detected DSM switching method %s handle\n",
++				 acpi_method_name);
++		return true;
++	}
++
++	return false;
++}
++
++void intel_register_dsm_handler(void)
++{
++	if (!intel_dsm_detect())
++		return;
++
++	vga_switcheroo_register_handler(&intel_dsm_handler);
++}
++
++void intel_unregister_dsm_handler(void)
++{
++	vga_switcheroo_unregister_handler();
++}
+diff --git a/drivers/gpu/drm/i915/intel_bios.c b/drivers/gpu/drm/i915/intel_bios.c
+index 96f75d7..b0b1200 100644
+--- a/drivers/gpu/drm/i915/intel_bios.c
++++ b/drivers/gpu/drm/i915/intel_bios.c
+@@ -24,6 +24,7 @@
+  *    Eric Anholt <eric@anholt.net>
+  *
+  */
++#include <drm/drm_dp_helper.h>
+ #include "drmP.h"
+ #include "drm.h"
+ #include "i915_drm.h"
+@@ -129,10 +130,6 @@ parse_lfp_panel_data(struct drm_i915_private *dev_priv,
+ 	int i, temp_downclock;
+ 	struct drm_display_mode *temp_mode;
+ 
+-	/* Defaults if we can't find VBT info */
+-	dev_priv->lvds_dither = 0;
+-	dev_priv->lvds_vbt = 0;
+-
+ 	lvds_options = find_section(bdb, BDB_LVDS_OPTIONS);
+ 	if (!lvds_options)
+ 		return;
+@@ -140,6 +137,7 @@ parse_lfp_panel_data(struct drm_i915_private *dev_priv,
+ 	dev_priv->lvds_dither = lvds_options->pixel_dither;
+ 	if (lvds_options->panel_type == 0xff)
+ 		return;
++
+ 	panel_type = lvds_options->panel_type;
+ 
+ 	lvds_lfp_data = find_section(bdb, BDB_LVDS_LFP_DATA);
+@@ -169,6 +167,8 @@ parse_lfp_panel_data(struct drm_i915_private *dev_priv,
+ 			((unsigned char *)entry + dvo_timing_offset);
+ 
+ 	panel_fixed_mode = kzalloc(sizeof(*panel_fixed_mode), GFP_KERNEL);
++	if (!panel_fixed_mode)
++		return;
+ 
+ 	fill_detail_timing_data(panel_fixed_mode, dvo_timing);
+ 
+@@ -230,8 +230,6 @@ parse_sdvo_panel_data(struct drm_i915_private *dev_priv,
+ 	struct lvds_dvo_timing *dvo_timing;
+ 	struct drm_display_mode *panel_fixed_mode;
+ 
+-	dev_priv->sdvo_lvds_vbt_mode = NULL;
+-
+ 	sdvo_lvds_options = find_section(bdb, BDB_SDVO_LVDS_OPTIONS);
+ 	if (!sdvo_lvds_options)
+ 		return;
+@@ -260,10 +258,6 @@ parse_general_features(struct drm_i915_private *dev_priv,
+ 	struct drm_device *dev = dev_priv->dev;
+ 	struct bdb_general_features *general;
+ 
+-	/* Set sensible defaults in case we can't find the general block */
+-	dev_priv->int_tv_support = 1;
+-	dev_priv->int_crt_support = 1;
+-
+ 	general = find_section(bdb, BDB_GENERAL_FEATURES);
+ 	if (general) {
+ 		dev_priv->int_tv_support = general->int_tv_support;
+@@ -271,10 +265,10 @@ parse_general_features(struct drm_i915_private *dev_priv,
+ 		dev_priv->lvds_use_ssc = general->enable_ssc;
+ 
+ 		if (dev_priv->lvds_use_ssc) {
+-			if (IS_I85X(dev_priv->dev))
++			if (IS_I85X(dev))
+ 				dev_priv->lvds_ssc_freq =
+ 					general->ssc_freq ? 66 : 48;
+-			else if (IS_IRONLAKE(dev_priv->dev) || IS_GEN6(dev))
++			else if (IS_GEN5(dev) || IS_GEN6(dev))
+ 				dev_priv->lvds_ssc_freq =
+ 					general->ssc_freq ? 100 : 120;
+ 			else
+@@ -289,14 +283,6 @@ parse_general_definitions(struct drm_i915_private *dev_priv,
+ 			  struct bdb_header *bdb)
+ {
+ 	struct bdb_general_definitions *general;
+-	const int crt_bus_map_table[] = {
+-		GPIOB,
+-		GPIOA,
+-		GPIOC,
+-		GPIOD,
+-		GPIOE,
+-		GPIOF,
+-	};
+ 
+ 	general = find_section(bdb, BDB_GENERAL_DEFINITIONS);
+ 	if (general) {
+@@ -304,10 +290,8 @@ parse_general_definitions(struct drm_i915_private *dev_priv,
+ 		if (block_size >= sizeof(*general)) {
+ 			int bus_pin = general->crt_ddc_gmbus_pin;
+ 			DRM_DEBUG_KMS("crt_ddc_bus_pin: %d\n", bus_pin);
+-			if ((bus_pin >= 1) && (bus_pin <= 6)) {
+-				dev_priv->crt_ddc_bus =
+-					crt_bus_map_table[bus_pin-1];
+-			}
++			if (bus_pin >= 1 && bus_pin <= 6)
++				dev_priv->crt_ddc_pin = bus_pin;
+ 		} else {
+ 			DRM_DEBUG_KMS("BDB_GD too small (%d). Invalid.\n",
+ 				  block_size);
+@@ -317,7 +301,7 @@ parse_general_definitions(struct drm_i915_private *dev_priv,
+ 
+ static void
+ parse_sdvo_device_mapping(struct drm_i915_private *dev_priv,
+-		       struct bdb_header *bdb)
++			  struct bdb_header *bdb)
+ {
+ 	struct sdvo_device_mapping *p_mapping;
+ 	struct bdb_general_definitions *p_defs;
+@@ -327,7 +311,7 @@ parse_sdvo_device_mapping(struct drm_i915_private *dev_priv,
+ 
+ 	p_defs = find_section(bdb, BDB_GENERAL_DEFINITIONS);
+ 	if (!p_defs) {
+-		DRM_DEBUG_KMS("No general definition block is found\n");
++		DRM_DEBUG_KMS("No general definition block is found, unable to construct sdvo mapping.\n");
+ 		return;
+ 	}
+ 	/* judge whether the size of child device meets the requirements.
+@@ -377,7 +361,16 @@ parse_sdvo_device_mapping(struct drm_i915_private *dev_priv,
+ 			p_mapping->slave_addr = p_child->slave_addr;
+ 			p_mapping->dvo_wiring = p_child->dvo_wiring;
+ 			p_mapping->ddc_pin = p_child->ddc_pin;
++			p_mapping->i2c_pin = p_child->i2c_pin;
++			p_mapping->i2c_speed = p_child->i2c_speed;
+ 			p_mapping->initialized = 1;
++			DRM_DEBUG_KMS("SDVO device: dvo=%x, addr=%x, wiring=%d, ddc_pin=%d, i2c_pin=%d, i2c_speed=%d\n",
++				      p_mapping->dvo_port,
++				      p_mapping->slave_addr,
++				      p_mapping->dvo_wiring,
++				      p_mapping->ddc_pin,
++				      p_mapping->i2c_pin,
++				      p_mapping->i2c_speed);
+ 		} else {
+ 			DRM_DEBUG_KMS("Maybe one SDVO port is shared by "
+ 					 "two SDVO device.\n");
+@@ -409,14 +402,11 @@ parse_driver_features(struct drm_i915_private *dev_priv,
+ 	if (!driver)
+ 		return;
+ 
+-	if (driver && SUPPORTS_EDP(dev) &&
+-	    driver->lvds_config == BDB_DRIVER_FEATURE_EDP) {
+-		dev_priv->edp_support = 1;
+-	} else {
+-		dev_priv->edp_support = 0;
+-	}
++	if (SUPPORTS_EDP(dev) &&
++	    driver->lvds_config == BDB_DRIVER_FEATURE_EDP)
++		dev_priv->edp.support = 1;
+ 
+-	if (driver && driver->dual_frequency)
++	if (driver->dual_frequency)
+ 		dev_priv->render_reclock_avail = true;
+ }
+ 
+@@ -424,27 +414,78 @@ static void
+ parse_edp(struct drm_i915_private *dev_priv, struct bdb_header *bdb)
+ {
+ 	struct bdb_edp *edp;
++	struct edp_power_seq *edp_pps;
++	struct edp_link_params *edp_link_params;
+ 
+ 	edp = find_section(bdb, BDB_EDP);
+ 	if (!edp) {
+-		if (SUPPORTS_EDP(dev_priv->dev) && dev_priv->edp_support) {
++		if (SUPPORTS_EDP(dev_priv->dev) && dev_priv->edp.support) {
+ 			DRM_DEBUG_KMS("No eDP BDB found but eDP panel "
+-				      "supported, assume 18bpp panel color "
+-				      "depth.\n");
+-			dev_priv->edp_bpp = 18;
++				      "supported, assume %dbpp panel color "
++				      "depth.\n",
++				      dev_priv->edp.bpp);
+ 		}
+ 		return;
+ 	}
+ 
+ 	switch ((edp->color_depth >> (panel_type * 2)) & 3) {
+ 	case EDP_18BPP:
+-		dev_priv->edp_bpp = 18;
++		dev_priv->edp.bpp = 18;
+ 		break;
+ 	case EDP_24BPP:
+-		dev_priv->edp_bpp = 24;
++		dev_priv->edp.bpp = 24;
+ 		break;
+ 	case EDP_30BPP:
+-		dev_priv->edp_bpp = 30;
++		dev_priv->edp.bpp = 30;
++		break;
++	}
++
++	/* Get the eDP sequencing and link info */
++	edp_pps = &edp->power_seqs[panel_type];
++	edp_link_params = &edp->link_params[panel_type];
++
++	dev_priv->edp.pps = *edp_pps;
++
++	dev_priv->edp.rate = edp_link_params->rate ? DP_LINK_BW_2_7 :
++		DP_LINK_BW_1_62;
++	switch (edp_link_params->lanes) {
++	case 0:
++		dev_priv->edp.lanes = 1;
++		break;
++	case 1:
++		dev_priv->edp.lanes = 2;
++		break;
++	case 3:
++	default:
++		dev_priv->edp.lanes = 4;
++		break;
++	}
++	switch (edp_link_params->preemphasis) {
++	case 0:
++		dev_priv->edp.preemphasis = DP_TRAIN_PRE_EMPHASIS_0;
++		break;
++	case 1:
++		dev_priv->edp.preemphasis = DP_TRAIN_PRE_EMPHASIS_3_5;
++		break;
++	case 2:
++		dev_priv->edp.preemphasis = DP_TRAIN_PRE_EMPHASIS_6;
++		break;
++	case 3:
++		dev_priv->edp.preemphasis = DP_TRAIN_PRE_EMPHASIS_9_5;
++		break;
++	}
++	switch (edp_link_params->vswing) {
++	case 0:
++		dev_priv->edp.vswing = DP_TRAIN_VOLTAGE_SWING_400;
++		break;
++	case 1:
++		dev_priv->edp.vswing = DP_TRAIN_VOLTAGE_SWING_600;
++		break;
++	case 2:
++		dev_priv->edp.vswing = DP_TRAIN_VOLTAGE_SWING_800;
++		break;
++	case 3:
++		dev_priv->edp.vswing = DP_TRAIN_VOLTAGE_SWING_1200;
+ 		break;
+ 	}
+ }
+@@ -460,7 +501,7 @@ parse_device_mapping(struct drm_i915_private *dev_priv,
+ 
+ 	p_defs = find_section(bdb, BDB_GENERAL_DEFINITIONS);
+ 	if (!p_defs) {
+-		DRM_DEBUG_KMS("No general definition block is found\n");
++		DRM_DEBUG_KMS("No general definition block is found, no devices defined.\n");
+ 		return;
+ 	}
+ 	/* judge whether the size of child device meets the requirements.
+@@ -513,50 +554,83 @@ parse_device_mapping(struct drm_i915_private *dev_priv,
+ 	}
+ 	return;
+ }
++
++static void
++init_vbt_defaults(struct drm_i915_private *dev_priv)
++{
++	dev_priv->crt_ddc_pin = GMBUS_PORT_VGADDC;
++
++	/* LFP panel data */
++	dev_priv->lvds_dither = 1;
++	dev_priv->lvds_vbt = 0;
++
++	/* SDVO panel data */
++	dev_priv->sdvo_lvds_vbt_mode = NULL;
++
++	/* general features */
++	dev_priv->int_tv_support = 1;
++	dev_priv->int_crt_support = 1;
++	dev_priv->lvds_use_ssc = 0;
++
++	/* eDP data */
++	dev_priv->edp.bpp = 18;
++}
++
+ /**
+- * intel_init_bios - initialize VBIOS settings & find VBT
++ * intel_parse_bios - find VBT and initialize settings from the BIOS
+  * @dev: DRM device
+  *
+  * Loads the Video BIOS and checks that the VBT exists.  Sets scratch registers
+  * to appropriate values.
+  *
+- * VBT existence is a sanity check that is relied on by other i830_bios.c code.
+- * Note that it would be better to use a BIOS call to get the VBT, as BIOSes may
+- * feed an updated VBT back through that, compared to what we'll fetch using
+- * this method of groping around in the BIOS data.
+- *
+  * Returns 0 on success, nonzero on failure.
+  */
+ bool
+-intel_init_bios(struct drm_device *dev)
++intel_parse_bios(struct drm_device *dev)
+ {
+ 	struct drm_i915_private *dev_priv = dev->dev_private;
+ 	struct pci_dev *pdev = dev->pdev;
+-	struct vbt_header *vbt = NULL;
+-	struct bdb_header *bdb;
+-	u8 __iomem *bios;
+-	size_t size;
+-	int i;
+-
+-	bios = pci_map_rom(pdev, &size);
+-	if (!bios)
+-		return -1;
+-
+-	/* Scour memory looking for the VBT signature */
+-	for (i = 0; i + 4 < size; i++) {
+-		if (!memcmp(bios + i, "$VBT", 4)) {
+-			vbt = (struct vbt_header *)(bios + i);
+-			break;
+-		}
++	struct bdb_header *bdb = NULL;
++	u8 __iomem *bios = NULL;
++
++	init_vbt_defaults(dev_priv);
++
++	/* XXX Should this validation be moved to intel_opregion.c? */
++	if (dev_priv->opregion.vbt) {
++		struct vbt_header *vbt = dev_priv->opregion.vbt;
++		if (memcmp(vbt->signature, "$VBT", 4) == 0) {
++			DRM_DEBUG_DRIVER("Using VBT from OpRegion: %20s\n",
++					 vbt->signature);
++			bdb = (struct bdb_header *)((char *)vbt + vbt->bdb_offset);
++		} else
++			dev_priv->opregion.vbt = NULL;
+ 	}
+ 
+-	if (!vbt) {
+-		DRM_ERROR("VBT signature missing\n");
+-		pci_unmap_rom(pdev, bios);
+-		return -1;
+-	}
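++	/* No usable VBT in the OpRegion; fall back to scanning the PCI ROM for "$VBT" */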
++	if (bdb == NULL) {
++		struct vbt_header *vbt = NULL;
++		size_t size;
++		int i;
+ 
+-	bdb = (struct bdb_header *)(bios + i + vbt->bdb_offset);
++		bios = pci_map_rom(pdev, &size);
++		if (!bios)
++			return -1;
++
++		/* Scour memory looking for the VBT signature */
++		for (i = 0; i + 4 < size; i++) {
++			if (!memcmp(bios + i, "$VBT", 4)) {
++				vbt = (struct vbt_header *)(bios + i);
++				break;
++			}
++		}
++
++		if (!vbt) {
++			DRM_ERROR("VBT signature missing\n");
++			pci_unmap_rom(pdev, bios);
++			return -1;
++		}
++
++		bdb = (struct bdb_header *)(bios + i + vbt->bdb_offset);
++	}
+ 
+ 	/* Grab useful general definitions */
+ 	parse_general_features(dev_priv, bdb);
+@@ -568,7 +642,25 @@ intel_init_bios(struct drm_device *dev)
+ 	parse_driver_features(dev_priv, bdb);
+ 	parse_edp(dev_priv, bdb);
+ 
+-	pci_unmap_rom(pdev, bios);
++	if (bios)
++		pci_unmap_rom(pdev, bios);
+ 
+ 	return 0;
+ }
++
++/* Ensure that vital registers have been initialised, even if the BIOS
++ * is absent or just failing to do its job.
++ */
++void intel_setup_bios(struct drm_device *dev)
++{
++	struct drm_i915_private *dev_priv = dev->dev_private;
++
++	/* Set the Panel Power On/Off timings if uninitialized. */
++	if ((I915_READ(PP_ON_DELAYS) == 0) && (I915_READ(PP_OFF_DELAYS) == 0)) {
++		/* Set T2 to 40ms and T5 to 200ms */
++		I915_WRITE(PP_ON_DELAYS, 0x019007d0);
++
++		/* Set T3 to 35ms and Tx to 200ms */
++		I915_WRITE(PP_OFF_DELAYS, 0x015e07d0);
++	}
++}
+diff --git a/drivers/gpu/drm/i915/intel_bios.h b/drivers/gpu/drm/i915/intel_bios.h
+index 4c18514..5f8e4ed 100644
+--- a/drivers/gpu/drm/i915/intel_bios.h
++++ b/drivers/gpu/drm/i915/intel_bios.h
+@@ -197,7 +197,8 @@ struct bdb_general_features {
+ struct child_device_config {
+ 	u16 handle;
+ 	u16 device_type;
+-	u8  device_id[10]; /* See DEVICE_TYPE_* above */
++	u8  i2c_speed;
++	u8  rsvd[9];
+ 	u16 addin_offset;
+ 	u8  dvo_port; /* See Device_PORT_* above */
+ 	u8  i2c_pin;
+@@ -466,7 +467,8 @@ struct bdb_edp {
+ 	struct edp_link_params link_params[16];
+ } __attribute__ ((packed));
+ 
+-bool intel_init_bios(struct drm_device *dev);
++void intel_setup_bios(struct drm_device *dev);
++bool intel_parse_bios(struct drm_device *dev);
+ 
+ /*
+  * Driver<->VBIOS interaction occurs through scratch bits in
+diff --git a/drivers/gpu/drm/i915/intel_crt.c b/drivers/gpu/drm/i915/intel_crt.c
+index 197d4f3..c55c770 100644
+--- a/drivers/gpu/drm/i915/intel_crt.c
++++ b/drivers/gpu/drm/i915/intel_crt.c
+@@ -79,7 +79,7 @@ static int intel_crt_mode_valid(struct drm_connector *connector,
+ 	if (mode->clock < 25000)
+ 		return MODE_CLOCK_LOW;
+ 
+-	if (!IS_I9XX(dev))
++	if (IS_GEN2(dev))
+ 		max_clock = 350000;
+ 	else
+ 		max_clock = 400000;
+@@ -123,7 +123,7 @@ static void intel_crt_mode_set(struct drm_encoder *encoder,
+ 	 * Disable separate mode multiplier used when cloning SDVO to CRT
+ 	 * XXX this needs to be adjusted when we really are cloning
+ 	 */
+-	if (IS_I965G(dev) && !HAS_PCH_SPLIT(dev)) {
++	if (INTEL_INFO(dev)->gen >= 4 && !HAS_PCH_SPLIT(dev)) {
+ 		dpll_md = I915_READ(dpll_md_reg);
+ 		I915_WRITE(dpll_md_reg,
+ 			   dpll_md & ~DPLL_MD_UDI_MULTIPLIER_MASK);
+@@ -187,11 +187,12 @@ static bool intel_ironlake_crt_detect_hotplug(struct drm_connector *connector)
+ 	I915_WRITE(PCH_ADPA, adpa);
+ 
+ 	if (wait_for((I915_READ(PCH_ADPA) & ADPA_CRT_HOTPLUG_FORCE_TRIGGER) == 0,
+-		     1000, 1))
++		     1000))
+ 		DRM_DEBUG_KMS("timed out waiting for FORCE_TRIGGER");
+ 
+ 	if (turn_off_dac) {
+-		I915_WRITE(PCH_ADPA, temp);
++		/* Make sure hotplug is enabled */
++		I915_WRITE(PCH_ADPA, temp | ADPA_CRT_HOTPLUG_ENABLE);
+ 		(void)I915_READ(PCH_ADPA);
+ 	}
+ 
+@@ -244,7 +245,7 @@ static bool intel_crt_detect_hotplug(struct drm_connector *connector)
+ 		/* wait for FORCE_DETECT to go off */
+ 		if (wait_for((I915_READ(PORT_HOTPLUG_EN) &
+ 			      CRT_HOTPLUG_FORCE_DETECT) == 0,
+-			     1000, 1))
++			     1000))
+ 			DRM_DEBUG_KMS("timed out waiting for FORCE_DETECT to go off");
+ 	}
+ 
+@@ -261,21 +262,47 @@ static bool intel_crt_detect_hotplug(struct drm_connector *connector)
+ 	return ret;
+ }
+ 
++static bool intel_crt_ddc_probe(struct drm_i915_private *dev_priv, int ddc_bus)
++{
++	u8 buf;
++	struct i2c_msg msgs[] = {
++		{
++			.addr = 0xA0,
++			.flags = 0,
++			.len = 1,
++			.buf = &buf,
++		},
++	};
++	/* DDC monitor detect: Does it ACK a write to 0xA0? */
++	return i2c_transfer(&dev_priv->gmbus[ddc_bus].adapter, msgs, 1) == 1;
++}
++
+ static bool intel_crt_detect_ddc(struct drm_encoder *encoder)
+ {
+-	struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder);
++	struct intel_encoder *intel_encoder = to_intel_encoder(encoder);
++	struct drm_i915_private *dev_priv = encoder->dev->dev_private;
+ 
+ 	/* CRT should always be at 0, but check anyway */
+ 	if (intel_encoder->type != INTEL_OUTPUT_ANALOG)
+ 		return false;
+ 
+-	return intel_ddc_probe(intel_encoder);
++	if (intel_crt_ddc_probe(dev_priv, dev_priv->crt_ddc_pin)) {
++		DRM_DEBUG_KMS("CRT detected via DDC:0xa0\n");
++		return true;
++	}
++
++	if (intel_ddc_probe(intel_encoder, dev_priv->crt_ddc_pin)) {
++		DRM_DEBUG_KMS("CRT detected via DDC:0x50 [EDID]\n");
++		return true;
++	}
++
++	return false;
+ }
+ 
+ static enum drm_connector_status
+ intel_crt_load_detect(struct drm_crtc *crtc, struct intel_encoder *intel_encoder)
+ {
+-	struct drm_encoder *encoder = &intel_encoder->enc;
++	struct drm_encoder *encoder = &intel_encoder->base;
+ 	struct drm_device *dev = encoder->dev;
+ 	struct drm_i915_private *dev_priv = dev->dev_private;
+ 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+@@ -295,6 +322,8 @@ intel_crt_load_detect(struct drm_crtc *crtc, struct intel_encoder *intel_encoder
+ 	uint8_t	st00;
+ 	enum drm_connector_status status;
+ 
++	DRM_DEBUG_KMS("starting load-detect on CRT\n");
++
+ 	if (pipe == 0) {
+ 		bclrpat_reg = BCLRPAT_A;
+ 		vtotal_reg = VTOTAL_A;
+@@ -324,9 +353,10 @@ intel_crt_load_detect(struct drm_crtc *crtc, struct intel_encoder *intel_encoder
+ 	/* Set the border color to purple. */
+ 	I915_WRITE(bclrpat_reg, 0x500050);
+ 
+-	if (IS_I9XX(dev)) {
++	if (!IS_GEN2(dev)) {
+ 		uint32_t pipeconf = I915_READ(pipeconf_reg);
+ 		I915_WRITE(pipeconf_reg, pipeconf | PIPECONF_FORCE_BORDER);
++		POSTING_READ(pipeconf_reg);
+ 		/* Wait for next Vblank to substitue
+ 		 * border color for Color info */
+ 		intel_wait_for_vblank(dev, pipe);
+@@ -404,34 +434,37 @@ static enum drm_connector_status
+ intel_crt_detect(struct drm_connector *connector, bool force)
+ {
+ 	struct drm_device *dev = connector->dev;
+-	struct drm_encoder *encoder = intel_attached_encoder(connector);
+-	struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder);
++	struct intel_encoder *encoder = intel_attached_encoder(connector);
+ 	struct drm_crtc *crtc;
+ 	int dpms_mode;
+ 	enum drm_connector_status status;
+ 
+-	if (IS_I9XX(dev) && !IS_I915G(dev) && !IS_I915GM(dev)) {
+-		if (intel_crt_detect_hotplug(connector))
++	if (I915_HAS_HOTPLUG(dev)) {
++		if (intel_crt_detect_hotplug(connector)) {
++			DRM_DEBUG_KMS("CRT detected via hotplug\n");
+ 			return connector_status_connected;
+-		else
++		} else
+ 			return connector_status_disconnected;
+ 	}
+ 
+-	if (intel_crt_detect_ddc(encoder))
++	if (intel_crt_detect_ddc(&encoder->base))
+ 		return connector_status_connected;
+ 
+ 	if (!force)
+ 		return connector->status;
+ 
+ 	/* for pre-945g platforms use load detect */
+-	if (encoder->crtc && encoder->crtc->enabled) {
+-		status = intel_crt_load_detect(encoder->crtc, intel_encoder);
++	if (encoder->base.crtc && encoder->base.crtc->enabled) {
++		status = intel_crt_load_detect(encoder->base.crtc, encoder);
+ 	} else {
+-		crtc = intel_get_load_detect_pipe(intel_encoder, connector,
++		crtc = intel_get_load_detect_pipe(encoder, connector,
+ 						  NULL, &dpms_mode);
+ 		if (crtc) {
+-			status = intel_crt_load_detect(crtc, intel_encoder);
+-			intel_release_load_detect_pipe(intel_encoder,
++			if (intel_crt_detect_ddc(&encoder->base))
++				status = connector_status_connected;
++			else
++				status = intel_crt_load_detect(crtc, encoder);
++			intel_release_load_detect_pipe(encoder,
+ 						       connector, dpms_mode);
+ 		} else
+ 			status = connector_status_unknown;
+@@ -449,32 +482,18 @@ static void intel_crt_destroy(struct drm_connector *connector)
+ 
+ static int intel_crt_get_modes(struct drm_connector *connector)
+ {
+-	int ret;
+-	struct drm_encoder *encoder = intel_attached_encoder(connector);
+-	struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder);
+-	struct i2c_adapter *ddc_bus;
+ 	struct drm_device *dev = connector->dev;
++	struct drm_i915_private *dev_priv = dev->dev_private;
++	int ret;
+ 
+-
+-	ret = intel_ddc_get_modes(connector, intel_encoder->ddc_bus);
++	ret = intel_ddc_get_modes(connector,
++				 &dev_priv->gmbus[dev_priv->crt_ddc_pin].adapter);
+ 	if (ret || !IS_G4X(dev))
+-		goto end;
++		return ret;
+ 
+ 	/* Try to probe digital port for output in DVI-I -> VGA mode. */
+-	ddc_bus = intel_i2c_create(connector->dev, GPIOD, "CRTDDC_D");
+-
+-	if (!ddc_bus) {
+-		dev_printk(KERN_ERR, &connector->dev->pdev->dev,
+-			   "DDC bus registration failed for CRTDDC_D.\n");
+-		goto end;
+-	}
+-	/* Try to get modes by GPIOD port */
+-	ret = intel_ddc_get_modes(connector, ddc_bus);
+-	intel_i2c_destroy(ddc_bus);
+-
+-end:
+-	return ret;
+-
++	return intel_ddc_get_modes(connector,
++				   &dev_priv->gmbus[GMBUS_PORT_DPB].adapter);
+ }
+ 
+ static int intel_crt_set_property(struct drm_connector *connector,
+@@ -507,7 +526,7 @@ static const struct drm_connector_funcs intel_crt_connector_funcs = {
+ static const struct drm_connector_helper_funcs intel_crt_connector_helper_funcs = {
+ 	.mode_valid = intel_crt_mode_valid,
+ 	.get_modes = intel_crt_get_modes,
+-	.best_encoder = intel_attached_encoder,
++	.best_encoder = intel_best_encoder,
+ };
+ 
+ static const struct drm_encoder_funcs intel_crt_enc_funcs = {
+@@ -520,7 +539,6 @@ void intel_crt_init(struct drm_device *dev)
+ 	struct intel_encoder *intel_encoder;
+ 	struct intel_connector *intel_connector;
+ 	struct drm_i915_private *dev_priv = dev->dev_private;
+-	u32 i2c_reg;
+ 
+ 	intel_encoder = kzalloc(sizeof(struct intel_encoder), GFP_KERNEL);
+ 	if (!intel_encoder)
+@@ -536,27 +554,10 @@ void intel_crt_init(struct drm_device *dev)
+ 	drm_connector_init(dev, &intel_connector->base,
+ 			   &intel_crt_connector_funcs, DRM_MODE_CONNECTOR_VGA);
+ 
+-	drm_encoder_init(dev, &intel_encoder->enc, &intel_crt_enc_funcs,
++	drm_encoder_init(dev, &intel_encoder->base, &intel_crt_enc_funcs,
+ 			 DRM_MODE_ENCODER_DAC);
+ 
+-	drm_mode_connector_attach_encoder(&intel_connector->base,
+-					  &intel_encoder->enc);
+-
+-	/* Set up the DDC bus. */
+-	if (HAS_PCH_SPLIT(dev))
+-		i2c_reg = PCH_GPIOA;
+-	else {
+-		i2c_reg = GPIOA;
+-		/* Use VBT information for CRT DDC if available */
+-		if (dev_priv->crt_ddc_bus != 0)
+-			i2c_reg = dev_priv->crt_ddc_bus;
+-	}
+-	intel_encoder->ddc_bus = intel_i2c_create(dev, i2c_reg, "CRTDDC_A");
+-	if (!intel_encoder->ddc_bus) {
+-		dev_printk(KERN_ERR, &dev->pdev->dev, "DDC bus registration "
+-			   "failed.\n");
+-		return;
+-	}
++	intel_connector_attach_encoder(intel_connector, intel_encoder);
+ 
+ 	intel_encoder->type = INTEL_OUTPUT_ANALOG;
+ 	intel_encoder->clone_mask = (1 << INTEL_SDVO_NON_TV_CLONE_BIT) |
+@@ -566,7 +567,7 @@ void intel_crt_init(struct drm_device *dev)
+ 	connector->interlace_allowed = 1;
+ 	connector->doublescan_allowed = 0;
+ 
+-	drm_encoder_helper_add(&intel_encoder->enc, &intel_crt_helper_funcs);
++	drm_encoder_helper_add(&intel_encoder->base, &intel_crt_helper_funcs);
+ 	drm_connector_helper_add(connector, &intel_crt_connector_helper_funcs);
+ 
+ 	drm_sysfs_connector_add(connector);
+diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
+index 9792285..0cece04 100644
+--- a/drivers/gpu/drm/i915/intel_display.c
++++ b/drivers/gpu/drm/i915/intel_display.c
+@@ -43,8 +43,8 @@
+ 
+ bool intel_pipe_has_type (struct drm_crtc *crtc, int type);
+ static void intel_update_watermarks(struct drm_device *dev);
+-static void intel_increase_pllclock(struct drm_crtc *crtc, bool schedule);
+-static void intel_crtc_update_cursor(struct drm_crtc *crtc);
++static void intel_increase_pllclock(struct drm_crtc *crtc);
++static void intel_crtc_update_cursor(struct drm_crtc *crtc, bool on);
+ 
+ typedef struct {
+     /* given values */
+@@ -342,6 +342,16 @@ static bool
+ intel_find_pll_ironlake_dp(const intel_limit_t *, struct drm_crtc *crtc,
+ 			   int target, int refclk, intel_clock_t *best_clock);
+ 
++static inline u32 /* units of 100MHz */
++intel_fdi_link_freq(struct drm_device *dev)
++{
++	if (IS_GEN5(dev)) {
++		struct drm_i915_private *dev_priv = dev->dev_private;
++		return (I915_READ(FDI_PLL_BIOS_0) & FDI_PLL_FB_CLOCK_MASK) + 2;
++	} else
++		return 27;
++}
++
+ static const intel_limit_t intel_limits_i8xx_dvo = {
+         .dot = { .min = I8XX_DOT_MIN,		.max = I8XX_DOT_MAX },
+         .vco = { .min = I8XX_VCO_MIN,		.max = I8XX_VCO_MAX },
+@@ -701,16 +711,16 @@ static const intel_limit_t *intel_limit(struct drm_crtc *crtc)
+ 		limit = intel_ironlake_limit(crtc);
+ 	else if (IS_G4X(dev)) {
+ 		limit = intel_g4x_limit(crtc);
+-	} else if (IS_I9XX(dev) && !IS_PINEVIEW(dev)) {
+-		if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS))
+-			limit = &intel_limits_i9xx_lvds;
+-		else
+-			limit = &intel_limits_i9xx_sdvo;
+ 	} else if (IS_PINEVIEW(dev)) {
+ 		if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS))
+ 			limit = &intel_limits_pineview_lvds;
+ 		else
+ 			limit = &intel_limits_pineview_sdvo;
++	} else if (!IS_GEN2(dev)) {
++		if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS))
++			limit = &intel_limits_i9xx_lvds;
++		else
++			limit = &intel_limits_i9xx_sdvo;
+ 	} else {
+ 		if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS))
+ 			limit = &intel_limits_i8xx_lvds;
+@@ -744,20 +754,17 @@ static void intel_clock(struct drm_device *dev, int refclk, intel_clock_t *clock
+ /**
+  * Returns whether any output on the specified pipe is of the specified type
+  */
+-bool intel_pipe_has_type (struct drm_crtc *crtc, int type)
++bool intel_pipe_has_type(struct drm_crtc *crtc, int type)
+ {
+-    struct drm_device *dev = crtc->dev;
+-    struct drm_mode_config *mode_config = &dev->mode_config;
+-    struct drm_encoder *l_entry;
++	struct drm_device *dev = crtc->dev;
++	struct drm_mode_config *mode_config = &dev->mode_config;
++	struct intel_encoder *encoder;
+ 
+-    list_for_each_entry(l_entry, &mode_config->encoder_list, head) {
+-	    if (l_entry && l_entry->crtc == crtc) {
+-		    struct intel_encoder *intel_encoder = enc_to_intel_encoder(l_entry);
+-		    if (intel_encoder->type == type)
+-			    return true;
+-	    }
+-    }
+-    return false;
++	list_for_each_entry(encoder, &mode_config->encoder_list, base.head)
++		if (encoder->base.crtc == crtc && encoder->type == type)
++			return true;
++
++	return false;
+ }
+ 
+ #define INTELPllInvalid(s)   do { /* DRM_DEBUG(s); */ return false; } while (0)
+@@ -928,10 +935,6 @@ intel_find_pll_ironlake_dp(const intel_limit_t *limit, struct drm_crtc *crtc,
+ 	struct drm_device *dev = crtc->dev;
+ 	intel_clock_t clock;
+ 
+-	/* return directly when it is eDP */
+-	if (HAS_eDP)
+-		return true;
+-
+ 	if (target < 200000) {
+ 		clock.n = 1;
+ 		clock.p1 = 2;
+@@ -955,26 +958,26 @@ static bool
+ intel_find_pll_g4x_dp(const intel_limit_t *limit, struct drm_crtc *crtc,
+ 		      int target, int refclk, intel_clock_t *best_clock)
+ {
+-    intel_clock_t clock;
+-    if (target < 200000) {
+-	clock.p1 = 2;
+-	clock.p2 = 10;
+-	clock.n = 2;
+-	clock.m1 = 23;
+-	clock.m2 = 8;
+-    } else {
+-	clock.p1 = 1;
+-	clock.p2 = 10;
+-	clock.n = 1;
+-	clock.m1 = 14;
+-	clock.m2 = 2;
+-    }
+-    clock.m = 5 * (clock.m1 + 2) + (clock.m2 + 2);
+-    clock.p = (clock.p1 * clock.p2);
+-    clock.dot = 96000 * clock.m / (clock.n + 2) / clock.p;
+-    clock.vco = 0;
+-    memcpy(best_clock, &clock, sizeof(intel_clock_t));
+-    return true;
++	intel_clock_t clock;
++	if (target < 200000) {
++		clock.p1 = 2;
++		clock.p2 = 10;
++		clock.n = 2;
++		clock.m1 = 23;
++		clock.m2 = 8;
++	} else {
++		clock.p1 = 1;
++		clock.p2 = 10;
++		clock.n = 1;
++		clock.m1 = 14;
++		clock.m2 = 2;
++	}
++	clock.m = 5 * (clock.m1 + 2) + (clock.m2 + 2);
++	clock.p = (clock.p1 * clock.p2);
++	clock.dot = 96000 * clock.m / (clock.n + 2) / clock.p;
++	clock.vco = 0;
++	memcpy(best_clock, &clock, sizeof(intel_clock_t));
++	return true;
+ }
+ 
+ /**
+@@ -1007,9 +1010,9 @@ void intel_wait_for_vblank(struct drm_device *dev, int pipe)
+ 		   I915_READ(pipestat_reg) | PIPE_VBLANK_INTERRUPT_STATUS);
+ 
+ 	/* Wait for vblank interrupt bit to set */
+-	if (wait_for((I915_READ(pipestat_reg) &
+-		      PIPE_VBLANK_INTERRUPT_STATUS),
+-		     50, 0))
++	if (wait_for(I915_READ(pipestat_reg) &
++		     PIPE_VBLANK_INTERRUPT_STATUS,
++		     50))
+ 		DRM_DEBUG_KMS("vblank wait timed out\n");
+ }
+ 
+@@ -1028,36 +1031,35 @@ void intel_wait_for_vblank(struct drm_device *dev, int pipe)
+  * Otherwise:
+  *   wait for the display line value to settle (it usually
+  *   ends up stopping at the start of the next frame).
+- *  
++ *
+  */
+-static void intel_wait_for_pipe_off(struct drm_device *dev, int pipe)
++void intel_wait_for_pipe_off(struct drm_device *dev, int pipe)
+ {
+ 	struct drm_i915_private *dev_priv = dev->dev_private;
+ 
+ 	if (INTEL_INFO(dev)->gen >= 4) {
+-		int pipeconf_reg = (pipe == 0 ? PIPEACONF : PIPEBCONF);
++		int reg = PIPECONF(pipe);
+ 
+ 		/* Wait for the Pipe State to go off */
+-		if (wait_for((I915_READ(pipeconf_reg) & I965_PIPECONF_ACTIVE) == 0,
+-			     100, 0))
++		if (wait_for((I915_READ(reg) & I965_PIPECONF_ACTIVE) == 0,
++			     100))
+ 			DRM_DEBUG_KMS("pipe_off wait timed out\n");
+ 	} else {
+ 		u32 last_line;
+-		int pipedsl_reg = (pipe == 0 ? PIPEADSL : PIPEBDSL);
++		int reg = PIPEDSL(pipe);
+ 		unsigned long timeout = jiffies + msecs_to_jiffies(100);
+ 
+ 		/* Wait for the display line to settle */
+ 		do {
+-			last_line = I915_READ(pipedsl_reg) & DSL_LINEMASK;
++			last_line = I915_READ(reg) & DSL_LINEMASK;
+ 			mdelay(5);
+-		} while (((I915_READ(pipedsl_reg) & DSL_LINEMASK) != last_line) &&
++		} while (((I915_READ(reg) & DSL_LINEMASK) != last_line) &&
+ 			 time_after(timeout, jiffies));
+ 		if (time_after(jiffies, timeout))
+ 			DRM_DEBUG_KMS("pipe_off wait timed out\n");
+ 	}
+ }
+ 
+-/* Parameters have changed, update FBC info */
+ static void i8xx_enable_fbc(struct drm_crtc *crtc, unsigned long interval)
+ {
+ 	struct drm_device *dev = crtc->dev;
+@@ -1069,6 +1071,14 @@ static void i8xx_enable_fbc(struct drm_crtc *crtc, unsigned long interval)
+ 	int plane, i;
+ 	u32 fbc_ctl, fbc_ctl2;
+ 
++	if (fb->pitch == dev_priv->cfb_pitch &&
++	    obj_priv->fence_reg == dev_priv->cfb_fence &&
++	    intel_crtc->plane == dev_priv->cfb_plane &&
++	    I915_READ(FBC_CONTROL) & FBC_CTL_EN)
++		return;
++
++	i8xx_disable_fbc(dev);
++
+ 	dev_priv->cfb_pitch = dev_priv->cfb_size / FBC_LL_SIZE;
+ 
+ 	if (fb->pitch < dev_priv->cfb_pitch)
+@@ -1102,7 +1112,7 @@ static void i8xx_enable_fbc(struct drm_crtc *crtc, unsigned long interval)
+ 	I915_WRITE(FBC_CONTROL, fbc_ctl);
+ 
+ 	DRM_DEBUG_KMS("enabled FBC, pitch %ld, yoff %d, plane %d, ",
+-		  dev_priv->cfb_pitch, crtc->y, dev_priv->cfb_plane);
++		      dev_priv->cfb_pitch, crtc->y, dev_priv->cfb_plane);
+ }
+ 
+ void i8xx_disable_fbc(struct drm_device *dev)
+@@ -1110,19 +1120,16 @@ void i8xx_disable_fbc(struct drm_device *dev)
+ 	struct drm_i915_private *dev_priv = dev->dev_private;
+ 	u32 fbc_ctl;
+ 
+-	if (!I915_HAS_FBC(dev))
+-		return;
+-
+-	if (!(I915_READ(FBC_CONTROL) & FBC_CTL_EN))
+-		return;	/* Already off, just return */
+-
+ 	/* Disable compression */
+ 	fbc_ctl = I915_READ(FBC_CONTROL);
++	if ((fbc_ctl & FBC_CTL_EN) == 0)
++		return;
++
+ 	fbc_ctl &= ~FBC_CTL_EN;
+ 	I915_WRITE(FBC_CONTROL, fbc_ctl);
+ 
+ 	/* Wait for compressing bit to clear */
+-	if (wait_for((I915_READ(FBC_STATUS) & FBC_STAT_COMPRESSING) == 0, 10, 0)) {
++	if (wait_for((I915_READ(FBC_STATUS) & FBC_STAT_COMPRESSING) == 0, 10)) {
+ 		DRM_DEBUG_KMS("FBC idle timed out\n");
+ 		return;
+ 	}
+@@ -1145,14 +1152,27 @@ static void g4x_enable_fbc(struct drm_crtc *crtc, unsigned long interval)
+ 	struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
+ 	struct drm_i915_gem_object *obj_priv = to_intel_bo(intel_fb->obj);
+ 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+-	int plane = (intel_crtc->plane == 0 ? DPFC_CTL_PLANEA :
+-		     DPFC_CTL_PLANEB);
++	int plane = intel_crtc->plane == 0 ? DPFC_CTL_PLANEA : DPFC_CTL_PLANEB;
+ 	unsigned long stall_watermark = 200;
+ 	u32 dpfc_ctl;
+ 
++	dpfc_ctl = I915_READ(DPFC_CONTROL);
++	if (dpfc_ctl & DPFC_CTL_EN) {
++		if (dev_priv->cfb_pitch == dev_priv->cfb_pitch / 64 - 1 &&
++		    dev_priv->cfb_fence == obj_priv->fence_reg &&
++		    dev_priv->cfb_plane == intel_crtc->plane &&
++		    dev_priv->cfb_y == crtc->y)
++			return;
++
++		I915_WRITE(DPFC_CONTROL, dpfc_ctl & ~DPFC_CTL_EN);
++		POSTING_READ(DPFC_CONTROL);
++		intel_wait_for_vblank(dev, intel_crtc->pipe);
++	}
++
+ 	dev_priv->cfb_pitch = (dev_priv->cfb_pitch / 64) - 1;
+ 	dev_priv->cfb_fence = obj_priv->fence_reg;
+ 	dev_priv->cfb_plane = intel_crtc->plane;
++	dev_priv->cfb_y = crtc->y;
+ 
+ 	dpfc_ctl = plane | DPFC_SR_EN | DPFC_CTL_LIMIT_1X;
+ 	if (obj_priv->tiling_mode != I915_TILING_NONE) {
+@@ -1162,7 +1182,6 @@ static void g4x_enable_fbc(struct drm_crtc *crtc, unsigned long interval)
+ 		I915_WRITE(DPFC_CHICKEN, ~DPFC_HT_MODIFY);
+ 	}
+ 
+-	I915_WRITE(DPFC_CONTROL, dpfc_ctl);
+ 	I915_WRITE(DPFC_RECOMP_CTL, DPFC_RECOMP_STALL_EN |
+ 		   (stall_watermark << DPFC_RECOMP_STALL_WM_SHIFT) |
+ 		   (interval << DPFC_RECOMP_TIMER_COUNT_SHIFT));
+@@ -1181,10 +1200,12 @@ void g4x_disable_fbc(struct drm_device *dev)
+ 
+ 	/* Disable compression */
+ 	dpfc_ctl = I915_READ(DPFC_CONTROL);
+-	dpfc_ctl &= ~DPFC_CTL_EN;
+-	I915_WRITE(DPFC_CONTROL, dpfc_ctl);
++	if (dpfc_ctl & DPFC_CTL_EN) {
++		dpfc_ctl &= ~DPFC_CTL_EN;
++		I915_WRITE(DPFC_CONTROL, dpfc_ctl);
+ 
+-	DRM_DEBUG_KMS("disabled FBC\n");
++		DRM_DEBUG_KMS("disabled FBC\n");
++	}
+ }
+ 
+ static bool g4x_fbc_enabled(struct drm_device *dev)
+@@ -1202,16 +1223,30 @@ static void ironlake_enable_fbc(struct drm_crtc *crtc, unsigned long interval)
+ 	struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
+ 	struct drm_i915_gem_object *obj_priv = to_intel_bo(intel_fb->obj);
+ 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+-	int plane = (intel_crtc->plane == 0) ? DPFC_CTL_PLANEA :
+-					       DPFC_CTL_PLANEB;
++	int plane = intel_crtc->plane == 0 ? DPFC_CTL_PLANEA : DPFC_CTL_PLANEB;
+ 	unsigned long stall_watermark = 200;
+ 	u32 dpfc_ctl;
+ 
++	dpfc_ctl = I915_READ(ILK_DPFC_CONTROL);
++	if (dpfc_ctl & DPFC_CTL_EN) {
++		if (dev_priv->cfb_pitch == dev_priv->cfb_pitch / 64 - 1 &&
++		    dev_priv->cfb_fence == obj_priv->fence_reg &&
++		    dev_priv->cfb_plane == intel_crtc->plane &&
++		    dev_priv->cfb_offset == obj_priv->gtt_offset &&
++		    dev_priv->cfb_y == crtc->y)
++			return;
++
++		I915_WRITE(ILK_DPFC_CONTROL, dpfc_ctl & ~DPFC_CTL_EN);
++		POSTING_READ(ILK_DPFC_CONTROL);
++		intel_wait_for_vblank(dev, intel_crtc->pipe);
++	}
++
+ 	dev_priv->cfb_pitch = (dev_priv->cfb_pitch / 64) - 1;
+ 	dev_priv->cfb_fence = obj_priv->fence_reg;
+ 	dev_priv->cfb_plane = intel_crtc->plane;
++	dev_priv->cfb_offset = obj_priv->gtt_offset;
++	dev_priv->cfb_y = crtc->y;
+ 
+-	dpfc_ctl = I915_READ(ILK_DPFC_CONTROL);
+ 	dpfc_ctl &= DPFC_RESERVED;
+ 	dpfc_ctl |= (plane | DPFC_CTL_LIMIT_1X);
+ 	if (obj_priv->tiling_mode != I915_TILING_NONE) {
+@@ -1221,15 +1256,13 @@ static void ironlake_enable_fbc(struct drm_crtc *crtc, unsigned long interval)
+ 		I915_WRITE(ILK_DPFC_CHICKEN, ~DPFC_HT_MODIFY);
+ 	}
+ 
+-	I915_WRITE(ILK_DPFC_CONTROL, dpfc_ctl);
+ 	I915_WRITE(ILK_DPFC_RECOMP_CTL, DPFC_RECOMP_STALL_EN |
+ 		   (stall_watermark << DPFC_RECOMP_STALL_WM_SHIFT) |
+ 		   (interval << DPFC_RECOMP_TIMER_COUNT_SHIFT));
+ 	I915_WRITE(ILK_DPFC_FENCE_YOFF, crtc->y);
+ 	I915_WRITE(ILK_FBC_RT_BASE, obj_priv->gtt_offset | ILK_FBC_RT_VALID);
+ 	/* enable it... */
+-	I915_WRITE(ILK_DPFC_CONTROL, I915_READ(ILK_DPFC_CONTROL) |
+-		   DPFC_CTL_EN);
++	I915_WRITE(ILK_DPFC_CONTROL, dpfc_ctl | DPFC_CTL_EN);
+ 
+ 	DRM_DEBUG_KMS("enabled fbc on plane %d\n", intel_crtc->plane);
+ }
+@@ -1241,10 +1274,12 @@ void ironlake_disable_fbc(struct drm_device *dev)
+ 
+ 	/* Disable compression */
+ 	dpfc_ctl = I915_READ(ILK_DPFC_CONTROL);
+-	dpfc_ctl &= ~DPFC_CTL_EN;
+-	I915_WRITE(ILK_DPFC_CONTROL, dpfc_ctl);
++	if (dpfc_ctl & DPFC_CTL_EN) {
++		dpfc_ctl &= ~DPFC_CTL_EN;
++		I915_WRITE(ILK_DPFC_CONTROL, dpfc_ctl);
+ 
+-	DRM_DEBUG_KMS("disabled FBC\n");
++		DRM_DEBUG_KMS("disabled FBC\n");
++	}
+ }
+ 
+ static bool ironlake_fbc_enabled(struct drm_device *dev)
+@@ -1286,8 +1321,7 @@ void intel_disable_fbc(struct drm_device *dev)
+ 
+ /**
+  * intel_update_fbc - enable/disable FBC as needed
+- * @crtc: CRTC to point the compressor at
+- * @mode: mode in use
++ * @dev: the drm_device
+  *
+  * Set up the framebuffer compression hardware at mode set time.  We
+  * enable it if possible:
+@@ -1304,18 +1338,14 @@ void intel_disable_fbc(struct drm_device *dev)
+  *
+  * We need to enable/disable FBC on a global basis.
+  */
+-static void intel_update_fbc(struct drm_crtc *crtc,
+-			     struct drm_display_mode *mode)
++static void intel_update_fbc(struct drm_device *dev)
+ {
+-	struct drm_device *dev = crtc->dev;
+ 	struct drm_i915_private *dev_priv = dev->dev_private;
+-	struct drm_framebuffer *fb = crtc->fb;
++	struct drm_crtc *crtc = NULL, *tmp_crtc;
++	struct intel_crtc *intel_crtc;
++	struct drm_framebuffer *fb;
+ 	struct intel_framebuffer *intel_fb;
+ 	struct drm_i915_gem_object *obj_priv;
+-	struct drm_crtc *tmp_crtc;
+-	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+-	int plane = intel_crtc->plane;
+-	int crtcs_enabled = 0;
+ 
+ 	DRM_DEBUG_KMS("\n");
+ 
+@@ -1325,12 +1355,6 @@ static void intel_update_fbc(struct drm_crtc *crtc,
+ 	if (!I915_HAS_FBC(dev))
+ 		return;
+ 
+-	if (!crtc->fb)
+-		return;
+-
+-	intel_fb = to_intel_framebuffer(fb);
+-	obj_priv = to_intel_bo(intel_fb->obj);
+-
+ 	/*
+ 	 * If FBC is already on, we just have to verify that we can
+ 	 * keep it that way...
+@@ -1341,35 +1365,47 @@ static void intel_update_fbc(struct drm_crtc *crtc,
+ 	 *   - going to an unsupported config (interlace, pixel multiply, etc.)
+ 	 */
+ 	list_for_each_entry(tmp_crtc, &dev->mode_config.crtc_list, head) {
+-		if (tmp_crtc->enabled)
+-			crtcs_enabled++;
++		if (tmp_crtc->enabled) {
++			if (crtc) {
++				DRM_DEBUG_KMS("more than one pipe active, disabling compression\n");
++				dev_priv->no_fbc_reason = FBC_MULTIPLE_PIPES;
++				goto out_disable;
++			}
++			crtc = tmp_crtc;
++		}
+ 	}
+-	DRM_DEBUG_KMS("%d pipes active\n", crtcs_enabled);
+-	if (crtcs_enabled > 1) {
+-		DRM_DEBUG_KMS("more than one pipe active, disabling compression\n");
+-		dev_priv->no_fbc_reason = FBC_MULTIPLE_PIPES;
++
++	if (!crtc || crtc->fb == NULL) {
++		DRM_DEBUG_KMS("no output, disabling\n");
++		dev_priv->no_fbc_reason = FBC_NO_OUTPUT;
+ 		goto out_disable;
+ 	}
++
++	intel_crtc = to_intel_crtc(crtc);
++	fb = crtc->fb;
++	intel_fb = to_intel_framebuffer(fb);
++	obj_priv = to_intel_bo(intel_fb->obj);
++
+ 	if (intel_fb->obj->size > dev_priv->cfb_size) {
+ 		DRM_DEBUG_KMS("framebuffer too large, disabling "
+-				"compression\n");
++			      "compression\n");
+ 		dev_priv->no_fbc_reason = FBC_STOLEN_TOO_SMALL;
+ 		goto out_disable;
+ 	}
+-	if ((mode->flags & DRM_MODE_FLAG_INTERLACE) ||
+-	    (mode->flags & DRM_MODE_FLAG_DBLSCAN)) {
++	if ((crtc->mode.flags & DRM_MODE_FLAG_INTERLACE) ||
++	    (crtc->mode.flags & DRM_MODE_FLAG_DBLSCAN)) {
+ 		DRM_DEBUG_KMS("mode incompatible with compression, "
+-				"disabling\n");
++			      "disabling\n");
+ 		dev_priv->no_fbc_reason = FBC_UNSUPPORTED_MODE;
+ 		goto out_disable;
+ 	}
+-	if ((mode->hdisplay > 2048) ||
+-	    (mode->vdisplay > 1536)) {
++	if ((crtc->mode.hdisplay > 2048) ||
++	    (crtc->mode.vdisplay > 1536)) {
+ 		DRM_DEBUG_KMS("mode too large for compression, disabling\n");
+ 		dev_priv->no_fbc_reason = FBC_MODE_TOO_LARGE;
+ 		goto out_disable;
+ 	}
+-	if ((IS_I915GM(dev) || IS_I945GM(dev)) && plane != 0) {
++	if ((IS_I915GM(dev) || IS_I945GM(dev)) && intel_crtc->plane != 0) {
+ 		DRM_DEBUG_KMS("plane not 0, disabling compression\n");
+ 		dev_priv->no_fbc_reason = FBC_BAD_PLANE;
+ 		goto out_disable;
+@@ -1384,18 +1420,7 @@ static void intel_update_fbc(struct drm_crtc *crtc,
+ 	if (in_dbg_master())
+ 		goto out_disable;
+ 
+-	if (intel_fbc_enabled(dev)) {
+-		/* We can re-enable it in this case, but need to update pitch */
+-		if ((fb->pitch > dev_priv->cfb_pitch) ||
+-		    (obj_priv->fence_reg != dev_priv->cfb_fence) ||
+-		    (plane != dev_priv->cfb_plane))
+-			intel_disable_fbc(dev);
+-	}
+-
+-	/* Now try to turn it back on if possible */
+-	if (!intel_fbc_enabled(dev))
+-		intel_enable_fbc(crtc, 500);
+-
++	intel_enable_fbc(crtc, 500);
+ 	return;
+ 
+ out_disable:
+@@ -1407,7 +1432,9 @@ out_disable:
+ }
+ 
+ int
+-intel_pin_and_fence_fb_obj(struct drm_device *dev, struct drm_gem_object *obj)
++intel_pin_and_fence_fb_obj(struct drm_device *dev,
++			   struct drm_gem_object *obj,
++			   bool pipelined)
+ {
+ 	struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
+ 	u32 alignment;
+@@ -1417,7 +1444,7 @@ intel_pin_and_fence_fb_obj(struct drm_device *dev, struct drm_gem_object *obj)
+ 	case I915_TILING_NONE:
+ 		if (IS_BROADWATER(dev) || IS_CRESTLINE(dev))
+ 			alignment = 128 * 1024;
+-		else if (IS_I965G(dev))
++		else if (INTEL_INFO(dev)->gen >= 4)
+ 			alignment = 4 * 1024;
+ 		else
+ 			alignment = 64 * 1024;
+@@ -1435,9 +1462,13 @@ intel_pin_and_fence_fb_obj(struct drm_device *dev, struct drm_gem_object *obj)
+ 	}
+ 
+ 	ret = i915_gem_object_pin(obj, alignment);
+-	if (ret != 0)
++	if (ret)
+ 		return ret;
+ 
++	ret = i915_gem_object_set_to_display_plane(obj, pipelined);
++	if (ret)
++		goto err_unpin;
++
+ 	/* Install a fence for tiled scan-out. Pre-i965 always needs a
+ 	 * fence, whereas 965+ only requires a fence if using
+ 	 * framebuffer compression.  For simplicity, we always install
+@@ -1445,14 +1476,16 @@ intel_pin_and_fence_fb_obj(struct drm_device *dev, struct drm_gem_object *obj)
+ 	 */
+ 	if (obj_priv->fence_reg == I915_FENCE_REG_NONE &&
+ 	    obj_priv->tiling_mode != I915_TILING_NONE) {
+-		ret = i915_gem_object_get_fence_reg(obj);
+-		if (ret != 0) {
+-			i915_gem_object_unpin(obj);
+-			return ret;
+-		}
++		ret = i915_gem_object_get_fence_reg(obj, false);
++		if (ret)
++			goto err_unpin;
+ 	}
+ 
+ 	return 0;
++
++err_unpin:
++	i915_gem_object_unpin(obj);
++	return ret;
+ }
+ 
+ /* Assume fb object is pinned & idle & fenced and just update base pointers */
+@@ -1468,12 +1501,8 @@ intel_pipe_set_base_atomic(struct drm_crtc *crtc, struct drm_framebuffer *fb,
+ 	struct drm_gem_object *obj;
+ 	int plane = intel_crtc->plane;
+ 	unsigned long Start, Offset;
+-	int dspbase = (plane == 0 ? DSPAADDR : DSPBADDR);
+-	int dspsurf = (plane == 0 ? DSPASURF : DSPBSURF);
+-	int dspstride = (plane == 0) ? DSPASTRIDE : DSPBSTRIDE;
+-	int dsptileoff = (plane == 0 ? DSPATILEOFF : DSPBTILEOFF);
+-	int dspcntr_reg = (plane == 0) ? DSPACNTR : DSPBCNTR;
+ 	u32 dspcntr;
++	u32 reg;
+ 
+ 	switch (plane) {
+ 	case 0:
+@@ -1488,7 +1517,8 @@ intel_pipe_set_base_atomic(struct drm_crtc *crtc, struct drm_framebuffer *fb,
+ 	obj = intel_fb->obj;
+ 	obj_priv = to_intel_bo(obj);
+ 
+-	dspcntr = I915_READ(dspcntr_reg);
++	reg = DSPCNTR(plane);
++	dspcntr = I915_READ(reg);
+ 	/* Mask out pixel format bits in case we change it */
+ 	dspcntr &= ~DISPPLANE_PIXFORMAT_MASK;
+ 	switch (fb->bits_per_pixel) {
+@@ -1509,7 +1539,7 @@ intel_pipe_set_base_atomic(struct drm_crtc *crtc, struct drm_framebuffer *fb,
+ 		DRM_ERROR("Unknown color depth\n");
+ 		return -EINVAL;
+ 	}
+-	if (IS_I965G(dev)) {
++	if (INTEL_INFO(dev)->gen >= 4) {
+ 		if (obj_priv->tiling_mode != I915_TILING_NONE)
+ 			dspcntr |= DISPPLANE_TILED;
+ 		else
+@@ -1520,28 +1550,24 @@ intel_pipe_set_base_atomic(struct drm_crtc *crtc, struct drm_framebuffer *fb,
+ 		/* must disable */
+ 		dspcntr |= DISPPLANE_TRICKLE_FEED_DISABLE;
+ 
+-	I915_WRITE(dspcntr_reg, dspcntr);
++	I915_WRITE(reg, dspcntr);
+ 
+ 	Start = obj_priv->gtt_offset;
+ 	Offset = y * fb->pitch + x * (fb->bits_per_pixel / 8);
+ 
+ 	DRM_DEBUG_KMS("Writing base %08lX %08lX %d %d %d\n",
+ 		      Start, Offset, x, y, fb->pitch);
+-	I915_WRITE(dspstride, fb->pitch);
+-	if (IS_I965G(dev)) {
+-		I915_WRITE(dspsurf, Start);
+-		I915_WRITE(dsptileoff, (y << 16) | x);
+-		I915_WRITE(dspbase, Offset);
+-	} else {
+-		I915_WRITE(dspbase, Start + Offset);
+-	}
+-	POSTING_READ(dspbase);
+-
+-	if (IS_I965G(dev) || plane == 0)
+-		intel_update_fbc(crtc, &crtc->mode);
++	I915_WRITE(DSPSTRIDE(plane), fb->pitch);
++	if (INTEL_INFO(dev)->gen >= 4) {
++		I915_WRITE(DSPSURF(plane), Start);
++		I915_WRITE(DSPTILEOFF(plane), (y << 16) | x);
++		I915_WRITE(DSPADDR(plane), Offset);
++	} else
++		I915_WRITE(DSPADDR(plane), Start + Offset);
++	POSTING_READ(reg);
+ 
+-	intel_wait_for_vblank(dev, intel_crtc->pipe);
+-	intel_increase_pllclock(crtc, true);
++	intel_update_fbc(dev);
++	intel_increase_pllclock(crtc);
+ 
+ 	return 0;
+ }
+@@ -1553,11 +1579,6 @@ intel_pipe_set_base(struct drm_crtc *crtc, int x, int y,
+ 	struct drm_device *dev = crtc->dev;
+ 	struct drm_i915_master_private *master_priv;
+ 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+-	struct intel_framebuffer *intel_fb;
+-	struct drm_i915_gem_object *obj_priv;
+-	struct drm_gem_object *obj;
+-	int pipe = intel_crtc->pipe;
+-	int plane = intel_crtc->plane;
+ 	int ret;
+ 
+ 	/* no fb bound */
+@@ -1566,45 +1587,41 @@ intel_pipe_set_base(struct drm_crtc *crtc, int x, int y,
+ 		return 0;
+ 	}
+ 
+-	switch (plane) {
++	switch (intel_crtc->plane) {
+ 	case 0:
+ 	case 1:
+ 		break;
+ 	default:
+-		DRM_ERROR("Can't update plane %d in SAREA\n", plane);
+ 		return -EINVAL;
+ 	}
+ 
+-	intel_fb = to_intel_framebuffer(crtc->fb);
+-	obj = intel_fb->obj;
+-	obj_priv = to_intel_bo(obj);
+-
+ 	mutex_lock(&dev->struct_mutex);
+-	ret = intel_pin_and_fence_fb_obj(dev, obj);
++	ret = intel_pin_and_fence_fb_obj(dev,
++					 to_intel_framebuffer(crtc->fb)->obj,
++					 false);
+ 	if (ret != 0) {
+ 		mutex_unlock(&dev->struct_mutex);
+ 		return ret;
+ 	}
+ 
+-	ret = i915_gem_object_set_to_display_plane(obj);
+-	if (ret != 0) {
+-		i915_gem_object_unpin(obj);
+-		mutex_unlock(&dev->struct_mutex);
+-		return ret;
++	if (old_fb) {
++		struct drm_i915_private *dev_priv = dev->dev_private;
++		struct drm_gem_object *obj = to_intel_framebuffer(old_fb)->obj;
++		struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
++
++		wait_event(dev_priv->pending_flip_queue,
++			   atomic_read(&obj_priv->pending_flip) == 0);
+ 	}
+ 
+ 	ret = intel_pipe_set_base_atomic(crtc, crtc->fb, x, y);
+ 	if (ret) {
+-		i915_gem_object_unpin(obj);
++		i915_gem_object_unpin(to_intel_framebuffer(crtc->fb)->obj);
+ 		mutex_unlock(&dev->struct_mutex);
+ 		return ret;
+ 	}
+ 
+-	if (old_fb) {
+-		intel_fb = to_intel_framebuffer(old_fb);
+-		obj_priv = to_intel_bo(intel_fb->obj);
+-		i915_gem_object_unpin(intel_fb->obj);
+-	}
++	if (old_fb)
++		i915_gem_object_unpin(to_intel_framebuffer(old_fb)->obj);
+ 
+ 	mutex_unlock(&dev->struct_mutex);
+ 
+@@ -1615,7 +1632,7 @@ intel_pipe_set_base(struct drm_crtc *crtc, int x, int y,
+ 	if (!master_priv->sarea_priv)
+ 		return 0;
+ 
+-	if (pipe) {
++	if (intel_crtc->pipe) {
+ 		master_priv->sarea_priv->pipeB_x = x;
+ 		master_priv->sarea_priv->pipeB_y = y;
+ 	} else {
+@@ -1626,7 +1643,7 @@ intel_pipe_set_base(struct drm_crtc *crtc, int x, int y,
+ 	return 0;
+ }
+ 
+-static void ironlake_set_pll_edp (struct drm_crtc *crtc, int clock)
++static void ironlake_set_pll_edp(struct drm_crtc *crtc, int clock)
+ {
+ 	struct drm_device *dev = crtc->dev;
+ 	struct drm_i915_private *dev_priv = dev->dev_private;
+@@ -1659,9 +1676,41 @@ static void ironlake_set_pll_edp (struct drm_crtc *crtc, int clock)
+ 	}
+ 	I915_WRITE(DP_A, dpa_ctl);
+ 
++	POSTING_READ(DP_A);
+ 	udelay(500);
+ }
+ 
++static void intel_fdi_normal_train(struct drm_crtc *crtc)
++{
++	struct drm_device *dev = crtc->dev;
++	struct drm_i915_private *dev_priv = dev->dev_private;
++	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
++	int pipe = intel_crtc->pipe;
++	u32 reg, temp;
++
++	/* enable normal train */
++	reg = FDI_TX_CTL(pipe);
++	temp = I915_READ(reg);
++	temp &= ~FDI_LINK_TRAIN_NONE;
++	temp |= FDI_LINK_TRAIN_NONE | FDI_TX_ENHANCE_FRAME_ENABLE;
++	I915_WRITE(reg, temp);
++
++	reg = FDI_RX_CTL(pipe);
++	temp = I915_READ(reg);
++	if (HAS_PCH_CPT(dev)) {
++		temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
++		temp |= FDI_LINK_TRAIN_NORMAL_CPT;
++	} else {
++		temp &= ~FDI_LINK_TRAIN_NONE;
++		temp |= FDI_LINK_TRAIN_NONE;
++	}
++	I915_WRITE(reg, temp | FDI_RX_ENHANCE_FRAME_ENABLE);
++
++	/* wait one idle pattern time */
++	POSTING_READ(reg);
++	udelay(1000);
++}
++
+ /* The FDI link training functions for ILK/Ibexpeak. */
+ static void ironlake_fdi_link_train(struct drm_crtc *crtc)
+ {
+@@ -1669,84 +1718,88 @@ static void ironlake_fdi_link_train(struct drm_crtc *crtc)
+ 	struct drm_i915_private *dev_priv = dev->dev_private;
+ 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+ 	int pipe = intel_crtc->pipe;
+-	int fdi_tx_reg = (pipe == 0) ? FDI_TXA_CTL : FDI_TXB_CTL;
+-	int fdi_rx_reg = (pipe == 0) ? FDI_RXA_CTL : FDI_RXB_CTL;
+-	int fdi_rx_iir_reg = (pipe == 0) ? FDI_RXA_IIR : FDI_RXB_IIR;
+-	int fdi_rx_imr_reg = (pipe == 0) ? FDI_RXA_IMR : FDI_RXB_IMR;
+-	u32 temp, tries = 0;
++	u32 reg, temp, tries;
+ 
+ 	/* Train 1: umask FDI RX Interrupt symbol_lock and bit_lock bit
+ 	   for train result */
+-	temp = I915_READ(fdi_rx_imr_reg);
++	reg = FDI_RX_IMR(pipe);
++	temp = I915_READ(reg);
+ 	temp &= ~FDI_RX_SYMBOL_LOCK;
+ 	temp &= ~FDI_RX_BIT_LOCK;
+-	I915_WRITE(fdi_rx_imr_reg, temp);
+-	I915_READ(fdi_rx_imr_reg);
++	I915_WRITE(reg, temp);
++	I915_READ(reg);
+ 	udelay(150);
+ 
+ 	/* enable CPU FDI TX and PCH FDI RX */
+-	temp = I915_READ(fdi_tx_reg);
+-	temp |= FDI_TX_ENABLE;
++	reg = FDI_TX_CTL(pipe);
++	temp = I915_READ(reg);
+ 	temp &= ~(7 << 19);
+ 	temp |= (intel_crtc->fdi_lanes - 1) << 19;
+ 	temp &= ~FDI_LINK_TRAIN_NONE;
+ 	temp |= FDI_LINK_TRAIN_PATTERN_1;
+-	I915_WRITE(fdi_tx_reg, temp);
+-	I915_READ(fdi_tx_reg);
++	I915_WRITE(reg, temp | FDI_TX_ENABLE);
+ 
+-	temp = I915_READ(fdi_rx_reg);
++	reg = FDI_RX_CTL(pipe);
++	temp = I915_READ(reg);
+ 	temp &= ~FDI_LINK_TRAIN_NONE;
+ 	temp |= FDI_LINK_TRAIN_PATTERN_1;
+-	I915_WRITE(fdi_rx_reg, temp | FDI_RX_ENABLE);
+-	I915_READ(fdi_rx_reg);
++	I915_WRITE(reg, temp | FDI_RX_ENABLE);
++
++	POSTING_READ(reg);
+ 	udelay(150);
+ 
++	/* Ironlake workaround, enable clock pointer after FDI enable */
++	I915_WRITE(FDI_RX_CHICKEN(pipe), FDI_RX_PHASE_SYNC_POINTER_ENABLE);
++
++	reg = FDI_RX_IIR(pipe);
+ 	for (tries = 0; tries < 5; tries++) {
+-		temp = I915_READ(fdi_rx_iir_reg);
++		temp = I915_READ(reg);
+ 		DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);
+ 
+ 		if ((temp & FDI_RX_BIT_LOCK)) {
+ 			DRM_DEBUG_KMS("FDI train 1 done.\n");
+-			I915_WRITE(fdi_rx_iir_reg,
+-				   temp | FDI_RX_BIT_LOCK);
++			I915_WRITE(reg, temp | FDI_RX_BIT_LOCK);
+ 			break;
+ 		}
+ 	}
+ 	if (tries == 5)
+-		DRM_DEBUG_KMS("FDI train 1 fail!\n");
++		DRM_ERROR("FDI train 1 fail!\n");
+ 
+ 	/* Train 2 */
+-	temp = I915_READ(fdi_tx_reg);
++	reg = FDI_TX_CTL(pipe);
++	temp = I915_READ(reg);
+ 	temp &= ~FDI_LINK_TRAIN_NONE;
+ 	temp |= FDI_LINK_TRAIN_PATTERN_2;
+-	I915_WRITE(fdi_tx_reg, temp);
++	I915_WRITE(reg, temp);
+ 
+-	temp = I915_READ(fdi_rx_reg);
++	reg = FDI_RX_CTL(pipe);
++	temp = I915_READ(reg);
+ 	temp &= ~FDI_LINK_TRAIN_NONE;
+ 	temp |= FDI_LINK_TRAIN_PATTERN_2;
+-	I915_WRITE(fdi_rx_reg, temp);
+-	udelay(150);
++	I915_WRITE(reg, temp);
+ 
+-	tries = 0;
++	POSTING_READ(reg);
++	udelay(150);
+ 
++	reg = FDI_RX_IIR(pipe);
+ 	for (tries = 0; tries < 5; tries++) {
+-		temp = I915_READ(fdi_rx_iir_reg);
++		temp = I915_READ(reg);
+ 		DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);
+ 
+ 		if (temp & FDI_RX_SYMBOL_LOCK) {
+-			I915_WRITE(fdi_rx_iir_reg,
+-				   temp | FDI_RX_SYMBOL_LOCK);
++			I915_WRITE(reg, temp | FDI_RX_SYMBOL_LOCK);
+ 			DRM_DEBUG_KMS("FDI train 2 done.\n");
+ 			break;
+ 		}
+ 	}
+ 	if (tries == 5)
+-		DRM_DEBUG_KMS("FDI train 2 fail!\n");
++		DRM_ERROR("FDI train 2 fail!\n");
+ 
+ 	DRM_DEBUG_KMS("FDI train done\n");
++
+ }
+ 
+-static int snb_b_fdi_train_param [] = {
++static const int snb_b_fdi_train_param[] = {
+ 	FDI_LINK_TRAIN_400MV_0DB_SNB_B,
+ 	FDI_LINK_TRAIN_400MV_6DB_SNB_B,
+ 	FDI_LINK_TRAIN_600MV_3_5DB_SNB_B,
+@@ -1760,24 +1813,22 @@ static void gen6_fdi_link_train(struct drm_crtc *crtc)
+ 	struct drm_i915_private *dev_priv = dev->dev_private;
+ 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+ 	int pipe = intel_crtc->pipe;
+-	int fdi_tx_reg = (pipe == 0) ? FDI_TXA_CTL : FDI_TXB_CTL;
+-	int fdi_rx_reg = (pipe == 0) ? FDI_RXA_CTL : FDI_RXB_CTL;
+-	int fdi_rx_iir_reg = (pipe == 0) ? FDI_RXA_IIR : FDI_RXB_IIR;
+-	int fdi_rx_imr_reg = (pipe == 0) ? FDI_RXA_IMR : FDI_RXB_IMR;
+-	u32 temp, i;
++	u32 reg, temp, i;
+ 
+ 	/* Train 1: umask FDI RX Interrupt symbol_lock and bit_lock bit
+ 	   for train result */
+-	temp = I915_READ(fdi_rx_imr_reg);
++	reg = FDI_RX_IMR(pipe);
++	temp = I915_READ(reg);
+ 	temp &= ~FDI_RX_SYMBOL_LOCK;
+ 	temp &= ~FDI_RX_BIT_LOCK;
+-	I915_WRITE(fdi_rx_imr_reg, temp);
+-	I915_READ(fdi_rx_imr_reg);
++	I915_WRITE(reg, temp);
++
++	POSTING_READ(reg);
+ 	udelay(150);
+ 
+ 	/* enable CPU FDI TX and PCH FDI RX */
+-	temp = I915_READ(fdi_tx_reg);
+-	temp |= FDI_TX_ENABLE;
++	reg = FDI_TX_CTL(pipe);
++	temp = I915_READ(reg);
+ 	temp &= ~(7 << 19);
+ 	temp |= (intel_crtc->fdi_lanes - 1) << 19;
+ 	temp &= ~FDI_LINK_TRAIN_NONE;
+@@ -1785,10 +1836,10 @@ static void gen6_fdi_link_train(struct drm_crtc *crtc)
+ 	temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
+ 	/* SNB-B */
+ 	temp |= FDI_LINK_TRAIN_400MV_0DB_SNB_B;
+-	I915_WRITE(fdi_tx_reg, temp);
+-	I915_READ(fdi_tx_reg);
++	I915_WRITE(reg, temp | FDI_TX_ENABLE);
+ 
+-	temp = I915_READ(fdi_rx_reg);
++	reg = FDI_RX_CTL(pipe);
++	temp = I915_READ(reg);
+ 	if (HAS_PCH_CPT(dev)) {
+ 		temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
+ 		temp |= FDI_LINK_TRAIN_PATTERN_1_CPT;
+@@ -1796,32 +1847,37 @@ static void gen6_fdi_link_train(struct drm_crtc *crtc)
+ 		temp &= ~FDI_LINK_TRAIN_NONE;
+ 		temp |= FDI_LINK_TRAIN_PATTERN_1;
+ 	}
+-	I915_WRITE(fdi_rx_reg, temp | FDI_RX_ENABLE);
+-	I915_READ(fdi_rx_reg);
++	I915_WRITE(reg, temp | FDI_RX_ENABLE);
++
++	POSTING_READ(reg);
+ 	udelay(150);
+ 
+ 	for (i = 0; i < 4; i++ ) {
+-		temp = I915_READ(fdi_tx_reg);
++		reg = FDI_TX_CTL(pipe);
++		temp = I915_READ(reg);
+ 		temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
+ 		temp |= snb_b_fdi_train_param[i];
+-		I915_WRITE(fdi_tx_reg, temp);
++		I915_WRITE(reg, temp);
++
++		POSTING_READ(reg);
+ 		udelay(500);
+ 
+-		temp = I915_READ(fdi_rx_iir_reg);
++		reg = FDI_RX_IIR(pipe);
++		temp = I915_READ(reg);
+ 		DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);
+ 
+ 		if (temp & FDI_RX_BIT_LOCK) {
+-			I915_WRITE(fdi_rx_iir_reg,
+-				   temp | FDI_RX_BIT_LOCK);
++			I915_WRITE(reg, temp | FDI_RX_BIT_LOCK);
+ 			DRM_DEBUG_KMS("FDI train 1 done.\n");
+ 			break;
+ 		}
+ 	}
+ 	if (i == 4)
+-		DRM_DEBUG_KMS("FDI train 1 fail!\n");
++		DRM_ERROR("FDI train 1 fail!\n");
+ 
+ 	/* Train 2 */
+-	temp = I915_READ(fdi_tx_reg);
++	reg = FDI_TX_CTL(pipe);
++	temp = I915_READ(reg);
+ 	temp &= ~FDI_LINK_TRAIN_NONE;
+ 	temp |= FDI_LINK_TRAIN_PATTERN_2;
+ 	if (IS_GEN6(dev)) {
+@@ -1829,9 +1885,10 @@ static void gen6_fdi_link_train(struct drm_crtc *crtc)
+ 		/* SNB-B */
+ 		temp |= FDI_LINK_TRAIN_400MV_0DB_SNB_B;
+ 	}
+-	I915_WRITE(fdi_tx_reg, temp);
++	I915_WRITE(reg, temp);
+ 
+-	temp = I915_READ(fdi_rx_reg);
++	reg = FDI_RX_CTL(pipe);
++	temp = I915_READ(reg);
+ 	if (HAS_PCH_CPT(dev)) {
+ 		temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
+ 		temp |= FDI_LINK_TRAIN_PATTERN_2_CPT;
+@@ -1839,535 +1896,596 @@ static void gen6_fdi_link_train(struct drm_crtc *crtc)
+ 		temp &= ~FDI_LINK_TRAIN_NONE;
+ 		temp |= FDI_LINK_TRAIN_PATTERN_2;
+ 	}
+-	I915_WRITE(fdi_rx_reg, temp);
++	I915_WRITE(reg, temp);
++
++	POSTING_READ(reg);
+ 	udelay(150);
+ 
+ 	for (i = 0; i < 4; i++ ) {
+-		temp = I915_READ(fdi_tx_reg);
++		reg = FDI_TX_CTL(pipe);
++		temp = I915_READ(reg);
+ 		temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
+ 		temp |= snb_b_fdi_train_param[i];
+-		I915_WRITE(fdi_tx_reg, temp);
++		I915_WRITE(reg, temp);
++
++		POSTING_READ(reg);
+ 		udelay(500);
+ 
+-		temp = I915_READ(fdi_rx_iir_reg);
++		reg = FDI_RX_IIR(pipe);
++		temp = I915_READ(reg);
+ 		DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);
+ 
+ 		if (temp & FDI_RX_SYMBOL_LOCK) {
+-			I915_WRITE(fdi_rx_iir_reg,
+-				   temp | FDI_RX_SYMBOL_LOCK);
++			I915_WRITE(reg, temp | FDI_RX_SYMBOL_LOCK);
+ 			DRM_DEBUG_KMS("FDI train 2 done.\n");
+ 			break;
+ 		}
+ 	}
+ 	if (i == 4)
+-		DRM_DEBUG_KMS("FDI train 2 fail!\n");
++		DRM_ERROR("FDI train 2 fail!\n");
+ 
+ 	DRM_DEBUG_KMS("FDI train done.\n");
+ }
+ 
+-static void ironlake_crtc_dpms(struct drm_crtc *crtc, int mode)
++static void ironlake_fdi_enable(struct drm_crtc *crtc)
+ {
+ 	struct drm_device *dev = crtc->dev;
+ 	struct drm_i915_private *dev_priv = dev->dev_private;
+ 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+ 	int pipe = intel_crtc->pipe;
+-	int plane = intel_crtc->plane;
+-	int pch_dpll_reg = (pipe == 0) ? PCH_DPLL_A : PCH_DPLL_B;
+-	int pipeconf_reg = (pipe == 0) ? PIPEACONF : PIPEBCONF;
+-	int dspcntr_reg = (plane == 0) ? DSPACNTR : DSPBCNTR;
+-	int dspbase_reg = (plane == 0) ? DSPAADDR : DSPBADDR;
+-	int fdi_tx_reg = (pipe == 0) ? FDI_TXA_CTL : FDI_TXB_CTL;
+-	int fdi_rx_reg = (pipe == 0) ? FDI_RXA_CTL : FDI_RXB_CTL;
+-	int transconf_reg = (pipe == 0) ? TRANSACONF : TRANSBCONF;
+-	int cpu_htot_reg = (pipe == 0) ? HTOTAL_A : HTOTAL_B;
+-	int cpu_hblank_reg = (pipe == 0) ? HBLANK_A : HBLANK_B;
+-	int cpu_hsync_reg = (pipe == 0) ? HSYNC_A : HSYNC_B;
+-	int cpu_vtot_reg = (pipe == 0) ? VTOTAL_A : VTOTAL_B;
+-	int cpu_vblank_reg = (pipe == 0) ? VBLANK_A : VBLANK_B;
+-	int cpu_vsync_reg = (pipe == 0) ? VSYNC_A : VSYNC_B;
+-	int trans_htot_reg = (pipe == 0) ? TRANS_HTOTAL_A : TRANS_HTOTAL_B;
+-	int trans_hblank_reg = (pipe == 0) ? TRANS_HBLANK_A : TRANS_HBLANK_B;
+-	int trans_hsync_reg = (pipe == 0) ? TRANS_HSYNC_A : TRANS_HSYNC_B;
+-	int trans_vtot_reg = (pipe == 0) ? TRANS_VTOTAL_A : TRANS_VTOTAL_B;
+-	int trans_vblank_reg = (pipe == 0) ? TRANS_VBLANK_A : TRANS_VBLANK_B;
+-	int trans_vsync_reg = (pipe == 0) ? TRANS_VSYNC_A : TRANS_VSYNC_B;
+-	int trans_dpll_sel = (pipe == 0) ? 0 : 1;
+-	u32 temp;
+-	u32 pipe_bpc;
+-
+-	temp = I915_READ(pipeconf_reg);
+-	pipe_bpc = temp & PIPE_BPC_MASK;
++	u32 reg, temp;
+ 
+-	/* XXX: When our outputs are all unaware of DPMS modes other than off
+-	 * and on, we should map those modes to DRM_MODE_DPMS_OFF in the CRTC.
+-	 */
+-	switch (mode) {
+-	case DRM_MODE_DPMS_ON:
+-	case DRM_MODE_DPMS_STANDBY:
+-	case DRM_MODE_DPMS_SUSPEND:
+-		DRM_DEBUG_KMS("crtc %d/%d dpms on\n", pipe, plane);
++	/* Write the TU size bits so error detection works */
++	I915_WRITE(FDI_RX_TUSIZE1(pipe),
++		   I915_READ(PIPE_DATA_M1(pipe)) & TU_SIZE_MASK);
+ 
+-		if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) {
+-			temp = I915_READ(PCH_LVDS);
+-			if ((temp & LVDS_PORT_EN) == 0) {
+-				I915_WRITE(PCH_LVDS, temp | LVDS_PORT_EN);
+-				POSTING_READ(PCH_LVDS);
+-			}
+-		}
++	/* enable PCH FDI RX PLL, wait warmup plus DMI latency */
++	reg = FDI_RX_CTL(pipe);
++	temp = I915_READ(reg);
++	temp &= ~((0x7 << 19) | (0x7 << 16));
++	temp |= (intel_crtc->fdi_lanes - 1) << 19;
++	temp |= (I915_READ(PIPECONF(pipe)) & PIPE_BPC_MASK) << 11;
++	I915_WRITE(reg, temp | FDI_RX_PLL_ENABLE);
+ 
+-		if (!HAS_eDP) {
++	POSTING_READ(reg);
++	udelay(200);
+ 
+-			/* enable PCH FDI RX PLL, wait warmup plus DMI latency */
+-			temp = I915_READ(fdi_rx_reg);
+-			/*
+-			 * make the BPC in FDI Rx be consistent with that in
+-			 * pipeconf reg.
+-			 */
+-			temp &= ~(0x7 << 16);
+-			temp |= (pipe_bpc << 11);
+-			temp &= ~(7 << 19);
+-			temp |= (intel_crtc->fdi_lanes - 1) << 19;
+-			I915_WRITE(fdi_rx_reg, temp | FDI_RX_PLL_ENABLE);
+-			I915_READ(fdi_rx_reg);
+-			udelay(200);
++	/* Switch from Rawclk to PCDclk */
++	temp = I915_READ(reg);
++	I915_WRITE(reg, temp | FDI_PCDCLK);
+ 
+-			/* Switch from Rawclk to PCDclk */
+-			temp = I915_READ(fdi_rx_reg);
+-			I915_WRITE(fdi_rx_reg, temp | FDI_SEL_PCDCLK);
+-			I915_READ(fdi_rx_reg);
+-			udelay(200);
++	POSTING_READ(reg);
++	udelay(200);
+ 
+-			/* Enable CPU FDI TX PLL, always on for Ironlake */
+-			temp = I915_READ(fdi_tx_reg);
+-			if ((temp & FDI_TX_PLL_ENABLE) == 0) {
+-				I915_WRITE(fdi_tx_reg, temp | FDI_TX_PLL_ENABLE);
+-				I915_READ(fdi_tx_reg);
+-				udelay(100);
+-			}
+-		}
++	/* Enable CPU FDI TX PLL, always on for Ironlake */
++	reg = FDI_TX_CTL(pipe);
++	temp = I915_READ(reg);
++	if ((temp & FDI_TX_PLL_ENABLE) == 0) {
++		I915_WRITE(reg, temp | FDI_TX_PLL_ENABLE);
+ 
+-		/* Enable panel fitting for LVDS */
+-		if (dev_priv->pch_pf_size &&
+-		    (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)
+-		    || HAS_eDP || intel_pch_has_edp(crtc))) {
+-			/* Force use of hard-coded filter coefficients
+-			 * as some pre-programmed values are broken,
+-			 * e.g. x201.
+-			 */
+-			I915_WRITE(pipe ? PFB_CTL_1 : PFA_CTL_1,
+-				   PF_ENABLE | PF_FILTER_MED_3x3);
+-			I915_WRITE(pipe ? PFB_WIN_POS : PFA_WIN_POS,
+-				   dev_priv->pch_pf_pos);
+-			I915_WRITE(pipe ? PFB_WIN_SZ : PFA_WIN_SZ,
+-				   dev_priv->pch_pf_size);
+-		}
++		POSTING_READ(reg);
++		udelay(100);
++	}
++}
+ 
+-		/* Enable CPU pipe */
+-		temp = I915_READ(pipeconf_reg);
+-		if ((temp & PIPEACONF_ENABLE) == 0) {
+-			I915_WRITE(pipeconf_reg, temp | PIPEACONF_ENABLE);
+-			I915_READ(pipeconf_reg);
+-			udelay(100);
+-		}
++static void intel_flush_display_plane(struct drm_device *dev,
++				      int plane)
++{
++	struct drm_i915_private *dev_priv = dev->dev_private;
++	u32 reg = DSPADDR(plane);
++	I915_WRITE(reg, I915_READ(reg));
++}
+ 
+-		/* configure and enable CPU plane */
+-		temp = I915_READ(dspcntr_reg);
+-		if ((temp & DISPLAY_PLANE_ENABLE) == 0) {
+-			I915_WRITE(dspcntr_reg, temp | DISPLAY_PLANE_ENABLE);
+-			/* Flush the plane changes */
+-			I915_WRITE(dspbase_reg, I915_READ(dspbase_reg));
+-		}
++/*
++ * When we disable a pipe, we need to clear any pending scanline wait events
++ * to avoid hanging the ring, which we assume we are waiting on.
++ */
++static void intel_clear_scanline_wait(struct drm_device *dev)
++{
++	struct drm_i915_private *dev_priv = dev->dev_private;
++	u32 tmp;
+ 
+-		if (!HAS_eDP) {
+-			/* For PCH output, training FDI link */
+-			if (IS_GEN6(dev))
+-				gen6_fdi_link_train(crtc);
+-			else
+-				ironlake_fdi_link_train(crtc);
++	if (IS_GEN2(dev))
++		/* Can't break the hang on i8xx */
++		return;
+ 
+-			/* enable PCH DPLL */
+-			temp = I915_READ(pch_dpll_reg);
+-			if ((temp & DPLL_VCO_ENABLE) == 0) {
+-				I915_WRITE(pch_dpll_reg, temp | DPLL_VCO_ENABLE);
+-				I915_READ(pch_dpll_reg);
+-			}
+-			udelay(200);
++	tmp = I915_READ(PRB0_CTL);
++	if (tmp & RING_WAIT) {
++		I915_WRITE(PRB0_CTL, tmp);
++		POSTING_READ(PRB0_CTL);
++	}
++}
+ 
+-			if (HAS_PCH_CPT(dev)) {
+-				/* Be sure PCH DPLL SEL is set */
+-				temp = I915_READ(PCH_DPLL_SEL);
+-				if (trans_dpll_sel == 0 &&
+-						(temp & TRANSA_DPLL_ENABLE) == 0)
+-					temp |= (TRANSA_DPLL_ENABLE | TRANSA_DPLLA_SEL);
+-				else if (trans_dpll_sel == 1 &&
+-						(temp & TRANSB_DPLL_ENABLE) == 0)
+-					temp |= (TRANSB_DPLL_ENABLE | TRANSB_DPLLB_SEL);
+-				I915_WRITE(PCH_DPLL_SEL, temp);
+-				I915_READ(PCH_DPLL_SEL);
+-			}
++static void intel_crtc_wait_for_pending_flips(struct drm_crtc *crtc)
++{
++	struct drm_i915_gem_object *obj_priv;
++	struct drm_i915_private *dev_priv;
+ 
+-			/* set transcoder timing */
+-			I915_WRITE(trans_htot_reg, I915_READ(cpu_htot_reg));
+-			I915_WRITE(trans_hblank_reg, I915_READ(cpu_hblank_reg));
+-			I915_WRITE(trans_hsync_reg, I915_READ(cpu_hsync_reg));
+-
+-			I915_WRITE(trans_vtot_reg, I915_READ(cpu_vtot_reg));
+-			I915_WRITE(trans_vblank_reg, I915_READ(cpu_vblank_reg));
+-			I915_WRITE(trans_vsync_reg, I915_READ(cpu_vsync_reg));
+-
+-			/* enable normal train */
+-			temp = I915_READ(fdi_tx_reg);
+-			temp &= ~FDI_LINK_TRAIN_NONE;
+-			I915_WRITE(fdi_tx_reg, temp | FDI_LINK_TRAIN_NONE |
+-					FDI_TX_ENHANCE_FRAME_ENABLE);
+-			I915_READ(fdi_tx_reg);
+-
+-			temp = I915_READ(fdi_rx_reg);
+-			if (HAS_PCH_CPT(dev)) {
+-				temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
+-				temp |= FDI_LINK_TRAIN_NORMAL_CPT;
+-			} else {
+-				temp &= ~FDI_LINK_TRAIN_NONE;
+-				temp |= FDI_LINK_TRAIN_NONE;
+-			}
+-			I915_WRITE(fdi_rx_reg, temp | FDI_RX_ENHANCE_FRAME_ENABLE);
+-			I915_READ(fdi_rx_reg);
++	if (crtc->fb == NULL)
++		return;
+ 
+-			/* wait one idle pattern time */
+-			udelay(100);
++	obj_priv = to_intel_bo(to_intel_framebuffer(crtc->fb)->obj);
++	dev_priv = crtc->dev->dev_private;
++	wait_event(dev_priv->pending_flip_queue,
++		   atomic_read(&obj_priv->pending_flip) == 0);
++}
+ 
+-			/* For PCH DP, enable TRANS_DP_CTL */
+-			if (HAS_PCH_CPT(dev) &&
+-			    intel_pipe_has_type(crtc, INTEL_OUTPUT_DISPLAYPORT)) {
+-				int trans_dp_ctl = (pipe == 0) ? TRANS_DP_CTL_A : TRANS_DP_CTL_B;
+-				int reg;
+-
+-				reg = I915_READ(trans_dp_ctl);
+-				reg &= ~(TRANS_DP_PORT_SEL_MASK |
+-					 TRANS_DP_SYNC_MASK);
+-				reg |= (TRANS_DP_OUTPUT_ENABLE |
+-					TRANS_DP_ENH_FRAMING);
+-
+-				if (crtc->mode.flags & DRM_MODE_FLAG_PHSYNC)
+-				      reg |= TRANS_DP_HSYNC_ACTIVE_HIGH;
+-				if (crtc->mode.flags & DRM_MODE_FLAG_PVSYNC)
+-				      reg |= TRANS_DP_VSYNC_ACTIVE_HIGH;
+-
+-				switch (intel_trans_dp_port_sel(crtc)) {
+-				case PCH_DP_B:
+-					reg |= TRANS_DP_PORT_SEL_B;
+-					break;
+-				case PCH_DP_C:
+-					reg |= TRANS_DP_PORT_SEL_C;
+-					break;
+-				case PCH_DP_D:
+-					reg |= TRANS_DP_PORT_SEL_D;
+-					break;
+-				default:
+-					DRM_DEBUG_KMS("Wrong PCH DP port return. Guess port B\n");
+-					reg |= TRANS_DP_PORT_SEL_B;
+-					break;
+-				}
++static void ironlake_crtc_enable(struct drm_crtc *crtc)
++{
++	struct drm_device *dev = crtc->dev;
++	struct drm_i915_private *dev_priv = dev->dev_private;
++	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
++	int pipe = intel_crtc->pipe;
++	int plane = intel_crtc->plane;
++	u32 reg, temp;
+ 
+-				I915_WRITE(trans_dp_ctl, reg);
+-				POSTING_READ(trans_dp_ctl);
+-			}
++	if (intel_crtc->active)
++		return;
+ 
+-			/* enable PCH transcoder */
+-			temp = I915_READ(transconf_reg);
+-			/*
+-			 * make the BPC in transcoder be consistent with
+-			 * that in pipeconf reg.
+-			 */
+-			temp &= ~PIPE_BPC_MASK;
+-			temp |= pipe_bpc;
+-			I915_WRITE(transconf_reg, temp | TRANS_ENABLE);
+-			I915_READ(transconf_reg);
++	intel_crtc->active = true;
++	intel_update_watermarks(dev);
+ 
+-			if (wait_for(I915_READ(transconf_reg) & TRANS_STATE_ENABLE, 100, 1))
+-				DRM_ERROR("failed to enable transcoder\n");
+-		}
++	if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) {
++		temp = I915_READ(PCH_LVDS);
++		if ((temp & LVDS_PORT_EN) == 0)
++			I915_WRITE(PCH_LVDS, temp | LVDS_PORT_EN);
++	}
+ 
+-		intel_crtc_load_lut(crtc);
++	ironlake_fdi_enable(crtc);
+ 
+-		intel_update_fbc(crtc, &crtc->mode);
+-		break;
++	/* Enable panel fitting for LVDS */
++	if (dev_priv->pch_pf_size &&
++	    (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS) || HAS_eDP)) {
++		/* Force use of hard-coded filter coefficients
++		 * as some pre-programmed values are broken,
++		 * e.g. x201.
++		 */
++		I915_WRITE(pipe ? PFB_CTL_1 : PFA_CTL_1,
++			   PF_ENABLE | PF_FILTER_MED_3x3);
++		I915_WRITE(pipe ? PFB_WIN_POS : PFA_WIN_POS,
++			   dev_priv->pch_pf_pos);
++		I915_WRITE(pipe ? PFB_WIN_SZ : PFA_WIN_SZ,
++			   dev_priv->pch_pf_size);
++	}
++
++	/* Enable CPU pipe */
++	reg = PIPECONF(pipe);
++	temp = I915_READ(reg);
++	if ((temp & PIPECONF_ENABLE) == 0) {
++		I915_WRITE(reg, temp | PIPECONF_ENABLE);
++		POSTING_READ(reg);
++		intel_wait_for_vblank(dev, intel_crtc->pipe);
++	}
++
++	/* configure and enable CPU plane */
++	reg = DSPCNTR(plane);
++	temp = I915_READ(reg);
++	if ((temp & DISPLAY_PLANE_ENABLE) == 0) {
++		I915_WRITE(reg, temp | DISPLAY_PLANE_ENABLE);
++		intel_flush_display_plane(dev, plane);
++	}
++
++	/* For PCH output, training FDI link */
++	if (IS_GEN6(dev))
++		gen6_fdi_link_train(crtc);
++	else
++		ironlake_fdi_link_train(crtc);
++
++	/* enable PCH DPLL */
++	reg = PCH_DPLL(pipe);
++	temp = I915_READ(reg);
++	if ((temp & DPLL_VCO_ENABLE) == 0) {
++		I915_WRITE(reg, temp | DPLL_VCO_ENABLE);
++		POSTING_READ(reg);
++		udelay(200);
++	}
+ 
+-	case DRM_MODE_DPMS_OFF:
+-		DRM_DEBUG_KMS("crtc %d/%d dpms off\n", pipe, plane);
++	if (HAS_PCH_CPT(dev)) {
++		/* Be sure PCH DPLL SEL is set */
++		temp = I915_READ(PCH_DPLL_SEL);
++		if (pipe == 0 && (temp & TRANSA_DPLL_ENABLE) == 0)
++			temp |= (TRANSA_DPLL_ENABLE | TRANSA_DPLLA_SEL);
++		else if (pipe == 1 && (temp & TRANSB_DPLL_ENABLE) == 0)
++			temp |= (TRANSB_DPLL_ENABLE | TRANSB_DPLLB_SEL);
++		I915_WRITE(PCH_DPLL_SEL, temp);
++	}
++
++	/* set transcoder timing */
++	I915_WRITE(TRANS_HTOTAL(pipe), I915_READ(HTOTAL(pipe)));
++	I915_WRITE(TRANS_HBLANK(pipe), I915_READ(HBLANK(pipe)));
++	I915_WRITE(TRANS_HSYNC(pipe),  I915_READ(HSYNC(pipe)));
++
++	I915_WRITE(TRANS_VTOTAL(pipe), I915_READ(VTOTAL(pipe)));
++	I915_WRITE(TRANS_VBLANK(pipe), I915_READ(VBLANK(pipe)));
++	I915_WRITE(TRANS_VSYNC(pipe),  I915_READ(VSYNC(pipe)));
++
++	intel_fdi_normal_train(crtc);
++
++	/* For PCH DP, enable TRANS_DP_CTL */
++	if (HAS_PCH_CPT(dev) &&
++	    intel_pipe_has_type(crtc, INTEL_OUTPUT_DISPLAYPORT)) {
++		reg = TRANS_DP_CTL(pipe);
++		temp = I915_READ(reg);
++		temp &= ~(TRANS_DP_PORT_SEL_MASK |
++			  TRANS_DP_SYNC_MASK);
++		temp |= (TRANS_DP_OUTPUT_ENABLE |
++			 TRANS_DP_ENH_FRAMING);
+ 
+-		drm_vblank_off(dev, pipe);
+-		/* Disable display plane */
+-		temp = I915_READ(dspcntr_reg);
+-		if ((temp & DISPLAY_PLANE_ENABLE) != 0) {
+-			I915_WRITE(dspcntr_reg, temp & ~DISPLAY_PLANE_ENABLE);
+-			/* Flush the plane changes */
+-			I915_WRITE(dspbase_reg, I915_READ(dspbase_reg));
+-			I915_READ(dspbase_reg);
++		if (crtc->mode.flags & DRM_MODE_FLAG_PHSYNC)
++			temp |= TRANS_DP_HSYNC_ACTIVE_HIGH;
++		if (crtc->mode.flags & DRM_MODE_FLAG_PVSYNC)
++			temp |= TRANS_DP_VSYNC_ACTIVE_HIGH;
++
++		switch (intel_trans_dp_port_sel(crtc)) {
++		case PCH_DP_B:
++			temp |= TRANS_DP_PORT_SEL_B;
++			break;
++		case PCH_DP_C:
++			temp |= TRANS_DP_PORT_SEL_C;
++			break;
++		case PCH_DP_D:
++			temp |= TRANS_DP_PORT_SEL_D;
++			break;
++		default:
++			DRM_DEBUG_KMS("Wrong PCH DP port return. Guess port B\n");
++			temp |= TRANS_DP_PORT_SEL_B;
++			break;
+ 		}
+ 
+-		if (dev_priv->cfb_plane == plane &&
+-		    dev_priv->display.disable_fbc)
+-			dev_priv->display.disable_fbc(dev);
++		I915_WRITE(reg, temp);
++	}
+ 
+-		/* disable cpu pipe, disable after all planes disabled */
+-		temp = I915_READ(pipeconf_reg);
+-		if ((temp & PIPEACONF_ENABLE) != 0) {
+-			I915_WRITE(pipeconf_reg, temp & ~PIPEACONF_ENABLE);
++	/* enable PCH transcoder */
++	reg = TRANSCONF(pipe);
++	temp = I915_READ(reg);
++	/*
++	 * make the BPC in transcoder be consistent with
++	 * that in pipeconf reg.
++	 */
++	temp &= ~PIPE_BPC_MASK;
++	temp |= I915_READ(PIPECONF(pipe)) & PIPE_BPC_MASK;
++	I915_WRITE(reg, temp | TRANS_ENABLE);
++	if (wait_for(I915_READ(reg) & TRANS_STATE_ENABLE, 100))
++		DRM_ERROR("failed to enable transcoder %d\n", pipe);
+ 
+-			/* wait for cpu pipe off, pipe state */
+-			if (wait_for((I915_READ(pipeconf_reg) & I965_PIPECONF_ACTIVE) == 0, 50, 1))
+-				DRM_ERROR("failed to turn off cpu pipe\n");
+-		} else
+-			DRM_DEBUG_KMS("crtc %d is disabled\n", pipe);
++	intel_crtc_load_lut(crtc);
++	intel_update_fbc(dev);
++	intel_crtc_update_cursor(crtc, true);
++}
+ 
+-		udelay(100);
++static void ironlake_crtc_disable(struct drm_crtc *crtc)
++{
++	struct drm_device *dev = crtc->dev;
++	struct drm_i915_private *dev_priv = dev->dev_private;
++	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
++	int pipe = intel_crtc->pipe;
++	int plane = intel_crtc->plane;
++	u32 reg, temp;
+ 
+-		/* Disable PF */
+-		I915_WRITE(pipe ? PFB_CTL_1 : PFA_CTL_1, 0);
+-		I915_WRITE(pipe ? PFB_WIN_SZ : PFA_WIN_SZ, 0);
++	if (!intel_crtc->active)
++		return;
+ 
+-		/* disable CPU FDI tx and PCH FDI rx */
+-		temp = I915_READ(fdi_tx_reg);
+-		I915_WRITE(fdi_tx_reg, temp & ~FDI_TX_ENABLE);
+-		I915_READ(fdi_tx_reg);
++	intel_crtc_wait_for_pending_flips(crtc);
++	drm_vblank_off(dev, pipe);
++	intel_crtc_update_cursor(crtc, false);
+ 
+-		temp = I915_READ(fdi_rx_reg);
+-		/* BPC in FDI rx is consistent with that in pipeconf */
+-		temp &= ~(0x07 << 16);
+-		temp |= (pipe_bpc << 11);
+-		I915_WRITE(fdi_rx_reg, temp & ~FDI_RX_ENABLE);
+-		I915_READ(fdi_rx_reg);
++	/* Disable display plane */
++	reg = DSPCNTR(plane);
++	temp = I915_READ(reg);
++	if (temp & DISPLAY_PLANE_ENABLE) {
++		I915_WRITE(reg, temp & ~DISPLAY_PLANE_ENABLE);
++		intel_flush_display_plane(dev, plane);
++	}
+ 
+-		udelay(100);
++	if (dev_priv->cfb_plane == plane &&
++	    dev_priv->display.disable_fbc)
++		dev_priv->display.disable_fbc(dev);
+ 
+-		/* still set train pattern 1 */
+-		temp = I915_READ(fdi_tx_reg);
++	/* disable cpu pipe, disable after all planes disabled */
++	reg = PIPECONF(pipe);
++	temp = I915_READ(reg);
++	if (temp & PIPECONF_ENABLE) {
++		I915_WRITE(reg, temp & ~PIPECONF_ENABLE);
++		POSTING_READ(reg);
++		/* wait for cpu pipe off, pipe state */
++		intel_wait_for_pipe_off(dev, intel_crtc->pipe);
++	}
++
++	/* Disable PF */
++	I915_WRITE(pipe ? PFB_CTL_1 : PFA_CTL_1, 0);
++	I915_WRITE(pipe ? PFB_WIN_SZ : PFA_WIN_SZ, 0);
++
++	/* disable CPU FDI tx and PCH FDI rx */
++	reg = FDI_TX_CTL(pipe);
++	temp = I915_READ(reg);
++	I915_WRITE(reg, temp & ~FDI_TX_ENABLE);
++	POSTING_READ(reg);
++
++	reg = FDI_RX_CTL(pipe);
++	temp = I915_READ(reg);
++	temp &= ~(0x7 << 16);
++	temp |= (I915_READ(PIPECONF(pipe)) & PIPE_BPC_MASK) << 11;
++	I915_WRITE(reg, temp & ~FDI_RX_ENABLE);
++
++	POSTING_READ(reg);
++	udelay(100);
++
++	/* Ironlake workaround, disable clock pointer after downing FDI */
++	if (HAS_PCH_IBX(dev))
++		I915_WRITE(FDI_RX_CHICKEN(pipe),
++			   I915_READ(FDI_RX_CHICKEN(pipe)) &
++			   ~FDI_RX_PHASE_SYNC_POINTER_ENABLE);
++
++	/* still set train pattern 1 */
++	reg = FDI_TX_CTL(pipe);
++	temp = I915_READ(reg);
++	temp &= ~FDI_LINK_TRAIN_NONE;
++	temp |= FDI_LINK_TRAIN_PATTERN_1;
++	I915_WRITE(reg, temp);
++
++	reg = FDI_RX_CTL(pipe);
++	temp = I915_READ(reg);
++	if (HAS_PCH_CPT(dev)) {
++		temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
++		temp |= FDI_LINK_TRAIN_PATTERN_1_CPT;
++	} else {
+ 		temp &= ~FDI_LINK_TRAIN_NONE;
+ 		temp |= FDI_LINK_TRAIN_PATTERN_1;
+-		I915_WRITE(fdi_tx_reg, temp);
+-		POSTING_READ(fdi_tx_reg);
+-
+-		temp = I915_READ(fdi_rx_reg);
+-		if (HAS_PCH_CPT(dev)) {
+-			temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
+-			temp |= FDI_LINK_TRAIN_PATTERN_1_CPT;
+-		} else {
+-			temp &= ~FDI_LINK_TRAIN_NONE;
+-			temp |= FDI_LINK_TRAIN_PATTERN_1;
+-		}
+-		I915_WRITE(fdi_rx_reg, temp);
+-		POSTING_READ(fdi_rx_reg);
++	}
++	/* BPC in FDI rx is consistent with that in PIPECONF */
++	temp &= ~(0x07 << 16);
++	temp |= (I915_READ(PIPECONF(pipe)) & PIPE_BPC_MASK) << 11;
++	I915_WRITE(reg, temp);
+ 
+-		udelay(100);
++	POSTING_READ(reg);
++	udelay(100);
+ 
+-		if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) {
+-			temp = I915_READ(PCH_LVDS);
++	if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) {
++		temp = I915_READ(PCH_LVDS);
++		if (temp & LVDS_PORT_EN) {
+ 			I915_WRITE(PCH_LVDS, temp & ~LVDS_PORT_EN);
+-			I915_READ(PCH_LVDS);
++			POSTING_READ(PCH_LVDS);
+ 			udelay(100);
+ 		}
++	}
+ 
+-		/* disable PCH transcoder */
+-		temp = I915_READ(transconf_reg);
+-		if ((temp & TRANS_ENABLE) != 0) {
+-			I915_WRITE(transconf_reg, temp & ~TRANS_ENABLE);
++	/* disable PCH transcoder */
++	reg = TRANSCONF(pipe);
++	temp = I915_READ(reg);
++	if (temp & TRANS_ENABLE) {
++		I915_WRITE(reg, temp & ~TRANS_ENABLE);
++		/* wait for PCH transcoder off, transcoder state */
++		if (wait_for((I915_READ(reg) & TRANS_STATE_ENABLE) == 0, 50))
++			DRM_ERROR("failed to disable transcoder\n");
++	}
+ 
+-			/* wait for PCH transcoder off, transcoder state */
+-			if (wait_for((I915_READ(transconf_reg) & TRANS_STATE_ENABLE) == 0, 50, 1))
+-				DRM_ERROR("failed to disable transcoder\n");
+-		}
++	if (HAS_PCH_CPT(dev)) {
++		/* disable TRANS_DP_CTL */
++		reg = TRANS_DP_CTL(pipe);
++		temp = I915_READ(reg);
++		temp &= ~(TRANS_DP_OUTPUT_ENABLE | TRANS_DP_PORT_SEL_MASK);
++		I915_WRITE(reg, temp);
+ 
+-		temp = I915_READ(transconf_reg);
+-		/* BPC in transcoder is consistent with that in pipeconf */
+-		temp &= ~PIPE_BPC_MASK;
+-		temp |= pipe_bpc;
+-		I915_WRITE(transconf_reg, temp);
+-		I915_READ(transconf_reg);
+-		udelay(100);
++		/* disable DPLL_SEL */
++		temp = I915_READ(PCH_DPLL_SEL);
++		if (pipe == 0)
++			temp &= ~(TRANSA_DPLL_ENABLE | TRANSA_DPLLB_SEL);
++		else
++			temp &= ~(TRANSB_DPLL_ENABLE | TRANSB_DPLLB_SEL);
++		I915_WRITE(PCH_DPLL_SEL, temp);
++	}
+ 
+-		if (HAS_PCH_CPT(dev)) {
+-			/* disable TRANS_DP_CTL */
+-			int trans_dp_ctl = (pipe == 0) ? TRANS_DP_CTL_A : TRANS_DP_CTL_B;
+-			int reg;
++	/* disable PCH DPLL */
++	reg = PCH_DPLL(pipe);
++	temp = I915_READ(reg);
++	I915_WRITE(reg, temp & ~DPLL_VCO_ENABLE);
+ 
+-			reg = I915_READ(trans_dp_ctl);
+-			reg &= ~(TRANS_DP_OUTPUT_ENABLE | TRANS_DP_PORT_SEL_MASK);
+-			I915_WRITE(trans_dp_ctl, reg);
+-			POSTING_READ(trans_dp_ctl);
++	/* Switch from PCDclk to Rawclk */
++	reg = FDI_RX_CTL(pipe);
++	temp = I915_READ(reg);
++	I915_WRITE(reg, temp & ~FDI_PCDCLK);
+ 
+-			/* disable DPLL_SEL */
+-			temp = I915_READ(PCH_DPLL_SEL);
+-			if (trans_dpll_sel == 0)
+-				temp &= ~(TRANSA_DPLL_ENABLE | TRANSA_DPLLB_SEL);
+-			else
+-				temp &= ~(TRANSB_DPLL_ENABLE | TRANSB_DPLLB_SEL);
+-			I915_WRITE(PCH_DPLL_SEL, temp);
+-			I915_READ(PCH_DPLL_SEL);
++	/* Disable CPU FDI TX PLL */
++	reg = FDI_TX_CTL(pipe);
++	temp = I915_READ(reg);
++	I915_WRITE(reg, temp & ~FDI_TX_PLL_ENABLE);
+ 
+-		}
++	POSTING_READ(reg);
++	udelay(100);
+ 
+-		/* disable PCH DPLL */
+-		temp = I915_READ(pch_dpll_reg);
+-		I915_WRITE(pch_dpll_reg, temp & ~DPLL_VCO_ENABLE);
+-		I915_READ(pch_dpll_reg);
+-
+-		/* Switch from PCDclk to Rawclk */
+-		temp = I915_READ(fdi_rx_reg);
+-		temp &= ~FDI_SEL_PCDCLK;
+-		I915_WRITE(fdi_rx_reg, temp);
+-		I915_READ(fdi_rx_reg);
+-
+-		/* Disable CPU FDI TX PLL */
+-		temp = I915_READ(fdi_tx_reg);
+-		I915_WRITE(fdi_tx_reg, temp & ~FDI_TX_PLL_ENABLE);
+-		I915_READ(fdi_tx_reg);
+-		udelay(100);
++	reg = FDI_RX_CTL(pipe);
++	temp = I915_READ(reg);
++	I915_WRITE(reg, temp & ~FDI_RX_PLL_ENABLE);
+ 
+-		temp = I915_READ(fdi_rx_reg);
+-		temp &= ~FDI_RX_PLL_ENABLE;
+-		I915_WRITE(fdi_rx_reg, temp);
+-		I915_READ(fdi_rx_reg);
++	/* Wait for the clocks to turn off. */
++	POSTING_READ(reg);
++	udelay(100);
+ 
+-		/* Wait for the clocks to turn off. */
+-		udelay(100);
++	intel_crtc->active = false;
++	intel_update_watermarks(dev);
++	intel_update_fbc(dev);
++	intel_clear_scanline_wait(dev);
++}
++
++static void ironlake_crtc_dpms(struct drm_crtc *crtc, int mode)
++{
++	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
++	int pipe = intel_crtc->pipe;
++	int plane = intel_crtc->plane;
++
++	/* XXX: When our outputs are all unaware of DPMS modes other than off
++	 * and on, we should map those modes to DRM_MODE_DPMS_OFF in the CRTC.
++	 */
++	switch (mode) {
++	case DRM_MODE_DPMS_ON:
++	case DRM_MODE_DPMS_STANDBY:
++	case DRM_MODE_DPMS_SUSPEND:
++		DRM_DEBUG_KMS("crtc %d/%d dpms on\n", pipe, plane);
++		ironlake_crtc_enable(crtc);
++		break;
++
++	case DRM_MODE_DPMS_OFF:
++		DRM_DEBUG_KMS("crtc %d/%d dpms off\n", pipe, plane);
++		ironlake_crtc_disable(crtc);
+ 		break;
+ 	}
+ }
+ 
+ static void intel_crtc_dpms_overlay(struct intel_crtc *intel_crtc, bool enable)
+ {
+-	struct intel_overlay *overlay;
+-	int ret;
+-
+ 	if (!enable && intel_crtc->overlay) {
+-		overlay = intel_crtc->overlay;
+-		mutex_lock(&overlay->dev->struct_mutex);
+-		for (;;) {
+-			ret = intel_overlay_switch_off(overlay);
+-			if (ret == 0)
+-				break;
++		struct drm_device *dev = intel_crtc->base.dev;
+ 
+-			ret = intel_overlay_recover_from_interrupt(overlay, 0);
+-			if (ret != 0) {
+-				/* overlay doesn't react anymore. Usually
+-				 * results in a black screen and an unkillable
+-				 * X server. */
+-				BUG();
+-				overlay->hw_wedged = HW_WEDGED;
+-				break;
+-			}
+-		}
+-		mutex_unlock(&overlay->dev->struct_mutex);
++		mutex_lock(&dev->struct_mutex);
++		(void) intel_overlay_switch_off(intel_crtc->overlay, false);
++		mutex_unlock(&dev->struct_mutex);
+ 	}
+-	/* Let userspace switch the overlay on again. In most cases userspace
+-	 * has to recompute where to put it anyway. */
+ 
+-	return;
++	/* Let userspace switch the overlay on again. In most cases userspace
++	 * has to recompute where to put it anyway.
++	 */
+ }
+ 
+-static void i9xx_crtc_dpms(struct drm_crtc *crtc, int mode)
++static void i9xx_crtc_enable(struct drm_crtc *crtc)
+ {
+ 	struct drm_device *dev = crtc->dev;
+ 	struct drm_i915_private *dev_priv = dev->dev_private;
+ 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+ 	int pipe = intel_crtc->pipe;
+ 	int plane = intel_crtc->plane;
+-	int dpll_reg = (pipe == 0) ? DPLL_A : DPLL_B;
+-	int dspcntr_reg = (plane == 0) ? DSPACNTR : DSPBCNTR;
+-	int dspbase_reg = (plane == 0) ? DSPAADDR : DSPBADDR;
+-	int pipeconf_reg = (pipe == 0) ? PIPEACONF : PIPEBCONF;
+-	u32 temp;
++	u32 reg, temp;
+ 
+-	/* XXX: When our outputs are all unaware of DPMS modes other than off
+-	 * and on, we should map those modes to DRM_MODE_DPMS_OFF in the CRTC.
+-	 */
+-	switch (mode) {
+-	case DRM_MODE_DPMS_ON:
+-	case DRM_MODE_DPMS_STANDBY:
+-	case DRM_MODE_DPMS_SUSPEND:
+-		/* Enable the DPLL */
+-		temp = I915_READ(dpll_reg);
+-		if ((temp & DPLL_VCO_ENABLE) == 0) {
+-			I915_WRITE(dpll_reg, temp);
+-			I915_READ(dpll_reg);
+-			/* Wait for the clocks to stabilize. */
+-			udelay(150);
+-			I915_WRITE(dpll_reg, temp | DPLL_VCO_ENABLE);
+-			I915_READ(dpll_reg);
+-			/* Wait for the clocks to stabilize. */
+-			udelay(150);
+-			I915_WRITE(dpll_reg, temp | DPLL_VCO_ENABLE);
+-			I915_READ(dpll_reg);
+-			/* Wait for the clocks to stabilize. */
+-			udelay(150);
+-		}
++	if (intel_crtc->active)
++		return;
+ 
+-		/* Enable the pipe */
+-		temp = I915_READ(pipeconf_reg);
+-		if ((temp & PIPEACONF_ENABLE) == 0)
+-			I915_WRITE(pipeconf_reg, temp | PIPEACONF_ENABLE);
+-
+-		/* Enable the plane */
+-		temp = I915_READ(dspcntr_reg);
+-		if ((temp & DISPLAY_PLANE_ENABLE) == 0) {
+-			I915_WRITE(dspcntr_reg, temp | DISPLAY_PLANE_ENABLE);
+-			/* Flush the plane changes */
+-			I915_WRITE(dspbase_reg, I915_READ(dspbase_reg));
+-		}
++	intel_crtc->active = true;
++	intel_update_watermarks(dev);
+ 
+-		intel_crtc_load_lut(crtc);
++	/* Enable the DPLL */
++	reg = DPLL(pipe);
++	temp = I915_READ(reg);
++	if ((temp & DPLL_VCO_ENABLE) == 0) {
++		I915_WRITE(reg, temp);
+ 
+-		if ((IS_I965G(dev) || plane == 0))
+-			intel_update_fbc(crtc, &crtc->mode);
++		/* Wait for the clocks to stabilize. */
++		POSTING_READ(reg);
++		udelay(150);
+ 
+-		/* Give the overlay scaler a chance to enable if it's on this pipe */
+-		intel_crtc_dpms_overlay(intel_crtc, true);
+-	break;
+-	case DRM_MODE_DPMS_OFF:
+-		/* Give the overlay scaler a chance to disable if it's on this pipe */
+-		intel_crtc_dpms_overlay(intel_crtc, false);
+-		drm_vblank_off(dev, pipe);
+-
+-		if (dev_priv->cfb_plane == plane &&
+-		    dev_priv->display.disable_fbc)
+-			dev_priv->display.disable_fbc(dev);
+-
+-		/* Disable display plane */
+-		temp = I915_READ(dspcntr_reg);
+-		if ((temp & DISPLAY_PLANE_ENABLE) != 0) {
+-			I915_WRITE(dspcntr_reg, temp & ~DISPLAY_PLANE_ENABLE);
+-			/* Flush the plane changes */
+-			I915_WRITE(dspbase_reg, I915_READ(dspbase_reg));
+-			I915_READ(dspbase_reg);
+-		}
++		I915_WRITE(reg, temp | DPLL_VCO_ENABLE);
++
++		/* Wait for the clocks to stabilize. */
++		POSTING_READ(reg);
++		udelay(150);
++
++		I915_WRITE(reg, temp | DPLL_VCO_ENABLE);
+ 
+-		/* Don't disable pipe A or pipe A PLLs if needed */
+-		if (pipeconf_reg == PIPEACONF &&
+-		    (dev_priv->quirks & QUIRK_PIPEA_FORCE)) {
+-			/* Wait for vblank for the disable to take effect */
++		/* Wait for the clocks to stabilize. */
++		POSTING_READ(reg);
++		udelay(150);
++	}
++
++	/* Enable the pipe */
++	reg = PIPECONF(pipe);
++	temp = I915_READ(reg);
++	if ((temp & PIPECONF_ENABLE) == 0)
++		I915_WRITE(reg, temp | PIPECONF_ENABLE);
++
++	/* Enable the plane */
++	reg = DSPCNTR(plane);
++	temp = I915_READ(reg);
++	if ((temp & DISPLAY_PLANE_ENABLE) == 0) {
++		I915_WRITE(reg, temp | DISPLAY_PLANE_ENABLE);
++		intel_flush_display_plane(dev, plane);
++	}
++
++	intel_crtc_load_lut(crtc);
++	intel_update_fbc(dev);
++
++	/* Give the overlay scaler a chance to enable if it's on this pipe */
++	intel_crtc_dpms_overlay(intel_crtc, true);
++	intel_crtc_update_cursor(crtc, true);
++}
++
++static void i9xx_crtc_disable(struct drm_crtc *crtc)
++{
++	struct drm_device *dev = crtc->dev;
++	struct drm_i915_private *dev_priv = dev->dev_private;
++	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
++	int pipe = intel_crtc->pipe;
++	int plane = intel_crtc->plane;
++	u32 reg, temp;
++
++	if (!intel_crtc->active)
++		return;
++
++	/* Give the overlay scaler a chance to disable if it's on this pipe */
++	intel_crtc_wait_for_pending_flips(crtc);
++	drm_vblank_off(dev, pipe);
++	intel_crtc_dpms_overlay(intel_crtc, false);
++	intel_crtc_update_cursor(crtc, false);
++
++	if (dev_priv->cfb_plane == plane &&
++	    dev_priv->display.disable_fbc)
++		dev_priv->display.disable_fbc(dev);
++
++	/* Disable display plane */
++	reg = DSPCNTR(plane);
++	temp = I915_READ(reg);
++	if (temp & DISPLAY_PLANE_ENABLE) {
++		I915_WRITE(reg, temp & ~DISPLAY_PLANE_ENABLE);
++		/* Flush the plane changes */
++		intel_flush_display_plane(dev, plane);
++
++		/* Wait for vblank for the disable to take effect */
++		if (IS_GEN2(dev))
+ 			intel_wait_for_vblank(dev, pipe);
+-			goto skip_pipe_off;
+-		}
++	}
+ 
+-		/* Next, disable display pipes */
+-		temp = I915_READ(pipeconf_reg);
+-		if ((temp & PIPEACONF_ENABLE) != 0) {
+-			I915_WRITE(pipeconf_reg, temp & ~PIPEACONF_ENABLE);
+-			I915_READ(pipeconf_reg);
+-		}
++	/* Don't disable pipe A or pipe A PLLs if needed */
++	if (pipe == 0 && (dev_priv->quirks & QUIRK_PIPEA_FORCE))
++		goto done;
++
++	/* Next, disable display pipes */
++	reg = PIPECONF(pipe);
++	temp = I915_READ(reg);
++	if (temp & PIPECONF_ENABLE) {
++		I915_WRITE(reg, temp & ~PIPECONF_ENABLE);
+ 
+ 		/* Wait for the pipe to turn off */
++		POSTING_READ(reg);
+ 		intel_wait_for_pipe_off(dev, pipe);
++	}
++
++	reg = DPLL(pipe);
++	temp = I915_READ(reg);
++	if (temp & DPLL_VCO_ENABLE) {
++		I915_WRITE(reg, temp & ~DPLL_VCO_ENABLE);
+ 
+-		temp = I915_READ(dpll_reg);
+-		if ((temp & DPLL_VCO_ENABLE) != 0) {
+-			I915_WRITE(dpll_reg, temp & ~DPLL_VCO_ENABLE);
+-			I915_READ(dpll_reg);
+-		}
+-	skip_pipe_off:
+ 		/* Wait for the clocks to turn off. */
++		POSTING_READ(reg);
+ 		udelay(150);
++	}
++
++done:
++	intel_crtc->active = false;
++	intel_update_fbc(dev);
++	intel_update_watermarks(dev);
++	intel_clear_scanline_wait(dev);
++}
++
++static void i9xx_crtc_dpms(struct drm_crtc *crtc, int mode)
++{
++	/* XXX: When our outputs are all unaware of DPMS modes other than off
++	 * and on, we should map those modes to DRM_MODE_DPMS_OFF in the CRTC.
++	 */
++	switch (mode) {
++	case DRM_MODE_DPMS_ON:
++	case DRM_MODE_DPMS_STANDBY:
++	case DRM_MODE_DPMS_SUSPEND:
++		i9xx_crtc_enable(crtc);
++		break;
++	case DRM_MODE_DPMS_OFF:
++		i9xx_crtc_disable(crtc);
+ 		break;
+ 	}
+ }
+@@ -2388,26 +2506,9 @@ static void intel_crtc_dpms(struct drm_crtc *crtc, int mode)
+ 		return;
+ 
+ 	intel_crtc->dpms_mode = mode;
+-	intel_crtc->cursor_on = mode == DRM_MODE_DPMS_ON;
+-
+-	/* When switching on the display, ensure that SR is disabled
+-	 * with multiple pipes prior to enabling to new pipe.
+-	 *
+-	 * When switching off the display, make sure the cursor is
+-	 * properly hidden prior to disabling the pipe.
+-	 */
+-	if (mode == DRM_MODE_DPMS_ON)
+-		intel_update_watermarks(dev);
+-	else
+-		intel_crtc_update_cursor(crtc);
+ 
+ 	dev_priv->display.dpms(crtc, mode);
+ 
+-	if (mode == DRM_MODE_DPMS_ON)
+-		intel_crtc_update_cursor(crtc);
+-	else
+-		intel_update_watermarks(dev);
+-
+ 	if (!dev->primary->master)
+ 		return;
+ 
+@@ -2432,16 +2533,46 @@ static void intel_crtc_dpms(struct drm_crtc *crtc, int mode)
+ 	}
+ }
+ 
+-static void intel_crtc_prepare (struct drm_crtc *crtc)
++static void intel_crtc_disable(struct drm_crtc *crtc)
+ {
+ 	struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private;
++	struct drm_device *dev = crtc->dev;
++
+ 	crtc_funcs->dpms(crtc, DRM_MODE_DPMS_OFF);
++
++	if (crtc->fb) {
++		mutex_lock(&dev->struct_mutex);
++		i915_gem_object_unpin(to_intel_framebuffer(crtc->fb)->obj);
++		mutex_unlock(&dev->struct_mutex);
++	}
++}
++
++/* Prepare for a mode set.
++ *
++ * Note we could be a lot smarter here.  We need to figure out which outputs
++ * will be enabled, which disabled (in short, how the config will change)
++ * and perform the minimum necessary steps to accomplish that, e.g. updating
++ * watermarks, FBC configuration, making sure PLLs are programmed correctly,
++ * panel fitting is in the proper state, etc.
++ */
++static void i9xx_crtc_prepare(struct drm_crtc *crtc)
++{
++	i9xx_crtc_disable(crtc);
+ }
+ 
+-static void intel_crtc_commit (struct drm_crtc *crtc)
++static void i9xx_crtc_commit(struct drm_crtc *crtc)
+ {
+-	struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private;
+-	crtc_funcs->dpms(crtc, DRM_MODE_DPMS_ON);
++	i9xx_crtc_enable(crtc);
++}
++
++static void ironlake_crtc_prepare(struct drm_crtc *crtc)
++{
++	ironlake_crtc_disable(crtc);
++}
++
++static void ironlake_crtc_commit(struct drm_crtc *crtc)
++{
++	ironlake_crtc_enable(crtc);
+ }
+ 
+ void intel_encoder_prepare (struct drm_encoder *encoder)
+@@ -2460,13 +2591,7 @@ void intel_encoder_commit (struct drm_encoder *encoder)
+ 
+ void intel_encoder_destroy(struct drm_encoder *encoder)
+ {
+-	struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder);
+-
+-	if (intel_encoder->ddc_bus)
+-		intel_i2c_destroy(intel_encoder->ddc_bus);
+-
+-	if (intel_encoder->i2c_bus)
+-		intel_i2c_destroy(intel_encoder->i2c_bus);
++	struct intel_encoder *intel_encoder = to_intel_encoder(encoder);
+ 
+ 	drm_encoder_cleanup(encoder);
+ 	kfree(intel_encoder);
+@@ -2557,33 +2682,6 @@ static int i830_get_display_clock_speed(struct drm_device *dev)
+ 	return 133000;
+ }
+ 
+-/**
+- * Return the pipe currently connected to the panel fitter,
+- * or -1 if the panel fitter is not present or not in use
+- */
+-int intel_panel_fitter_pipe (struct drm_device *dev)
+-{
+-	struct drm_i915_private *dev_priv = dev->dev_private;
+-	u32  pfit_control;
+-
+-	/* i830 doesn't have a panel fitter */
+-	if (IS_I830(dev))
+-		return -1;
+-
+-	pfit_control = I915_READ(PFIT_CONTROL);
+-
+-	/* See if the panel fitter is in use */
+-	if ((pfit_control & PFIT_ENABLE) == 0)
+-		return -1;
+-
+-	/* 965 can place panel fitter on either pipe */
+-	if (IS_I965G(dev))
+-		return (pfit_control >> 29) & 0x3;
+-
+-	/* older chips can only use pipe 1 */
+-	return 1;
+-}
+-
+ struct fdi_m_n {
+ 	u32        tu;
+ 	u32        gmch_m;
+@@ -2902,7 +3000,7 @@ static int i9xx_get_fifo_size(struct drm_device *dev, int plane)
+ 		size = ((dsparb >> DSPARB_CSTART_SHIFT) & 0x7f) - size;
+ 
+ 	DRM_DEBUG_KMS("FIFO size - (0x%08x) %s: %d\n", dsparb,
+-			plane ? "B" : "A", size);
++		      plane ? "B" : "A", size);
+ 
+ 	return size;
+ }
+@@ -2919,7 +3017,7 @@ static int i85x_get_fifo_size(struct drm_device *dev, int plane)
+ 	size >>= 1; /* Convert to cachelines */
+ 
+ 	DRM_DEBUG_KMS("FIFO size - (0x%08x) %s: %d\n", dsparb,
+-			plane ? "B" : "A", size);
++		      plane ? "B" : "A", size);
+ 
+ 	return size;
+ }
+@@ -2934,8 +3032,8 @@ static int i845_get_fifo_size(struct drm_device *dev, int plane)
+ 	size >>= 2; /* Convert to cachelines */
+ 
+ 	DRM_DEBUG_KMS("FIFO size - (0x%08x) %s: %d\n", dsparb,
+-			plane ? "B" : "A",
+-		  size);
++		      plane ? "B" : "A",
++		      size);
+ 
+ 	return size;
+ }
+@@ -2950,14 +3048,14 @@ static int i830_get_fifo_size(struct drm_device *dev, int plane)
+ 	size >>= 1; /* Convert to cachelines */
+ 
+ 	DRM_DEBUG_KMS("FIFO size - (0x%08x) %s: %d\n", dsparb,
+-			plane ? "B" : "A", size);
++		      plane ? "B" : "A", size);
+ 
+ 	return size;
+ }
+ 
+ static void pineview_update_wm(struct drm_device *dev,  int planea_clock,
+-			  int planeb_clock, int sr_hdisplay, int unused,
+-			  int pixel_size)
++			       int planeb_clock, int sr_hdisplay, int unused,
++			       int pixel_size)
+ {
+ 	struct drm_i915_private *dev_priv = dev->dev_private;
+ 	const struct cxsr_latency *latency;
+@@ -3069,13 +3167,13 @@ static void g4x_update_wm(struct drm_device *dev,  int planea_clock,
+ 
+ 		/* Use ns/us then divide to preserve precision */
+ 		sr_entries = (((sr_latency_ns / line_time_us) + 1000) / 1000) *
+-			      pixel_size * sr_hdisplay;
++			pixel_size * sr_hdisplay;
+ 		sr_entries = DIV_ROUND_UP(sr_entries, cacheline_size);
+ 
+ 		entries_required = (((sr_latency_ns / line_time_us) +
+ 				     1000) / 1000) * pixel_size * 64;
+ 		entries_required = DIV_ROUND_UP(entries_required,
+-					   g4x_cursor_wm_info.cacheline_size);
++						g4x_cursor_wm_info.cacheline_size);
+ 		cursor_sr = entries_required + g4x_cursor_wm_info.guard_size;
+ 
+ 		if (cursor_sr > g4x_cursor_wm_info.max_wm)
+@@ -3087,7 +3185,7 @@ static void g4x_update_wm(struct drm_device *dev,  int planea_clock,
+ 	} else {
+ 		/* Turn off self refresh if both pipes are enabled */
+ 		I915_WRITE(FW_BLC_SELF, I915_READ(FW_BLC_SELF)
+-					& ~FW_BLC_SELF_EN);
++			   & ~FW_BLC_SELF_EN);
+ 	}
+ 
+ 	DRM_DEBUG("Setting FIFO watermarks - A: %d, B: %d, SR %d\n",
+@@ -3125,7 +3223,7 @@ static void i965_update_wm(struct drm_device *dev, int planea_clock,
+ 
+ 		/* Use ns/us then divide to preserve precision */
+ 		sr_entries = (((sr_latency_ns / line_time_us) + 1000) / 1000) *
+-			      pixel_size * sr_hdisplay;
++			pixel_size * sr_hdisplay;
+ 		sr_entries = DIV_ROUND_UP(sr_entries, I915_FIFO_LINE_SIZE);
+ 		DRM_DEBUG("self-refresh entries: %d\n", sr_entries);
+ 		srwm = I965_FIFO_SIZE - sr_entries;
+@@ -3134,11 +3232,11 @@ static void i965_update_wm(struct drm_device *dev, int planea_clock,
+ 		srwm &= 0x1ff;
+ 
+ 		sr_entries = (((sr_latency_ns / line_time_us) + 1000) / 1000) *
+-			     pixel_size * 64;
++			pixel_size * 64;
+ 		sr_entries = DIV_ROUND_UP(sr_entries,
+ 					  i965_cursor_wm_info.cacheline_size);
+ 		cursor_sr = i965_cursor_wm_info.fifo_size -
+-			    (sr_entries + i965_cursor_wm_info.guard_size);
++			(sr_entries + i965_cursor_wm_info.guard_size);
+ 
+ 		if (cursor_sr > i965_cursor_wm_info.max_wm)
+ 			cursor_sr = i965_cursor_wm_info.max_wm;
+@@ -3146,11 +3244,11 @@ static void i965_update_wm(struct drm_device *dev, int planea_clock,
+ 		DRM_DEBUG_KMS("self-refresh watermark: display plane %d "
+ 			      "cursor %d\n", srwm, cursor_sr);
+ 
+-		if (IS_I965GM(dev))
++		if (IS_CRESTLINE(dev))
+ 			I915_WRITE(FW_BLC_SELF, FW_BLC_SELF_EN);
+ 	} else {
+ 		/* Turn off self refresh if both pipes are enabled */
+-		if (IS_I965GM(dev))
++		if (IS_CRESTLINE(dev))
+ 			I915_WRITE(FW_BLC_SELF, I915_READ(FW_BLC_SELF)
+ 				   & ~FW_BLC_SELF_EN);
+ 	}
+@@ -3180,9 +3278,9 @@ static void i9xx_update_wm(struct drm_device *dev, int planea_clock,
+ 	int sr_clock, sr_entries = 0;
+ 
+ 	/* Create copies of the base settings for each pipe */
+-	if (IS_I965GM(dev) || IS_I945GM(dev))
++	if (IS_CRESTLINE(dev) || IS_I945GM(dev))
+ 		planea_params = planeb_params = i945_wm_info;
+-	else if (IS_I9XX(dev))
++	else if (!IS_GEN2(dev))
+ 		planea_params = planeb_params = i915_wm_info;
+ 	else
+ 		planea_params = planeb_params = i855_wm_info;
+@@ -3217,7 +3315,7 @@ static void i9xx_update_wm(struct drm_device *dev, int planea_clock,
+ 
+ 		/* Use ns/us then divide to preserve precision */
+ 		sr_entries = (((sr_latency_ns / line_time_us) + 1000) / 1000) *
+-			      pixel_size * sr_hdisplay;
++			pixel_size * sr_hdisplay;
+ 		sr_entries = DIV_ROUND_UP(sr_entries, cacheline_size);
+ 		DRM_DEBUG_KMS("self-refresh entries: %d\n", sr_entries);
+ 		srwm = total_size - sr_entries;
+@@ -3242,7 +3340,7 @@ static void i9xx_update_wm(struct drm_device *dev, int planea_clock,
+ 	}
+ 
+ 	DRM_DEBUG_KMS("Setting FIFO watermarks - A: %d, B: %d, C: %d, SR %d\n",
+-		  planea_wm, planeb_wm, cwm, srwm);
++		      planea_wm, planeb_wm, cwm, srwm);
+ 
+ 	fwater_lo = ((planeb_wm & 0x3f) << 16) | (planea_wm & 0x3f);
+ 	fwater_hi = (cwm & 0x1f);
+@@ -3276,146 +3374,130 @@ static void i830_update_wm(struct drm_device *dev, int planea_clock, int unused,
+ #define ILK_LP0_PLANE_LATENCY		700
+ #define ILK_LP0_CURSOR_LATENCY		1300
+ 
+-static void ironlake_update_wm(struct drm_device *dev,  int planea_clock,
+-		       int planeb_clock, int sr_hdisplay, int sr_htotal,
+-		       int pixel_size)
++static bool ironlake_compute_wm0(struct drm_device *dev,
++				 int pipe,
++				 int *plane_wm,
++				 int *cursor_wm)
+ {
+-	struct drm_i915_private *dev_priv = dev->dev_private;
+-	int planea_wm, planeb_wm, cursora_wm, cursorb_wm;
+-	int sr_wm, cursor_wm;
+-	unsigned long line_time_us;
+-	int sr_clock, entries_required;
+-	u32 reg_value;
+-	int line_count;
+-	int planea_htotal = 0, planeb_htotal = 0;
+ 	struct drm_crtc *crtc;
++	int htotal, hdisplay, clock, pixel_size = 0;
++	int line_time_us, line_count, entries;
+ 
+-	/* Need htotal for all active display plane */
+-	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
+-		struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+-		if (intel_crtc->dpms_mode == DRM_MODE_DPMS_ON) {
+-			if (intel_crtc->plane == 0)
+-				planea_htotal = crtc->mode.htotal;
+-			else
+-				planeb_htotal = crtc->mode.htotal;
+-		}
+-	}
+-
+-	/* Calculate and update the watermark for plane A */
+-	if (planea_clock) {
+-		entries_required = ((planea_clock / 1000) * pixel_size *
+-				     ILK_LP0_PLANE_LATENCY) / 1000;
+-		entries_required = DIV_ROUND_UP(entries_required,
+-						ironlake_display_wm_info.cacheline_size);
+-		planea_wm = entries_required +
+-			    ironlake_display_wm_info.guard_size;
+-
+-		if (planea_wm > (int)ironlake_display_wm_info.max_wm)
+-			planea_wm = ironlake_display_wm_info.max_wm;
+-
+-		/* Use the large buffer method to calculate cursor watermark */
+-		line_time_us = (planea_htotal * 1000) / planea_clock;
+-
+-		/* Use ns/us then divide to preserve precision */
+-		line_count = (ILK_LP0_CURSOR_LATENCY / line_time_us + 1000) / 1000;
+-
+-		/* calculate the cursor watermark for cursor A */
+-		entries_required = line_count * 64 * pixel_size;
+-		entries_required = DIV_ROUND_UP(entries_required,
+-						ironlake_cursor_wm_info.cacheline_size);
+-		cursora_wm = entries_required + ironlake_cursor_wm_info.guard_size;
+-		if (cursora_wm > ironlake_cursor_wm_info.max_wm)
+-			cursora_wm = ironlake_cursor_wm_info.max_wm;
+-
+-		reg_value = I915_READ(WM0_PIPEA_ILK);
+-		reg_value &= ~(WM0_PIPE_PLANE_MASK | WM0_PIPE_CURSOR_MASK);
+-		reg_value |= (planea_wm << WM0_PIPE_PLANE_SHIFT) |
+-			     (cursora_wm & WM0_PIPE_CURSOR_MASK);
+-		I915_WRITE(WM0_PIPEA_ILK, reg_value);
+-		DRM_DEBUG_KMS("FIFO watermarks For pipe A - plane %d, "
+-				"cursor: %d\n", planea_wm, cursora_wm);
+-	}
+-	/* Calculate and update the watermark for plane B */
+-	if (planeb_clock) {
+-		entries_required = ((planeb_clock / 1000) * pixel_size *
+-				     ILK_LP0_PLANE_LATENCY) / 1000;
+-		entries_required = DIV_ROUND_UP(entries_required,
+-						ironlake_display_wm_info.cacheline_size);
+-		planeb_wm = entries_required +
+-			    ironlake_display_wm_info.guard_size;
+-
+-		if (planeb_wm > (int)ironlake_display_wm_info.max_wm)
+-			planeb_wm = ironlake_display_wm_info.max_wm;
++	crtc = intel_get_crtc_for_pipe(dev, pipe);
++	if (crtc->fb == NULL || !crtc->enabled)
++		return false;
+ 
+-		/* Use the large buffer method to calculate cursor watermark */
+-		line_time_us = (planeb_htotal * 1000) / planeb_clock;
++	htotal = crtc->mode.htotal;
++	hdisplay = crtc->mode.hdisplay;
++	clock = crtc->mode.clock;
++	pixel_size = crtc->fb->bits_per_pixel / 8;
++
++	/* Use the small buffer method to calculate plane watermark */
++	entries = ((clock * pixel_size / 1000) * ILK_LP0_PLANE_LATENCY) / 1000;
++	entries = DIV_ROUND_UP(entries,
++			       ironlake_display_wm_info.cacheline_size);
++	*plane_wm = entries + ironlake_display_wm_info.guard_size;
++	if (*plane_wm > (int)ironlake_display_wm_info.max_wm)
++		*plane_wm = ironlake_display_wm_info.max_wm;
++
++	/* Use the large buffer method to calculate cursor watermark */
++	line_time_us = ((htotal * 1000) / clock);
++	line_count = (ILK_LP0_CURSOR_LATENCY / line_time_us + 1000) / 1000;
++	entries = line_count * 64 * pixel_size;
++	entries = DIV_ROUND_UP(entries,
++			       ironlake_cursor_wm_info.cacheline_size);
++	*cursor_wm = entries + ironlake_cursor_wm_info.guard_size;
++	if (*cursor_wm > ironlake_cursor_wm_info.max_wm)
++		*cursor_wm = ironlake_cursor_wm_info.max_wm;
+ 
+-		/* Use ns/us then divide to preserve precision */
+-		line_count = (ILK_LP0_CURSOR_LATENCY / line_time_us + 1000) / 1000;
++	return true;
++}
+ 
+-		/* calculate the cursor watermark for cursor B */
+-		entries_required = line_count * 64 * pixel_size;
+-		entries_required = DIV_ROUND_UP(entries_required,
+-						ironlake_cursor_wm_info.cacheline_size);
+-		cursorb_wm = entries_required + ironlake_cursor_wm_info.guard_size;
+-		if (cursorb_wm > ironlake_cursor_wm_info.max_wm)
+-			cursorb_wm = ironlake_cursor_wm_info.max_wm;
++static void ironlake_update_wm(struct drm_device *dev,
++			       int planea_clock, int planeb_clock,
++			       int sr_hdisplay, int sr_htotal,
++			       int pixel_size)
++{
++	struct drm_i915_private *dev_priv = dev->dev_private;
++	int plane_wm, cursor_wm, enabled;
++	int tmp;
++
++	enabled = 0;
++	if (ironlake_compute_wm0(dev, 0, &plane_wm, &cursor_wm)) {
++		I915_WRITE(WM0_PIPEA_ILK,
++			   (plane_wm << WM0_PIPE_PLANE_SHIFT) | cursor_wm);
++		DRM_DEBUG_KMS("FIFO watermarks For pipe A -"
++			      " plane %d, cursor: %d\n",
++			      plane_wm, cursor_wm);
++		enabled++;
++	}
+ 
+-		reg_value = I915_READ(WM0_PIPEB_ILK);
+-		reg_value &= ~(WM0_PIPE_PLANE_MASK | WM0_PIPE_CURSOR_MASK);
+-		reg_value |= (planeb_wm << WM0_PIPE_PLANE_SHIFT) |
+-			     (cursorb_wm & WM0_PIPE_CURSOR_MASK);
+-		I915_WRITE(WM0_PIPEB_ILK, reg_value);
+-		DRM_DEBUG_KMS("FIFO watermarks For pipe B - plane %d, "
+-				"cursor: %d\n", planeb_wm, cursorb_wm);
++	if (ironlake_compute_wm0(dev, 1, &plane_wm, &cursor_wm)) {
++		I915_WRITE(WM0_PIPEB_ILK,
++			   (plane_wm << WM0_PIPE_PLANE_SHIFT) | cursor_wm);
++		DRM_DEBUG_KMS("FIFO watermarks For pipe B -"
++			      " plane %d, cursor: %d\n",
++			      plane_wm, cursor_wm);
++		enabled++;
+ 	}
+ 
+ 	/*
+ 	 * Calculate and update the self-refresh watermark only when one
+ 	 * display plane is used.
+ 	 */
+-	if (!planea_clock || !planeb_clock) {
+-
++	tmp = 0;
++	if (enabled == 1 && /* XXX disabled due to buggy implementation? */ 0) {
++		unsigned long line_time_us;
++		int small, large, plane_fbc;
++		int sr_clock, entries;
++		int line_count, line_size;
+ 		/* Read the self-refresh latency. The unit is 0.5us */
+ 		int ilk_sr_latency = I915_READ(MLTR_ILK) & ILK_SRLT_MASK;
+ 
+ 		sr_clock = planea_clock ? planea_clock : planeb_clock;
+-		line_time_us = ((sr_htotal * 1000) / sr_clock);
++		line_time_us = (sr_htotal * 1000) / sr_clock;
+ 
+ 		/* Use ns/us then divide to preserve precision */
+ 		line_count = ((ilk_sr_latency * 500) / line_time_us + 1000)
+-			       / 1000;
++			/ 1000;
++		line_size = sr_hdisplay * pixel_size;
+ 
+-		/* calculate the self-refresh watermark for display plane */
+-		entries_required = line_count * sr_hdisplay * pixel_size;
+-		entries_required = DIV_ROUND_UP(entries_required,
+-						ironlake_display_srwm_info.cacheline_size);
+-		sr_wm = entries_required +
+-			ironlake_display_srwm_info.guard_size;
++		/* Use the minimum of the small and large buffer method for primary */
++		small = ((sr_clock * pixel_size / 1000) * (ilk_sr_latency * 500)) / 1000;
++		large = line_count * line_size;
+ 
+-		/* calculate the self-refresh watermark for display cursor */
+-		entries_required = line_count * pixel_size * 64;
+-		entries_required = DIV_ROUND_UP(entries_required,
+-						ironlake_cursor_srwm_info.cacheline_size);
+-		cursor_wm = entries_required +
+-			    ironlake_cursor_srwm_info.guard_size;
++		entries = DIV_ROUND_UP(min(small, large),
++				       ironlake_display_srwm_info.cacheline_size);
+ 
+-		/* configure watermark and enable self-refresh */
+-		reg_value = I915_READ(WM1_LP_ILK);
+-		reg_value &= ~(WM1_LP_LATENCY_MASK | WM1_LP_SR_MASK |
+-			       WM1_LP_CURSOR_MASK);
+-		reg_value |= (ilk_sr_latency << WM1_LP_LATENCY_SHIFT) |
+-			     (sr_wm << WM1_LP_SR_SHIFT) | cursor_wm;
++		plane_fbc = entries * 64;
++		plane_fbc = DIV_ROUND_UP(plane_fbc, line_size);
+ 
+-		I915_WRITE(WM1_LP_ILK, reg_value);
+-		DRM_DEBUG_KMS("self-refresh watermark: display plane %d "
+-				"cursor %d\n", sr_wm, cursor_wm);
++		plane_wm = entries + ironlake_display_srwm_info.guard_size;
++		if (plane_wm > (int)ironlake_display_srwm_info.max_wm)
++			plane_wm = ironlake_display_srwm_info.max_wm;
+ 
+-	} else {
+-		/* Turn off self refresh if both pipes are enabled */
+-		I915_WRITE(WM1_LP_ILK, I915_READ(WM1_LP_ILK) & ~WM1_LP_SR_EN);
+-	}
++		/* calculate the self-refresh watermark for display cursor */
++		entries = line_count * pixel_size * 64;
++		entries = DIV_ROUND_UP(entries,
++				       ironlake_cursor_srwm_info.cacheline_size);
++
++		cursor_wm = entries + ironlake_cursor_srwm_info.guard_size;
++		if (cursor_wm > (int)ironlake_cursor_srwm_info.max_wm)
++			cursor_wm = ironlake_cursor_srwm_info.max_wm;
++
++		/* configure watermark and enable self-refresh */
++		tmp = (WM1_LP_SR_EN |
++		       (ilk_sr_latency << WM1_LP_LATENCY_SHIFT) |
++		       (plane_fbc << WM1_LP_FBC_SHIFT) |
++		       (plane_wm << WM1_LP_SR_SHIFT) |
++		       cursor_wm);
++		DRM_DEBUG_KMS("self-refresh watermark: display plane %d, fbc lines %d,"
++			      " cursor %d\n", plane_wm, plane_fbc, cursor_wm);
++	}
++	I915_WRITE(WM1_LP_ILK, tmp);
++	/* XXX setup WM2 and WM3 */
+ }
++
+ /**
+  * intel_update_watermarks - update FIFO watermark values based on current modes
+  *
+@@ -3447,7 +3529,7 @@ static void ironlake_update_wm(struct drm_device *dev,  int planea_clock,
+  *
+  * We don't use the sprite, so we can ignore that.  And on Crestline we have
+  * to set the non-SR watermarks to 8.
+-  */
++ */
+ static void intel_update_watermarks(struct drm_device *dev)
+ {
+ 	struct drm_i915_private *dev_priv = dev->dev_private;
+@@ -3463,15 +3545,15 @@ static void intel_update_watermarks(struct drm_device *dev)
+ 	/* Get the clock config from both planes */
+ 	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
+ 		struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+-		if (intel_crtc->dpms_mode == DRM_MODE_DPMS_ON) {
++		if (intel_crtc->active) {
+ 			enabled++;
+ 			if (intel_crtc->plane == 0) {
+ 				DRM_DEBUG_KMS("plane A (pipe %d) clock: %d\n",
+-					  intel_crtc->pipe, crtc->mode.clock);
++					      intel_crtc->pipe, crtc->mode.clock);
+ 				planea_clock = crtc->mode.clock;
+ 			} else {
+ 				DRM_DEBUG_KMS("plane B (pipe %d) clock: %d\n",
+-					  intel_crtc->pipe, crtc->mode.clock);
++					      intel_crtc->pipe, crtc->mode.clock);
+ 				planeb_clock = crtc->mode.clock;
+ 			}
+ 			sr_hdisplay = crtc->mode.hdisplay;
+@@ -3502,62 +3584,35 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
+ 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+ 	int pipe = intel_crtc->pipe;
+ 	int plane = intel_crtc->plane;
+-	int fp_reg = (pipe == 0) ? FPA0 : FPB0;
+-	int dpll_reg = (pipe == 0) ? DPLL_A : DPLL_B;
+-	int dpll_md_reg = (intel_crtc->pipe == 0) ? DPLL_A_MD : DPLL_B_MD;
+-	int dspcntr_reg = (plane == 0) ? DSPACNTR : DSPBCNTR;
+-	int pipeconf_reg = (pipe == 0) ? PIPEACONF : PIPEBCONF;
+-	int htot_reg = (pipe == 0) ? HTOTAL_A : HTOTAL_B;
+-	int hblank_reg = (pipe == 0) ? HBLANK_A : HBLANK_B;
+-	int hsync_reg = (pipe == 0) ? HSYNC_A : HSYNC_B;
+-	int vtot_reg = (pipe == 0) ? VTOTAL_A : VTOTAL_B;
+-	int vblank_reg = (pipe == 0) ? VBLANK_A : VBLANK_B;
+-	int vsync_reg = (pipe == 0) ? VSYNC_A : VSYNC_B;
+-	int dspsize_reg = (plane == 0) ? DSPASIZE : DSPBSIZE;
+-	int dsppos_reg = (plane == 0) ? DSPAPOS : DSPBPOS;
+-	int pipesrc_reg = (pipe == 0) ? PIPEASRC : PIPEBSRC;
++	u32 fp_reg, dpll_reg;
+ 	int refclk, num_connectors = 0;
+ 	intel_clock_t clock, reduced_clock;
+-	u32 dpll = 0, fp = 0, fp2 = 0, dspcntr, pipeconf;
++	u32 dpll, fp = 0, fp2 = 0, dspcntr, pipeconf;
+ 	bool ok, has_reduced_clock = false, is_sdvo = false, is_dvo = false;
+ 	bool is_crt = false, is_lvds = false, is_tv = false, is_dp = false;
+ 	struct intel_encoder *has_edp_encoder = NULL;
+ 	struct drm_mode_config *mode_config = &dev->mode_config;
+-	struct drm_encoder *encoder;
++	struct intel_encoder *encoder;
+ 	const intel_limit_t *limit;
+ 	int ret;
+ 	struct fdi_m_n m_n = {0};
+-	int data_m1_reg = (pipe == 0) ? PIPEA_DATA_M1 : PIPEB_DATA_M1;
+-	int data_n1_reg = (pipe == 0) ? PIPEA_DATA_N1 : PIPEB_DATA_N1;
+-	int link_m1_reg = (pipe == 0) ? PIPEA_LINK_M1 : PIPEB_LINK_M1;
+-	int link_n1_reg = (pipe == 0) ? PIPEA_LINK_N1 : PIPEB_LINK_N1;
+-	int pch_fp_reg = (pipe == 0) ? PCH_FPA0 : PCH_FPB0;
+-	int pch_dpll_reg = (pipe == 0) ? PCH_DPLL_A : PCH_DPLL_B;
+-	int fdi_rx_reg = (pipe == 0) ? FDI_RXA_CTL : FDI_RXB_CTL;
+-	int fdi_tx_reg = (pipe == 0) ? FDI_TXA_CTL : FDI_TXB_CTL;
+-	int trans_dpll_sel = (pipe == 0) ? 0 : 1;
+-	int lvds_reg = LVDS;
+-	u32 temp;
+-	int sdvo_pixel_multiply;
++	u32 reg, temp;
+ 	int target_clock;
+ 
+ 	drm_vblank_pre_modeset(dev, pipe);
+ 
+-	list_for_each_entry(encoder, &mode_config->encoder_list, head) {
+-		struct intel_encoder *intel_encoder;
+-
+-		if (encoder->crtc != crtc)
++	list_for_each_entry(encoder, &mode_config->encoder_list, base.head) {
++		if (encoder->base.crtc != crtc)
+ 			continue;
+ 
+-		intel_encoder = enc_to_intel_encoder(encoder);
+-		switch (intel_encoder->type) {
++		switch (encoder->type) {
+ 		case INTEL_OUTPUT_LVDS:
+ 			is_lvds = true;
+ 			break;
+ 		case INTEL_OUTPUT_SDVO:
+ 		case INTEL_OUTPUT_HDMI:
+ 			is_sdvo = true;
+-			if (intel_encoder->needs_tv_clock)
++			if (encoder->needs_tv_clock)
+ 				is_tv = true;
+ 			break;
+ 		case INTEL_OUTPUT_DVO:
+@@ -3573,7 +3628,7 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
+ 			is_dp = true;
+ 			break;
+ 		case INTEL_OUTPUT_EDP:
+-			has_edp_encoder = intel_encoder;
++			has_edp_encoder = encoder;
+ 			break;
+ 		}
+ 
+@@ -3583,15 +3638,15 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
+ 	if (is_lvds && dev_priv->lvds_use_ssc && num_connectors < 2) {
+ 		refclk = dev_priv->lvds_ssc_freq * 1000;
+ 		DRM_DEBUG_KMS("using SSC reference clock of %d MHz\n",
+-					refclk / 1000);
+-	} else if (IS_I9XX(dev)) {
++			      refclk / 1000);
++	} else if (!IS_GEN2(dev)) {
+ 		refclk = 96000;
+-		if (HAS_PCH_SPLIT(dev))
++		if (HAS_PCH_SPLIT(dev) &&
++		    (!has_edp_encoder || intel_encoder_is_pch_edp(&has_edp_encoder->base)))
+ 			refclk = 120000; /* 120Mhz refclk */
+ 	} else {
+ 		refclk = 48000;
+ 	}
+-	
+ 
+ 	/*
+ 	 * Returns a set of divisors for the desired target clock with the given
+@@ -3607,13 +3662,13 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
+ 	}
+ 
+ 	/* Ensure that the cursor is valid for the new mode before changing... */
+-	intel_crtc_update_cursor(crtc);
++	intel_crtc_update_cursor(crtc, true);
+ 
+ 	if (is_lvds && dev_priv->lvds_downclock_avail) {
+ 		has_reduced_clock = limit->find_pll(limit, crtc,
+-							    dev_priv->lvds_downclock,
+-							    refclk,
+-							    &reduced_clock);
++						    dev_priv->lvds_downclock,
++						    refclk,
++						    &reduced_clock);
+ 		if (has_reduced_clock && (clock.p != reduced_clock.p)) {
+ 			/*
+ 			 * If the different P is found, it means that we can't
+@@ -3622,7 +3677,7 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
+ 			 * feature.
+ 			 */
+ 			DRM_DEBUG_KMS("Different P is found for "
+-						"LVDS clock/downclock\n");
++				      "LVDS clock/downclock\n");
+ 			has_reduced_clock = 0;
+ 		}
+ 	}
+@@ -3630,14 +3685,14 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
+ 	   this mirrors vbios setting. */
+ 	if (is_sdvo && is_tv) {
+ 		if (adjusted_mode->clock >= 100000
+-				&& adjusted_mode->clock < 140500) {
++		    && adjusted_mode->clock < 140500) {
+ 			clock.p1 = 2;
+ 			clock.p2 = 10;
+ 			clock.n = 3;
+ 			clock.m1 = 16;
+ 			clock.m2 = 8;
+ 		} else if (adjusted_mode->clock >= 140500
+-				&& adjusted_mode->clock <= 200000) {
++			   && adjusted_mode->clock <= 200000) {
+ 			clock.p1 = 1;
+ 			clock.p2 = 10;
+ 			clock.n = 6;
+@@ -3649,34 +3704,41 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
+ 	/* FDI link */
+ 	if (HAS_PCH_SPLIT(dev)) {
+ 		int lane = 0, link_bw, bpp;
+-		/* eDP doesn't require FDI link, so just set DP M/N
++		/* CPU eDP doesn't require FDI link, so just set DP M/N
+ 		   according to current link config */
+-		if (has_edp_encoder) {
++		if (has_edp_encoder && !intel_encoder_is_pch_edp(&encoder->base)) {
+ 			target_clock = mode->clock;
+ 			intel_edp_link_config(has_edp_encoder,
+ 					      &lane, &link_bw);
+ 		} else {
+-			/* DP over FDI requires target mode clock
++			/* [e]DP over FDI requires target mode clock
+ 			   instead of link clock */
+-			if (is_dp)
++			if (is_dp || intel_encoder_is_pch_edp(&has_edp_encoder->base))
+ 				target_clock = mode->clock;
+ 			else
+ 				target_clock = adjusted_mode->clock;
+-			link_bw = 270000;
++
++			/* FDI is a binary signal running at ~2.7GHz, encoding
++			 * each output octet as 10 bits. The actual frequency
++			 * is stored as a divider into a 100MHz clock, and the
++			 * mode pixel clock is stored in units of 1KHz.
++			 * Hence the bw of each lane in terms of the mode signal
++			 * is:
++			 */
++			link_bw = intel_fdi_link_freq(dev) * MHz(100)/KHz(1)/10;
+ 		}
+ 
+ 		/* determine panel color depth */
+-		temp = I915_READ(pipeconf_reg);
++		temp = I915_READ(PIPECONF(pipe));
+ 		temp &= ~PIPE_BPC_MASK;
+ 		if (is_lvds) {
+-			int lvds_reg = I915_READ(PCH_LVDS);
+ 			/* the BPC will be 6 if it is 18-bit LVDS panel */
+-			if ((lvds_reg & LVDS_A3_POWER_MASK) == LVDS_A3_POWER_UP)
++			if ((I915_READ(PCH_LVDS) & LVDS_A3_POWER_MASK) == LVDS_A3_POWER_UP)
+ 				temp |= PIPE_8BPC;
+ 			else
+ 				temp |= PIPE_6BPC;
+-		} else if (has_edp_encoder || (is_dp && intel_pch_has_edp(crtc))) {
+-			switch (dev_priv->edp_bpp/3) {
++		} else if (has_edp_encoder) {
++			switch (dev_priv->edp.bpp/3) {
+ 			case 8:
+ 				temp |= PIPE_8BPC;
+ 				break;
+@@ -3692,8 +3754,7 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
+ 			}
+ 		} else
+ 			temp |= PIPE_8BPC;
+-		I915_WRITE(pipeconf_reg, temp);
+-		I915_READ(pipeconf_reg);
++		I915_WRITE(PIPECONF(pipe), temp);
+ 
+ 		switch (temp & PIPE_BPC_MASK) {
+ 		case PIPE_8BPC:
+@@ -3738,33 +3799,39 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
+ 		/* Always enable nonspread source */
+ 		temp &= ~DREF_NONSPREAD_SOURCE_MASK;
+ 		temp |= DREF_NONSPREAD_SOURCE_ENABLE;
+-		I915_WRITE(PCH_DREF_CONTROL, temp);
+-		POSTING_READ(PCH_DREF_CONTROL);
+-
+ 		temp &= ~DREF_SSC_SOURCE_MASK;
+ 		temp |= DREF_SSC_SOURCE_ENABLE;
+ 		I915_WRITE(PCH_DREF_CONTROL, temp);
+-		POSTING_READ(PCH_DREF_CONTROL);
+ 
++		POSTING_READ(PCH_DREF_CONTROL);
+ 		udelay(200);
+ 
+ 		if (has_edp_encoder) {
+ 			if (dev_priv->lvds_use_ssc) {
+ 				temp |= DREF_SSC1_ENABLE;
+ 				I915_WRITE(PCH_DREF_CONTROL, temp);
+-				POSTING_READ(PCH_DREF_CONTROL);
+-
+-				udelay(200);
+ 
+-				temp &= ~DREF_CPU_SOURCE_OUTPUT_MASK;
+-				temp |= DREF_CPU_SOURCE_OUTPUT_DOWNSPREAD;
+-				I915_WRITE(PCH_DREF_CONTROL, temp);
+ 				POSTING_READ(PCH_DREF_CONTROL);
++				udelay(200);
++			}
++			temp &= ~DREF_CPU_SOURCE_OUTPUT_MASK;
++
++			/* Enable CPU source on CPU attached eDP */
++			if (!intel_encoder_is_pch_edp(&has_edp_encoder->base)) {
++				if (dev_priv->lvds_use_ssc)
++					temp |= DREF_CPU_SOURCE_OUTPUT_DOWNSPREAD;
++				else
++					temp |= DREF_CPU_SOURCE_OUTPUT_NONSPREAD;
+ 			} else {
+-				temp |= DREF_CPU_SOURCE_OUTPUT_NONSPREAD;
+-				I915_WRITE(PCH_DREF_CONTROL, temp);
+-				POSTING_READ(PCH_DREF_CONTROL);
++				/* Enable SSC on PCH eDP if needed */
++				if (dev_priv->lvds_use_ssc) {
++					DRM_ERROR("enabling SSC on PCH\n");
++					temp |= DREF_SUPERSPREAD_SOURCE_ENABLE;
++				}
+ 			}
++			I915_WRITE(PCH_DREF_CONTROL, temp);
++			POSTING_READ(PCH_DREF_CONTROL);
++			udelay(200);
+ 		}
+ 	}
+ 
+@@ -3780,23 +3847,26 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
+ 				reduced_clock.m2;
+ 	}
+ 
++	dpll = 0;
+ 	if (!HAS_PCH_SPLIT(dev))
+ 		dpll = DPLL_VGA_MODE_DIS;
+ 
+-	if (IS_I9XX(dev)) {
++	if (!IS_GEN2(dev)) {
+ 		if (is_lvds)
+ 			dpll |= DPLLB_MODE_LVDS;
+ 		else
+ 			dpll |= DPLLB_MODE_DAC_SERIAL;
+ 		if (is_sdvo) {
++			int pixel_multiplier = intel_mode_get_pixel_multiplier(adjusted_mode);
++			if (pixel_multiplier > 1) {
++				if (IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev))
++					dpll |= (pixel_multiplier - 1) << SDVO_MULTIPLIER_SHIFT_HIRES;
++				else if (HAS_PCH_SPLIT(dev))
++					dpll |= (pixel_multiplier - 1) << PLL_REF_SDVO_HDMI_MULTIPLIER_SHIFT;
++			}
+ 			dpll |= DPLL_DVO_HIGH_SPEED;
+-			sdvo_pixel_multiply = adjusted_mode->clock / mode->clock;
+-			if (IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev))
+-				dpll |= (sdvo_pixel_multiply - 1) << SDVO_MULTIPLIER_SHIFT_HIRES;
+-			else if (HAS_PCH_SPLIT(dev))
+-				dpll |= (sdvo_pixel_multiply - 1) << PLL_REF_SDVO_HDMI_MULTIPLIER_SHIFT;
+ 		}
+-		if (is_dp)
++		if (is_dp || intel_encoder_is_pch_edp(&has_edp_encoder->base))
+ 			dpll |= DPLL_DVO_HIGH_SPEED;
+ 
+ 		/* compute bitmask from p1 value */
+@@ -3824,7 +3894,7 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
+ 			dpll |= DPLLB_LVDS_P2_CLOCK_DIV_14;
+ 			break;
+ 		}
+-		if (IS_I965G(dev) && !HAS_PCH_SPLIT(dev))
++		if (INTEL_INFO(dev)->gen >= 4 && !HAS_PCH_SPLIT(dev))
+ 			dpll |= (6 << PLL_LOAD_PULSE_PHASE_SHIFT);
+ 	} else {
+ 		if (is_lvds) {
+@@ -3851,7 +3921,7 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
+ 		dpll |= PLL_REF_INPUT_DREFCLK;
+ 
+ 	/* setup pipeconf */
+-	pipeconf = I915_READ(pipeconf_reg);
++	pipeconf = I915_READ(PIPECONF(pipe));
+ 
+ 	/* Set up the display plane register */
+ 	dspcntr = DISPPLANE_GAMMA_ENABLE;
+@@ -3865,7 +3935,7 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
+ 			dspcntr |= DISPPLANE_SEL_PIPE_B;
+ 	}
+ 
+-	if (pipe == 0 && !IS_I965G(dev)) {
++	if (pipe == 0 && INTEL_INFO(dev)->gen < 4) {
+ 		/* Enable pixel doubling when the dot clock is > 90% of the (display)
+ 		 * core speed.
+ 		 *
+@@ -3874,51 +3944,47 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
+ 		 */
+ 		if (mode->clock >
+ 		    dev_priv->display.get_display_clock_speed(dev) * 9 / 10)
+-			pipeconf |= PIPEACONF_DOUBLE_WIDE;
++			pipeconf |= PIPECONF_DOUBLE_WIDE;
+ 		else
+-			pipeconf &= ~PIPEACONF_DOUBLE_WIDE;
++			pipeconf &= ~PIPECONF_DOUBLE_WIDE;
+ 	}
+ 
+ 	dspcntr |= DISPLAY_PLANE_ENABLE;
+-	pipeconf |= PIPEACONF_ENABLE;
++	pipeconf |= PIPECONF_ENABLE;
+ 	dpll |= DPLL_VCO_ENABLE;
+ 
+-
+-	/* Disable the panel fitter if it was on our pipe */
+-	if (!HAS_PCH_SPLIT(dev) && intel_panel_fitter_pipe(dev) == pipe)
+-		I915_WRITE(PFIT_CONTROL, 0);
+-
+ 	DRM_DEBUG_KMS("Mode for pipe %c:\n", pipe == 0 ? 'A' : 'B');
+ 	drm_mode_debug_printmodeline(mode);
+ 
+ 	/* assign to Ironlake registers */
+ 	if (HAS_PCH_SPLIT(dev)) {
+-		fp_reg = pch_fp_reg;
+-		dpll_reg = pch_dpll_reg;
++		fp_reg = PCH_FP0(pipe);
++		dpll_reg = PCH_DPLL(pipe);
++	} else {
++		fp_reg = FP0(pipe);
++		dpll_reg = DPLL(pipe);
+ 	}
+ 
+-	if (!has_edp_encoder) {
++	/* PCH eDP needs FDI, but CPU eDP does not */
++	if (!has_edp_encoder || intel_encoder_is_pch_edp(&has_edp_encoder->base)) {
+ 		I915_WRITE(fp_reg, fp);
+ 		I915_WRITE(dpll_reg, dpll & ~DPLL_VCO_ENABLE);
+-		I915_READ(dpll_reg);
++
++		POSTING_READ(dpll_reg);
+ 		udelay(150);
+ 	}
+ 
+ 	/* enable transcoder DPLL */
+ 	if (HAS_PCH_CPT(dev)) {
+ 		temp = I915_READ(PCH_DPLL_SEL);
+-		if (trans_dpll_sel == 0)
+-			temp |= (TRANSA_DPLL_ENABLE | TRANSA_DPLLA_SEL);
++		if (pipe == 0)
++			temp |= TRANSA_DPLL_ENABLE | TRANSA_DPLLA_SEL;
+ 		else
+-			temp |=	(TRANSB_DPLL_ENABLE | TRANSB_DPLLB_SEL);
++			temp |=	TRANSB_DPLL_ENABLE | TRANSB_DPLLB_SEL;
+ 		I915_WRITE(PCH_DPLL_SEL, temp);
+-		I915_READ(PCH_DPLL_SEL);
+-		udelay(150);
+-	}
+ 
+-	if (HAS_PCH_SPLIT(dev)) {
+-		pipeconf &= ~PIPE_ENABLE_DITHER;
+-		pipeconf &= ~PIPE_DITHER_TYPE_MASK;
++		POSTING_READ(PCH_DPLL_SEL);
++		udelay(150);
+ 	}
+ 
+ 	/* The LVDS pin pair needs to be on before the DPLLs are enabled.
+@@ -3926,58 +3992,60 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
+ 	 * things on.
+ 	 */
+ 	if (is_lvds) {
+-		u32 lvds;
+-
++		reg = LVDS;
+ 		if (HAS_PCH_SPLIT(dev))
+-			lvds_reg = PCH_LVDS;
++			reg = PCH_LVDS;
+ 
+-		lvds = I915_READ(lvds_reg);
+-		lvds |= LVDS_PORT_EN | LVDS_A0A2_CLKA_POWER_UP;
++		temp = I915_READ(reg);
++		temp |= LVDS_PORT_EN | LVDS_A0A2_CLKA_POWER_UP;
+ 		if (pipe == 1) {
+ 			if (HAS_PCH_CPT(dev))
+-				lvds |= PORT_TRANS_B_SEL_CPT;
++				temp |= PORT_TRANS_B_SEL_CPT;
+ 			else
+-				lvds |= LVDS_PIPEB_SELECT;
++				temp |= LVDS_PIPEB_SELECT;
+ 		} else {
+ 			if (HAS_PCH_CPT(dev))
+-				lvds &= ~PORT_TRANS_SEL_MASK;
++				temp &= ~PORT_TRANS_SEL_MASK;
+ 			else
+-				lvds &= ~LVDS_PIPEB_SELECT;
++				temp &= ~LVDS_PIPEB_SELECT;
+ 		}
+ 		/* set the corresponding LVDS_BORDER bit */
+-		lvds |= dev_priv->lvds_border_bits;
++		temp |= dev_priv->lvds_border_bits;
+ 		/* Set the B0-B3 data pairs corresponding to whether we're going to
+ 		 * set the DPLLs for dual-channel mode or not.
+ 		 */
+ 		if (clock.p2 == 7)
+-			lvds |= LVDS_B0B3_POWER_UP | LVDS_CLKB_POWER_UP;
++			temp |= LVDS_B0B3_POWER_UP | LVDS_CLKB_POWER_UP;
+ 		else
+-			lvds &= ~(LVDS_B0B3_POWER_UP | LVDS_CLKB_POWER_UP);
++			temp &= ~(LVDS_B0B3_POWER_UP | LVDS_CLKB_POWER_UP);
+ 
+ 		/* It would be nice to set 24 vs 18-bit mode (LVDS_A3_POWER_UP)
+ 		 * appropriately here, but we need to look more thoroughly into how
+ 		 * panels behave in the two modes.
+ 		 */
+-		/* set the dithering flag */
+-		if (IS_I965G(dev)) {
+-			if (dev_priv->lvds_dither) {
+-				if (HAS_PCH_SPLIT(dev)) {
+-					pipeconf |= PIPE_ENABLE_DITHER;
+-					pipeconf |= PIPE_DITHER_TYPE_ST01;
+-				} else
+-					lvds |= LVDS_ENABLE_DITHER;
+-			} else {
+-				if (!HAS_PCH_SPLIT(dev)) {
+-					lvds &= ~LVDS_ENABLE_DITHER;
+-				}
+-			}
++		/* set the dithering flag on non-PCH LVDS as needed */
++		if (INTEL_INFO(dev)->gen >= 4 && !HAS_PCH_SPLIT(dev)) {
++			if (dev_priv->lvds_dither)
++				temp |= LVDS_ENABLE_DITHER;
++			else
++				temp &= ~LVDS_ENABLE_DITHER;
++		}
++		I915_WRITE(reg, temp);
++	}
++
++	/* set the dithering flag and clear for anything other than a panel. */
++	if (HAS_PCH_SPLIT(dev)) {
++		pipeconf &= ~PIPECONF_DITHER_EN;
++		pipeconf &= ~PIPECONF_DITHER_TYPE_MASK;
++		if (dev_priv->lvds_dither && (is_lvds || has_edp_encoder)) {
++			pipeconf |= PIPECONF_DITHER_EN;
++			pipeconf |= PIPECONF_DITHER_TYPE_ST1;
+ 		}
+-		I915_WRITE(lvds_reg, lvds);
+-		I915_READ(lvds_reg);
+ 	}
+-	if (is_dp)
++
++	if (is_dp || intel_encoder_is_pch_edp(&has_edp_encoder->base)) {
+ 		intel_dp_set_m_n(crtc, mode, adjusted_mode);
+-	else if (HAS_PCH_SPLIT(dev)) {
++	} else if (HAS_PCH_SPLIT(dev)) {
+ 		/* For non-DP output, clear any trans DP clock recovery setting.*/
+ 		if (pipe == 0) {
+ 			I915_WRITE(TRANSA_DATA_M1, 0);
+@@ -3992,29 +4060,35 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
+ 		}
+ 	}
+ 
+-	if (!has_edp_encoder) {
++	if (!has_edp_encoder || intel_encoder_is_pch_edp(&has_edp_encoder->base)) {
+ 		I915_WRITE(fp_reg, fp);
+ 		I915_WRITE(dpll_reg, dpll);
+-		I915_READ(dpll_reg);
++
+ 		/* Wait for the clocks to stabilize. */
++		POSTING_READ(dpll_reg);
+ 		udelay(150);
+ 
+-		if (IS_I965G(dev) && !HAS_PCH_SPLIT(dev)) {
++		if (INTEL_INFO(dev)->gen >= 4 && !HAS_PCH_SPLIT(dev)) {
++			temp = 0;
+ 			if (is_sdvo) {
+-				sdvo_pixel_multiply = adjusted_mode->clock / mode->clock;
+-				I915_WRITE(dpll_md_reg, (0 << DPLL_MD_UDI_DIVIDER_SHIFT) |
+-					((sdvo_pixel_multiply - 1) << DPLL_MD_UDI_MULTIPLIER_SHIFT));
+-			} else
+-				I915_WRITE(dpll_md_reg, 0);
++				temp = intel_mode_get_pixel_multiplier(adjusted_mode);
++				if (temp > 1)
++					temp = (temp - 1) << DPLL_MD_UDI_MULTIPLIER_SHIFT;
++				else
++					temp = 0;
++			}
++			I915_WRITE(DPLL_MD(pipe), temp);
+ 		} else {
+ 			/* write it again -- the BIOS does, after all */
+ 			I915_WRITE(dpll_reg, dpll);
+ 		}
+-		I915_READ(dpll_reg);
++
+ 		/* Wait for the clocks to stabilize. */
++		POSTING_READ(dpll_reg);
+ 		udelay(150);
+ 	}
+ 
++	intel_crtc->lowfreq_avail = false;
+ 	if (is_lvds && has_reduced_clock && i915_powersave) {
+ 		I915_WRITE(fp_reg + 4, fp2);
+ 		intel_crtc->lowfreq_avail = true;
+@@ -4024,7 +4098,6 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
+ 		}
+ 	} else {
+ 		I915_WRITE(fp_reg + 4, fp);
+-		intel_crtc->lowfreq_avail = false;
+ 		if (HAS_PIPE_CXSR(dev)) {
+ 			DRM_DEBUG_KMS("disabling CxSR downclocking\n");
+ 			pipeconf &= ~PIPECONF_CXSR_DOWNCLOCK;
+@@ -4043,70 +4116,62 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
+ 	} else
+ 		pipeconf &= ~PIPECONF_INTERLACE_W_FIELD_INDICATION; /* progressive */
+ 
+-	I915_WRITE(htot_reg, (adjusted_mode->crtc_hdisplay - 1) |
++	I915_WRITE(HTOTAL(pipe),
++		   (adjusted_mode->crtc_hdisplay - 1) |
+ 		   ((adjusted_mode->crtc_htotal - 1) << 16));
+-	I915_WRITE(hblank_reg, (adjusted_mode->crtc_hblank_start - 1) |
++	I915_WRITE(HBLANK(pipe),
++		   (adjusted_mode->crtc_hblank_start - 1) |
+ 		   ((adjusted_mode->crtc_hblank_end - 1) << 16));
+-	I915_WRITE(hsync_reg, (adjusted_mode->crtc_hsync_start - 1) |
++	I915_WRITE(HSYNC(pipe),
++		   (adjusted_mode->crtc_hsync_start - 1) |
+ 		   ((adjusted_mode->crtc_hsync_end - 1) << 16));
+-	I915_WRITE(vtot_reg, (adjusted_mode->crtc_vdisplay - 1) |
++
++	I915_WRITE(VTOTAL(pipe),
++		   (adjusted_mode->crtc_vdisplay - 1) |
+ 		   ((adjusted_mode->crtc_vtotal - 1) << 16));
+-	I915_WRITE(vblank_reg, (adjusted_mode->crtc_vblank_start - 1) |
++	I915_WRITE(VBLANK(pipe),
++		   (adjusted_mode->crtc_vblank_start - 1) |
+ 		   ((adjusted_mode->crtc_vblank_end - 1) << 16));
+-	I915_WRITE(vsync_reg, (adjusted_mode->crtc_vsync_start - 1) |
++	I915_WRITE(VSYNC(pipe),
++		   (adjusted_mode->crtc_vsync_start - 1) |
+ 		   ((adjusted_mode->crtc_vsync_end - 1) << 16));
+-	/* pipesrc and dspsize control the size that is scaled from, which should
+-	 * always be the user's requested size.
++
++	/* pipesrc and dspsize control the size that is scaled from,
++	 * which should always be the user's requested size.
+ 	 */
+ 	if (!HAS_PCH_SPLIT(dev)) {
+-		I915_WRITE(dspsize_reg, ((mode->vdisplay - 1) << 16) |
+-				(mode->hdisplay - 1));
+-		I915_WRITE(dsppos_reg, 0);
++		I915_WRITE(DSPSIZE(plane),
++			   ((mode->vdisplay - 1) << 16) |
++			   (mode->hdisplay - 1));
++		I915_WRITE(DSPPOS(plane), 0);
+ 	}
+-	I915_WRITE(pipesrc_reg, ((mode->hdisplay - 1) << 16) | (mode->vdisplay - 1));
++	I915_WRITE(PIPESRC(pipe),
++		   ((mode->hdisplay - 1) << 16) | (mode->vdisplay - 1));
+ 
+ 	if (HAS_PCH_SPLIT(dev)) {
+-		I915_WRITE(data_m1_reg, TU_SIZE(m_n.tu) | m_n.gmch_m);
+-		I915_WRITE(data_n1_reg, TU_SIZE(m_n.tu) | m_n.gmch_n);
+-		I915_WRITE(link_m1_reg, m_n.link_m);
+-		I915_WRITE(link_n1_reg, m_n.link_n);
++		I915_WRITE(PIPE_DATA_M1(pipe), TU_SIZE(m_n.tu) | m_n.gmch_m);
++		I915_WRITE(PIPE_DATA_N1(pipe), m_n.gmch_n);
++		I915_WRITE(PIPE_LINK_M1(pipe), m_n.link_m);
++		I915_WRITE(PIPE_LINK_N1(pipe), m_n.link_n);
+ 
+-		if (has_edp_encoder) {
++		if (has_edp_encoder && !intel_encoder_is_pch_edp(&has_edp_encoder->base)) {
+ 			ironlake_set_pll_edp(crtc, adjusted_mode->clock);
+-		} else {
+-			/* enable FDI RX PLL too */
+-			temp = I915_READ(fdi_rx_reg);
+-			I915_WRITE(fdi_rx_reg, temp | FDI_RX_PLL_ENABLE);
+-			I915_READ(fdi_rx_reg);
+-			udelay(200);
+-
+-			/* enable FDI TX PLL too */
+-			temp = I915_READ(fdi_tx_reg);
+-			I915_WRITE(fdi_tx_reg, temp | FDI_TX_PLL_ENABLE);
+-			I915_READ(fdi_tx_reg);
+-
+-			/* enable FDI RX PCDCLK */
+-			temp = I915_READ(fdi_rx_reg);
+-			I915_WRITE(fdi_rx_reg, temp | FDI_SEL_PCDCLK);
+-			I915_READ(fdi_rx_reg);
+-			udelay(200);
+ 		}
+ 	}
+ 
+-	I915_WRITE(pipeconf_reg, pipeconf);
+-	I915_READ(pipeconf_reg);
++	I915_WRITE(PIPECONF(pipe), pipeconf);
++	POSTING_READ(PIPECONF(pipe));
+ 
+ 	intel_wait_for_vblank(dev, pipe);
+ 
+-	if (IS_IRONLAKE(dev)) {
++	if (IS_GEN5(dev)) {
+ 		/* enable address swizzle for tiling buffer */
+ 		temp = I915_READ(DISP_ARB_CTL);
+ 		I915_WRITE(DISP_ARB_CTL, temp | DISP_TILE_SURFACE_SWIZZLING);
+ 	}
+ 
+-	I915_WRITE(dspcntr_reg, dspcntr);
++	I915_WRITE(DSPCNTR(plane), dspcntr);
+ 
+-	/* Flush the plane changes */
+ 	ret = intel_pipe_set_base(crtc, x, y, old_fb);
+ 
+ 	intel_update_watermarks(dev);
+@@ -4199,7 +4264,8 @@ static void i9xx_update_cursor(struct drm_crtc *crtc, u32 base)
+ }
+ 
+ /* If no-part of the cursor is visible on the framebuffer, then the GPU may hang... */
+-static void intel_crtc_update_cursor(struct drm_crtc *crtc)
++static void intel_crtc_update_cursor(struct drm_crtc *crtc,
++				     bool on)
+ {
+ 	struct drm_device *dev = crtc->dev;
+ 	struct drm_i915_private *dev_priv = dev->dev_private;
+@@ -4212,7 +4278,7 @@ static void intel_crtc_update_cursor(struct drm_crtc *crtc)
+ 
+ 	pos = 0;
+ 
+-	if (intel_crtc->cursor_on && crtc->fb) {
++	if (on && crtc->enabled && crtc->fb) {
+ 		base = intel_crtc->cursor_addr;
+ 		if (x > (int) crtc->fb->width)
+ 			base = 0;
+@@ -4324,7 +4390,7 @@ static int intel_crtc_cursor_set(struct drm_crtc *crtc,
+ 		addr = obj_priv->phys_obj->handle->busaddr;
+ 	}
+ 
+-	if (!IS_I9XX(dev))
++	if (IS_GEN2(dev))
+ 		I915_WRITE(CURSIZE, (height << 12) | width);
+ 
+  finish:
+@@ -4344,7 +4410,7 @@ static int intel_crtc_cursor_set(struct drm_crtc *crtc,
+ 	intel_crtc->cursor_width = width;
+ 	intel_crtc->cursor_height = height;
+ 
+-	intel_crtc_update_cursor(crtc);
++	intel_crtc_update_cursor(crtc, true);
+ 
+ 	return 0;
+ fail_unpin:
+@@ -4363,7 +4429,7 @@ static int intel_crtc_cursor_move(struct drm_crtc *crtc, int x, int y)
+ 	intel_crtc->cursor_x = x;
+ 	intel_crtc->cursor_y = y;
+ 
+-	intel_crtc_update_cursor(crtc);
++	intel_crtc_update_cursor(crtc, true);
+ 
+ 	return 0;
+ }
+@@ -4432,7 +4498,7 @@ struct drm_crtc *intel_get_load_detect_pipe(struct intel_encoder *intel_encoder,
+ 	struct intel_crtc *intel_crtc;
+ 	struct drm_crtc *possible_crtc;
+ 	struct drm_crtc *supported_crtc =NULL;
+-	struct drm_encoder *encoder = &intel_encoder->enc;
++	struct drm_encoder *encoder = &intel_encoder->base;
+ 	struct drm_crtc *crtc = NULL;
+ 	struct drm_device *dev = encoder->dev;
+ 	struct drm_encoder_helper_funcs *encoder_funcs = encoder->helper_private;
+@@ -4513,7 +4579,7 @@ struct drm_crtc *intel_get_load_detect_pipe(struct intel_encoder *intel_encoder,
+ void intel_release_load_detect_pipe(struct intel_encoder *intel_encoder,
+ 				    struct drm_connector *connector, int dpms_mode)
+ {
+-	struct drm_encoder *encoder = &intel_encoder->enc;
++	struct drm_encoder *encoder = &intel_encoder->base;
+ 	struct drm_device *dev = encoder->dev;
+ 	struct drm_crtc *crtc = encoder->crtc;
+ 	struct drm_encoder_helper_funcs *encoder_funcs = encoder->helper_private;
+@@ -4559,7 +4625,7 @@ static int intel_crtc_clock_get(struct drm_device *dev, struct drm_crtc *crtc)
+ 		clock.m2 = (fp & FP_M2_DIV_MASK) >> FP_M2_DIV_SHIFT;
+ 	}
+ 
+-	if (IS_I9XX(dev)) {
++	if (!IS_GEN2(dev)) {
+ 		if (IS_PINEVIEW(dev))
+ 			clock.p1 = ffs((dpll & DPLL_FPA01_P1_POST_DIV_MASK_PINEVIEW) >>
+ 				DPLL_FPA01_P1_POST_DIV_SHIFT_PINEVIEW);
+@@ -4663,8 +4729,6 @@ static void intel_gpu_idle_timer(unsigned long arg)
+ 	struct drm_device *dev = (struct drm_device *)arg;
+ 	drm_i915_private_t *dev_priv = dev->dev_private;
+ 
+-	DRM_DEBUG_DRIVER("idle timer fired, downclocking\n");
+-
+ 	dev_priv->busy = false;
+ 
+ 	queue_work(dev_priv->wq, &dev_priv->idle_work);
+@@ -4678,14 +4742,12 @@ static void intel_crtc_idle_timer(unsigned long arg)
+ 	struct drm_crtc *crtc = &intel_crtc->base;
+ 	drm_i915_private_t *dev_priv = crtc->dev->dev_private;
+ 
+-	DRM_DEBUG_DRIVER("idle timer fired, downclocking\n");
+-
+ 	intel_crtc->busy = false;
+ 
+ 	queue_work(dev_priv->wq, &dev_priv->idle_work);
+ }
+ 
+-static void intel_increase_pllclock(struct drm_crtc *crtc, bool schedule)
++static void intel_increase_pllclock(struct drm_crtc *crtc)
+ {
+ 	struct drm_device *dev = crtc->dev;
+ 	drm_i915_private_t *dev_priv = dev->dev_private;
+@@ -4720,9 +4782,8 @@ static void intel_increase_pllclock(struct drm_crtc *crtc, bool schedule)
+ 	}
+ 
+ 	/* Schedule downclock */
+-	if (schedule)
+-		mod_timer(&intel_crtc->idle_timer, jiffies +
+-			  msecs_to_jiffies(CRTC_IDLE_TIMEOUT));
++	mod_timer(&intel_crtc->idle_timer, jiffies +
++		  msecs_to_jiffies(CRTC_IDLE_TIMEOUT));
+ }
+ 
+ static void intel_decrease_pllclock(struct drm_crtc *crtc)
+@@ -4858,7 +4919,7 @@ void intel_mark_busy(struct drm_device *dev, struct drm_gem_object *obj)
+ 					I915_WRITE(FW_BLC_SELF, fw_blc_self | FW_BLC_SELF_EN_MASK);
+ 				}
+ 				/* Non-busy -> busy, upclock */
+-				intel_increase_pllclock(crtc, true);
++				intel_increase_pllclock(crtc);
+ 				intel_crtc->busy = true;
+ 			} else {
+ 				/* Busy -> busy, put off timer */
+@@ -4872,8 +4933,22 @@ void intel_mark_busy(struct drm_device *dev, struct drm_gem_object *obj)
+ static void intel_crtc_destroy(struct drm_crtc *crtc)
+ {
+ 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
++	struct drm_device *dev = crtc->dev;
++	struct intel_unpin_work *work;
++	unsigned long flags;
++
++	spin_lock_irqsave(&dev->event_lock, flags);
++	work = intel_crtc->unpin_work;
++	intel_crtc->unpin_work = NULL;
++	spin_unlock_irqrestore(&dev->event_lock, flags);
++
++	if (work) {
++		cancel_work_sync(&work->work);
++		kfree(work);
++	}
+ 
+ 	drm_crtc_cleanup(crtc);
++
+ 	kfree(intel_crtc);
+ }
+ 
+@@ -4928,12 +5003,11 @@ static void do_intel_finish_page_flip(struct drm_device *dev,
+ 
+ 	spin_unlock_irqrestore(&dev->event_lock, flags);
+ 
+-	obj_priv = to_intel_bo(work->pending_flip_obj);
+-
+-	/* Initial scanout buffer will have a 0 pending flip count */
+-	if ((atomic_read(&obj_priv->pending_flip) == 0) ||
+-	    atomic_dec_and_test(&obj_priv->pending_flip))
+-		DRM_WAKEUP(&dev_priv->pending_flip_queue);
++	obj_priv = to_intel_bo(work->old_fb_obj);
++	atomic_clear_mask(1 << intel_crtc->plane,
++			  &obj_priv->pending_flip.counter);
++	if (atomic_read(&obj_priv->pending_flip) == 0)
++		wake_up(&dev_priv->pending_flip_queue);
+ 	schedule_work(&work->work);
+ 
+ 	trace_i915_flip_complete(intel_crtc->plane, work->pending_flip_obj);
+@@ -5014,7 +5088,7 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
+ 	obj = intel_fb->obj;
+ 
+ 	mutex_lock(&dev->struct_mutex);
+-	ret = intel_pin_and_fence_fb_obj(dev, obj);
++	ret = intel_pin_and_fence_fb_obj(dev, obj, true);
+ 	if (ret)
+ 		goto cleanup_work;
+ 
+@@ -5023,29 +5097,33 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
+ 	drm_gem_object_reference(obj);
+ 
+ 	crtc->fb = fb;
+-	ret = i915_gem_object_flush_write_domain(obj);
+-	if (ret)
+-		goto cleanup_objs;
+ 
+ 	ret = drm_vblank_get(dev, intel_crtc->pipe);
+ 	if (ret)
+ 		goto cleanup_objs;
+ 
+-	obj_priv = to_intel_bo(obj);
+-	atomic_inc(&obj_priv->pending_flip);
++	/* Block clients from rendering to the new back buffer until
++	 * the flip occurs and the object is no longer visible.
++	 */
++	atomic_add(1 << intel_crtc->plane,
++		   &to_intel_bo(work->old_fb_obj)->pending_flip);
++
+ 	work->pending_flip_obj = obj;
++	obj_priv = to_intel_bo(obj);
+ 
+ 	if (IS_GEN3(dev) || IS_GEN2(dev)) {
+ 		u32 flip_mask;
+ 
++		/* Can't queue multiple flips, so wait for the previous
++		 * one to finish before executing the next.
++		 */
++		BEGIN_LP_RING(2);
+ 		if (intel_crtc->plane)
+ 			flip_mask = MI_WAIT_FOR_PLANE_B_FLIP;
+ 		else
+ 			flip_mask = MI_WAIT_FOR_PLANE_A_FLIP;
+-
+-		BEGIN_LP_RING(2);
+ 		OUT_RING(MI_WAIT_FOR_EVENT | flip_mask);
+-		OUT_RING(0);
++		OUT_RING(MI_NOOP);
+ 		ADVANCE_LP_RING();
+ 	}
+ 
+@@ -5126,15 +5204,14 @@ cleanup_work:
+ 	return ret;
+ }
+ 
+-static const struct drm_crtc_helper_funcs intel_helper_funcs = {
++static struct drm_crtc_helper_funcs intel_helper_funcs = {
+ 	.dpms = intel_crtc_dpms,
+ 	.mode_fixup = intel_crtc_mode_fixup,
+ 	.mode_set = intel_crtc_mode_set,
+ 	.mode_set_base = intel_pipe_set_base,
+ 	.mode_set_base_atomic = intel_pipe_set_base_atomic,
+-	.prepare = intel_crtc_prepare,
+-	.commit = intel_crtc_commit,
+ 	.load_lut = intel_crtc_load_lut,
++	.disable = intel_crtc_disable,
+ };
+ 
+ static const struct drm_crtc_funcs intel_crtc_funcs = {
+@@ -5160,8 +5237,6 @@ static void intel_crtc_init(struct drm_device *dev, int pipe)
+ 	drm_crtc_init(dev, &intel_crtc->base, &intel_crtc_funcs);
+ 
+ 	drm_mode_crtc_set_gamma_size(&intel_crtc->base, 256);
+-	intel_crtc->pipe = pipe;
+-	intel_crtc->plane = pipe;
+ 	for (i = 0; i < 256; i++) {
+ 		intel_crtc->lut_r[i] = i;
+ 		intel_crtc->lut_g[i] = i;
+@@ -5171,9 +5246,9 @@ static void intel_crtc_init(struct drm_device *dev, int pipe)
+ 	/* Swap pipes & planes for FBC on pre-965 */
+ 	intel_crtc->pipe = pipe;
+ 	intel_crtc->plane = pipe;
+-	if (IS_MOBILE(dev) && (IS_I9XX(dev) && !IS_I965G(dev))) {
++	if (IS_MOBILE(dev) && IS_GEN3(dev)) {
+ 		DRM_DEBUG_KMS("swapping pipes & planes for FBC\n");
+-		intel_crtc->plane = ((pipe == 0) ? 1 : 0);
++		intel_crtc->plane = !pipe;
+ 	}
+ 
+ 	BUG_ON(pipe >= ARRAY_SIZE(dev_priv->plane_to_crtc_mapping) ||
+@@ -5183,6 +5258,16 @@ static void intel_crtc_init(struct drm_device *dev, int pipe)
+ 
+ 	intel_crtc->cursor_addr = 0;
+ 	intel_crtc->dpms_mode = -1;
++	intel_crtc->active = true; /* force the pipe off on setup_init_config */
++
++	if (HAS_PCH_SPLIT(dev)) {
++		intel_helper_funcs.prepare = ironlake_crtc_prepare;
++		intel_helper_funcs.commit = ironlake_crtc_commit;
++	} else {
++		intel_helper_funcs.prepare = i9xx_crtc_prepare;
++		intel_helper_funcs.commit = i9xx_crtc_commit;
++	}
++
+ 	drm_crtc_helper_add(&intel_crtc->base, &intel_helper_funcs);
+ 
+ 	intel_crtc->busy = false;
+@@ -5218,38 +5303,25 @@ int intel_get_pipe_from_crtc_id(struct drm_device *dev, void *data,
+ 	return 0;
+ }
+ 
+-struct drm_crtc *intel_get_crtc_from_pipe(struct drm_device *dev, int pipe)
+-{
+-	struct drm_crtc *crtc = NULL;
+-
+-	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
+-		struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+-		if (intel_crtc->pipe == pipe)
+-			break;
+-	}
+-	return crtc;
+-}
+-
+ static int intel_encoder_clones(struct drm_device *dev, int type_mask)
+ {
++	struct intel_encoder *encoder;
+ 	int index_mask = 0;
+-	struct drm_encoder *encoder;
+ 	int entry = 0;
+ 
+-        list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
+-		struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder);
+-		if (type_mask & intel_encoder->clone_mask)
++	list_for_each_entry(encoder, &dev->mode_config.encoder_list, base.head) {
++		if (type_mask & encoder->clone_mask)
+ 			index_mask |= (1 << entry);
+ 		entry++;
+ 	}
++
+ 	return index_mask;
+ }
+ 
+-
+ static void intel_setup_outputs(struct drm_device *dev)
+ {
+ 	struct drm_i915_private *dev_priv = dev->dev_private;
+-	struct drm_encoder *encoder;
++	struct intel_encoder *encoder;
+ 	bool dpd_is_edp = false;
+ 
+ 	if (IS_MOBILE(dev) && !IS_I830(dev))
+@@ -5338,12 +5410,10 @@ static void intel_setup_outputs(struct drm_device *dev)
+ 	if (SUPPORTS_TV(dev))
+ 		intel_tv_init(dev);
+ 
+-	list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
+-		struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder);
+-
+-		encoder->possible_crtcs = intel_encoder->crtc_mask;
+-		encoder->possible_clones = intel_encoder_clones(dev,
+-						intel_encoder->clone_mask);
++	list_for_each_entry(encoder, &dev->mode_config.encoder_list, base.head) {
++		encoder->base.possible_crtcs = encoder->crtc_mask;
++		encoder->base.possible_clones =
++			intel_encoder_clones(dev, encoder->clone_mask);
+ 	}
+ }
+ 
+@@ -5377,8 +5447,25 @@ int intel_framebuffer_init(struct drm_device *dev,
+ 			   struct drm_mode_fb_cmd *mode_cmd,
+ 			   struct drm_gem_object *obj)
+ {
++	struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
+ 	int ret;
+ 
++	if (obj_priv->tiling_mode == I915_TILING_Y)
++		return -EINVAL;
++
++	if (mode_cmd->pitch & 63)
++		return -EINVAL;
++
++	switch (mode_cmd->bpp) {
++	case 8:
++	case 16:
++	case 24:
++	case 32:
++		break;
++	default:
++		return -EINVAL;
++	}
++
+ 	ret = drm_framebuffer_init(dev, &intel_fb->base, &intel_fb_funcs);
+ 	if (ret) {
+ 		DRM_ERROR("framebuffer init failed %d\n", ret);
+@@ -5487,6 +5574,10 @@ void ironlake_enable_drps(struct drm_device *dev)
+ 	u32 rgvmodectl = I915_READ(MEMMODECTL);
+ 	u8 fmax, fmin, fstart, vstart;
+ 
++	/* Enable temp reporting */
++	I915_WRITE16(PMMISC, I915_READ(PMMISC) | MCPPCE_EN);
++	I915_WRITE16(TSC1, I915_READ(TSC1) | TSE);
++
+ 	/* 100ms RC evaluation intervals */
+ 	I915_WRITE(RCUPEI, 100000);
+ 	I915_WRITE(RCDNEI, 100000);
+@@ -5502,20 +5593,19 @@ void ironlake_enable_drps(struct drm_device *dev)
+ 	fmin = (rgvmodectl & MEMMODE_FMIN_MASK);
+ 	fstart = (rgvmodectl & MEMMODE_FSTART_MASK) >>
+ 		MEMMODE_FSTART_SHIFT;
+-	fstart = fmax;
+ 
+ 	vstart = (I915_READ(PXVFREQ_BASE + (fstart * 4)) & PXVFREQ_PX_MASK) >>
+ 		PXVFREQ_PX_SHIFT;
+ 
+-	dev_priv->fmax = fstart; /* IPS callback will increase this */
++	dev_priv->fmax = fmax; /* IPS callback will increase this */
+ 	dev_priv->fstart = fstart;
+ 
+-	dev_priv->max_delay = fmax;
++	dev_priv->max_delay = fstart;
+ 	dev_priv->min_delay = fmin;
+ 	dev_priv->cur_delay = fstart;
+ 
+-	DRM_DEBUG_DRIVER("fmax: %d, fmin: %d, fstart: %d\n", fmax, fmin,
+-			 fstart);
++	DRM_DEBUG_DRIVER("fmax: %d, fmin: %d, fstart: %d\n",
++			 fmax, fmin, fstart);
+ 
+ 	I915_WRITE(MEMINTREN, MEMINT_CX_SUPR_EN | MEMINT_EVAL_CHG_EN);
+ 
+@@ -5529,7 +5619,7 @@ void ironlake_enable_drps(struct drm_device *dev)
+ 	rgvmodectl |= MEMMODE_SWMODE_EN;
+ 	I915_WRITE(MEMMODECTL, rgvmodectl);
+ 
+-	if (wait_for((I915_READ(MEMSWCTL) & MEMCTL_CMD_STS) == 0, 1, 0))
++	if (wait_for((I915_READ(MEMSWCTL) & MEMCTL_CMD_STS) == 0, 10))
+ 		DRM_ERROR("stuck trying to change perf mode\n");
+ 	msleep(1);
+ 
+@@ -5660,7 +5750,7 @@ void intel_init_clock_gating(struct drm_device *dev)
+ 	if (HAS_PCH_SPLIT(dev)) {
+ 		uint32_t dspclk_gate = VRHUNIT_CLOCK_GATE_DISABLE;
+ 
+-		if (IS_IRONLAKE(dev)) {
++		if (IS_GEN5(dev)) {
+ 			/* Required for FBC */
+ 			dspclk_gate |= DPFDUNIT_CLOCK_GATE_DISABLE;
+ 			/* Required for CxSR */
+@@ -5674,13 +5764,20 @@ void intel_init_clock_gating(struct drm_device *dev)
+ 		I915_WRITE(PCH_DSPCLK_GATE_D, dspclk_gate);
+ 
+ 		/*
++		 * On Ibex Peak and Cougar Point, we need to disable clock
++		 * gating for the panel power sequencer or it will fail to
++		 * start up when no ports are active.
++		 */
++		I915_WRITE(SOUTH_DSPCLK_GATE_D, PCH_DPLSUNIT_CLOCK_GATE_DISABLE);
++
++		/*
+ 		 * According to the spec the following bits should be set in
+ 		 * order to enable memory self-refresh
+ 		 * The bit 22/21 of 0x42004
+ 		 * The bit 5 of 0x42020
+ 		 * The bit 15 of 0x45000
+ 		 */
+-		if (IS_IRONLAKE(dev)) {
++		if (IS_GEN5(dev)) {
+ 			I915_WRITE(ILK_DISPLAY_CHICKEN2,
+ 					(I915_READ(ILK_DISPLAY_CHICKEN2) |
+ 					ILK_DPARB_GATE | ILK_VSDPFD_FULL));
+@@ -5728,20 +5825,20 @@ void intel_init_clock_gating(struct drm_device *dev)
+ 		if (IS_GM45(dev))
+ 			dspclk_gate |= DSSUNIT_CLOCK_GATE_DISABLE;
+ 		I915_WRITE(DSPCLK_GATE_D, dspclk_gate);
+-	} else if (IS_I965GM(dev)) {
++	} else if (IS_CRESTLINE(dev)) {
+ 		I915_WRITE(RENCLK_GATE_D1, I965_RCC_CLOCK_GATE_DISABLE);
+ 		I915_WRITE(RENCLK_GATE_D2, 0);
+ 		I915_WRITE(DSPCLK_GATE_D, 0);
+ 		I915_WRITE(RAMCLK_GATE_D, 0);
+ 		I915_WRITE16(DEUC, 0);
+-	} else if (IS_I965G(dev)) {
++	} else if (IS_BROADWATER(dev)) {
+ 		I915_WRITE(RENCLK_GATE_D1, I965_RCZ_CLOCK_GATE_DISABLE |
+ 		       I965_RCC_CLOCK_GATE_DISABLE |
+ 		       I965_RCPB_CLOCK_GATE_DISABLE |
+ 		       I965_ISC_CLOCK_GATE_DISABLE |
+ 		       I965_FBC_CLOCK_GATE_DISABLE);
+ 		I915_WRITE(RENCLK_GATE_D2, 0);
+-	} else if (IS_I9XX(dev)) {
++	} else if (IS_GEN3(dev)) {
+ 		u32 dstate = I915_READ(D_STATE);
+ 
+ 		dstate |= DSTATE_PLL_D3_OFF | DSTATE_GFX_CLOCK_GATING |
+@@ -5823,7 +5920,7 @@ static void intel_init_display(struct drm_device *dev)
+ 			dev_priv->display.fbc_enabled = g4x_fbc_enabled;
+ 			dev_priv->display.enable_fbc = g4x_enable_fbc;
+ 			dev_priv->display.disable_fbc = g4x_disable_fbc;
+-		} else if (IS_I965GM(dev)) {
++		} else if (IS_CRESTLINE(dev)) {
+ 			dev_priv->display.fbc_enabled = i8xx_fbc_enabled;
+ 			dev_priv->display.enable_fbc = i8xx_enable_fbc;
+ 			dev_priv->display.disable_fbc = i8xx_disable_fbc;
+@@ -5856,7 +5953,7 @@ static void intel_init_display(struct drm_device *dev)
+ 
+ 	/* For FIFO watermark updates */
+ 	if (HAS_PCH_SPLIT(dev)) {
+-		if (IS_IRONLAKE(dev)) {
++		if (IS_GEN5(dev)) {
+ 			if (I915_READ(MLTR_ILK) & ILK_SRLT_MASK)
+ 				dev_priv->display.update_wm = ironlake_update_wm;
+ 			else {
+@@ -5883,9 +5980,9 @@ static void intel_init_display(struct drm_device *dev)
+ 			dev_priv->display.update_wm = pineview_update_wm;
+ 	} else if (IS_G4X(dev))
+ 		dev_priv->display.update_wm = g4x_update_wm;
+-	else if (IS_I965G(dev))
++	else if (IS_GEN4(dev))
+ 		dev_priv->display.update_wm = i965_update_wm;
+-	else if (IS_I9XX(dev)) {
++	else if (IS_GEN3(dev)) {
+ 		dev_priv->display.update_wm = i9xx_update_wm;
+ 		dev_priv->display.get_fifo_size = i9xx_get_fifo_size;
+ 	} else if (IS_I85X(dev)) {
+@@ -5999,24 +6096,24 @@ void intel_modeset_init(struct drm_device *dev)
+ 
+ 	intel_init_display(dev);
+ 
+-	if (IS_I965G(dev)) {
+-		dev->mode_config.max_width = 8192;
+-		dev->mode_config.max_height = 8192;
+-	} else if (IS_I9XX(dev)) {
++	if (IS_GEN2(dev)) {
++		dev->mode_config.max_width = 2048;
++		dev->mode_config.max_height = 2048;
++	} else if (IS_GEN3(dev)) {
+ 		dev->mode_config.max_width = 4096;
+ 		dev->mode_config.max_height = 4096;
+ 	} else {
+-		dev->mode_config.max_width = 2048;
+-		dev->mode_config.max_height = 2048;
++		dev->mode_config.max_width = 8192;
++		dev->mode_config.max_height = 8192;
+ 	}
+ 
+ 	/* set memory base */
+-	if (IS_I9XX(dev))
+-		dev->mode_config.fb_base = pci_resource_start(dev->pdev, 2);
+-	else
++	if (IS_GEN2(dev))
+ 		dev->mode_config.fb_base = pci_resource_start(dev->pdev, 0);
++	else
++		dev->mode_config.fb_base = pci_resource_start(dev->pdev, 2);
+ 
+-	if (IS_MOBILE(dev) || IS_I9XX(dev))
++	if (IS_MOBILE(dev) || !IS_GEN2(dev))
+ 		dev_priv->num_pipe = 2;
+ 	else
+ 		dev_priv->num_pipe = 1;
+@@ -6052,10 +6149,11 @@ void intel_modeset_cleanup(struct drm_device *dev)
+ 	struct drm_crtc *crtc;
+ 	struct intel_crtc *intel_crtc;
+ 
++	drm_kms_helper_poll_fini(dev);
+ 	mutex_lock(&dev->struct_mutex);
+ 
+-	drm_kms_helper_poll_fini(dev);
+-	intel_fbdev_fini(dev);
++	intel_unregister_dsm_handler();
++
+ 
+ 	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
+ 		/* Skip inactive CRTCs */
+@@ -6063,12 +6161,9 @@ void intel_modeset_cleanup(struct drm_device *dev)
+ 			continue;
+ 
+ 		intel_crtc = to_intel_crtc(crtc);
+-		intel_increase_pllclock(crtc, false);
+-		del_timer_sync(&intel_crtc->idle_timer);
++		intel_increase_pllclock(crtc);
+ 	}
+ 
+-	del_timer_sync(&dev_priv->idle_timer);
+-
+ 	if (dev_priv->display.disable_fbc)
+ 		dev_priv->display.disable_fbc(dev);
+ 
+@@ -6097,33 +6192,36 @@ void intel_modeset_cleanup(struct drm_device *dev)
+ 
+ 	mutex_unlock(&dev->struct_mutex);
+ 
++	/* Disable the irq before mode object teardown, for the irq might
++	 * enqueue unpin/hotplug work. */
++	drm_irq_uninstall(dev);
++	cancel_work_sync(&dev_priv->hotplug_work);
++
++	/* Shut off idle work before the crtcs get freed. */
++	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
++		intel_crtc = to_intel_crtc(crtc);
++		del_timer_sync(&intel_crtc->idle_timer);
++	}
++	del_timer_sync(&dev_priv->idle_timer);
++	cancel_work_sync(&dev_priv->idle_work);
++
+ 	drm_mode_config_cleanup(dev);
+ }
+ 
+-
+ /*
+  * Return which encoder is currently attached for connector.
+  */
+-struct drm_encoder *intel_attached_encoder (struct drm_connector *connector)
++struct drm_encoder *intel_best_encoder(struct drm_connector *connector)
+ {
+-	struct drm_mode_object *obj;
+-	struct drm_encoder *encoder;
+-	int i;
+-
+-	for (i = 0; i < DRM_CONNECTOR_MAX_ENCODER; i++) {
+-		if (connector->encoder_ids[i] == 0)
+-			break;
+-
+-		obj = drm_mode_object_find(connector->dev,
+-                                           connector->encoder_ids[i],
+-                                           DRM_MODE_OBJECT_ENCODER);
+-		if (!obj)
+-			continue;
++	return &intel_attached_encoder(connector)->base;
++}
+ 
+-		encoder = obj_to_encoder(obj);
+-		return encoder;
+-	}
+-	return NULL;
++void intel_connector_attach_encoder(struct intel_connector *connector,
++				    struct intel_encoder *encoder)
++{
++	connector->encoder = encoder;
++	drm_mode_connector_attach_encoder(&connector->base,
++					  &encoder->base);
+ }
+ 
+ /*
+diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c
+index 9ab8708..2d3dee9 100644
+--- a/drivers/gpu/drm/i915/intel_dp.c
++++ b/drivers/gpu/drm/i915/intel_dp.c
+@@ -42,15 +42,13 @@
+ 
+ #define DP_LINK_CONFIGURATION_SIZE	9
+ 
+-#define IS_eDP(i) ((i)->base.type == INTEL_OUTPUT_EDP)
+-#define IS_PCH_eDP(i) ((i)->is_pch_edp)
+-
+ struct intel_dp {
+ 	struct intel_encoder base;
+ 	uint32_t output_reg;
+ 	uint32_t DP;
+ 	uint8_t  link_configuration[DP_LINK_CONFIGURATION_SIZE];
+ 	bool has_audio;
++	int force_audio;
+ 	int dpms_mode;
+ 	uint8_t link_bw;
+ 	uint8_t lane_count;
+@@ -58,14 +56,69 @@ struct intel_dp {
+ 	struct i2c_adapter adapter;
+ 	struct i2c_algo_dp_aux_data algo;
+ 	bool is_pch_edp;
++	uint8_t	train_set[4];
++	uint8_t link_status[DP_LINK_STATUS_SIZE];
++
++	struct drm_property *force_audio_property;
+ };
+ 
++/**
++ * is_edp - is the given port attached to an eDP panel (either CPU or PCH)
++ * @intel_dp: DP struct
++ *
++ * If a CPU or PCH DP output is attached to an eDP panel, this function
++ * will return true, and false otherwise.
++ */
++static bool is_edp(struct intel_dp *intel_dp)
++{
++	return intel_dp->base.type == INTEL_OUTPUT_EDP;
++}
++
++/**
++ * is_pch_edp - is the port on the PCH and attached to an eDP panel?
++ * @intel_dp: DP struct
++ *
++ * Returns true if the given DP struct corresponds to a PCH DP port attached
++ * to an eDP panel, false otherwise.  Helpful for determining whether we
++ * may need FDI resources for a given DP output or not.
++ */
++static bool is_pch_edp(struct intel_dp *intel_dp)
++{
++	return intel_dp->is_pch_edp;
++}
++
+ static struct intel_dp *enc_to_intel_dp(struct drm_encoder *encoder)
+ {
+-	return container_of(enc_to_intel_encoder(encoder), struct intel_dp, base);
++	return container_of(encoder, struct intel_dp, base.base);
++}
++
++static struct intel_dp *intel_attached_dp(struct drm_connector *connector)
++{
++	return container_of(intel_attached_encoder(connector),
++			    struct intel_dp, base);
++}
++
++/**
++ * intel_encoder_is_pch_edp - is the given encoder a PCH attached eDP?
++ * @encoder: DRM encoder
++ *
++ * Return true if @encoder corresponds to a PCH attached eDP panel.  Needed
++ * by intel_display.c.
++ */
++bool intel_encoder_is_pch_edp(struct drm_encoder *encoder)
++{
++	struct intel_dp *intel_dp;
++
++	if (!encoder)
++		return false;
++
++	intel_dp = enc_to_intel_dp(encoder);
++
++	return is_pch_edp(intel_dp);
+ }
+ 
+-static void intel_dp_link_train(struct intel_dp *intel_dp);
++static void intel_dp_start_link_train(struct intel_dp *intel_dp);
++static void intel_dp_complete_link_train(struct intel_dp *intel_dp);
+ static void intel_dp_link_down(struct intel_dp *intel_dp);
+ 
+ void
+@@ -129,8 +182,8 @@ intel_dp_link_required(struct drm_device *dev, struct intel_dp *intel_dp, int pi
+ {
+ 	struct drm_i915_private *dev_priv = dev->dev_private;
+ 
+-	if (IS_eDP(intel_dp) || IS_PCH_eDP(intel_dp))
+-		return (pixel_clock * dev_priv->edp_bpp) / 8;
++	if (is_edp(intel_dp))
++		return (pixel_clock * dev_priv->edp.bpp + 7) / 8;
+ 	else
+ 		return pixel_clock * 3;
+ }
+@@ -145,15 +198,13 @@ static int
+ intel_dp_mode_valid(struct drm_connector *connector,
+ 		    struct drm_display_mode *mode)
+ {
+-	struct drm_encoder *encoder = intel_attached_encoder(connector);
+-	struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
++	struct intel_dp *intel_dp = intel_attached_dp(connector);
+ 	struct drm_device *dev = connector->dev;
+ 	struct drm_i915_private *dev_priv = dev->dev_private;
+ 	int max_link_clock = intel_dp_link_clock(intel_dp_max_link_bw(intel_dp));
+ 	int max_lanes = intel_dp_max_lane_count(intel_dp);
+ 
+-	if ((IS_eDP(intel_dp) || IS_PCH_eDP(intel_dp)) &&
+-	    dev_priv->panel_fixed_mode) {
++	if (is_edp(intel_dp) && dev_priv->panel_fixed_mode) {
+ 		if (mode->hdisplay > dev_priv->panel_fixed_mode->hdisplay)
+ 			return MODE_PANEL;
+ 
+@@ -163,7 +214,7 @@ intel_dp_mode_valid(struct drm_connector *connector,
+ 
+ 	/* only refuse the mode on non eDP since we have seen some weird eDP panels
+ 	   which are outside spec tolerances but somehow work by magic */
+-	if (!IS_eDP(intel_dp) &&
++	if (!is_edp(intel_dp) &&
+ 	    (intel_dp_link_required(connector->dev, intel_dp, mode->clock)
+ 	     > intel_dp_max_data_rate(max_link_clock, max_lanes)))
+ 		return MODE_CLOCK_HIGH;
+@@ -233,7 +284,7 @@ intel_dp_aux_ch(struct intel_dp *intel_dp,
+ 		uint8_t *recv, int recv_size)
+ {
+ 	uint32_t output_reg = intel_dp->output_reg;
+-	struct drm_device *dev = intel_dp->base.enc.dev;
++	struct drm_device *dev = intel_dp->base.base.dev;
+ 	struct drm_i915_private *dev_priv = dev->dev_private;
+ 	uint32_t ch_ctl = output_reg + 0x10;
+ 	uint32_t ch_data = ch_ctl + 4;
+@@ -246,8 +297,11 @@ intel_dp_aux_ch(struct intel_dp *intel_dp,
+ 	/* The clock divider is based off the hrawclk,
+ 	 * and would like to run at 2MHz. So, take the
+ 	 * hrawclk value and divide by 2 and use that
++	 *
++	 * Note that PCH attached eDP panels should use a 125MHz input
++	 * clock divider.
+ 	 */
+-	if (IS_eDP(intel_dp)) {
++	if (is_edp(intel_dp) && !is_pch_edp(intel_dp)) {
+ 		if (IS_GEN6(dev))
+ 			aux_clock_divider = 200; /* SNB eDP input clock at 400Mhz */
+ 		else
+@@ -519,8 +573,7 @@ intel_dp_mode_fixup(struct drm_encoder *encoder, struct drm_display_mode *mode,
+ 	int max_clock = intel_dp_max_link_bw(intel_dp) == DP_LINK_BW_2_7 ? 1 : 0;
+ 	static int bws[2] = { DP_LINK_BW_1_62, DP_LINK_BW_2_7 };
+ 
+-	if ((IS_eDP(intel_dp) || IS_PCH_eDP(intel_dp)) &&
+-	    dev_priv->panel_fixed_mode) {
++	if (is_edp(intel_dp) && dev_priv->panel_fixed_mode) {
+ 		intel_fixed_panel_mode(dev_priv->panel_fixed_mode, adjusted_mode);
+ 		intel_pch_panel_fitting(dev, DRM_MODE_SCALE_FULLSCREEN,
+ 					mode, adjusted_mode);
+@@ -531,6 +584,17 @@ intel_dp_mode_fixup(struct drm_encoder *encoder, struct drm_display_mode *mode,
+ 		mode->clock = dev_priv->panel_fixed_mode->clock;
+ 	}
+ 
++	/* Just use VBT values for eDP */
++	if (is_edp(intel_dp)) {
++		intel_dp->lane_count = dev_priv->edp.lanes;
++		intel_dp->link_bw = dev_priv->edp.rate;
++		adjusted_mode->clock = intel_dp_link_clock(intel_dp->link_bw);
++		DRM_DEBUG_KMS("eDP link bw %02x lane count %d clock %d\n",
++			      intel_dp->link_bw, intel_dp->lane_count,
++			      adjusted_mode->clock);
++		return true;
++	}
++
+ 	for (lane_count = 1; lane_count <= max_lane_count; lane_count <<= 1) {
+ 		for (clock = 0; clock <= max_clock; clock++) {
+ 			int link_avail = intel_dp_max_data_rate(intel_dp_link_clock(bws[clock]), lane_count);
+@@ -549,19 +613,6 @@ intel_dp_mode_fixup(struct drm_encoder *encoder, struct drm_display_mode *mode,
+ 		}
+ 	}
+ 
+-	if (IS_eDP(intel_dp) || IS_PCH_eDP(intel_dp)) {
+-		/* okay we failed just pick the highest */
+-		intel_dp->lane_count = max_lane_count;
+-		intel_dp->link_bw = bws[max_clock];
+-		adjusted_mode->clock = intel_dp_link_clock(intel_dp->link_bw);
+-		DRM_DEBUG_KMS("Force picking display port link bw %02x lane "
+-			      "count %d clock %d\n",
+-			      intel_dp->link_bw, intel_dp->lane_count,
+-			      adjusted_mode->clock);
+-
+-		return true;
+-	}
+-
+ 	return false;
+ }
+ 
+@@ -598,25 +649,6 @@ intel_dp_compute_m_n(int bpp,
+ 	intel_reduce_ratio(&m_n->link_m, &m_n->link_n);
+ }
+ 
+-bool intel_pch_has_edp(struct drm_crtc *crtc)
+-{
+-	struct drm_device *dev = crtc->dev;
+-	struct drm_mode_config *mode_config = &dev->mode_config;
+-	struct drm_encoder *encoder;
+-
+-	list_for_each_entry(encoder, &mode_config->encoder_list, head) {
+-		struct intel_dp *intel_dp;
+-
+-		if (encoder->crtc != crtc)
+-			continue;
+-
+-		intel_dp = enc_to_intel_dp(encoder);
+-		if (intel_dp->base.type == INTEL_OUTPUT_DISPLAYPORT)
+-			return intel_dp->is_pch_edp;
+-	}
+-	return false;
+-}
+-
+ void
+ intel_dp_set_m_n(struct drm_crtc *crtc, struct drm_display_mode *mode,
+ 		 struct drm_display_mode *adjusted_mode)
+@@ -641,8 +673,10 @@ intel_dp_set_m_n(struct drm_crtc *crtc, struct drm_display_mode *mode,
+ 		intel_dp = enc_to_intel_dp(encoder);
+ 		if (intel_dp->base.type == INTEL_OUTPUT_DISPLAYPORT) {
+ 			lane_count = intel_dp->lane_count;
+-			if (IS_PCH_eDP(intel_dp))
+-				bpp = dev_priv->edp_bpp;
++			break;
++		} else if (is_edp(intel_dp)) {
++			lane_count = dev_priv->edp.lanes;
++			bpp = dev_priv->edp.bpp;
+ 			break;
+ 		}
+ 	}
+@@ -698,7 +732,7 @@ intel_dp_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode,
+ {
+ 	struct drm_device *dev = encoder->dev;
+ 	struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
+-	struct drm_crtc *crtc = intel_dp->base.enc.crtc;
++	struct drm_crtc *crtc = intel_dp->base.base.crtc;
+ 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+ 
+ 	intel_dp->DP = (DP_VOLTAGE_0_4 |
+@@ -709,7 +743,7 @@ intel_dp_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode,
+ 	if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC)
+ 		intel_dp->DP |= DP_SYNC_VS_HIGH;
+ 
+-	if (HAS_PCH_CPT(dev) && !IS_eDP(intel_dp))
++	if (HAS_PCH_CPT(dev) && !is_edp(intel_dp))
+ 		intel_dp->DP |= DP_LINK_TRAIN_OFF_CPT;
+ 	else
+ 		intel_dp->DP |= DP_LINK_TRAIN_OFF;
+@@ -744,7 +778,7 @@ intel_dp_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode,
+ 	if (intel_crtc->pipe == 1 && !HAS_PCH_CPT(dev))
+ 		intel_dp->DP |= DP_PIPEB_SELECT;
+ 
+-	if (IS_eDP(intel_dp)) {
++	if (is_edp(intel_dp) && !is_pch_edp(intel_dp)) {
+ 		/* don't miss out required setting for eDP */
+ 		intel_dp->DP |= DP_PLL_ENABLE;
+ 		if (adjusted_mode->clock < 200000)
+@@ -754,13 +788,16 @@ intel_dp_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode,
+ 	}
+ }
+ 
+-static void ironlake_edp_panel_on (struct drm_device *dev)
++/* Returns true if the panel was already on when called */
++static bool ironlake_edp_panel_on (struct intel_dp *intel_dp)
+ {
++	struct drm_device *dev = intel_dp->base.base.dev;
+ 	struct drm_i915_private *dev_priv = dev->dev_private;
+-	u32 pp;
++	u32 pp, idle_on = PP_ON | PP_SEQUENCE_STATE_ON_IDLE;
++	u32 idle_on_mask = PP_ON | PP_SEQUENCE_STATE_MASK;
+ 
+ 	if (I915_READ(PCH_PP_STATUS) & PP_ON)
+-		return;
++		return true;
+ 
+ 	pp = I915_READ(PCH_PP_CONTROL);
+ 
+@@ -771,21 +808,30 @@ static void ironlake_edp_panel_on (struct drm_device *dev)
+ 
+ 	pp |= PANEL_UNLOCK_REGS | POWER_TARGET_ON;
+ 	I915_WRITE(PCH_PP_CONTROL, pp);
++	POSTING_READ(PCH_PP_CONTROL);
+ 
+-	if (wait_for(I915_READ(PCH_PP_STATUS) & PP_ON, 5000, 10))
++	/* Ouch. We need to wait here for some panels, like Dell e6510
++	 * https://bugs.freedesktop.org/show_bug.cgi?id=29278i
++	 */
++	msleep(300);
++
++	if (wait_for((I915_READ(PCH_PP_STATUS) & idle_on_mask) == idle_on,
++		     5000))
+ 		DRM_ERROR("panel on wait timed out: 0x%08x\n",
+ 			  I915_READ(PCH_PP_STATUS));
+ 
+-	pp &= ~(PANEL_UNLOCK_REGS | EDP_FORCE_VDD);
+ 	pp |= PANEL_POWER_RESET; /* restore panel reset bit */
+ 	I915_WRITE(PCH_PP_CONTROL, pp);
+ 	POSTING_READ(PCH_PP_CONTROL);
++
++	return false;
+ }
+ 
+ static void ironlake_edp_panel_off (struct drm_device *dev)
+ {
+ 	struct drm_i915_private *dev_priv = dev->dev_private;
+-	u32 pp;
++	u32 pp, idle_off_mask = PP_ON | PP_SEQUENCE_MASK |
++		PP_CYCLE_DELAY_ACTIVE | PP_SEQUENCE_STATE_MASK;
+ 
+ 	pp = I915_READ(PCH_PP_CONTROL);
+ 
+@@ -796,15 +842,20 @@ static void ironlake_edp_panel_off (struct drm_device *dev)
+ 
+ 	pp &= ~POWER_TARGET_ON;
+ 	I915_WRITE(PCH_PP_CONTROL, pp);
++	POSTING_READ(PCH_PP_CONTROL);
+ 
+-	if (wait_for((I915_READ(PCH_PP_STATUS) & PP_ON) == 0, 5000, 10))
++	if (wait_for((I915_READ(PCH_PP_STATUS) & idle_off_mask) == 0, 5000))
+ 		DRM_ERROR("panel off wait timed out: 0x%08x\n",
+ 			  I915_READ(PCH_PP_STATUS));
+ 
+-	/* Make sure VDD is enabled so DP AUX will work */
+-	pp |= EDP_FORCE_VDD | PANEL_POWER_RESET; /* restore panel reset bit */
++	pp |= PANEL_POWER_RESET; /* restore panel reset bit */
+ 	I915_WRITE(PCH_PP_CONTROL, pp);
+ 	POSTING_READ(PCH_PP_CONTROL);
++
++	/* Ouch. We need to wait here for some panels, like Dell e6510
++	 * https://bugs.freedesktop.org/show_bug.cgi?id=29278i
++	 */
++	msleep(300);
+ }
+ 
+ static void ironlake_edp_backlight_on (struct drm_device *dev)
+@@ -813,6 +864,13 @@ static void ironlake_edp_backlight_on (struct drm_device *dev)
+ 	u32 pp;
+ 
+ 	DRM_DEBUG_KMS("\n");
++	/*
++	 * If we enable the backlight right away following a panel power
++	 * on, we may see slight flicker as the panel syncs with the eDP
++	 * link.  So delay a bit to make sure the image is solid before
++	 * allowing it to appear.
++	 */
++	msleep(300);
+ 	pp = I915_READ(PCH_PP_CONTROL);
+ 	pp |= EDP_BLC_ENABLE;
+ 	I915_WRITE(PCH_PP_CONTROL, pp);
+@@ -837,8 +895,10 @@ static void ironlake_edp_pll_on(struct drm_encoder *encoder)
+ 
+ 	DRM_DEBUG_KMS("\n");
+ 	dpa_ctl = I915_READ(DP_A);
+-	dpa_ctl &= ~DP_PLL_ENABLE;
++	dpa_ctl |= DP_PLL_ENABLE;
+ 	I915_WRITE(DP_A, dpa_ctl);
++	POSTING_READ(DP_A);
++	udelay(200);
+ }
+ 
+ static void ironlake_edp_pll_off(struct drm_encoder *encoder)
+@@ -848,8 +908,9 @@ static void ironlake_edp_pll_off(struct drm_encoder *encoder)
+ 	u32 dpa_ctl;
+ 
+ 	dpa_ctl = I915_READ(DP_A);
+-	dpa_ctl |= DP_PLL_ENABLE;
++	dpa_ctl &= ~DP_PLL_ENABLE;
+ 	I915_WRITE(DP_A, dpa_ctl);
++	POSTING_READ(DP_A);
+ 	udelay(200);
+ }
+ 
+@@ -857,29 +918,32 @@ static void intel_dp_prepare(struct drm_encoder *encoder)
+ {
+ 	struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
+ 	struct drm_device *dev = encoder->dev;
+-	struct drm_i915_private *dev_priv = dev->dev_private;
+-	uint32_t dp_reg = I915_READ(intel_dp->output_reg);
+ 
+-	if (IS_eDP(intel_dp)) {
++	if (is_edp(intel_dp)) {
+ 		ironlake_edp_backlight_off(dev);
+-		ironlake_edp_panel_on(dev);
+-		ironlake_edp_pll_on(encoder);
++		ironlake_edp_panel_off(dev);
++		ironlake_edp_panel_on(intel_dp);
++		if (!is_pch_edp(intel_dp))
++			ironlake_edp_pll_on(encoder);
++		else
++			ironlake_edp_pll_off(encoder);
+ 	}
+-	if (dp_reg & DP_PORT_EN)
+-		intel_dp_link_down(intel_dp);
++	intel_dp_link_down(intel_dp);
+ }
+ 
+ static void intel_dp_commit(struct drm_encoder *encoder)
+ {
+ 	struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
+ 	struct drm_device *dev = encoder->dev;
+-	struct drm_i915_private *dev_priv = dev->dev_private;
+-	uint32_t dp_reg = I915_READ(intel_dp->output_reg);
+ 
+-	if (!(dp_reg & DP_PORT_EN)) {
+-		intel_dp_link_train(intel_dp);
+-	}
+-	if (IS_eDP(intel_dp) || IS_PCH_eDP(intel_dp))
++	intel_dp_start_link_train(intel_dp);
++
++	if (is_edp(intel_dp))
++		ironlake_edp_panel_on(intel_dp);
++
++	intel_dp_complete_link_train(intel_dp);
++
++	if (is_edp(intel_dp))
+ 		ironlake_edp_backlight_on(dev);
+ }
+ 
+@@ -892,22 +956,22 @@ intel_dp_dpms(struct drm_encoder *encoder, int mode)
+ 	uint32_t dp_reg = I915_READ(intel_dp->output_reg);
+ 
+ 	if (mode != DRM_MODE_DPMS_ON) {
+-		if (IS_eDP(intel_dp) || IS_PCH_eDP(intel_dp)) {
++		if (is_edp(intel_dp))
+ 			ironlake_edp_backlight_off(dev);
++		intel_dp_link_down(intel_dp);
++		if (is_edp(intel_dp))
+ 			ironlake_edp_panel_off(dev);
+-		}
+-		if (dp_reg & DP_PORT_EN)
+-			intel_dp_link_down(intel_dp);
+-		if (IS_eDP(intel_dp) || IS_PCH_eDP(intel_dp))
++		if (is_edp(intel_dp) && !is_pch_edp(intel_dp))
+ 			ironlake_edp_pll_off(encoder);
+ 	} else {
++		if (is_edp(intel_dp))
++			ironlake_edp_panel_on(intel_dp);
+ 		if (!(dp_reg & DP_PORT_EN)) {
+-			if (IS_eDP(intel_dp) || IS_PCH_eDP(intel_dp))
+-				ironlake_edp_panel_on(dev);
+-			intel_dp_link_train(intel_dp);
+-			if (IS_eDP(intel_dp) || IS_PCH_eDP(intel_dp))
+-				ironlake_edp_backlight_on(dev);
++			intel_dp_start_link_train(intel_dp);
++			intel_dp_complete_link_train(intel_dp);
+ 		}
++		if (is_edp(intel_dp))
++			ironlake_edp_backlight_on(dev);
+ 	}
+ 	intel_dp->dpms_mode = mode;
+ }
+@@ -917,14 +981,13 @@ intel_dp_dpms(struct drm_encoder *encoder, int mode)
+  * link status information
+  */
+ static bool
+-intel_dp_get_link_status(struct intel_dp *intel_dp,
+-			 uint8_t link_status[DP_LINK_STATUS_SIZE])
++intel_dp_get_link_status(struct intel_dp *intel_dp)
+ {
+ 	int ret;
+ 
+ 	ret = intel_dp_aux_native_read(intel_dp,
+ 				       DP_LANE0_1_STATUS,
+-				       link_status, DP_LINK_STATUS_SIZE);
++				       intel_dp->link_status, DP_LINK_STATUS_SIZE);
+ 	if (ret != DP_LINK_STATUS_SIZE)
+ 		return false;
+ 	return true;
+@@ -999,18 +1062,15 @@ intel_dp_pre_emphasis_max(uint8_t voltage_swing)
+ }
+ 
+ static void
+-intel_get_adjust_train(struct intel_dp *intel_dp,
+-		       uint8_t link_status[DP_LINK_STATUS_SIZE],
+-		       int lane_count,
+-		       uint8_t train_set[4])
++intel_get_adjust_train(struct intel_dp *intel_dp)
+ {
+ 	uint8_t v = 0;
+ 	uint8_t p = 0;
+ 	int lane;
+ 
+-	for (lane = 0; lane < lane_count; lane++) {
+-		uint8_t this_v = intel_get_adjust_request_voltage(link_status, lane);
+-		uint8_t this_p = intel_get_adjust_request_pre_emphasis(link_status, lane);
++	for (lane = 0; lane < intel_dp->lane_count; lane++) {
++		uint8_t this_v = intel_get_adjust_request_voltage(intel_dp->link_status, lane);
++		uint8_t this_p = intel_get_adjust_request_pre_emphasis(intel_dp->link_status, lane);
+ 
+ 		if (this_v > v)
+ 			v = this_v;
+@@ -1025,15 +1085,25 @@ intel_get_adjust_train(struct intel_dp *intel_dp,
+ 		p = intel_dp_pre_emphasis_max(v) | DP_TRAIN_MAX_PRE_EMPHASIS_REACHED;
+ 
+ 	for (lane = 0; lane < 4; lane++)
+-		train_set[lane] = v | p;
++		intel_dp->train_set[lane] = v | p;
+ }
+ 
+ static uint32_t
+-intel_dp_signal_levels(uint8_t train_set, int lane_count)
++intel_dp_signal_levels(struct intel_dp *intel_dp)
+ {
+-	uint32_t	signal_levels = 0;
++	struct drm_device *dev = intel_dp->base.base.dev;
++	struct drm_i915_private *dev_priv = dev->dev_private;
++	uint32_t signal_levels = 0;
++	u8 train_set = intel_dp->train_set[0];
++	u32 vswing = train_set & DP_TRAIN_VOLTAGE_SWING_MASK;
++	u32 preemphasis = train_set & DP_TRAIN_PRE_EMPHASIS_MASK;
++
++	if (is_edp(intel_dp)) {
++		vswing = dev_priv->edp.vswing;
++		preemphasis = dev_priv->edp.preemphasis;
++	}
+ 
+-	switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
++	switch (vswing) {
+ 	case DP_TRAIN_VOLTAGE_SWING_400:
+ 	default:
+ 		signal_levels |= DP_VOLTAGE_0_4;
+@@ -1048,7 +1118,7 @@ intel_dp_signal_levels(uint8_t train_set, int lane_count)
+ 		signal_levels |= DP_VOLTAGE_1_2;
+ 		break;
+ 	}
+-	switch (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) {
++	switch (preemphasis) {
+ 	case DP_TRAIN_PRE_EMPHASIS_0:
+ 	default:
+ 		signal_levels |= DP_PRE_EMPHASIS_0;
+@@ -1116,18 +1186,18 @@ intel_clock_recovery_ok(uint8_t link_status[DP_LINK_STATUS_SIZE], int lane_count
+ 			 DP_LANE_CHANNEL_EQ_DONE|\
+ 			 DP_LANE_SYMBOL_LOCKED)
+ static bool
+-intel_channel_eq_ok(uint8_t link_status[DP_LINK_STATUS_SIZE], int lane_count)
++intel_channel_eq_ok(struct intel_dp *intel_dp)
+ {
+ 	uint8_t lane_align;
+ 	uint8_t lane_status;
+ 	int lane;
+ 
+-	lane_align = intel_dp_link_status(link_status,
++	lane_align = intel_dp_link_status(intel_dp->link_status,
+ 					  DP_LANE_ALIGN_STATUS_UPDATED);
+ 	if ((lane_align & DP_INTERLANE_ALIGN_DONE) == 0)
+ 		return false;
+-	for (lane = 0; lane < lane_count; lane++) {
+-		lane_status = intel_get_lane_status(link_status, lane);
++	for (lane = 0; lane < intel_dp->lane_count; lane++) {
++		lane_status = intel_get_lane_status(intel_dp->link_status, lane);
+ 		if ((lane_status & CHANNEL_EQ_BITS) != CHANNEL_EQ_BITS)
+ 			return false;
+ 	}
+@@ -1135,159 +1205,194 @@ intel_channel_eq_ok(uint8_t link_status[DP_LINK_STATUS_SIZE], int lane_count)
+ }
+ 
+ static bool
++intel_dp_aux_handshake_required(struct intel_dp *intel_dp)
++{
++	struct drm_device *dev = intel_dp->base.base.dev;
++	struct drm_i915_private *dev_priv = dev->dev_private;
++
++	if (is_edp(intel_dp) && dev_priv->no_aux_handshake)
++		return false;
++
++	return true;
++}
++
++static bool
+ intel_dp_set_link_train(struct intel_dp *intel_dp,
+ 			uint32_t dp_reg_value,
+-			uint8_t dp_train_pat,
+-			uint8_t train_set[4])
++			uint8_t dp_train_pat)
+ {
+-	struct drm_device *dev = intel_dp->base.enc.dev;
++	struct drm_device *dev = intel_dp->base.base.dev;
+ 	struct drm_i915_private *dev_priv = dev->dev_private;
+ 	int ret;
+ 
+ 	I915_WRITE(intel_dp->output_reg, dp_reg_value);
+ 	POSTING_READ(intel_dp->output_reg);
+ 
++	if (!intel_dp_aux_handshake_required(intel_dp))
++		return true;
++
+ 	intel_dp_aux_native_write_1(intel_dp,
+ 				    DP_TRAINING_PATTERN_SET,
+ 				    dp_train_pat);
+ 
+ 	ret = intel_dp_aux_native_write(intel_dp,
+-					DP_TRAINING_LANE0_SET, train_set, 4);
++					DP_TRAINING_LANE0_SET,
++					intel_dp->train_set, 4);
+ 	if (ret != 4)
+ 		return false;
+ 
+ 	return true;
+ }
+ 
++/* Enable corresponding port and start training pattern 1 */
+ static void
+-intel_dp_link_train(struct intel_dp *intel_dp)
++intel_dp_start_link_train(struct intel_dp *intel_dp)
+ {
+-	struct drm_device *dev = intel_dp->base.enc.dev;
++	struct drm_device *dev = intel_dp->base.base.dev;
+ 	struct drm_i915_private *dev_priv = dev->dev_private;
+-	uint8_t	train_set[4];
+-	uint8_t link_status[DP_LINK_STATUS_SIZE];
++	struct intel_crtc *intel_crtc = to_intel_crtc(intel_dp->base.base.crtc);
+ 	int i;
+ 	uint8_t voltage;
+ 	bool clock_recovery = false;
+-	bool channel_eq = false;
+ 	int tries;
+ 	u32 reg;
+ 	uint32_t DP = intel_dp->DP;
+-	struct intel_crtc *intel_crtc = to_intel_crtc(intel_dp->base.enc.crtc);
+ 
+ 	/* Enable output, wait for it to become active */
+ 	I915_WRITE(intel_dp->output_reg, intel_dp->DP);
+ 	POSTING_READ(intel_dp->output_reg);
+ 	intel_wait_for_vblank(dev, intel_crtc->pipe);
+ 
+-	/* Write the link configuration data */
+-	intel_dp_aux_native_write(intel_dp, DP_LINK_BW_SET,
+-				  intel_dp->link_configuration,
+-				  DP_LINK_CONFIGURATION_SIZE);
++	if (intel_dp_aux_handshake_required(intel_dp))
++		/* Write the link configuration data */
++		intel_dp_aux_native_write(intel_dp, DP_LINK_BW_SET,
++					  intel_dp->link_configuration,
++					  DP_LINK_CONFIGURATION_SIZE);
+ 
+ 	DP |= DP_PORT_EN;
+-	if (HAS_PCH_CPT(dev) && !IS_eDP(intel_dp))
++	if (HAS_PCH_CPT(dev) && !is_edp(intel_dp))
+ 		DP &= ~DP_LINK_TRAIN_MASK_CPT;
+ 	else
+ 		DP &= ~DP_LINK_TRAIN_MASK;
+-	memset(train_set, 0, 4);
++	memset(intel_dp->train_set, 0, 4);
+ 	voltage = 0xff;
+ 	tries = 0;
+ 	clock_recovery = false;
+ 	for (;;) {
+-		/* Use train_set[0] to set the voltage and pre emphasis values */
++		/* Use intel_dp->train_set[0] to set the voltage and pre emphasis values */
+ 		uint32_t    signal_levels;
+-		if (IS_GEN6(dev) && IS_eDP(intel_dp)) {
+-			signal_levels = intel_gen6_edp_signal_levels(train_set[0]);
++		if (IS_GEN6(dev) && is_edp(intel_dp)) {
++			signal_levels = intel_gen6_edp_signal_levels(intel_dp->train_set[0]);
+ 			DP = (DP & ~EDP_LINK_TRAIN_VOL_EMP_MASK_SNB) | signal_levels;
+ 		} else {
+-			signal_levels = intel_dp_signal_levels(train_set[0], intel_dp->lane_count);
++			signal_levels = intel_dp_signal_levels(intel_dp);
+ 			DP = (DP & ~(DP_VOLTAGE_MASK|DP_PRE_EMPHASIS_MASK)) | signal_levels;
+ 		}
+ 
+-		if (HAS_PCH_CPT(dev) && !IS_eDP(intel_dp))
++		if (HAS_PCH_CPT(dev) && !is_edp(intel_dp))
+ 			reg = DP | DP_LINK_TRAIN_PAT_1_CPT;
+ 		else
+ 			reg = DP | DP_LINK_TRAIN_PAT_1;
+ 
+ 		if (!intel_dp_set_link_train(intel_dp, reg,
+-					     DP_TRAINING_PATTERN_1, train_set))
++					     DP_TRAINING_PATTERN_1))
+ 			break;
+ 		/* Set training pattern 1 */
+ 
+-		udelay(100);
+-		if (!intel_dp_get_link_status(intel_dp, link_status))
++		udelay(500);
++		if (intel_dp_aux_handshake_required(intel_dp)) {
+ 			break;
++		} else {
++			if (!intel_dp_get_link_status(intel_dp))
++				break;
+ 
+-		if (intel_clock_recovery_ok(link_status, intel_dp->lane_count)) {
+-			clock_recovery = true;
+-			break;
+-		}
+-
+-		/* Check to see if we've tried the max voltage */
+-		for (i = 0; i < intel_dp->lane_count; i++)
+-			if ((train_set[i] & DP_TRAIN_MAX_SWING_REACHED) == 0)
++			if (intel_clock_recovery_ok(intel_dp->link_status, intel_dp->lane_count)) {
++				clock_recovery = true;
+ 				break;
+-		if (i == intel_dp->lane_count)
+-			break;
++			}
+ 
+-		/* Check to see if we've tried the same voltage 5 times */
+-		if ((train_set[0] & DP_TRAIN_VOLTAGE_SWING_MASK) == voltage) {
+-			++tries;
+-			if (tries == 5)
++			/* Check to see if we've tried the max voltage */
++			for (i = 0; i < intel_dp->lane_count; i++)
++				if ((intel_dp->train_set[i] & DP_TRAIN_MAX_SWING_REACHED) == 0)
++					break;
++			if (i == intel_dp->lane_count)
+ 				break;
+-		} else
+-			tries = 0;
+-		voltage = train_set[0] & DP_TRAIN_VOLTAGE_SWING_MASK;
+ 
+-		/* Compute new train_set as requested by target */
+-		intel_get_adjust_train(intel_dp, link_status, intel_dp->lane_count, train_set);
++			/* Check to see if we've tried the same voltage 5 times */
++			if ((intel_dp->train_set[0] & DP_TRAIN_VOLTAGE_SWING_MASK) == voltage) {
++				++tries;
++				if (tries == 5)
++					break;
++			} else
++				tries = 0;
++			voltage = intel_dp->train_set[0] & DP_TRAIN_VOLTAGE_SWING_MASK;
++
++			/* Compute new intel_dp->train_set as requested by target */
++			intel_get_adjust_train(intel_dp);
++		}
+ 	}
+ 
++	intel_dp->DP = DP;
++}
++
++static void
++intel_dp_complete_link_train(struct intel_dp *intel_dp)
++{
++	struct drm_device *dev = intel_dp->base.base.dev;
++	struct drm_i915_private *dev_priv = dev->dev_private;
++	bool channel_eq = false;
++	int tries;
++	u32 reg;
++	uint32_t DP = intel_dp->DP;
++
+ 	/* channel equalization */
+ 	tries = 0;
+ 	channel_eq = false;
+ 	for (;;) {
+-		/* Use train_set[0] to set the voltage and pre emphasis values */
++		/* Use intel_dp->train_set[0] to set the voltage and pre emphasis values */
+ 		uint32_t    signal_levels;
+ 
+-		if (IS_GEN6(dev) && IS_eDP(intel_dp)) {
+-			signal_levels = intel_gen6_edp_signal_levels(train_set[0]);
++		if (IS_GEN6(dev) && is_edp(intel_dp)) {
++			signal_levels = intel_gen6_edp_signal_levels(intel_dp->train_set[0]);
+ 			DP = (DP & ~EDP_LINK_TRAIN_VOL_EMP_MASK_SNB) | signal_levels;
+ 		} else {
+-			signal_levels = intel_dp_signal_levels(train_set[0], intel_dp->lane_count);
++			signal_levels = intel_dp_signal_levels(intel_dp);
+ 			DP = (DP & ~(DP_VOLTAGE_MASK|DP_PRE_EMPHASIS_MASK)) | signal_levels;
+ 		}
+ 
+-		if (HAS_PCH_CPT(dev) && !IS_eDP(intel_dp))
++		if (HAS_PCH_CPT(dev) && !is_edp(intel_dp))
+ 			reg = DP | DP_LINK_TRAIN_PAT_2_CPT;
+ 		else
+ 			reg = DP | DP_LINK_TRAIN_PAT_2;
+ 
+ 		/* channel eq pattern */
+ 		if (!intel_dp_set_link_train(intel_dp, reg,
+-					     DP_TRAINING_PATTERN_2, train_set))
++					     DP_TRAINING_PATTERN_2))
+ 			break;
+ 
+-		udelay(400);
+-		if (!intel_dp_get_link_status(intel_dp, link_status))
+-			break;
++		udelay(500);
+ 
+-		if (intel_channel_eq_ok(link_status, intel_dp->lane_count)) {
+-			channel_eq = true;
++		if (!intel_dp_aux_handshake_required(intel_dp)) {
+ 			break;
+-		}
++		} else {
++			if (!intel_dp_get_link_status(intel_dp))
++				break;
+ 
+-		/* Try 5 times */
+-		if (tries > 5)
+-			break;
++			if (intel_channel_eq_ok(intel_dp)) {
++				channel_eq = true;
++				break;
++			}
+ 
+-		/* Compute new train_set as requested by target */
+-		intel_get_adjust_train(intel_dp, link_status, intel_dp->lane_count, train_set);
+-		++tries;
+-	}
++			/* Try 5 times */
++			if (tries > 5)
++				break;
+ 
+-	if (HAS_PCH_CPT(dev) && !IS_eDP(intel_dp))
++			/* Compute new intel_dp->train_set as requested by target */
++			intel_get_adjust_train(intel_dp);
++			++tries;
++		}
++	}
++	if (HAS_PCH_CPT(dev) && !is_edp(intel_dp))
+ 		reg = DP | DP_LINK_TRAIN_OFF_CPT;
+ 	else
+ 		reg = DP | DP_LINK_TRAIN_OFF;
+@@ -1301,32 +1406,31 @@ intel_dp_link_train(struct intel_dp *intel_dp)
+ static void
+ intel_dp_link_down(struct intel_dp *intel_dp)
+ {
+-	struct drm_device *dev = intel_dp->base.enc.dev;
++	struct drm_device *dev = intel_dp->base.base.dev;
+ 	struct drm_i915_private *dev_priv = dev->dev_private;
+ 	uint32_t DP = intel_dp->DP;
+ 
+ 	DRM_DEBUG_KMS("\n");
+ 
+-	if (IS_eDP(intel_dp)) {
++	if (is_edp(intel_dp)) {
+ 		DP &= ~DP_PLL_ENABLE;
+ 		I915_WRITE(intel_dp->output_reg, DP);
+ 		POSTING_READ(intel_dp->output_reg);
+ 		udelay(100);
+ 	}
+ 
+-	if (HAS_PCH_CPT(dev) && !IS_eDP(intel_dp)) {
++	if (HAS_PCH_CPT(dev) && !is_edp(intel_dp)) {
+ 		DP &= ~DP_LINK_TRAIN_MASK_CPT;
+ 		I915_WRITE(intel_dp->output_reg, DP | DP_LINK_TRAIN_PAT_IDLE_CPT);
+-		POSTING_READ(intel_dp->output_reg);
+ 	} else {
+ 		DP &= ~DP_LINK_TRAIN_MASK;
+ 		I915_WRITE(intel_dp->output_reg, DP | DP_LINK_TRAIN_PAT_IDLE);
+-		POSTING_READ(intel_dp->output_reg);
+ 	}
++	POSTING_READ(intel_dp->output_reg);
+ 
+-	udelay(17000);
++	msleep(17);
+ 
+-	if (IS_eDP(intel_dp))
++	if (is_edp(intel_dp))
+ 		DP |= DP_LINK_TRAIN_OFF;
+ 	I915_WRITE(intel_dp->output_reg, DP & ~DP_PORT_EN);
+ 	POSTING_READ(intel_dp->output_reg);
+@@ -1344,32 +1448,34 @@ intel_dp_link_down(struct intel_dp *intel_dp)
+ static void
+ intel_dp_check_link_status(struct intel_dp *intel_dp)
+ {
+-	uint8_t link_status[DP_LINK_STATUS_SIZE];
+-
+-	if (!intel_dp->base.enc.crtc)
++	if (!intel_dp->base.base.crtc)
+ 		return;
+ 
+-	if (!intel_dp_get_link_status(intel_dp, link_status)) {
++	if (!intel_dp_get_link_status(intel_dp)) {
+ 		intel_dp_link_down(intel_dp);
+ 		return;
+ 	}
+ 
+-	if (!intel_channel_eq_ok(link_status, intel_dp->lane_count))
+-		intel_dp_link_train(intel_dp);
++	if (!intel_channel_eq_ok(intel_dp)) {
++		intel_dp_start_link_train(intel_dp);
++		intel_dp_complete_link_train(intel_dp);
++	}
+ }
+ 
+ static enum drm_connector_status
+-ironlake_dp_detect(struct drm_connector *connector)
++ironlake_dp_detect(struct intel_dp *intel_dp)
+ {
+-	struct drm_encoder *encoder = intel_attached_encoder(connector);
+-	struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
+ 	enum drm_connector_status status;
+ 
++	/* Can't disconnect eDP */
++	if (is_edp(intel_dp))
++		return connector_status_connected;
++
+ 	status = connector_status_disconnected;
+ 	if (intel_dp_aux_native_read(intel_dp,
+ 				     0x000, intel_dp->dpcd,
+-				     sizeof (intel_dp->dpcd)) == sizeof (intel_dp->dpcd))
+-	{
++				     sizeof (intel_dp->dpcd))
++	    == sizeof(intel_dp->dpcd)) {
+ 		if (intel_dp->dpcd[0] != 0)
+ 			status = connector_status_connected;
+ 	}
+@@ -1378,26 +1484,13 @@ ironlake_dp_detect(struct drm_connector *connector)
+ 	return status;
+ }
+ 
+-/**
+- * Uses CRT_HOTPLUG_EN and CRT_HOTPLUG_STAT to detect DP connection.
+- *
+- * \return true if DP port is connected.
+- * \return false if DP port is disconnected.
+- */
+ static enum drm_connector_status
+-intel_dp_detect(struct drm_connector *connector, bool force)
++g4x_dp_detect(struct intel_dp *intel_dp)
+ {
+-	struct drm_encoder *encoder = intel_attached_encoder(connector);
+-	struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
+-	struct drm_device *dev = intel_dp->base.enc.dev;
++	struct drm_device *dev = intel_dp->base.base.dev;
+ 	struct drm_i915_private *dev_priv = dev->dev_private;
+-	uint32_t temp, bit;
+ 	enum drm_connector_status status;
+-
+-	intel_dp->has_audio = false;
+-
+-	if (HAS_PCH_SPLIT(dev))
+-		return ironlake_dp_detect(connector);
++	uint32_t temp, bit;
+ 
+ 	switch (intel_dp->output_reg) {
+ 	case DP_B:
+@@ -1419,31 +1512,66 @@ intel_dp_detect(struct drm_connector *connector, bool force)
+ 		return connector_status_disconnected;
+ 
+ 	status = connector_status_disconnected;
+-	if (intel_dp_aux_native_read(intel_dp,
+-				     0x000, intel_dp->dpcd,
++	if (intel_dp_aux_native_read(intel_dp, 0x000, intel_dp->dpcd,
+ 				     sizeof (intel_dp->dpcd)) == sizeof (intel_dp->dpcd))
+ 	{
+ 		if (intel_dp->dpcd[0] != 0)
+ 			status = connector_status_connected;
+ 	}
++
+ 	return status;
+ }
+ 
++/**
++ * Uses CRT_HOTPLUG_EN and CRT_HOTPLUG_STAT to detect DP connection.
++ *
++ * \return true if DP port is connected.
++ * \return false if DP port is disconnected.
++ */
++static enum drm_connector_status
++intel_dp_detect(struct drm_connector *connector, bool force)
++{
++	struct intel_dp *intel_dp = intel_attached_dp(connector);
++	struct drm_device *dev = intel_dp->base.base.dev;
++	enum drm_connector_status status;
++	struct edid *edid = NULL;
++
++	intel_dp->has_audio = false;
++
++	if (HAS_PCH_SPLIT(dev))
++		status = ironlake_dp_detect(intel_dp);
++	else
++		status = g4x_dp_detect(intel_dp);
++	if (status != connector_status_connected)
++		return status;
++
++	if (intel_dp->force_audio) {
++		intel_dp->has_audio = intel_dp->force_audio > 0;
++	} else {
++		edid = drm_get_edid(connector, &intel_dp->adapter);
++		if (edid) {
++			intel_dp->has_audio = drm_detect_monitor_audio(edid);
++			connector->display_info.raw_edid = NULL;
++			kfree(edid);
++		}
++	}
++
++	return connector_status_connected;
++}
++
+ static int intel_dp_get_modes(struct drm_connector *connector)
+ {
+-	struct drm_encoder *encoder = intel_attached_encoder(connector);
+-	struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
+-	struct drm_device *dev = intel_dp->base.enc.dev;
++	struct intel_dp *intel_dp = intel_attached_dp(connector);
++	struct drm_device *dev = intel_dp->base.base.dev;
+ 	struct drm_i915_private *dev_priv = dev->dev_private;
+ 	int ret;
+ 
+ 	/* We should parse the EDID data and find out if it has an audio sink
+ 	 */
+ 
+-	ret = intel_ddc_get_modes(connector, intel_dp->base.ddc_bus);
++	ret = intel_ddc_get_modes(connector, &intel_dp->adapter);
+ 	if (ret) {
+-		if ((IS_eDP(intel_dp) || IS_PCH_eDP(intel_dp)) &&
+-		    !dev_priv->panel_fixed_mode) {
++		if (is_edp(intel_dp) && !dev_priv->panel_fixed_mode) {
+ 			struct drm_display_mode *newmode;
+ 			list_for_each_entry(newmode, &connector->probed_modes,
+ 					    head) {
+@@ -1459,7 +1587,7 @@ static int intel_dp_get_modes(struct drm_connector *connector)
+ 	}
+ 
+ 	/* if eDP has no EDID, try to use fixed panel mode from VBT */
+-	if (IS_eDP(intel_dp) || IS_PCH_eDP(intel_dp)) {
++	if (is_edp(intel_dp)) {
+ 		if (dev_priv->panel_fixed_mode != NULL) {
+ 			struct drm_display_mode *mode;
+ 			mode = drm_mode_duplicate(dev, dev_priv->panel_fixed_mode);
+@@ -1470,6 +1598,46 @@ static int intel_dp_get_modes(struct drm_connector *connector)
+ 	return 0;
+ }
+ 
++static int
++intel_dp_set_property(struct drm_connector *connector,
++		      struct drm_property *property,
++		      uint64_t val)
++{
++	struct intel_dp *intel_dp = intel_attached_dp(connector);
++	int ret;
++
++	ret = drm_connector_property_set_value(connector, property, val);
++	if (ret)
++		return ret;
++
++	if (property == intel_dp->force_audio_property) {
++		if (val == intel_dp->force_audio)
++			return 0;
++
++		intel_dp->force_audio = val;
++
++		if (val > 0 && intel_dp->has_audio)
++			return 0;
++		if (val < 0 && !intel_dp->has_audio)
++			return 0;
++
++		intel_dp->has_audio = val > 0;
++		goto done;
++	}
++
++	return -EINVAL;
++
++done:
++	if (intel_dp->base.base.crtc) {
++		struct drm_crtc *crtc = intel_dp->base.base.crtc;
++		drm_crtc_helper_set_mode(crtc, &crtc->mode,
++					 crtc->x, crtc->y,
++					 crtc->fb);
++	}
++
++	return 0;
++}
++
+ static void
+ intel_dp_destroy (struct drm_connector *connector)
+ {
+@@ -1478,6 +1646,15 @@ intel_dp_destroy (struct drm_connector *connector)
+ 	kfree(connector);
+ }
+ 
++static void intel_dp_encoder_destroy(struct drm_encoder *encoder)
++{
++	struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
++
++	i2c_del_adapter(&intel_dp->adapter);
++	drm_encoder_cleanup(encoder);
++	kfree(intel_dp);
++}
++
+ static const struct drm_encoder_helper_funcs intel_dp_helper_funcs = {
+ 	.dpms = intel_dp_dpms,
+ 	.mode_fixup = intel_dp_mode_fixup,
+@@ -1490,20 +1667,21 @@ static const struct drm_connector_funcs intel_dp_connector_funcs = {
+ 	.dpms = drm_helper_connector_dpms,
+ 	.detect = intel_dp_detect,
+ 	.fill_modes = drm_helper_probe_single_connector_modes,
++	.set_property = intel_dp_set_property,
+ 	.destroy = intel_dp_destroy,
+ };
+ 
+ static const struct drm_connector_helper_funcs intel_dp_connector_helper_funcs = {
+ 	.get_modes = intel_dp_get_modes,
+ 	.mode_valid = intel_dp_mode_valid,
+-	.best_encoder = intel_attached_encoder,
++	.best_encoder = intel_best_encoder,
+ };
+ 
+ static const struct drm_encoder_funcs intel_dp_enc_funcs = {
+-	.destroy = intel_encoder_destroy,
++	.destroy = intel_dp_encoder_destroy,
+ };
+ 
+-void
++static void
+ intel_dp_hot_plug(struct intel_encoder *intel_encoder)
+ {
+ 	struct intel_dp *intel_dp = container_of(intel_encoder, struct intel_dp, base);
+@@ -1554,6 +1732,20 @@ bool intel_dpd_is_edp(struct drm_device *dev)
+ 	return false;
+ }
+ 
++static void
++intel_dp_add_properties(struct intel_dp *intel_dp, struct drm_connector *connector)
++{
++	struct drm_device *dev = connector->dev;
++
++	intel_dp->force_audio_property =
++		drm_property_create(dev, DRM_MODE_PROP_RANGE, "force_audio", 2);
++	if (intel_dp->force_audio_property) {
++		intel_dp->force_audio_property->values[0] = -1;
++		intel_dp->force_audio_property->values[1] = 1;
++		drm_connector_attach_property(connector, intel_dp->force_audio_property, 0);
++	}
++}
++
+ void
+ intel_dp_init(struct drm_device *dev, int output_reg)
+ {
+@@ -1580,7 +1772,7 @@ intel_dp_init(struct drm_device *dev, int output_reg)
+ 		if (intel_dpd_is_edp(dev))
+ 			intel_dp->is_pch_edp = true;
+ 
+-	if (output_reg == DP_A || IS_PCH_eDP(intel_dp)) {
++	if (output_reg == DP_A || is_pch_edp(intel_dp)) {
+ 		type = DRM_MODE_CONNECTOR_eDP;
+ 		intel_encoder->type = INTEL_OUTPUT_EDP;
+ 	} else {
+@@ -1601,7 +1793,7 @@ intel_dp_init(struct drm_device *dev, int output_reg)
+ 	else if (output_reg == DP_D || output_reg == PCH_DP_D)
+ 		intel_encoder->clone_mask = (1 << INTEL_DP_D_CLONE_BIT);
+ 
+-	if (IS_eDP(intel_dp))
++	if (is_edp(intel_dp))
+ 		intel_encoder->clone_mask = (1 << INTEL_EDP_CLONE_BIT);
+ 
+ 	intel_encoder->crtc_mask = (1 << 0) | (1 << 1);
+@@ -1612,12 +1804,11 @@ intel_dp_init(struct drm_device *dev, int output_reg)
+ 	intel_dp->has_audio = false;
+ 	intel_dp->dpms_mode = DRM_MODE_DPMS_ON;
+ 
+-	drm_encoder_init(dev, &intel_encoder->enc, &intel_dp_enc_funcs,
++	drm_encoder_init(dev, &intel_encoder->base, &intel_dp_enc_funcs,
+ 			 DRM_MODE_ENCODER_TMDS);
+-	drm_encoder_helper_add(&intel_encoder->enc, &intel_dp_helper_funcs);
++	drm_encoder_helper_add(&intel_encoder->base, &intel_dp_helper_funcs);
+ 
+-	drm_mode_connector_attach_encoder(&intel_connector->base,
+-					  &intel_encoder->enc);
++	intel_connector_attach_encoder(intel_connector, intel_encoder);
+ 	drm_sysfs_connector_add(connector);
+ 
+ 	/* Set up the DDC bus. */
+@@ -1647,10 +1838,29 @@ intel_dp_init(struct drm_device *dev, int output_reg)
+ 
+ 	intel_dp_i2c_init(intel_dp, intel_connector, name);
+ 
+-	intel_encoder->ddc_bus = &intel_dp->adapter;
++	/* Cache some DPCD data in the eDP case */
++	if (is_edp(intel_dp)) {
++		int ret;
++		bool was_on;
++
++		was_on = ironlake_edp_panel_on(intel_dp);
++		ret = intel_dp_aux_native_read(intel_dp, DP_DPCD_REV,
++					       intel_dp->dpcd,
++					       sizeof(intel_dp->dpcd));
++		if (ret == sizeof(intel_dp->dpcd)) {
++			if (intel_dp->dpcd[0] >= 0x11)
++				dev_priv->no_aux_handshake = intel_dp->dpcd[3] &
++					DP_NO_AUX_HANDSHAKE_LINK_TRAINING;
++		} else {
++			DRM_ERROR("failed to retrieve link info\n");
++		}
++		if (!was_on)
++			ironlake_edp_panel_off(dev);
++	}
++
+ 	intel_encoder->hot_plug = intel_dp_hot_plug;
+ 
+-	if (output_reg == DP_A || IS_PCH_eDP(intel_dp)) {
++	if (is_edp(intel_dp)) {
+ 		/* initialize panel mode from VBT if available for eDP */
+ 		if (dev_priv->lfp_lvds_vbt_mode) {
+ 			dev_priv->panel_fixed_mode =
+@@ -1662,6 +1872,8 @@ intel_dp_init(struct drm_device *dev, int output_reg)
+ 		}
+ 	}
+ 
++	intel_dp_add_properties(intel_dp, connector);
++
+ 	/* For G4X desktop chip, PEG_BAND_GAP_DATA 3:0 must first be written
+ 	 * 0xd.  Failure to do so will result in spurious interrupts being
+ 	 * generated on the port when a cable is not attached.
+diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h
+index 8828b3a..21551fe 100644
+--- a/drivers/gpu/drm/i915/intel_drv.h
++++ b/drivers/gpu/drm/i915/intel_drv.h
+@@ -26,14 +26,12 @@
+ #define __INTEL_DRV_H__
+ 
+ #include <linux/i2c.h>
+-#include <linux/i2c-id.h>
+-#include <linux/i2c-algo-bit.h>
+ #include "i915_drv.h"
+ #include "drm_crtc.h"
+-
+ #include "drm_crtc_helper.h"
++#include "drm_fb_helper.h"
+ 
+-#define wait_for(COND, MS, W) ({ \
++#define _wait_for(COND, MS, W) ({ \
+ 	unsigned long timeout__ = jiffies + msecs_to_jiffies(MS);	\
+ 	int ret__ = 0;							\
+ 	while (! (COND)) {						\
+@@ -41,11 +39,24 @@
+ 			ret__ = -ETIMEDOUT;				\
+ 			break;						\
+ 		}							\
+-		if (W) msleep(W);					\
++		if (W && !in_dbg_master()) msleep(W);			\
+ 	}								\
+ 	ret__;								\
+ })
+ 
++#define wait_for(COND, MS) _wait_for(COND, MS, 1)
++#define wait_for_atomic(COND, MS) _wait_for(COND, MS, 0)
++
++#define MSLEEP(x) do { \
++	if (in_dbg_master()) \
++	       	mdelay(x); \
++	else \
++		msleep(x); \
++} while(0)
++
++#define KHz(x) (1000*x)
++#define MHz(x) KHz(1000*x)
++
+ /*
+  * Display related stuff
+  */
+@@ -96,24 +107,39 @@
+ #define INTEL_DVO_CHIP_TMDS 2
+ #define INTEL_DVO_CHIP_TVOUT 4
+ 
+-struct intel_i2c_chan {
+-	struct drm_device *drm_dev; /* for getting at dev. private (mmio etc.) */
+-	u32 reg; /* GPIO reg */
+-	struct i2c_adapter adapter;
+-	struct i2c_algo_bit_data algo;
+-};
++/* drm_display_mode->private_flags */
++#define INTEL_MODE_PIXEL_MULTIPLIER_SHIFT (0x0)
++#define INTEL_MODE_PIXEL_MULTIPLIER_MASK (0xf << INTEL_MODE_PIXEL_MULTIPLIER_SHIFT)
++
++static inline void
++intel_mode_set_pixel_multiplier(struct drm_display_mode *mode,
++				int multiplier)
++{
++	mode->clock *= multiplier;
++	mode->private_flags |= multiplier;
++}
++
++static inline int
++intel_mode_get_pixel_multiplier(const struct drm_display_mode *mode)
++{
++	return (mode->private_flags & INTEL_MODE_PIXEL_MULTIPLIER_MASK) >> INTEL_MODE_PIXEL_MULTIPLIER_SHIFT;
++}
+ 
+ struct intel_framebuffer {
+ 	struct drm_framebuffer base;
+ 	struct drm_gem_object *obj;
+ };
+ 
++struct intel_fbdev {
++	struct drm_fb_helper helper;
++	struct intel_framebuffer ifb;
++	struct list_head fbdev_list;
++	struct drm_display_mode *our_mode;
++};
+ 
+ struct intel_encoder {
+-	struct drm_encoder enc;
++	struct drm_encoder base;
+ 	int type;
+-	struct i2c_adapter *i2c_bus;
+-	struct i2c_adapter *ddc_bus;
+ 	bool load_detect_temp;
+ 	bool needs_tv_clock;
+ 	void (*hot_plug)(struct intel_encoder *);
+@@ -123,32 +149,7 @@ struct intel_encoder {
+ 
+ struct intel_connector {
+ 	struct drm_connector base;
+-};
+-
+-struct intel_crtc;
+-struct intel_overlay {
+-	struct drm_device *dev;
+-	struct intel_crtc *crtc;
+-	struct drm_i915_gem_object *vid_bo;
+-	struct drm_i915_gem_object *old_vid_bo;
+-	int active;
+-	int pfit_active;
+-	u32 pfit_vscale_ratio; /* shifted-point number, (1<<12) == 1.0 */
+-	u32 color_key;
+-	u32 brightness, contrast, saturation;
+-	u32 old_xscale, old_yscale;
+-	/* register access */
+-	u32 flip_addr;
+-	struct drm_i915_gem_object *reg_bo;
+-	void *virt_addr;
+-	/* flip handling */
+-	uint32_t last_flip_req;
+-	int hw_wedged;
+-#define HW_WEDGED		1
+-#define NEEDS_WAIT_FOR_FLIP	2
+-#define RELEASE_OLD_VID		3
+-#define SWITCH_OFF_STAGE_1	4
+-#define SWITCH_OFF_STAGE_2	5
++	struct intel_encoder *encoder;
+ };
+ 
+ struct intel_crtc {
+@@ -157,6 +158,7 @@ struct intel_crtc {
+ 	enum plane plane;
+ 	u8 lut_r[256], lut_g[256], lut_b[256];
+ 	int dpms_mode;
++	bool active; /* is the crtc on? independent of the dpms mode */
+ 	bool busy; /* is scanout buffer being updated frequently? */
+ 	struct timer_list idle_timer;
+ 	bool lowfreq_avail;
+@@ -168,14 +170,53 @@ struct intel_crtc {
+ 	uint32_t cursor_addr;
+ 	int16_t cursor_x, cursor_y;
+ 	int16_t cursor_width, cursor_height;
+-	bool cursor_visible, cursor_on;
++	bool cursor_visible;
+ };
+ 
+ #define to_intel_crtc(x) container_of(x, struct intel_crtc, base)
+ #define to_intel_connector(x) container_of(x, struct intel_connector, base)
+-#define enc_to_intel_encoder(x) container_of(x, struct intel_encoder, enc)
++#define to_intel_encoder(x) container_of(x, struct intel_encoder, base)
+ #define to_intel_framebuffer(x) container_of(x, struct intel_framebuffer, base)
+ 
++#define DIP_TYPE_AVI    0x82
++#define DIP_VERSION_AVI 0x2
++#define DIP_LEN_AVI     13
++
++struct dip_infoframe {
++	uint8_t type;		/* HB0 */
++	uint8_t ver;		/* HB1 */
++	uint8_t len;		/* HB2 - body len, not including checksum */
++	uint8_t ecc;		/* Header ECC */
++	uint8_t checksum;	/* PB0 */
++	union {
++		struct {
++			/* PB1 - Y 6:5, A 4:4, B 3:2, S 1:0 */
++			uint8_t Y_A_B_S;
++			/* PB2 - C 7:6, M 5:4, R 3:0 */
++			uint8_t C_M_R;
++			/* PB3 - ITC 7:7, EC 6:4, Q 3:2, SC 1:0 */
++			uint8_t ITC_EC_Q_SC;
++			/* PB4 - VIC 6:0 */
++			uint8_t VIC;
++			/* PB5 - PR 3:0 */
++			uint8_t PR;
++			/* PB6 to PB13 */
++			uint16_t top_bar_end;
++			uint16_t bottom_bar_start;
++			uint16_t left_bar_end;
++			uint16_t right_bar_start;
++		} avi;
++		uint8_t payload[27];
++	} __attribute__ ((packed)) body;
++} __attribute__((packed));
++
++static inline struct drm_crtc *
++intel_get_crtc_for_pipe(struct drm_device *dev, int pipe)
++{
++	struct drm_i915_private *dev_priv = dev->dev_private;
++	return dev_priv->pipe_to_crtc_mapping[pipe];
++}
++
+ struct intel_unpin_work {
+ 	struct work_struct work;
+ 	struct drm_device *dev;
+@@ -186,16 +227,12 @@ struct intel_unpin_work {
+ 	bool enable_stall_check;
+ };
+ 
+-struct i2c_adapter *intel_i2c_create(struct drm_device *dev, const u32 reg,
+-				     const char *name);
+-void intel_i2c_destroy(struct i2c_adapter *adapter);
+ int intel_ddc_get_modes(struct drm_connector *c, struct i2c_adapter *adapter);
+-extern bool intel_ddc_probe(struct intel_encoder *intel_encoder);
+-void intel_i2c_quirk_set(struct drm_device *dev, bool enable);
+-void intel_i2c_reset_gmbus(struct drm_device *dev);
++extern bool intel_ddc_probe(struct intel_encoder *intel_encoder, int ddc_bus);
+ 
+ extern void intel_crt_init(struct drm_device *dev);
+ extern void intel_hdmi_init(struct drm_device *dev, int sdvox_reg);
++void intel_dip_infoframe_csum(struct dip_infoframe *avi_if);
+ extern bool intel_sdvo_init(struct drm_device *dev, int output_device);
+ extern void intel_dvo_init(struct drm_device *dev);
+ extern void intel_tv_init(struct drm_device *dev);
+@@ -205,32 +242,41 @@ extern void intel_dp_init(struct drm_device *dev, int dp_reg);
+ void
+ intel_dp_set_m_n(struct drm_crtc *crtc, struct drm_display_mode *mode,
+ 		 struct drm_display_mode *adjusted_mode);
+-extern bool intel_pch_has_edp(struct drm_crtc *crtc);
+ extern bool intel_dpd_is_edp(struct drm_device *dev);
+ extern void intel_edp_link_config (struct intel_encoder *, int *, int *);
++extern bool intel_encoder_is_pch_edp(struct drm_encoder *encoder);
+ 
+-
++/* intel_panel.c */
+ extern void intel_fixed_panel_mode(struct drm_display_mode *fixed_mode,
+ 				   struct drm_display_mode *adjusted_mode);
+ extern void intel_pch_panel_fitting(struct drm_device *dev,
+ 				    int fitting_mode,
+ 				    struct drm_display_mode *mode,
+ 				    struct drm_display_mode *adjusted_mode);
++extern u32 intel_panel_get_max_backlight(struct drm_device *dev);
++extern u32 intel_panel_get_backlight(struct drm_device *dev);
++extern void intel_panel_set_backlight(struct drm_device *dev, u32 level);
+ 
+-extern int intel_panel_fitter_pipe (struct drm_device *dev);
+ extern void intel_crtc_load_lut(struct drm_crtc *crtc);
+ extern void intel_encoder_prepare (struct drm_encoder *encoder);
+ extern void intel_encoder_commit (struct drm_encoder *encoder);
+ extern void intel_encoder_destroy(struct drm_encoder *encoder);
+ 
+-extern struct drm_encoder *intel_attached_encoder(struct drm_connector *connector);
++static inline struct intel_encoder *intel_attached_encoder(struct drm_connector *connector)
++{
++	return to_intel_connector(connector)->encoder;
++}
++
++extern void intel_connector_attach_encoder(struct intel_connector *connector,
++					   struct intel_encoder *encoder);
++extern struct drm_encoder *intel_best_encoder(struct drm_connector *connector);
+ 
+ extern struct drm_display_mode *intel_crtc_mode_get(struct drm_device *dev,
+ 						    struct drm_crtc *crtc);
+ int intel_get_pipe_from_crtc_id(struct drm_device *dev, void *data,
+ 				struct drm_file *file_priv);
+ extern void intel_wait_for_vblank(struct drm_device *dev, int pipe);
+-extern struct drm_crtc *intel_get_crtc_from_pipe(struct drm_device *dev, int pipe);
++extern void intel_wait_for_pipe_off(struct drm_device *dev, int pipe);
+ extern struct drm_crtc *intel_get_load_detect_pipe(struct intel_encoder *intel_encoder,
+ 						   struct drm_connector *connector,
+ 						   struct drm_display_mode *mode,
+@@ -250,9 +296,11 @@ extern void intel_crtc_fb_gamma_get(struct drm_crtc *crtc, u16 *red, u16 *green,
+ extern void intel_init_clock_gating(struct drm_device *dev);
+ extern void ironlake_enable_drps(struct drm_device *dev);
+ extern void ironlake_disable_drps(struct drm_device *dev);
++extern void intel_init_emon(struct drm_device *dev);
+ 
+ extern int intel_pin_and_fence_fb_obj(struct drm_device *dev,
+-				      struct drm_gem_object *obj);
++				      struct drm_gem_object *obj,
++				      bool pipelined);
+ 
+ extern int intel_framebuffer_init(struct drm_device *dev,
+ 				  struct intel_framebuffer *ifb,
+@@ -267,9 +315,8 @@ extern void intel_finish_page_flip_plane(struct drm_device *dev, int plane);
+ 
+ extern void intel_setup_overlay(struct drm_device *dev);
+ extern void intel_cleanup_overlay(struct drm_device *dev);
+-extern int intel_overlay_switch_off(struct intel_overlay *overlay);
+-extern int intel_overlay_recover_from_interrupt(struct intel_overlay *overlay,
+-						int interruptible);
++extern int intel_overlay_switch_off(struct intel_overlay *overlay,
++				    bool interruptible);
+ extern int intel_overlay_put_image(struct drm_device *dev, void *data,
+ 				   struct drm_file *file_priv);
+ extern int intel_overlay_attrs(struct drm_device *dev, void *data,
+diff --git a/drivers/gpu/drm/i915/intel_dvo.c b/drivers/gpu/drm/i915/intel_dvo.c
+index 7c9ec14..ea37328 100644
+--- a/drivers/gpu/drm/i915/intel_dvo.c
++++ b/drivers/gpu/drm/i915/intel_dvo.c
+@@ -72,7 +72,7 @@ static const struct intel_dvo_device intel_dvo_devices[] = {
+ 		.name = "ch7017",
+ 		.dvo_reg = DVOC,
+ 		.slave_addr = 0x75,
+-		.gpio = GPIOE,
++		.gpio = GMBUS_PORT_DPB,
+ 		.dev_ops = &ch7017_ops,
+ 	}
+ };
+@@ -88,7 +88,13 @@ struct intel_dvo {
+ 
+ static struct intel_dvo *enc_to_intel_dvo(struct drm_encoder *encoder)
+ {
+-	return container_of(enc_to_intel_encoder(encoder), struct intel_dvo, base);
++	return container_of(encoder, struct intel_dvo, base.base);
++}
++
++static struct intel_dvo *intel_attached_dvo(struct drm_connector *connector)
++{
++	return container_of(intel_attached_encoder(connector),
++			    struct intel_dvo, base);
+ }
+ 
+ static void intel_dvo_dpms(struct drm_encoder *encoder, int mode)
+@@ -112,8 +118,7 @@ static void intel_dvo_dpms(struct drm_encoder *encoder, int mode)
+ static int intel_dvo_mode_valid(struct drm_connector *connector,
+ 				struct drm_display_mode *mode)
+ {
+-	struct drm_encoder *encoder = intel_attached_encoder(connector);
+-	struct intel_dvo *intel_dvo = enc_to_intel_dvo(encoder);
++	struct intel_dvo *intel_dvo = intel_attached_dvo(connector);
+ 
+ 	if (mode->flags & DRM_MODE_FLAG_DBLSCAN)
+ 		return MODE_NO_DBLESCAN;
+@@ -224,23 +229,22 @@ static void intel_dvo_mode_set(struct drm_encoder *encoder,
+ static enum drm_connector_status
+ intel_dvo_detect(struct drm_connector *connector, bool force)
+ {
+-	struct drm_encoder *encoder = intel_attached_encoder(connector);
+-	struct intel_dvo *intel_dvo = enc_to_intel_dvo(encoder);
+-
++	struct intel_dvo *intel_dvo = intel_attached_dvo(connector);
+ 	return intel_dvo->dev.dev_ops->detect(&intel_dvo->dev);
+ }
+ 
+ static int intel_dvo_get_modes(struct drm_connector *connector)
+ {
+-	struct drm_encoder *encoder = intel_attached_encoder(connector);
+-	struct intel_dvo *intel_dvo = enc_to_intel_dvo(encoder);
++	struct intel_dvo *intel_dvo = intel_attached_dvo(connector);
++	struct drm_i915_private *dev_priv = connector->dev->dev_private;
+ 
+ 	/* We should probably have an i2c driver get_modes function for those
+ 	 * devices which will have a fixed set of modes determined by the chip
+ 	 * (TV-out, for example), but for now with just TMDS and LVDS,
+ 	 * that's not the case.
+ 	 */
+-	intel_ddc_get_modes(connector, intel_dvo->base.ddc_bus);
++	intel_ddc_get_modes(connector,
++			    &dev_priv->gmbus[GMBUS_PORT_DPC].adapter);
+ 	if (!list_empty(&connector->probed_modes))
+ 		return 1;
+ 
+@@ -281,7 +285,7 @@ static const struct drm_connector_funcs intel_dvo_connector_funcs = {
+ static const struct drm_connector_helper_funcs intel_dvo_connector_helper_funcs = {
+ 	.mode_valid = intel_dvo_mode_valid,
+ 	.get_modes = intel_dvo_get_modes,
+-	.best_encoder = intel_attached_encoder,
++	.best_encoder = intel_best_encoder,
+ };
+ 
+ static void intel_dvo_enc_destroy(struct drm_encoder *encoder)
+@@ -311,8 +315,7 @@ intel_dvo_get_current_mode(struct drm_connector *connector)
+ {
+ 	struct drm_device *dev = connector->dev;
+ 	struct drm_i915_private *dev_priv = dev->dev_private;
+-	struct drm_encoder *encoder = intel_attached_encoder(connector);
+-	struct intel_dvo *intel_dvo = enc_to_intel_dvo(encoder);
++	struct intel_dvo *intel_dvo = intel_attached_dvo(connector);
+ 	uint32_t dvo_val = I915_READ(intel_dvo->dev.dvo_reg);
+ 	struct drm_display_mode *mode = NULL;
+ 
+@@ -323,7 +326,7 @@ intel_dvo_get_current_mode(struct drm_connector *connector)
+ 		struct drm_crtc *crtc;
+ 		int pipe = (dvo_val & DVO_PIPE_B_SELECT) ? 1 : 0;
+ 
+-		crtc = intel_get_crtc_from_pipe(dev, pipe);
++		crtc = intel_get_crtc_for_pipe(dev, pipe);
+ 		if (crtc) {
+ 			mode = intel_crtc_mode_get(dev, crtc);
+ 			if (mode) {
+@@ -341,11 +344,10 @@ intel_dvo_get_current_mode(struct drm_connector *connector)
+ 
+ void intel_dvo_init(struct drm_device *dev)
+ {
++	struct drm_i915_private *dev_priv = dev->dev_private;
+ 	struct intel_encoder *intel_encoder;
+ 	struct intel_dvo *intel_dvo;
+ 	struct intel_connector *intel_connector;
+-	struct i2c_adapter *i2cbus = NULL;
+-	int ret = 0;
+ 	int i;
+ 	int encoder_type = DRM_MODE_ENCODER_NONE;
+ 
+@@ -360,16 +362,14 @@ void intel_dvo_init(struct drm_device *dev)
+ 	}
+ 
+ 	intel_encoder = &intel_dvo->base;
+-
+-	/* Set up the DDC bus */
+-	intel_encoder->ddc_bus = intel_i2c_create(dev, GPIOD, "DVODDC_D");
+-	if (!intel_encoder->ddc_bus)
+-		goto free_intel;
++	drm_encoder_init(dev, &intel_encoder->base,
++			 &intel_dvo_enc_funcs, encoder_type);
+ 
+ 	/* Now, try to find a controller */
+ 	for (i = 0; i < ARRAY_SIZE(intel_dvo_devices); i++) {
+ 		struct drm_connector *connector = &intel_connector->base;
+ 		const struct intel_dvo_device *dvo = &intel_dvo_devices[i];
++		struct i2c_adapter *i2c;
+ 		int gpio;
+ 
+ 		/* Allow the I2C driver info to specify the GPIO to be used in
+@@ -379,24 +379,18 @@ void intel_dvo_init(struct drm_device *dev)
+ 		if (dvo->gpio != 0)
+ 			gpio = dvo->gpio;
+ 		else if (dvo->type == INTEL_DVO_CHIP_LVDS)
+-			gpio = GPIOB;
++			gpio = GMBUS_PORT_SSC;
+ 		else
+-			gpio = GPIOE;
++			gpio = GMBUS_PORT_DPB;
+ 
+ 		/* Set up the I2C bus necessary for the chip we're probing.
+ 		 * It appears that everything is on GPIOE except for panels
+ 		 * on i830 laptops, which are on GPIOB (DVOA).
+ 		 */
+-		if (i2cbus != NULL)
+-			intel_i2c_destroy(i2cbus);
+-		if (!(i2cbus = intel_i2c_create(dev, gpio,
+-			gpio == GPIOB ? "DVOI2C_B" : "DVOI2C_E"))) {
+-			continue;
+-		}
++		i2c = &dev_priv->gmbus[gpio].adapter;
+ 
+ 		intel_dvo->dev = *dvo;
+-		ret = dvo->dev_ops->init(&intel_dvo->dev, i2cbus);
+-		if (!ret)
++		if (!dvo->dev_ops->init(&intel_dvo->dev, i2c))
+ 			continue;
+ 
+ 		intel_encoder->type = INTEL_OUTPUT_DVO;
+@@ -427,13 +421,10 @@ void intel_dvo_init(struct drm_device *dev)
+ 		connector->interlace_allowed = false;
+ 		connector->doublescan_allowed = false;
+ 
+-		drm_encoder_init(dev, &intel_encoder->enc,
+-				 &intel_dvo_enc_funcs, encoder_type);
+-		drm_encoder_helper_add(&intel_encoder->enc,
++		drm_encoder_helper_add(&intel_encoder->base,
+ 				       &intel_dvo_helper_funcs);
+ 
+-		drm_mode_connector_attach_encoder(&intel_connector->base,
+-						  &intel_encoder->enc);
++		intel_connector_attach_encoder(intel_connector, intel_encoder);
+ 		if (dvo->type == INTEL_DVO_CHIP_LVDS) {
+ 			/* For our LVDS chipsets, we should hopefully be able
+ 			 * to dig the fixed panel mode out of the BIOS data.
+@@ -451,11 +442,7 @@ void intel_dvo_init(struct drm_device *dev)
+ 		return;
+ 	}
+ 
+-	intel_i2c_destroy(intel_encoder->ddc_bus);
+-	/* Didn't find a chip, so tear down. */
+-	if (i2cbus != NULL)
+-		intel_i2c_destroy(i2cbus);
+-free_intel:
++	drm_encoder_cleanup(&intel_encoder->base);
+ 	kfree(intel_dvo);
+ 	kfree(intel_connector);
+ }
+diff --git a/drivers/gpu/drm/i915/intel_fb.c b/drivers/gpu/drm/i915/intel_fb.c
+index b61966c..af2a1dd 100644
+--- a/drivers/gpu/drm/i915/intel_fb.c
++++ b/drivers/gpu/drm/i915/intel_fb.c
+@@ -44,13 +44,6 @@
+ #include "i915_drm.h"
+ #include "i915_drv.h"
+ 
+-struct intel_fbdev {
+-	struct drm_fb_helper helper;
+-	struct intel_framebuffer ifb;
+-	struct list_head fbdev_list;
+-	struct drm_display_mode *our_mode;
+-};
+-
+ static struct fb_ops intelfb_ops = {
+ 	.owner = THIS_MODULE,
+ 	.fb_check_var = drm_fb_helper_check_var,
+@@ -75,7 +68,7 @@ static int intelfb_create(struct intel_fbdev *ifbdev,
+ 	struct drm_gem_object *fbo = NULL;
+ 	struct drm_i915_gem_object *obj_priv;
+ 	struct device *device = &dev->pdev->dev;
+-	int size, ret, mmio_bar = IS_I9XX(dev) ? 0 : 1;
++	int size, ret, mmio_bar = IS_GEN2(dev) ? 1 : 0;
+ 
+ 	/* we don't do packed 24bpp */
+ 	if (sizes->surface_bpp == 24)
+@@ -100,19 +93,13 @@ static int intelfb_create(struct intel_fbdev *ifbdev,
+ 
+ 	mutex_lock(&dev->struct_mutex);
+ 
+-	ret = intel_pin_and_fence_fb_obj(dev, fbo);
++	/* Flush everything out, we'll be doing GTT only from now on */
++	ret = intel_pin_and_fence_fb_obj(dev, fbo, false);
+ 	if (ret) {
+ 		DRM_ERROR("failed to pin fb: %d\n", ret);
+ 		goto out_unref;
+ 	}
+ 
+-	/* Flush everything out, we'll be doing GTT only from now on */
+-	ret = i915_gem_object_set_to_gtt_domain(fbo, 1);
+-	if (ret) {
+-		DRM_ERROR("failed to bind fb: %d.\n", ret);
+-		goto out_unpin;
+-	}
+-
+ 	info = framebuffer_alloc(0, device);
+ 	if (!info) {
+ 		ret = -ENOMEM;
+@@ -142,7 +129,7 @@ static int intelfb_create(struct intel_fbdev *ifbdev,
+ 		goto out_unpin;
+ 	}
+ 	info->apertures->ranges[0].base = dev->mode_config.fb_base;
+-	if (IS_I9XX(dev))
++	if (!IS_GEN2(dev))
+ 		info->apertures->ranges[0].size = pci_resource_len(dev->pdev, 2);
+ 	else
+ 		info->apertures->ranges[0].size = pci_resource_len(dev->pdev, 0);
+@@ -219,8 +206,8 @@ static struct drm_fb_helper_funcs intel_fb_helper_funcs = {
+ 	.fb_probe = intel_fb_find_or_create_single,
+ };
+ 
+-int intel_fbdev_destroy(struct drm_device *dev,
+-			struct intel_fbdev *ifbdev)
++static void intel_fbdev_destroy(struct drm_device *dev,
++				struct intel_fbdev *ifbdev)
+ {
+ 	struct fb_info *info;
+ 	struct intel_framebuffer *ifb = &ifbdev->ifb;
+@@ -238,11 +225,9 @@ int intel_fbdev_destroy(struct drm_device *dev,
+ 
+ 	drm_framebuffer_cleanup(&ifb->base);
+ 	if (ifb->obj) {
+-		drm_gem_object_unreference(ifb->obj);
++		drm_gem_object_unreference_unlocked(ifb->obj);
+ 		ifb->obj = NULL;
+ 	}
+-
+-	return 0;
+ }
+ 
+ int intel_fbdev_init(struct drm_device *dev)
+diff --git a/drivers/gpu/drm/i915/intel_hdmi.c b/drivers/gpu/drm/i915/intel_hdmi.c
+index 926934a..0d0273e 100644
+--- a/drivers/gpu/drm/i915/intel_hdmi.c
++++ b/drivers/gpu/drm/i915/intel_hdmi.c
+@@ -40,12 +40,76 @@
+ struct intel_hdmi {
+ 	struct intel_encoder base;
+ 	u32 sdvox_reg;
++	int ddc_bus;
+ 	bool has_hdmi_sink;
++	bool has_audio;
++	int force_audio;
++	struct drm_property *force_audio_property;
+ };
+ 
+ static struct intel_hdmi *enc_to_intel_hdmi(struct drm_encoder *encoder)
+ {
+-	return container_of(enc_to_intel_encoder(encoder), struct intel_hdmi, base);
++	return container_of(encoder, struct intel_hdmi, base.base);
++}
++
++static struct intel_hdmi *intel_attached_hdmi(struct drm_connector *connector)
++{
++	return container_of(intel_attached_encoder(connector),
++			    struct intel_hdmi, base);
++}
++
++void intel_dip_infoframe_csum(struct dip_infoframe *avi_if)
++{
++	uint8_t *data = (uint8_t *)avi_if;
++	uint8_t sum = 0;
++	unsigned i;
++
++	avi_if->checksum = 0;
++	avi_if->ecc = 0;
++
++	for (i = 0; i < sizeof(*avi_if); i++)
++		sum += data[i];
++
++	avi_if->checksum = 0x100 - sum;
++}
++
++static void intel_hdmi_set_avi_infoframe(struct drm_encoder *encoder)
++{
++	struct dip_infoframe avi_if = {
++		.type = DIP_TYPE_AVI,
++		.ver = DIP_VERSION_AVI,
++		.len = DIP_LEN_AVI,
++	};
++	uint32_t *data = (uint32_t *)&avi_if;
++	struct drm_device *dev = encoder->dev;
++	struct drm_i915_private *dev_priv = dev->dev_private;
++	struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder);
++	u32 port;
++	unsigned i;
++
++	if (!intel_hdmi->has_hdmi_sink)
++		return;
++
++	/* XXX first guess at handling video port, is this correct? */
++	if (intel_hdmi->sdvox_reg == SDVOB)
++		port = VIDEO_DIP_PORT_B;
++	else if (intel_hdmi->sdvox_reg == SDVOC)
++		port = VIDEO_DIP_PORT_C;
++	else
++		return;
++
++	I915_WRITE(VIDEO_DIP_CTL, VIDEO_DIP_ENABLE | port |
++		   VIDEO_DIP_SELECT_AVI | VIDEO_DIP_FREQ_VSYNC);
++
++	intel_dip_infoframe_csum(&avi_if);
++	for (i = 0; i < sizeof(avi_if); i += 4) {
++		I915_WRITE(VIDEO_DIP_DATA, *data);
++		data++;
++	}
++
++	I915_WRITE(VIDEO_DIP_CTL, VIDEO_DIP_ENABLE | port |
++		   VIDEO_DIP_SELECT_AVI | VIDEO_DIP_FREQ_VSYNC |
++		   VIDEO_DIP_ENABLE_AVI);
+ }
+ 
+ static void intel_hdmi_mode_set(struct drm_encoder *encoder,
+@@ -65,10 +129,13 @@ static void intel_hdmi_mode_set(struct drm_encoder *encoder,
+ 	if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC)
+ 		sdvox |= SDVO_HSYNC_ACTIVE_HIGH;
+ 
+-	if (intel_hdmi->has_hdmi_sink) {
++	/* Required on CPT */
++	if (intel_hdmi->has_hdmi_sink && HAS_PCH_CPT(dev))
++		sdvox |= HDMI_MODE_SELECT;
++
++	if (intel_hdmi->has_audio) {
+ 		sdvox |= SDVO_AUDIO_ENABLE;
+-		if (HAS_PCH_CPT(dev))
+-			sdvox |= HDMI_MODE_SELECT;
++		sdvox |= SDVO_NULL_PACKETS_DURING_VSYNC;
+ 	}
+ 
+ 	if (intel_crtc->pipe == 1) {
+@@ -80,6 +147,8 @@ static void intel_hdmi_mode_set(struct drm_encoder *encoder,
+ 
+ 	I915_WRITE(intel_hdmi->sdvox_reg, sdvox);
+ 	POSTING_READ(intel_hdmi->sdvox_reg);
++
++	intel_hdmi_set_avi_infoframe(encoder);
+ }
+ 
+ static void intel_hdmi_dpms(struct drm_encoder *encoder, int mode)
+@@ -141,36 +210,85 @@ static bool intel_hdmi_mode_fixup(struct drm_encoder *encoder,
+ static enum drm_connector_status
+ intel_hdmi_detect(struct drm_connector *connector, bool force)
+ {
+-	struct drm_encoder *encoder = intel_attached_encoder(connector);
+-	struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder);
+-	struct edid *edid = NULL;
++	struct intel_hdmi *intel_hdmi = intel_attached_hdmi(connector);
++	struct drm_i915_private *dev_priv = connector->dev->dev_private;
++	struct edid *edid;
+ 	enum drm_connector_status status = connector_status_disconnected;
+ 
+ 	intel_hdmi->has_hdmi_sink = false;
+-	edid = drm_get_edid(connector, intel_hdmi->base.ddc_bus);
++	intel_hdmi->has_audio = false;
++	edid = drm_get_edid(connector,
++			    &dev_priv->gmbus[intel_hdmi->ddc_bus].adapter);
+ 
+ 	if (edid) {
+ 		if (edid->input & DRM_EDID_INPUT_DIGITAL) {
+ 			status = connector_status_connected;
+ 			intel_hdmi->has_hdmi_sink = drm_detect_hdmi_monitor(edid);
++			intel_hdmi->has_audio = drm_detect_monitor_audio(edid);
+ 		}
+ 		connector->display_info.raw_edid = NULL;
+ 		kfree(edid);
+ 	}
+ 
++	if (status == connector_status_connected) {
++		if (intel_hdmi->force_audio)
++			intel_hdmi->has_audio = intel_hdmi->force_audio > 0;
++	}
++
+ 	return status;
+ }
+ 
+ static int intel_hdmi_get_modes(struct drm_connector *connector)
+ {
+-	struct drm_encoder *encoder = intel_attached_encoder(connector);
+-	struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder);
++	struct intel_hdmi *intel_hdmi = intel_attached_hdmi(connector);
++	struct drm_i915_private *dev_priv = connector->dev->dev_private;
+ 
+ 	/* We should parse the EDID data and find out if it's an HDMI sink so
+ 	 * we can send audio to it.
+ 	 */
+ 
+-	return intel_ddc_get_modes(connector, intel_hdmi->base.ddc_bus);
++	return intel_ddc_get_modes(connector,
++				   &dev_priv->gmbus[intel_hdmi->ddc_bus].adapter);
++}
++
++static int
++intel_hdmi_set_property(struct drm_connector *connector,
++		      struct drm_property *property,
++		      uint64_t val)
++{
++	struct intel_hdmi *intel_hdmi = intel_attached_hdmi(connector);
++	int ret;
++
++	ret = drm_connector_property_set_value(connector, property, val);
++	if (ret)
++		return ret;
++
++	if (property == intel_hdmi->force_audio_property) {
++		if (val == intel_hdmi->force_audio)
++			return 0;
++
++		intel_hdmi->force_audio = val;
++
++		if (val > 0 && intel_hdmi->has_audio)
++			return 0;
++		if (val < 0 && !intel_hdmi->has_audio)
++			return 0;
++
++		intel_hdmi->has_audio = val > 0;
++		goto done;
++	}
++
++	return -EINVAL;
++
++done:
++	if (intel_hdmi->base.base.crtc) {
++		struct drm_crtc *crtc = intel_hdmi->base.base.crtc;
++		drm_crtc_helper_set_mode(crtc, &crtc->mode,
++					 crtc->x, crtc->y,
++					 crtc->fb);
++	}
++
++	return 0;
+ }
+ 
+ static void intel_hdmi_destroy(struct drm_connector *connector)
+@@ -192,19 +310,34 @@ static const struct drm_connector_funcs intel_hdmi_connector_funcs = {
+ 	.dpms = drm_helper_connector_dpms,
+ 	.detect = intel_hdmi_detect,
+ 	.fill_modes = drm_helper_probe_single_connector_modes,
++	.set_property = intel_hdmi_set_property,
+ 	.destroy = intel_hdmi_destroy,
+ };
+ 
+ static const struct drm_connector_helper_funcs intel_hdmi_connector_helper_funcs = {
+ 	.get_modes = intel_hdmi_get_modes,
+ 	.mode_valid = intel_hdmi_mode_valid,
+-	.best_encoder = intel_attached_encoder,
++	.best_encoder = intel_best_encoder,
+ };
+ 
+ static const struct drm_encoder_funcs intel_hdmi_enc_funcs = {
+ 	.destroy = intel_encoder_destroy,
+ };
+ 
++static void
++intel_hdmi_add_properties(struct intel_hdmi *intel_hdmi, struct drm_connector *connector)
++{
++	struct drm_device *dev = connector->dev;
++
++	intel_hdmi->force_audio_property =
++		drm_property_create(dev, DRM_MODE_PROP_RANGE, "force_audio", 2);
++	if (intel_hdmi->force_audio_property) {
++		intel_hdmi->force_audio_property->values[0] = -1;
++		intel_hdmi->force_audio_property->values[1] = 1;
++		drm_connector_attach_property(connector, intel_hdmi->force_audio_property, 0);
++	}
++}
++
+ void intel_hdmi_init(struct drm_device *dev, int sdvox_reg)
+ {
+ 	struct drm_i915_private *dev_priv = dev->dev_private;
+@@ -224,6 +357,9 @@ void intel_hdmi_init(struct drm_device *dev, int sdvox_reg)
+ 	}
+ 
+ 	intel_encoder = &intel_hdmi->base;
++	drm_encoder_init(dev, &intel_encoder->base, &intel_hdmi_enc_funcs,
++			 DRM_MODE_ENCODER_TMDS);
++
+ 	connector = &intel_connector->base;
+ 	drm_connector_init(dev, connector, &intel_hdmi_connector_funcs,
+ 			   DRM_MODE_CONNECTOR_HDMIA);
+@@ -239,39 +375,33 @@ void intel_hdmi_init(struct drm_device *dev, int sdvox_reg)
+ 	/* Set up the DDC bus. */
+ 	if (sdvox_reg == SDVOB) {
+ 		intel_encoder->clone_mask = (1 << INTEL_HDMIB_CLONE_BIT);
+-		intel_encoder->ddc_bus = intel_i2c_create(dev, GPIOE, "HDMIB");
++		intel_hdmi->ddc_bus = GMBUS_PORT_DPB;
+ 		dev_priv->hotplug_supported_mask |= HDMIB_HOTPLUG_INT_STATUS;
+ 	} else if (sdvox_reg == SDVOC) {
+ 		intel_encoder->clone_mask = (1 << INTEL_HDMIC_CLONE_BIT);
+-		intel_encoder->ddc_bus = intel_i2c_create(dev, GPIOD, "HDMIC");
++		intel_hdmi->ddc_bus = GMBUS_PORT_DPC;
+ 		dev_priv->hotplug_supported_mask |= HDMIC_HOTPLUG_INT_STATUS;
+ 	} else if (sdvox_reg == HDMIB) {
+ 		intel_encoder->clone_mask = (1 << INTEL_HDMID_CLONE_BIT);
+-		intel_encoder->ddc_bus = intel_i2c_create(dev, PCH_GPIOE,
+-								"HDMIB");
++		intel_hdmi->ddc_bus = GMBUS_PORT_DPB;
+ 		dev_priv->hotplug_supported_mask |= HDMIB_HOTPLUG_INT_STATUS;
+ 	} else if (sdvox_reg == HDMIC) {
+ 		intel_encoder->clone_mask = (1 << INTEL_HDMIE_CLONE_BIT);
+-		intel_encoder->ddc_bus = intel_i2c_create(dev, PCH_GPIOD,
+-								"HDMIC");
++		intel_hdmi->ddc_bus = GMBUS_PORT_DPC;
+ 		dev_priv->hotplug_supported_mask |= HDMIC_HOTPLUG_INT_STATUS;
+ 	} else if (sdvox_reg == HDMID) {
+ 		intel_encoder->clone_mask = (1 << INTEL_HDMIF_CLONE_BIT);
+-		intel_encoder->ddc_bus = intel_i2c_create(dev, PCH_GPIOF,
+-								"HDMID");
++		intel_hdmi->ddc_bus = GMBUS_PORT_DPD;
+ 		dev_priv->hotplug_supported_mask |= HDMID_HOTPLUG_INT_STATUS;
+ 	}
+-	if (!intel_encoder->ddc_bus)
+-		goto err_connector;
+ 
+ 	intel_hdmi->sdvox_reg = sdvox_reg;
+ 
+-	drm_encoder_init(dev, &intel_encoder->enc, &intel_hdmi_enc_funcs,
+-			 DRM_MODE_ENCODER_TMDS);
+-	drm_encoder_helper_add(&intel_encoder->enc, &intel_hdmi_helper_funcs);
++	drm_encoder_helper_add(&intel_encoder->base, &intel_hdmi_helper_funcs);
++
++	intel_hdmi_add_properties(intel_hdmi, connector);
+ 
+-	drm_mode_connector_attach_encoder(&intel_connector->base,
+-					  &intel_encoder->enc);
++	intel_connector_attach_encoder(intel_connector, intel_encoder);
+ 	drm_sysfs_connector_add(connector);
+ 
+ 	/* For G4X desktop chip, PEG_BAND_GAP_DATA 3:0 must first be written
+@@ -282,13 +412,4 @@ void intel_hdmi_init(struct drm_device *dev, int sdvox_reg)
+ 		u32 temp = I915_READ(PEG_BAND_GAP_DATA);
+ 		I915_WRITE(PEG_BAND_GAP_DATA, (temp & ~0xf) | 0xd);
+ 	}
+-
+-	return;
+-
+-err_connector:
+-	drm_connector_cleanup(connector);
+-	kfree(intel_hdmi);
+-	kfree(intel_connector);
+-
+-	return;
+ }
+diff --git a/drivers/gpu/drm/i915/intel_i2c.c b/drivers/gpu/drm/i915/intel_i2c.c
+index c2649c7..2be4f72 100644
+--- a/drivers/gpu/drm/i915/intel_i2c.c
++++ b/drivers/gpu/drm/i915/intel_i2c.c
+@@ -1,6 +1,6 @@
+ /*
+  * Copyright (c) 2006 Dave Airlie <airlied@linux.ie>
+- * Copyright © 2006-2008 Intel Corporation
++ * Copyright © 2006-2008,2010 Intel Corporation
+  *   Jesse Barnes <jesse.barnes@intel.com>
+  *
+  * Permission is hereby granted, free of charge, to any person obtaining a
+@@ -24,10 +24,9 @@
+  *
+  * Authors:
+  *	Eric Anholt <eric@anholt.net>
++ *	Chris Wilson <chris@chris-wilson.co.uk>
+  */
+ #include <linux/i2c.h>
+-#include <linux/slab.h>
+-#include <linux/i2c-id.h>
+ #include <linux/i2c-algo-bit.h>
+ #include "drmP.h"
+ #include "drm.h"
+@@ -35,79 +34,106 @@
+ #include "i915_drm.h"
+ #include "i915_drv.h"
+ 
+-void intel_i2c_quirk_set(struct drm_device *dev, bool enable)
++/* Intel GPIO access functions */
++
++#define I2C_RISEFALL_TIME 20
++
++static inline struct intel_gmbus *
++to_intel_gmbus(struct i2c_adapter *i2c)
++{
++	return container_of(i2c, struct intel_gmbus, adapter);
++}
++
++struct intel_gpio {
++	struct i2c_adapter adapter;
++	struct i2c_algo_bit_data algo;
++	struct drm_i915_private *dev_priv;
++	u32 reg;
++};
++
++void
++intel_i2c_reset(struct drm_device *dev)
+ {
+ 	struct drm_i915_private *dev_priv = dev->dev_private;
++	if (HAS_PCH_SPLIT(dev))
++		I915_WRITE(PCH_GMBUS0, 0);
++	else
++		I915_WRITE(GMBUS0, 0);
++}
++
++static void intel_i2c_quirk_set(struct drm_i915_private *dev_priv, bool enable)
++{
++	u32 val;
+ 
+ 	/* When using bit bashing for I2C, this bit needs to be set to 1 */
+-	if (!IS_PINEVIEW(dev))
++	if (!IS_PINEVIEW(dev_priv->dev))
+ 		return;
++
++	val = I915_READ(DSPCLK_GATE_D);
+ 	if (enable)
+-		I915_WRITE(DSPCLK_GATE_D,
+-			I915_READ(DSPCLK_GATE_D) | DPCUNIT_CLOCK_GATE_DISABLE);
++		val |= DPCUNIT_CLOCK_GATE_DISABLE;
+ 	else
+-		I915_WRITE(DSPCLK_GATE_D,
+-			I915_READ(DSPCLK_GATE_D) & (~DPCUNIT_CLOCK_GATE_DISABLE));
++		val &= ~DPCUNIT_CLOCK_GATE_DISABLE;
++	I915_WRITE(DSPCLK_GATE_D, val);
+ }
+ 
+-/*
+- * Intel GPIO access functions
+- */
++static u32 get_reserved(struct intel_gpio *gpio)
++{
++	struct drm_i915_private *dev_priv = gpio->dev_priv;
++	struct drm_device *dev = dev_priv->dev;
++	u32 reserved = 0;
+ 
+-#define I2C_RISEFALL_TIME 20
++	/* On most chips, these bits must be preserved in software. */
++	if (!IS_I830(dev) && !IS_845G(dev))
++		reserved = I915_READ(gpio->reg) & (GPIO_DATA_PULLUP_DISABLE |
++						   GPIO_CLOCK_PULLUP_DISABLE);
++
++	return reserved;
++}
+ 
+ static int get_clock(void *data)
+ {
+-	struct intel_i2c_chan *chan = data;
+-	struct drm_i915_private *dev_priv = chan->drm_dev->dev_private;
+-	u32 val;
+-
+-	val = I915_READ(chan->reg);
+-	return ((val & GPIO_CLOCK_VAL_IN) != 0);
++	struct intel_gpio *gpio = data;
++	struct drm_i915_private *dev_priv = gpio->dev_priv;
++	u32 reserved = get_reserved(gpio);
++	I915_WRITE(gpio->reg, reserved | GPIO_CLOCK_DIR_MASK);
++	I915_WRITE(gpio->reg, reserved);
++	return (I915_READ(gpio->reg) & GPIO_CLOCK_VAL_IN) != 0;
+ }
+ 
+ static int get_data(void *data)
+ {
+-	struct intel_i2c_chan *chan = data;
+-	struct drm_i915_private *dev_priv = chan->drm_dev->dev_private;
+-	u32 val;
+-
+-	val = I915_READ(chan->reg);
+-	return ((val & GPIO_DATA_VAL_IN) != 0);
++	struct intel_gpio *gpio = data;
++	struct drm_i915_private *dev_priv = gpio->dev_priv;
++	u32 reserved = get_reserved(gpio);
++	I915_WRITE(gpio->reg, reserved | GPIO_DATA_DIR_MASK);
++	I915_WRITE(gpio->reg, reserved);
++	return (I915_READ(gpio->reg) & GPIO_DATA_VAL_IN) != 0;
+ }
+ 
+ static void set_clock(void *data, int state_high)
+ {
+-	struct intel_i2c_chan *chan = data;
+-	struct drm_device *dev = chan->drm_dev;
+-	struct drm_i915_private *dev_priv = chan->drm_dev->dev_private;
+-	u32 reserved = 0, clock_bits;
+-
+-	/* On most chips, these bits must be preserved in software. */
+-	if (!IS_I830(dev) && !IS_845G(dev))
+-		reserved = I915_READ(chan->reg) & (GPIO_DATA_PULLUP_DISABLE |
+-						   GPIO_CLOCK_PULLUP_DISABLE);
++	struct intel_gpio *gpio = data;
++	struct drm_i915_private *dev_priv = gpio->dev_priv;
++	u32 reserved = get_reserved(gpio);
++	u32 clock_bits;
+ 
+ 	if (state_high)
+ 		clock_bits = GPIO_CLOCK_DIR_IN | GPIO_CLOCK_DIR_MASK;
+ 	else
+ 		clock_bits = GPIO_CLOCK_DIR_OUT | GPIO_CLOCK_DIR_MASK |
+ 			GPIO_CLOCK_VAL_MASK;
+-	I915_WRITE(chan->reg, reserved | clock_bits);
+-	udelay(I2C_RISEFALL_TIME); /* wait for the line to change state */
++
++	I915_WRITE(gpio->reg, reserved | clock_bits);
++	POSTING_READ(gpio->reg);
+ }
+ 
+ static void set_data(void *data, int state_high)
+ {
+-	struct intel_i2c_chan *chan = data;
+-	struct drm_device *dev = chan->drm_dev;
+-	struct drm_i915_private *dev_priv = chan->drm_dev->dev_private;
+-	u32 reserved = 0, data_bits;
+-
+-	/* On most chips, these bits must be preserved in software. */
+-	if (!IS_I830(dev) && !IS_845G(dev))
+-		reserved = I915_READ(chan->reg) & (GPIO_DATA_PULLUP_DISABLE |
+-						   GPIO_CLOCK_PULLUP_DISABLE);
++	struct intel_gpio *gpio = data;
++	struct drm_i915_private *dev_priv = gpio->dev_priv;
++	u32 reserved = get_reserved(gpio);
++	u32 data_bits;
+ 
+ 	if (state_high)
+ 		data_bits = GPIO_DATA_DIR_IN | GPIO_DATA_DIR_MASK;
+@@ -115,109 +141,313 @@ static void set_data(void *data, int state_high)
+ 		data_bits = GPIO_DATA_DIR_OUT | GPIO_DATA_DIR_MASK |
+ 			GPIO_DATA_VAL_MASK;
+ 
+-	I915_WRITE(chan->reg, reserved | data_bits);
+-	udelay(I2C_RISEFALL_TIME); /* wait for the line to change state */
++	I915_WRITE(gpio->reg, reserved | data_bits);
++	POSTING_READ(gpio->reg);
+ }
+ 
+-/* Clears the GMBUS setup.  Our driver doesn't make use of the GMBUS I2C
+- * engine, but if the BIOS leaves it enabled, then that can break our use
+- * of the bit-banging I2C interfaces.  This is notably the case with the
+- * Mac Mini in EFI mode.
+- */
+-void
+-intel_i2c_reset_gmbus(struct drm_device *dev)
++static struct i2c_adapter *
++intel_gpio_create(struct drm_i915_private *dev_priv, u32 pin)
+ {
+-	struct drm_i915_private *dev_priv = dev->dev_private;
++	static const int map_pin_to_reg[] = {
++		0,
++		GPIOB,
++		GPIOA,
++		GPIOC,
++		GPIOD,
++		GPIOE,
++		0,
++		GPIOF,
++	};
++	struct intel_gpio *gpio;
+ 
+-	if (HAS_PCH_SPLIT(dev)) {
+-		I915_WRITE(PCH_GMBUS0, 0);
+-	} else {
+-		I915_WRITE(GMBUS0, 0);
++	if (pin < 1 || pin > 7)
++		return NULL;
++
++	gpio = kzalloc(sizeof(struct intel_gpio), GFP_KERNEL);
++	if (gpio == NULL)
++		return NULL;
++
++	gpio->reg = map_pin_to_reg[pin];
++	if (HAS_PCH_SPLIT(dev_priv->dev))
++		gpio->reg += PCH_GPIOA - GPIOA;
++	gpio->dev_priv = dev_priv;
++
++	snprintf(gpio->adapter.name, I2C_NAME_SIZE, "GPIO%c", "?BACDEF?"[pin]);
++	gpio->adapter.owner = THIS_MODULE;
++	gpio->adapter.algo_data	= &gpio->algo;
++	gpio->adapter.dev.parent = &dev_priv->dev->pdev->dev;
++	gpio->algo.setsda = set_data;
++	gpio->algo.setscl = set_clock;
++	gpio->algo.getsda = get_data;
++	gpio->algo.getscl = get_clock;
++	gpio->algo.udelay = I2C_RISEFALL_TIME;
++	gpio->algo.timeout = usecs_to_jiffies(2200);
++	gpio->algo.data = gpio;
++
++	if (i2c_bit_add_bus(&gpio->adapter))
++		goto out_free;
++
++	return &gpio->adapter;
++
++out_free:
++	kfree(gpio);
++	return NULL;
++}
++
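++/*
++ * Run a transfer on the bit-banging fallback adapter: reset the GMBUS
++ * engine, wrap the transfer in the chipset bit-banging quirk, and leave
++ * both lines idling high afterwards.
++ */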
++static int
++intel_i2c_quirk_xfer(struct drm_i915_private *dev_priv,
++		     struct i2c_adapter *adapter,
++		     struct i2c_msg *msgs,
++		     int num)
++{
++	struct intel_gpio *gpio = container_of(adapter,
++					       struct intel_gpio,
++					       adapter);
++	int ret;
++
++	intel_i2c_reset(dev_priv->dev);
++
++	intel_i2c_quirk_set(dev_priv, true);
++	set_data(gpio, 1);
++	set_clock(gpio, 1);
++	udelay(I2C_RISEFALL_TIME);
++
++	ret = adapter->algo->master_xfer(adapter, msgs, num);
++
++	set_data(gpio, 1);
++	set_clock(gpio, 1);
++	intel_i2c_quirk_set(dev_priv, false);
++
++	return ret;
++}
++
++static int
++gmbus_xfer(struct i2c_adapter *adapter,
++	   struct i2c_msg *msgs,
++	   int num)
++{
++	struct intel_gmbus *bus = container_of(adapter,
++					       struct intel_gmbus,
++					       adapter);
++	struct drm_i915_private *dev_priv = adapter->algo_data;
++	int i, reg_offset;
++
++	if (bus->force_bit)
++		return intel_i2c_quirk_xfer(dev_priv,
++					    bus->force_bit, msgs, num);
++
++	reg_offset = HAS_PCH_SPLIT(dev_priv->dev) ? PCH_GMBUS0 - GMBUS0 : 0;
++
++	I915_WRITE(GMBUS0 + reg_offset, bus->reg0);
++
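++	/*
++	 * The GMBUS data buffer (GMBUS3) moves up to four bytes per access,
++	 * so message payloads are packed and unpacked a u32 at a time.  A
++	 * set GMBUS_SATOER bit means the slave failed to acknowledge and
++	 * the transfer is abandoned.
++	 */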
++	for (i = 0; i < num; i++) {
++		u16 len = msgs[i].len;
++		u8 *buf = msgs[i].buf;
++
++		if (msgs[i].flags & I2C_M_RD) {
++			I915_WRITE(GMBUS1 + reg_offset,
++				   GMBUS_CYCLE_WAIT | (i + 1 == num ? GMBUS_CYCLE_STOP : 0) |
++				   (len << GMBUS_BYTE_COUNT_SHIFT) |
++				   (msgs[i].addr << GMBUS_SLAVE_ADDR_SHIFT) |
++				   GMBUS_SLAVE_READ | GMBUS_SW_RDY);
++			POSTING_READ(GMBUS2+reg_offset);
++			do {
++				u32 val, loop = 0;
++
++				if (wait_for(I915_READ(GMBUS2 + reg_offset) & (GMBUS_SATOER | GMBUS_HW_RDY), 50))
++					goto timeout;
++				if (I915_READ(GMBUS2 + reg_offset) & GMBUS_SATOER)
++					return 0;
++
++				val = I915_READ(GMBUS3 + reg_offset);
++				do {
++					*buf++ = val & 0xff;
++					val >>= 8;
++				} while (--len && ++loop < 4);
++			} while (len);
++		} else {
++			u32 val, loop;
++
++			val = loop = 0;
++			do {
++				val |= *buf++ << (8 * loop);
++			} while (--len && ++loop < 4);
++
++			I915_WRITE(GMBUS3 + reg_offset, val);
++			I915_WRITE(GMBUS1 + reg_offset,
++				   (i + 1 == num ? GMBUS_CYCLE_STOP : GMBUS_CYCLE_WAIT) |
++				   (msgs[i].len << GMBUS_BYTE_COUNT_SHIFT) |
++				   (msgs[i].addr << GMBUS_SLAVE_ADDR_SHIFT) |
++				   GMBUS_SLAVE_WRITE | GMBUS_SW_RDY);
++			POSTING_READ(GMBUS2+reg_offset);
++
++			while (len) {
++				if (wait_for(I915_READ(GMBUS2 + reg_offset) & (GMBUS_SATOER | GMBUS_HW_RDY), 50))
++					goto timeout;
++				if (I915_READ(GMBUS2 + reg_offset) & GMBUS_SATOER)
++					return 0;
++
++				val = loop = 0;
++				do {
++					val |= *buf++ << (8 * loop);
++				} while (--len && ++loop < 4);
++
++				I915_WRITE(GMBUS3 + reg_offset, val);
++				POSTING_READ(GMBUS2+reg_offset);
++			}
++		}
++
++		if (i + 1 < num && wait_for(I915_READ(GMBUS2 + reg_offset) & (GMBUS_SATOER | GMBUS_HW_WAIT_PHASE), 50))
++			goto timeout;
++		if (I915_READ(GMBUS2 + reg_offset) & GMBUS_SATOER)
++			return 0;
+ 	}
++
++	return num;
++
++timeout:
++	DRM_INFO("GMBUS timed out, falling back to bit banging on pin %d [%s]\n",
++		 bus->reg0 & 0xff, bus->adapter.name);
++	/* Hardware may not support GMBUS over these pins? Try GPIO bitbanging instead. */
++	bus->force_bit = intel_gpio_create(dev_priv, bus->reg0 & 0xff);
++	if (!bus->force_bit)
++		return -ENOMEM;
++
++	return intel_i2c_quirk_xfer(dev_priv, bus->force_bit, msgs, num);
+ }
+ 
++static u32 gmbus_func(struct i2c_adapter *adapter)
++{
++	struct intel_gmbus *bus = container_of(adapter,
++					       struct intel_gmbus,
++					       adapter);
++
++	if (bus->force_bit)
++		bus->force_bit->algo->functionality(bus->force_bit);
++
++	return (I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL |
++		/* I2C_FUNC_10BIT_ADDR | */
++		I2C_FUNC_SMBUS_READ_BLOCK_DATA |
++		I2C_FUNC_SMBUS_BLOCK_PROC_CALL);
++}
++
++static const struct i2c_algorithm gmbus_algorithm = {
++	.master_xfer	= gmbus_xfer,
++	.functionality	= gmbus_func
++};
++
+ /**
+- * intel_i2c_create - instantiate an Intel i2c bus using the specified GPIO reg
++ * intel_gmbus_setup - instantiate all Intel i2c GMBuses
+  * @dev: DRM device
+- * @output: driver specific output device
+- * @reg: GPIO reg to use
+- * @name: name for this bus
+- * @slave_addr: slave address (if fixed)
+- *
+- * Creates and registers a new i2c bus with the Linux i2c layer, for use
+- * in output probing and control (e.g. DDC or SDVO control functions).
+- *
+- * Possible values for @reg include:
+- *   %GPIOA
+- *   %GPIOB
+- *   %GPIOC
+- *   %GPIOD
+- *   %GPIOE
+- *   %GPIOF
+- *   %GPIOG
+- *   %GPIOH
+- * see PRM for details on how these different busses are used.
+  */
+-struct i2c_adapter *intel_i2c_create(struct drm_device *dev, const u32 reg,
+-				     const char *name)
++int intel_setup_gmbus(struct drm_device *dev)
+ {
+-	struct intel_i2c_chan *chan;
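++	/*
++	 * Names for the GMBUS ports; the array index doubles as the pin
++	 * pair select value that is programmed into GMBUS0 via reg0 below.
++	 */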
++	static const char *names[GMBUS_NUM_PORTS] = {
++		"disabled",
++		"ssc",
++		"vga",
++		"panel",
++		"dpc",
++		"dpb",
++		"reserved"
++		"dpd",
++	};
++	struct drm_i915_private *dev_priv = dev->dev_private;
++	int ret, i;
+ 
+-	chan = kzalloc(sizeof(struct intel_i2c_chan), GFP_KERNEL);
+-	if (!chan)
+-		goto out_free;
++	dev_priv->gmbus = kcalloc(sizeof(struct intel_gmbus), GMBUS_NUM_PORTS,
++				  GFP_KERNEL);
++	if (dev_priv->gmbus == NULL)
++		return -ENOMEM;
+ 
+-	chan->drm_dev = dev;
+-	chan->reg = reg;
+-	snprintf(chan->adapter.name, I2C_NAME_SIZE, "intel drm %s", name);
+-	chan->adapter.owner = THIS_MODULE;
+-	chan->adapter.algo_data	= &chan->algo;
+-	chan->adapter.dev.parent = &dev->pdev->dev;
+-	chan->algo.setsda = set_data;
+-	chan->algo.setscl = set_clock;
+-	chan->algo.getsda = get_data;
+-	chan->algo.getscl = get_clock;
+-	chan->algo.udelay = 20;
+-	chan->algo.timeout = usecs_to_jiffies(2200);
+-	chan->algo.data = chan;
+-
+-	i2c_set_adapdata(&chan->adapter, chan);
+-
+-	if(i2c_bit_add_bus(&chan->adapter))
+-		goto out_free;
++	for (i = 0; i < GMBUS_NUM_PORTS; i++) {
++		struct intel_gmbus *bus = &dev_priv->gmbus[i];
+ 
+-	intel_i2c_reset_gmbus(dev);
++		bus->adapter.owner = THIS_MODULE;
++		bus->adapter.class = I2C_CLASS_DDC;
++		snprintf(bus->adapter.name,
++			 I2C_NAME_SIZE,
++			 "gmbus %s",
++			 names[i]);
+ 
+-	/* JJJ:  raise SCL and SDA? */
+-	intel_i2c_quirk_set(dev, true);
+-	set_data(chan, 1);
+-	set_clock(chan, 1);
+-	intel_i2c_quirk_set(dev, false);
+-	udelay(20);
++		bus->adapter.dev.parent = &dev->pdev->dev;
++		bus->adapter.algo_data	= dev_priv;
+ 
+-	return &chan->adapter;
++		bus->adapter.algo = &gmbus_algorithm;
++		ret = i2c_add_adapter(&bus->adapter);
++		if (ret)
++			goto err;
+ 
+-out_free:
+-	kfree(chan);
+-	return NULL;
++		/* By default use a conservative clock rate */
++		bus->reg0 = i | GMBUS_RATE_100KHZ;
++
++		/* XXX force bit banging until GMBUS is fully debugged */
++		bus->force_bit = intel_gpio_create(dev_priv, i);
++	}
++
++	intel_i2c_reset(dev_priv->dev);
++
++	return 0;
++
++err:
++	while (i--) {
++		struct intel_gmbus *bus = &dev_priv->gmbus[i];
++		i2c_del_adapter(&bus->adapter);
++	}
++	kfree(dev_priv->gmbus);
++	dev_priv->gmbus = NULL;
++	return ret;
+ }
+ 
+-/**
+- * intel_i2c_destroy - unregister and free i2c bus resources
+- * @output: channel to free
+- *
+- * Unregister the adapter from the i2c layer, then free the structure.
+- */
+-void intel_i2c_destroy(struct i2c_adapter *adapter)
++void intel_gmbus_set_speed(struct i2c_adapter *adapter, int speed)
++{
++	struct intel_gmbus *bus = to_intel_gmbus(adapter);
++
++	/* speed:
++	 * 0x0 = 100 KHz
++	 * 0x1 = 50 KHz
++	 * 0x2 = 400 KHz
++	 * 0x3 = 1000 KHz
++	 */
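++	/* The rate select bits are only cached in reg0 here; they take
++	 * effect when reg0 is written to GMBUS0 at the start of the next
++	 * transfer.
++	 */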
++	bus->reg0 = (bus->reg0 & ~(0x3 << 8)) | (speed << 8);
++}
++
++void intel_gmbus_force_bit(struct i2c_adapter *adapter, bool force_bit)
++{
++	struct intel_gmbus *bus = to_intel_gmbus(adapter);
++
++	if (force_bit) {
++		if (bus->force_bit == NULL) {
++			struct drm_i915_private *dev_priv = adapter->algo_data;
++			bus->force_bit = intel_gpio_create(dev_priv,
++							   bus->reg0 & 0xff);
++		}
++	} else {
++		if (bus->force_bit) {
++			i2c_del_adapter(bus->force_bit);
++			kfree(bus->force_bit);
++			bus->force_bit = NULL;
++		}
++	}
++}
++
++void intel_teardown_gmbus(struct drm_device *dev)
+ {
+-	struct intel_i2c_chan *chan;
++	struct drm_i915_private *dev_priv = dev->dev_private;
++	int i;
+ 
+-	if (!adapter)
++	if (dev_priv->gmbus == NULL)
+ 		return;
+ 
+-	chan = container_of(adapter,
+-			    struct intel_i2c_chan,
+-			    adapter);
+-	i2c_del_adapter(&chan->adapter);
+-	kfree(chan);
++	for (i = 0; i < GMBUS_NUM_PORTS; i++) {
++		struct intel_gmbus *bus = &dev_priv->gmbus[i];
++		if (bus->force_bit) {
++			i2c_del_adapter(bus->force_bit);
++			kfree(bus->force_bit);
++		}
++		i2c_del_adapter(&bus->adapter);
++	}
++
++	kfree(dev_priv->gmbus);
++	dev_priv->gmbus = NULL;
+ }
+diff --git a/drivers/gpu/drm/i915/intel_lvds.c b/drivers/gpu/drm/i915/intel_lvds.c
+index 6ec39a8..4324a32 100644
+--- a/drivers/gpu/drm/i915/intel_lvds.c
++++ b/drivers/gpu/drm/i915/intel_lvds.c
+@@ -43,102 +43,76 @@
+ /* Private structure for the integrated LVDS support */
+ struct intel_lvds {
+ 	struct intel_encoder base;
++
++	struct edid *edid;
++
+ 	int fitting_mode;
+ 	u32 pfit_control;
+ 	u32 pfit_pgm_ratios;
++	bool pfit_dirty;
++
++	struct drm_display_mode *fixed_mode;
+ };
+ 
+-static struct intel_lvds *enc_to_intel_lvds(struct drm_encoder *encoder)
++static struct intel_lvds *to_intel_lvds(struct drm_encoder *encoder)
+ {
+-	return container_of(enc_to_intel_encoder(encoder), struct intel_lvds, base);
++	return container_of(encoder, struct intel_lvds, base.base);
+ }
+ 
+-/**
+- * Sets the backlight level.
+- *
+- * \param level backlight level, from 0 to intel_lvds_get_max_backlight().
+- */
+-static void intel_lvds_set_backlight(struct drm_device *dev, int level)
++static struct intel_lvds *intel_attached_lvds(struct drm_connector *connector)
+ {
+-	struct drm_i915_private *dev_priv = dev->dev_private;
+-	u32 blc_pwm_ctl, reg;
+-
+-	if (HAS_PCH_SPLIT(dev))
+-		reg = BLC_PWM_CPU_CTL;
+-	else
+-		reg = BLC_PWM_CTL;
+-
+-	blc_pwm_ctl = I915_READ(reg) & ~BACKLIGHT_DUTY_CYCLE_MASK;
+-	I915_WRITE(reg, (blc_pwm_ctl |
+-				 (level << BACKLIGHT_DUTY_CYCLE_SHIFT)));
+-}
+-
+-/**
+- * Returns the maximum level of the backlight duty cycle field.
+- */
+-static u32 intel_lvds_get_max_backlight(struct drm_device *dev)
+-{
+-	struct drm_i915_private *dev_priv = dev->dev_private;
+-	u32 reg;
+-
+-	if (HAS_PCH_SPLIT(dev))
+-		reg = BLC_PWM_PCH_CTL2;
+-	else
+-		reg = BLC_PWM_CTL;
+-
+-	return ((I915_READ(reg) & BACKLIGHT_MODULATION_FREQ_MASK) >>
+-		BACKLIGHT_MODULATION_FREQ_SHIFT) * 2;
++	return container_of(intel_attached_encoder(connector),
++			    struct intel_lvds, base);
+ }
+ 
+ /**
+  * Sets the power state for the panel.
+  */
+-static void intel_lvds_set_power(struct drm_device *dev, bool on)
++static void intel_lvds_set_power(struct intel_lvds *intel_lvds, bool on)
+ {
++	struct drm_device *dev = intel_lvds->base.base.dev;
+ 	struct drm_i915_private *dev_priv = dev->dev_private;
+-	u32 ctl_reg, status_reg, lvds_reg;
++	u32 ctl_reg, lvds_reg;
+ 
+ 	if (HAS_PCH_SPLIT(dev)) {
+ 		ctl_reg = PCH_PP_CONTROL;
+-		status_reg = PCH_PP_STATUS;
+ 		lvds_reg = PCH_LVDS;
+ 	} else {
+ 		ctl_reg = PP_CONTROL;
+-		status_reg = PP_STATUS;
+ 		lvds_reg = LVDS;
+ 	}
+ 
+ 	if (on) {
+ 		I915_WRITE(lvds_reg, I915_READ(lvds_reg) | LVDS_PORT_EN);
+-		POSTING_READ(lvds_reg);
+-
+-		I915_WRITE(ctl_reg, I915_READ(ctl_reg) |
+-			   POWER_TARGET_ON);
+-		if (wait_for(I915_READ(status_reg) & PP_ON, 1000, 0))
+-			DRM_ERROR("timed out waiting to enable LVDS pipe");
+-
+-		intel_lvds_set_backlight(dev, dev_priv->backlight_duty_cycle);
++		I915_WRITE(ctl_reg, I915_READ(ctl_reg) | POWER_TARGET_ON);
++		intel_panel_set_backlight(dev, dev_priv->backlight_level);
+ 	} else {
+-		intel_lvds_set_backlight(dev, 0);
++		dev_priv->backlight_level = intel_panel_get_backlight(dev);
+ 
+-		I915_WRITE(ctl_reg, I915_READ(ctl_reg) &
+-			   ~POWER_TARGET_ON);
+-		if (wait_for((I915_READ(status_reg) & PP_ON) == 0, 1000, 0))
+-			DRM_ERROR("timed out waiting for LVDS pipe to turn off");
++		intel_panel_set_backlight(dev, 0);
++		I915_WRITE(ctl_reg, I915_READ(ctl_reg) & ~POWER_TARGET_ON);
++
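++		/* The panel fitter is only reprogrammed while the panel is
++		 * powered down, so wait for PP_ON to clear before clearing
++		 * PFIT_CONTROL.
++		 */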
++		if (intel_lvds->pfit_control) {
++			if (wait_for((I915_READ(PP_STATUS) & PP_ON) == 0, 1000))
++				DRM_ERROR("timed out waiting for panel to power off\n");
++			I915_WRITE(PFIT_CONTROL, 0);
++			intel_lvds->pfit_control = 0;
++			intel_lvds->pfit_dirty = false;
++		}
+ 
+ 		I915_WRITE(lvds_reg, I915_READ(lvds_reg) & ~LVDS_PORT_EN);
+-		POSTING_READ(lvds_reg);
+ 	}
++	POSTING_READ(lvds_reg);
+ }
+ 
+ static void intel_lvds_dpms(struct drm_encoder *encoder, int mode)
+ {
+-	struct drm_device *dev = encoder->dev;
++	struct intel_lvds *intel_lvds = to_intel_lvds(encoder);
+ 
+ 	if (mode == DRM_MODE_DPMS_ON)
+-		intel_lvds_set_power(dev, true);
++		intel_lvds_set_power(intel_lvds, true);
+ 	else
+-		intel_lvds_set_power(dev, false);
++		intel_lvds_set_power(intel_lvds, false);
+ 
+ 	/* XXX: We never power down the LVDS pairs. */
+ }
+@@ -146,16 +120,13 @@ static void intel_lvds_dpms(struct drm_encoder *encoder, int mode)
+ static int intel_lvds_mode_valid(struct drm_connector *connector,
+ 				 struct drm_display_mode *mode)
+ {
+-	struct drm_device *dev = connector->dev;
+-	struct drm_i915_private *dev_priv = dev->dev_private;
+-	struct drm_display_mode *fixed_mode = dev_priv->panel_fixed_mode;
++	struct intel_lvds *intel_lvds = intel_attached_lvds(connector);
++	struct drm_display_mode *fixed_mode = intel_lvds->fixed_mode;
+ 
+-	if (fixed_mode)	{
+-		if (mode->hdisplay > fixed_mode->hdisplay)
+-			return MODE_PANEL;
+-		if (mode->vdisplay > fixed_mode->vdisplay)
+-			return MODE_PANEL;
+-	}
++	if (mode->hdisplay > fixed_mode->hdisplay)
++		return MODE_PANEL;
++	if (mode->vdisplay > fixed_mode->vdisplay)
++		return MODE_PANEL;
+ 
+ 	return MODE_OK;
+ }
+@@ -223,12 +194,12 @@ static bool intel_lvds_mode_fixup(struct drm_encoder *encoder,
+ 	struct drm_device *dev = encoder->dev;
+ 	struct drm_i915_private *dev_priv = dev->dev_private;
+ 	struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc);
+-	struct intel_lvds *intel_lvds = enc_to_intel_lvds(encoder);
++	struct intel_lvds *intel_lvds = to_intel_lvds(encoder);
+ 	struct drm_encoder *tmp_encoder;
+ 	u32 pfit_control = 0, pfit_pgm_ratios = 0, border = 0;
+ 
+ 	/* Should never happen!! */
+-	if (!IS_I965G(dev) && intel_crtc->pipe == 0) {
++	if (INTEL_INFO(dev)->gen < 4 && intel_crtc->pipe == 0) {
+ 		DRM_ERROR("Can't support LVDS on pipe A\n");
+ 		return false;
+ 	}
+@@ -241,9 +212,6 @@ static bool intel_lvds_mode_fixup(struct drm_encoder *encoder,
+ 			return false;
+ 		}
+ 	}
+-	/* If we don't have a panel mode, there is nothing we can do */
+-	if (dev_priv->panel_fixed_mode == NULL)
+-		return true;
+ 
+ 	/*
+ 	 * We have timings from the BIOS for the panel, put them in
+@@ -251,7 +219,7 @@ static bool intel_lvds_mode_fixup(struct drm_encoder *encoder,
+ 	 * with the panel scaling set up to source from the H/VDisplay
+ 	 * of the original mode.
+ 	 */
+-	intel_fixed_panel_mode(dev_priv->panel_fixed_mode, adjusted_mode);
++	intel_fixed_panel_mode(intel_lvds->fixed_mode, adjusted_mode);
+ 
+ 	if (HAS_PCH_SPLIT(dev)) {
+ 		intel_pch_panel_fitting(dev, intel_lvds->fitting_mode,
+@@ -260,8 +228,8 @@ static bool intel_lvds_mode_fixup(struct drm_encoder *encoder,
+ 	}
+ 
+ 	/* Make sure pre-965s set dither correctly */
+-	if (!IS_I965G(dev)) {
+-		if (dev_priv->panel_wants_dither || dev_priv->lvds_dither)
++	if (INTEL_INFO(dev)->gen < 4) {
++		if (dev_priv->lvds_dither)
+ 			pfit_control |= PANEL_8TO6_DITHER_ENABLE;
+ 	}
+ 
+@@ -271,7 +239,7 @@ static bool intel_lvds_mode_fixup(struct drm_encoder *encoder,
+ 		goto out;
+ 
+ 	/* 965+ wants fuzzy fitting */
+-	if (IS_I965G(dev))
++	if (INTEL_INFO(dev)->gen >= 4)
+ 		pfit_control |= ((intel_crtc->pipe << PFIT_PIPE_SHIFT) |
+ 				 PFIT_FILTER_FUZZY);
+ 
+@@ -297,7 +265,7 @@ static bool intel_lvds_mode_fixup(struct drm_encoder *encoder,
+ 
+ 	case DRM_MODE_SCALE_ASPECT:
+ 		/* Scale but preserve the aspect ratio */
+-		if (IS_I965G(dev)) {
++		if (INTEL_INFO(dev)->gen >= 4) {
+ 			u32 scaled_width = adjusted_mode->hdisplay * mode->vdisplay;
+ 			u32 scaled_height = mode->hdisplay * adjusted_mode->vdisplay;
+ 
+@@ -356,7 +324,7 @@ static bool intel_lvds_mode_fixup(struct drm_encoder *encoder,
+ 		 * Fortunately this is all done for us in hw.
+ 		 */
+ 		pfit_control |= PFIT_ENABLE;
+-		if (IS_I965G(dev))
++		if (INTEL_INFO(dev)->gen >= 4)
+ 			pfit_control |= PFIT_SCALING_AUTO;
+ 		else
+ 			pfit_control |= (VERT_AUTO_SCALE | HORIZ_AUTO_SCALE |
+@@ -369,8 +337,12 @@ static bool intel_lvds_mode_fixup(struct drm_encoder *encoder,
+ 	}
+ 
+ out:
+-	intel_lvds->pfit_control = pfit_control;
+-	intel_lvds->pfit_pgm_ratios = pfit_pgm_ratios;
++	if (pfit_control != intel_lvds->pfit_control ||
++	    pfit_pgm_ratios != intel_lvds->pfit_pgm_ratios) {
++		intel_lvds->pfit_control = pfit_control;
++		intel_lvds->pfit_pgm_ratios = pfit_pgm_ratios;
++		intel_lvds->pfit_dirty = true;
++	}
+ 	dev_priv->lvds_border_bits = border;
+ 
+ 	/*
+@@ -386,30 +358,60 @@ static void intel_lvds_prepare(struct drm_encoder *encoder)
+ {
+ 	struct drm_device *dev = encoder->dev;
+ 	struct drm_i915_private *dev_priv = dev->dev_private;
+-	u32 reg;
+-
+-	if (HAS_PCH_SPLIT(dev))
+-		reg = BLC_PWM_CPU_CTL;
+-	else
+-		reg = BLC_PWM_CTL;
+-
+-	dev_priv->saveBLC_PWM_CTL = I915_READ(reg);
+-	dev_priv->backlight_duty_cycle = (dev_priv->saveBLC_PWM_CTL &
+-				       BACKLIGHT_DUTY_CYCLE_MASK);
++	struct intel_lvds *intel_lvds = to_intel_lvds(encoder);
++
++	dev_priv->backlight_level = intel_panel_get_backlight(dev);
++
++	/* We try to do the minimum that is necessary in order to unlock
++	 * the registers for mode setting.
++	 *
++	 * On Ironlake, this is quite simple as we just set the unlock key
++	 * and ignore all subtleties. (This may cause some issues...)
++	 *
++	 * Prior to Ironlake, we must disable the pipe if we want to adjust
++	 * the panel fitter. However at all other times we can just reset
++	 * the registers regardless.
++	 */
+ 
+-	intel_lvds_set_power(dev, false);
++	if (HAS_PCH_SPLIT(dev)) {
++		I915_WRITE(PCH_PP_CONTROL,
++			   I915_READ(PCH_PP_CONTROL) | PANEL_UNLOCK_REGS);
++	} else if (intel_lvds->pfit_dirty) {
++		I915_WRITE(PP_CONTROL,
++			   (I915_READ(PP_CONTROL) | PANEL_UNLOCK_REGS)
++			   & ~POWER_TARGET_ON);
++	} else {
++		I915_WRITE(PP_CONTROL,
++			   I915_READ(PP_CONTROL) | PANEL_UNLOCK_REGS);
++	}
+ }
+ 
+-static void intel_lvds_commit( struct drm_encoder *encoder)
++static void intel_lvds_commit(struct drm_encoder *encoder)
+ {
+ 	struct drm_device *dev = encoder->dev;
+ 	struct drm_i915_private *dev_priv = dev->dev_private;
++	struct intel_lvds *intel_lvds = to_intel_lvds(encoder);
+ 
+-	if (dev_priv->backlight_duty_cycle == 0)
+-		dev_priv->backlight_duty_cycle =
+-			intel_lvds_get_max_backlight(dev);
++	if (dev_priv->backlight_level == 0)
++		dev_priv->backlight_level = intel_panel_get_max_backlight(dev);
+ 
+-	intel_lvds_set_power(dev, true);
++	/* Undo any unlocking done in prepare to prevent accidental
++	 * adjustment of the registers.
++	 */
++	if (HAS_PCH_SPLIT(dev)) {
++		u32 val = I915_READ(PCH_PP_CONTROL);
++		if ((val & PANEL_UNLOCK_REGS) == PANEL_UNLOCK_REGS)
++			I915_WRITE(PCH_PP_CONTROL, val & 0x3);
++	} else {
++		u32 val = I915_READ(PP_CONTROL);
++		if ((val & PANEL_UNLOCK_REGS) == PANEL_UNLOCK_REGS)
++			I915_WRITE(PP_CONTROL, val & 0x3);
++	}
++
++	/* Always do a full power on as we do not know what state
++	 * we were left in.
++	 */
++	intel_lvds_set_power(intel_lvds, true);
+ }
+ 
+ static void intel_lvds_mode_set(struct drm_encoder *encoder,
+@@ -418,7 +420,7 @@ static void intel_lvds_mode_set(struct drm_encoder *encoder,
+ {
+ 	struct drm_device *dev = encoder->dev;
+ 	struct drm_i915_private *dev_priv = dev->dev_private;
+-	struct intel_lvds *intel_lvds = enc_to_intel_lvds(encoder);
++	struct intel_lvds *intel_lvds = to_intel_lvds(encoder);
+ 
+ 	/*
+ 	 * The LVDS pin pair will already have been turned on in the
+@@ -429,13 +431,23 @@ static void intel_lvds_mode_set(struct drm_encoder *encoder,
+ 	if (HAS_PCH_SPLIT(dev))
+ 		return;
+ 
++	if (!intel_lvds->pfit_dirty)
++		return;
++
+ 	/*
+ 	 * Enable automatic panel scaling so that non-native modes fill the
+ 	 * screen.  Should be enabled before the pipe is enabled, according to
+ 	 * register description and PRM.
+ 	 */
++	DRM_DEBUG_KMS("applying panel-fitter: %x, %x\n",
++		      intel_lvds->pfit_control,
++		      intel_lvds->pfit_pgm_ratios);
++	if (wait_for((I915_READ(PP_STATUS) & PP_ON) == 0, 1000))
++		DRM_ERROR("timed out waiting for panel to power off\n");
++
+ 	I915_WRITE(PFIT_PGM_RATIOS, intel_lvds->pfit_pgm_ratios);
+ 	I915_WRITE(PFIT_CONTROL, intel_lvds->pfit_control);
++	intel_lvds->pfit_dirty = false;
+ }
+ 
+ /**
+@@ -465,38 +477,19 @@ intel_lvds_detect(struct drm_connector *connector, bool force)
+  */
+ static int intel_lvds_get_modes(struct drm_connector *connector)
+ {
++	struct intel_lvds *intel_lvds = intel_attached_lvds(connector);
+ 	struct drm_device *dev = connector->dev;
+-	struct drm_encoder *encoder = intel_attached_encoder(connector);
+-	struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder);
+-	struct drm_i915_private *dev_priv = dev->dev_private;
+-	int ret = 0;
+-
+-	if (dev_priv->lvds_edid_good) {
+-		ret = intel_ddc_get_modes(connector, intel_encoder->ddc_bus);
+-
+-		if (ret)
+-			return ret;
+-	}
++	struct drm_display_mode *mode;
+ 
+-	/* Didn't get an EDID, so
+-	 * Set wide sync ranges so we get all modes
+-	 * handed to valid_mode for checking
+-	 */
+-	connector->display_info.min_vfreq = 0;
+-	connector->display_info.max_vfreq = 200;
+-	connector->display_info.min_hfreq = 0;
+-	connector->display_info.max_hfreq = 200;
+-
+-	if (dev_priv->panel_fixed_mode != NULL) {
+-		struct drm_display_mode *mode;
+-
+-		mode = drm_mode_duplicate(dev, dev_priv->panel_fixed_mode);
+-		drm_mode_probed_add(connector, mode);
++	if (intel_lvds->edid)
++		return drm_add_edid_modes(connector, intel_lvds->edid);
+ 
+-		return 1;
+-	}
++	mode = drm_mode_duplicate(dev, intel_lvds->fixed_mode);
++	if (mode == 0)
++		return 0;
+ 
+-	return 0;
++	drm_mode_probed_add(connector, mode);
++	return 1;
+ }
+ 
+ static int intel_no_modeset_on_lid_dmi_callback(const struct dmi_system_id *id)
+@@ -587,18 +580,17 @@ static int intel_lvds_set_property(struct drm_connector *connector,
+ 				   struct drm_property *property,
+ 				   uint64_t value)
+ {
++	struct intel_lvds *intel_lvds = intel_attached_lvds(connector);
+ 	struct drm_device *dev = connector->dev;
+ 
+-	if (property == dev->mode_config.scaling_mode_property &&
+-				connector->encoder) {
+-		struct drm_crtc *crtc = connector->encoder->crtc;
+-		struct drm_encoder *encoder = connector->encoder;
+-		struct intel_lvds *intel_lvds = enc_to_intel_lvds(encoder);
++	if (property == dev->mode_config.scaling_mode_property) {
++		struct drm_crtc *crtc = intel_lvds->base.base.crtc;
+ 
+ 		if (value == DRM_MODE_SCALE_NONE) {
+ 			DRM_DEBUG_KMS("no scaling not supported\n");
+-			return 0;
++			return -EINVAL;
+ 		}
++
+ 		if (intel_lvds->fitting_mode == value) {
+ 			/* the LVDS scaling property is not changed */
+ 			return 0;
+@@ -628,7 +620,7 @@ static const struct drm_encoder_helper_funcs intel_lvds_helper_funcs = {
+ static const struct drm_connector_helper_funcs intel_lvds_connector_helper_funcs = {
+ 	.get_modes = intel_lvds_get_modes,
+ 	.mode_valid = intel_lvds_mode_valid,
+-	.best_encoder = intel_attached_encoder,
++	.best_encoder = intel_best_encoder,
+ };
+ 
+ static const struct drm_connector_funcs intel_lvds_connector_funcs = {
+@@ -726,16 +718,14 @@ static const struct dmi_system_id intel_no_lvds[] = {
+  * Find the reduced downclock for LVDS in EDID.
+  */
+ static void intel_find_lvds_downclock(struct drm_device *dev,
+-				struct drm_connector *connector)
++				      struct drm_display_mode *fixed_mode,
++				      struct drm_connector *connector)
+ {
+ 	struct drm_i915_private *dev_priv = dev->dev_private;
+-	struct drm_display_mode *scan, *panel_fixed_mode;
++	struct drm_display_mode *scan;
+ 	int temp_downclock;
+ 
+-	panel_fixed_mode = dev_priv->panel_fixed_mode;
+-	temp_downclock = panel_fixed_mode->clock;
+-
+-	mutex_lock(&dev->mode_config.mutex);
++	temp_downclock = fixed_mode->clock;
+ 	list_for_each_entry(scan, &connector->probed_modes, head) {
+ 		/*
+ 		 * If one mode has the same resolution with the fixed_panel
+@@ -744,14 +734,14 @@ static void intel_find_lvds_downclock(struct drm_device *dev,
+ 		 * case we can set the different FPx0/1 to dynamically select
+ 		 * between low and high frequency.
+ 		 */
+-		if (scan->hdisplay == panel_fixed_mode->hdisplay &&
+-			scan->hsync_start == panel_fixed_mode->hsync_start &&
+-			scan->hsync_end == panel_fixed_mode->hsync_end &&
+-			scan->htotal == panel_fixed_mode->htotal &&
+-			scan->vdisplay == panel_fixed_mode->vdisplay &&
+-			scan->vsync_start == panel_fixed_mode->vsync_start &&
+-			scan->vsync_end == panel_fixed_mode->vsync_end &&
+-			scan->vtotal == panel_fixed_mode->vtotal) {
++		if (scan->hdisplay == fixed_mode->hdisplay &&
++		    scan->hsync_start == fixed_mode->hsync_start &&
++		    scan->hsync_end == fixed_mode->hsync_end &&
++		    scan->htotal == fixed_mode->htotal &&
++		    scan->vdisplay == fixed_mode->vdisplay &&
++		    scan->vsync_start == fixed_mode->vsync_start &&
++		    scan->vsync_end == fixed_mode->vsync_end &&
++		    scan->vtotal == fixed_mode->vtotal) {
+ 			if (scan->clock < temp_downclock) {
+ 				/*
+ 				 * The downclock is already found. But we
+@@ -761,17 +751,14 @@ static void intel_find_lvds_downclock(struct drm_device *dev,
+ 			}
+ 		}
+ 	}
+-	mutex_unlock(&dev->mode_config.mutex);
+-	if (temp_downclock < panel_fixed_mode->clock &&
+-	    i915_lvds_downclock) {
++	if (temp_downclock < fixed_mode->clock && i915_lvds_downclock) {
+ 		/* We found the downclock for LVDS. */
+ 		dev_priv->lvds_downclock_avail = 1;
+ 		dev_priv->lvds_downclock = temp_downclock;
+ 		DRM_DEBUG_KMS("LVDS downclock is found in EDID. "
+-				"Normal clock %dKhz, downclock %dKhz\n",
+-				panel_fixed_mode->clock, temp_downclock);
++			      "Normal clock %dKhz, downclock %dKhz\n",
++			      fixed_mode->clock, temp_downclock);
+ 	}
+-	return;
+ }
+ 
+ /*
+@@ -780,38 +767,67 @@ static void intel_find_lvds_downclock(struct drm_device *dev,
+  * If it is present, return 1.
+  * If it is not present, return false.
+  * If no child dev is parsed from VBT, it assumes that the LVDS is present.
+- * Note: The addin_offset should also be checked for LVDS panel.
+- * Only when it is non-zero, it is assumed that it is present.
+  */
+-static int lvds_is_present_in_vbt(struct drm_device *dev)
++static bool lvds_is_present_in_vbt(struct drm_device *dev,
++				   u8 *i2c_pin)
+ {
+ 	struct drm_i915_private *dev_priv = dev->dev_private;
+-	struct child_device_config *p_child;
+-	int i, ret;
++	int i;
+ 
+ 	if (!dev_priv->child_dev_num)
+-		return 1;
++		return true;
+ 
+-	ret = 0;
+ 	for (i = 0; i < dev_priv->child_dev_num; i++) {
+-		p_child = dev_priv->child_dev + i;
+-		/*
+-		 * If the device type is not LFP, continue.
+-		 * If the device type is 0x22, it is also regarded as LFP.
++		struct child_device_config *child = dev_priv->child_dev + i;
++
++		/* If the device type is not LFP, continue.
++		 * We have to check both the new identifiers as well as the
++		 * old for compatibility with some BIOSes.
+ 		 */
+-		if (p_child->device_type != DEVICE_TYPE_INT_LFP &&
+-			p_child->device_type != DEVICE_TYPE_LFP)
++		if (child->device_type != DEVICE_TYPE_INT_LFP &&
++		    child->device_type != DEVICE_TYPE_LFP)
+ 			continue;
+ 
+-		/* The addin_offset should be checked. Only when it is
+-		 * non-zero, it is regarded as present.
++		if (child->i2c_pin)
++		    *i2c_pin = child->i2c_pin;
++
++		/* However, we cannot trust the BIOS writers to populate
++		 * the VBT correctly.  Since LVDS requires additional
++		 * information from AIM blocks, a non-zero addin offset is
++		 * a good indicator that the LVDS is actually present.
+ 		 */
+-		if (p_child->addin_offset) {
+-			ret = 1;
+-			break;
+-		}
++		if (child->addin_offset)
++			return true;
++
++		/* But even then some BIOS writers perform some black magic
++		 * and instantiate the device without reference to any
++		 * additional data.  Trust that if the VBT was written into
++		 * the OpRegion then they have validated the LVDS's existence.
++		 */
++		if (dev_priv->opregion.vbt)
++			return true;
+ 	}
+-	return ret;
++
++	return false;
++}
++
++static bool intel_lvds_ddc_probe(struct drm_device *dev, u8 pin)
++{
++	struct drm_i915_private *dev_priv = dev->dev_private;
++	u8 buf = 0;
++	struct i2c_msg msgs[] = {
++		{
++			.addr = 0xA0,
++			.flags = 0,
++			.len = 1,
++			.buf = &buf,
++		},
++	};
++	struct i2c_adapter *i2c = &dev_priv->gmbus[pin].adapter;
++	/* XXX this only appears to work when using GMBUS */
++	if (intel_gmbus_is_forced_bit(i2c))
++		return true;
++	return i2c_transfer(i2c, msgs, 1) == 1;
+ }
+ 
+ /**
+@@ -832,13 +848,15 @@ void intel_lvds_init(struct drm_device *dev)
+ 	struct drm_display_mode *scan; /* *modes, *bios_mode; */
+ 	struct drm_crtc *crtc;
+ 	u32 lvds;
+-	int pipe, gpio = GPIOC;
++	int pipe;
++	u8 pin;
+ 
+ 	/* Skip init on machines we know falsely report LVDS */
+ 	if (dmi_check_system(intel_no_lvds))
+ 		return;
+ 
+-	if (!lvds_is_present_in_vbt(dev)) {
++	pin = GMBUS_PORT_PANEL;
++	if (!lvds_is_present_in_vbt(dev, &pin)) {
+ 		DRM_DEBUG_KMS("LVDS is not present in VBT\n");
+ 		return;
+ 	}
+@@ -846,11 +864,15 @@ void intel_lvds_init(struct drm_device *dev)
+ 	if (HAS_PCH_SPLIT(dev)) {
+ 		if ((I915_READ(PCH_LVDS) & LVDS_DETECTED) == 0)
+ 			return;
+-		if (dev_priv->edp_support) {
++		if (dev_priv->edp.support) {
+ 			DRM_DEBUG_KMS("disable LVDS for eDP support\n");
+ 			return;
+ 		}
+-		gpio = PCH_GPIOC;
++	}
++
++	if (!intel_lvds_ddc_probe(dev, pin)) {
++		DRM_DEBUG_KMS("LVDS did not respond to DDC probe\n");
++		return;
+ 	}
+ 
+ 	intel_lvds = kzalloc(sizeof(struct intel_lvds), GFP_KERNEL);
+@@ -864,16 +886,20 @@ void intel_lvds_init(struct drm_device *dev)
+ 		return;
+ 	}
+ 
++	if (!HAS_PCH_SPLIT(dev)) {
++		intel_lvds->pfit_control = I915_READ(PFIT_CONTROL);
++	}
++
+ 	intel_encoder = &intel_lvds->base;
+-	encoder = &intel_encoder->enc;
++	encoder = &intel_encoder->base;
+ 	connector = &intel_connector->base;
+ 	drm_connector_init(dev, &intel_connector->base, &intel_lvds_connector_funcs,
+ 			   DRM_MODE_CONNECTOR_LVDS);
+ 
+-	drm_encoder_init(dev, &intel_encoder->enc, &intel_lvds_enc_funcs,
++	drm_encoder_init(dev, &intel_encoder->base, &intel_lvds_enc_funcs,
+ 			 DRM_MODE_ENCODER_LVDS);
+ 
+-	drm_mode_connector_attach_encoder(&intel_connector->base, &intel_encoder->enc);
++	intel_connector_attach_encoder(intel_connector, intel_encoder);
+ 	intel_encoder->type = INTEL_OUTPUT_LVDS;
+ 
+ 	intel_encoder->clone_mask = (1 << INTEL_LVDS_CLONE_BIT);
+@@ -904,43 +930,50 @@ void intel_lvds_init(struct drm_device *dev)
+ 	 *    if closed, act like it's not there for now
+ 	 */
+ 
+-	/* Set up the DDC bus. */
+-	intel_encoder->ddc_bus = intel_i2c_create(dev, gpio, "LVDSDDC_C");
+-	if (!intel_encoder->ddc_bus) {
+-		dev_printk(KERN_ERR, &dev->pdev->dev, "DDC bus registration "
+-			   "failed.\n");
+-		goto failed;
+-	}
+-
+ 	/*
+ 	 * Attempt to get the fixed panel mode from DDC.  Assume that the
+ 	 * preferred mode is the right one.
+ 	 */
+-	dev_priv->lvds_edid_good = true;
+-
+-	if (!intel_ddc_get_modes(connector, intel_encoder->ddc_bus))
+-		dev_priv->lvds_edid_good = false;
++	intel_lvds->edid = drm_get_edid(connector,
++					&dev_priv->gmbus[pin].adapter);
++	if (intel_lvds->edid) {
++		if (drm_add_edid_modes(connector,
++				       intel_lvds->edid)) {
++			drm_mode_connector_update_edid_property(connector,
++								intel_lvds->edid);
++		} else {
++			kfree(intel_lvds->edid);
++			intel_lvds->edid = NULL;
++		}
++	}
++	if (!intel_lvds->edid) {
++		/* Didn't get an EDID, so
++		 * Set wide sync ranges so we get all modes
++		 * handed to valid_mode for checking
++		 */
++		connector->display_info.min_vfreq = 0;
++		connector->display_info.max_vfreq = 200;
++		connector->display_info.min_hfreq = 0;
++		connector->display_info.max_hfreq = 200;
++	}
+ 
+ 	list_for_each_entry(scan, &connector->probed_modes, head) {
+-		mutex_lock(&dev->mode_config.mutex);
+ 		if (scan->type & DRM_MODE_TYPE_PREFERRED) {
+-			dev_priv->panel_fixed_mode =
++			intel_lvds->fixed_mode =
+ 				drm_mode_duplicate(dev, scan);
+-			mutex_unlock(&dev->mode_config.mutex);
+-			intel_find_lvds_downclock(dev, connector);
++			intel_find_lvds_downclock(dev,
++						  intel_lvds->fixed_mode,
++						  connector);
+ 			goto out;
+ 		}
+-		mutex_unlock(&dev->mode_config.mutex);
+ 	}
+ 
+ 	/* Failed to get EDID, what about VBT? */
+ 	if (dev_priv->lfp_lvds_vbt_mode) {
+-		mutex_lock(&dev->mode_config.mutex);
+-		dev_priv->panel_fixed_mode =
++		intel_lvds->fixed_mode =
+ 			drm_mode_duplicate(dev, dev_priv->lfp_lvds_vbt_mode);
+-		mutex_unlock(&dev->mode_config.mutex);
+-		if (dev_priv->panel_fixed_mode) {
+-			dev_priv->panel_fixed_mode->type |=
++		if (intel_lvds->fixed_mode) {
++			intel_lvds->fixed_mode->type |=
+ 				DRM_MODE_TYPE_PREFERRED;
+ 			goto out;
+ 		}
+@@ -958,19 +991,19 @@ void intel_lvds_init(struct drm_device *dev)
+ 
+ 	lvds = I915_READ(LVDS);
+ 	pipe = (lvds & LVDS_PIPEB_SELECT) ? 1 : 0;
+-	crtc = intel_get_crtc_from_pipe(dev, pipe);
++	crtc = intel_get_crtc_for_pipe(dev, pipe);
+ 
+ 	if (crtc && (lvds & LVDS_PORT_EN)) {
+-		dev_priv->panel_fixed_mode = intel_crtc_mode_get(dev, crtc);
+-		if (dev_priv->panel_fixed_mode) {
+-			dev_priv->panel_fixed_mode->type |=
++		intel_lvds->fixed_mode = intel_crtc_mode_get(dev, crtc);
++		if (intel_lvds->fixed_mode) {
++			intel_lvds->fixed_mode->type |=
+ 				DRM_MODE_TYPE_PREFERRED;
+ 			goto out;
+ 		}
+ 	}
+ 
+ 	/* If we still don't have a mode after all that, give up. */
+-	if (!dev_priv->panel_fixed_mode)
++	if (!intel_lvds->fixed_mode)
+ 		goto failed;
+ 
+ out:
+@@ -997,8 +1030,6 @@ out:
+ 
+ failed:
+ 	DRM_DEBUG_KMS("No LVDS modes found, disabling.\n");
+-	if (intel_encoder->ddc_bus)
+-		intel_i2c_destroy(intel_encoder->ddc_bus);
+ 	drm_connector_cleanup(connector);
+ 	drm_encoder_cleanup(encoder);
+ 	kfree(intel_lvds);
+diff --git a/drivers/gpu/drm/i915/intel_modes.c b/drivers/gpu/drm/i915/intel_modes.c
+index 4b1fd3d..f70b7cf 100644
+--- a/drivers/gpu/drm/i915/intel_modes.c
++++ b/drivers/gpu/drm/i915/intel_modes.c
+@@ -1,6 +1,6 @@
+ /*
+  * Copyright (c) 2007 Dave Airlie <airlied@linux.ie>
+- * Copyright (c) 2007 Intel Corporation
++ * Copyright (c) 2007, 2010 Intel Corporation
+  *   Jesse Barnes <jesse.barnes@intel.com>
+  *
+  * Permission is hereby granted, free of charge, to any person obtaining a
+@@ -34,11 +34,11 @@
+  * intel_ddc_probe
+  *
+  */
+-bool intel_ddc_probe(struct intel_encoder *intel_encoder)
++bool intel_ddc_probe(struct intel_encoder *intel_encoder, int ddc_bus)
+ {
++	struct drm_i915_private *dev_priv = intel_encoder->base.dev->dev_private;
+ 	u8 out_buf[] = { 0x0, 0x0};
+ 	u8 buf[2];
+-	int ret;
+ 	struct i2c_msg msgs[] = {
+ 		{
+ 			.addr = 0x50,
+@@ -54,13 +54,7 @@ bool intel_ddc_probe(struct intel_encoder *intel_encoder)
+ 		}
+ 	};
+ 
+-	intel_i2c_quirk_set(intel_encoder->enc.dev, true);
+-	ret = i2c_transfer(intel_encoder->ddc_bus, msgs, 2);
+-	intel_i2c_quirk_set(intel_encoder->enc.dev, false);
+-	if (ret == 2)
+-		return true;
+-
+-	return false;
++	return i2c_transfer(&dev_priv->gmbus[ddc_bus].adapter, msgs, 2) == 2;
+ }
+ 
+ /**
+@@ -76,9 +70,7 @@ int intel_ddc_get_modes(struct drm_connector *connector,
+ 	struct edid *edid;
+ 	int ret = 0;
+ 
+-	intel_i2c_quirk_set(connector->dev, true);
+ 	edid = drm_get_edid(connector, adapter);
+-	intel_i2c_quirk_set(connector->dev, false);
+ 	if (edid) {
+ 		drm_mode_connector_update_edid_property(connector, edid);
+ 		ret = drm_add_edid_modes(connector, edid);
+diff --git a/drivers/gpu/drm/i915/intel_opregion.c b/drivers/gpu/drm/i915/intel_opregion.c
+new file mode 100644
+index 0000000..9b0d9a8
+--- /dev/null
++++ b/drivers/gpu/drm/i915/intel_opregion.c
+@@ -0,0 +1,517 @@
++/*
++ * Copyright 2008 Intel Corporation <hong.liu@intel.com>
++ * Copyright 2008 Red Hat <mjg@redhat.com>
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining
++ * a copy of this software and associated documentation files (the
++ * "Software"), to deal in the Software without restriction, including
++ * without limitation the rights to use, copy, modify, merge, publish,
++ * distribute, sub license, and/or sell copies of the Software, and to
++ * permit persons to whom the Software is furnished to do so, subject to
++ * the following conditions:
++ *
++ * The above copyright notice and this permission notice (including the
++ * next paragraph) shall be included in all copies or substantial
++ * portions of the Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
++ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
++ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
++ * NON-INFRINGEMENT.  IN NO EVENT SHALL INTEL AND/OR ITS SUPPLIERS BE
++ * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
++ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
++ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
++ * SOFTWARE.
++ *
++ */
++
++#include <linux/acpi.h>
++#include <acpi/video.h>
++
++#include "drmP.h"
++#include "i915_drm.h"
++#include "i915_drv.h"
++#include "intel_drv.h"
++
++#define PCI_ASLE 0xe4
++#define PCI_ASLS 0xfc
++
++#define OPREGION_HEADER_OFFSET 0
++#define OPREGION_ACPI_OFFSET   0x100
++#define OPREGION_SWSCI_OFFSET  0x200
++#define OPREGION_ASLE_OFFSET   0x300
++#define OPREGION_VBT_OFFSET    0x400
++
++#define OPREGION_SIGNATURE "IntelGraphicsMem"
++#define MBOX_ACPI      (1<<0)
++#define MBOX_SWSCI     (1<<1)
++#define MBOX_ASLE      (1<<2)
++
++struct opregion_header {
++       u8 signature[16];
++       u32 size;
++       u32 opregion_ver;
++       u8 bios_ver[32];
++       u8 vbios_ver[16];
++       u8 driver_ver[16];
++       u32 mboxes;
++       u8 reserved[164];
++} __attribute__((packed));
++
++/* OpRegion mailbox #1: public ACPI methods */
++struct opregion_acpi {
++       u32 drdy;       /* driver readiness */
++       u32 csts;       /* notification status */
++       u32 cevt;       /* current event */
++       u8 rsvd1[20];
++       u32 didl[8];    /* supported display devices ID list */
++       u32 cpdl[8];    /* currently presented display list */
++       u32 cadl[8];    /* currently active display list */
++       u32 nadl[8];    /* next active devices list */
++       u32 aslp;       /* ASL sleep time-out */
++       u32 tidx;       /* toggle table index */
++       u32 chpd;       /* current hotplug enable indicator */
++       u32 clid;       /* current lid state*/
++       u32 cdck;       /* current docking state */
++       u32 sxsw;       /* Sx state resume */
++       u32 evts;       /* ASL supported events */
++       u32 cnot;       /* current OS notification */
++       u32 nrdy;       /* driver status */
++       u8 rsvd2[60];
++} __attribute__((packed));
++
++/* OpRegion mailbox #2: SWSCI */
++struct opregion_swsci {
++       u32 scic;       /* SWSCI command|status|data */
++       u32 parm;       /* command parameters */
++       u32 dslp;       /* driver sleep time-out */
++       u8 rsvd[244];
++} __attribute__((packed));
++
++/* OpRegion mailbox #3: ASLE */
++struct opregion_asle {
++       u32 ardy;       /* driver readiness */
++       u32 aslc;       /* ASLE interrupt command */
++       u32 tche;       /* technology enabled indicator */
++       u32 alsi;       /* current ALS illuminance reading */
++       u32 bclp;       /* backlight brightness to set */
++       u32 pfit;       /* panel fitting state */
++       u32 cblv;       /* current brightness level */
++       u16 bclm[20];   /* backlight level duty cycle mapping table */
++       u32 cpfm;       /* current panel fitting mode */
++       u32 epfm;       /* enabled panel fitting modes */
++       u8 plut[74];    /* panel LUT and identifier */
++       u32 pfmb;       /* PWM freq and min brightness */
++       u8 rsvd[102];
++} __attribute__((packed));
++
++/* ASLE irq request bits */
++#define ASLE_SET_ALS_ILLUM     (1 << 0)
++#define ASLE_SET_BACKLIGHT     (1 << 1)
++#define ASLE_SET_PFIT          (1 << 2)
++#define ASLE_SET_PWM_FREQ      (1 << 3)
++#define ASLE_REQ_MSK           0xf
++
++/* response bits of ASLE irq request */
++#define ASLE_ALS_ILLUM_FAILED	(1<<10)
++#define ASLE_BACKLIGHT_FAILED	(1<<12)
++#define ASLE_PFIT_FAILED	(1<<14)
++#define ASLE_PWM_FREQ_FAILED	(1<<16)
++
++/* ASLE backlight brightness to set */
++#define ASLE_BCLP_VALID                (1<<31)
++#define ASLE_BCLP_MSK          (~(1<<31))
++
++/* ASLE panel fitting request */
++#define ASLE_PFIT_VALID         (1<<31)
++#define ASLE_PFIT_CENTER (1<<0)
++#define ASLE_PFIT_STRETCH_TEXT (1<<1)
++#define ASLE_PFIT_STRETCH_GFX (1<<2)
++
++/* PWM frequency and minimum brightness */
++#define ASLE_PFMB_BRIGHTNESS_MASK (0xff)
++#define ASLE_PFMB_BRIGHTNESS_VALID (1<<8)
++#define ASLE_PFMB_PWM_MASK (0x7ffffe00)
++#define ASLE_PFMB_PWM_VALID (1<<31)
++
++#define ASLE_CBLV_VALID         (1<<31)
++
++#define ACPI_OTHER_OUTPUT (0<<8)
++#define ACPI_VGA_OUTPUT (1<<8)
++#define ACPI_TV_OUTPUT (2<<8)
++#define ACPI_DIGITAL_OUTPUT (3<<8)
++#define ACPI_LVDS_OUTPUT (4<<8)
++
++#ifdef CONFIG_ACPI
++static u32 asle_set_backlight(struct drm_device *dev, u32 bclp)
++{
++	struct drm_i915_private *dev_priv = dev->dev_private;
++	struct opregion_asle *asle = dev_priv->opregion.asle;
++	u32 max;
++
++	if (!(bclp & ASLE_BCLP_VALID))
++		return ASLE_BACKLIGHT_FAILED;
++
++	bclp &= ASLE_BCLP_MSK;
++	if (bclp > 255)
++		return ASLE_BACKLIGHT_FAILED;
++
++	max = intel_panel_get_max_backlight(dev);
++	intel_panel_set_backlight(dev, bclp * max / 255);
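++	/* Report the resulting level back to the firmware as a percentage. */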
++	asle->cblv = (bclp*0x64)/0xff | ASLE_CBLV_VALID;
++
++	return 0;
++}
++
++static u32 asle_set_als_illum(struct drm_device *dev, u32 alsi)
++{
++	/* alsi is the current ALS reading in lux. 0 indicates below sensor
++	   range, 0xffff indicates above sensor range. 1-0xfffe are valid */
++	return 0;
++}
++
++static u32 asle_set_pwm_freq(struct drm_device *dev, u32 pfmb)
++{
++	struct drm_i915_private *dev_priv = dev->dev_private;
++	if (pfmb & ASLE_PFMB_PWM_VALID) {
++		u32 blc_pwm_ctl = I915_READ(BLC_PWM_CTL);
++		u32 pwm = pfmb & ASLE_PFMB_PWM_MASK;
++		blc_pwm_ctl &= BACKLIGHT_DUTY_CYCLE_MASK;
++		pwm = pwm >> 9;
++		/* FIXME - what do we do with the PWM? */
++	}
++	return 0;
++}
++
++static u32 asle_set_pfit(struct drm_device *dev, u32 pfit)
++{
++	/* Panel fitting is currently controlled by the X code, so this is a
++	   noop until modesetting support works fully */
++	if (!(pfit & ASLE_PFIT_VALID))
++		return ASLE_PFIT_FAILED;
++	return 0;
++}
++
++void intel_opregion_asle_intr(struct drm_device *dev)
++{
++	struct drm_i915_private *dev_priv = dev->dev_private;
++	struct opregion_asle *asle = dev_priv->opregion.asle;
++	u32 asle_stat = 0;
++	u32 asle_req;
++
++	if (!asle)
++		return;
++
++	asle_req = asle->aslc & ASLE_REQ_MSK;
++
++	if (!asle_req) {
++		DRM_DEBUG_DRIVER("non asle set request??\n");
++		return;
++	}
++
++	if (asle_req & ASLE_SET_ALS_ILLUM)
++		asle_stat |= asle_set_als_illum(dev, asle->alsi);
++
++	if (asle_req & ASLE_SET_BACKLIGHT)
++		asle_stat |= asle_set_backlight(dev, asle->bclp);
++
++	if (asle_req & ASLE_SET_PFIT)
++		asle_stat |= asle_set_pfit(dev, asle->pfit);
++
++	if (asle_req & ASLE_SET_PWM_FREQ)
++		asle_stat |= asle_set_pwm_freq(dev, asle->pfmb);
++
++	asle->aslc = asle_stat;
++}
++
++/* Only present on Ironlake+ */
++void intel_opregion_gse_intr(struct drm_device *dev)
++{
++	struct drm_i915_private *dev_priv = dev->dev_private;
++	struct opregion_asle *asle = dev_priv->opregion.asle;
++	u32 asle_stat = 0;
++	u32 asle_req;
++
++	if (!asle)
++		return;
++
++	asle_req = asle->aslc & ASLE_REQ_MSK;
++
++	if (!asle_req) {
++		DRM_DEBUG_DRIVER("non asle set request??\n");
++		return;
++	}
++
++	if (asle_req & ASLE_SET_ALS_ILLUM) {
++		DRM_DEBUG_DRIVER("Illum is not supported\n");
++		asle_stat |= ASLE_ALS_ILLUM_FAILED;
++	}
++
++	if (asle_req & ASLE_SET_BACKLIGHT)
++		asle_stat |= asle_set_backlight(dev, asle->bclp);
++
++	if (asle_req & ASLE_SET_PFIT) {
++		DRM_DEBUG_DRIVER("Pfit is not supported\n");
++		asle_stat |= ASLE_PFIT_FAILED;
++	}
++
++	if (asle_req & ASLE_SET_PWM_FREQ) {
++		DRM_DEBUG_DRIVER("PWM freq is not supported\n");
++		asle_stat |= ASLE_PWM_FREQ_FAILED;
++	}
++
++	asle->aslc = asle_stat;
++}
++#define ASLE_ALS_EN    (1<<0)
++#define ASLE_BLC_EN    (1<<1)
++#define ASLE_PFIT_EN   (1<<2)
++#define ASLE_PFMB_EN   (1<<3)
++
++void intel_opregion_enable_asle(struct drm_device *dev)
++{
++	struct drm_i915_private *dev_priv = dev->dev_private;
++	struct opregion_asle *asle = dev_priv->opregion.asle;
++
++	if (asle) {
++		if (IS_MOBILE(dev)) {
++			unsigned long irqflags;
++
++			spin_lock_irqsave(&dev_priv->user_irq_lock, irqflags);
++			intel_enable_asle(dev);
++			spin_unlock_irqrestore(&dev_priv->user_irq_lock,
++					       irqflags);
++		}
++
++		asle->tche = ASLE_ALS_EN | ASLE_BLC_EN | ASLE_PFIT_EN |
++			ASLE_PFMB_EN;
++		asle->ardy = 1;
++	}
++}
++
++#define ACPI_EV_DISPLAY_SWITCH (1<<0)
++#define ACPI_EV_LID            (1<<1)
++#define ACPI_EV_DOCK           (1<<2)
++
++static struct intel_opregion *system_opregion;
++
++static int intel_opregion_video_event(struct notifier_block *nb,
++				      unsigned long val, void *data)
++{
++	/* The only video events relevant to opregion are 0x80. These indicate
++	   either a docking event, lid switch or display switch request. In
++	   Linux, these are handled by the dock, button and video drivers.
++	   We might want to fix the video driver to be opregion-aware in
++	   future, but right now we just indicate to the firmware that the
++	   request has been handled */
++
++	struct opregion_acpi *acpi;
++
++	if (!system_opregion)
++		return NOTIFY_DONE;
++
++	acpi = system_opregion->acpi;
++	acpi->csts = 0;
++
++	return NOTIFY_OK;
++}
++
++static struct notifier_block intel_opregion_notifier = {
++	.notifier_call = intel_opregion_video_event,
++};
++
++/*
++ * Initialise the DIDL field in opregion. This passes a list of devices to
++ * the firmware. Values are defined by section B.4.2 of the ACPI specification
++ * (version 3)
++ */
++
++static void intel_didl_outputs(struct drm_device *dev)
++{
++	struct drm_i915_private *dev_priv = dev->dev_private;
++	struct intel_opregion *opregion = &dev_priv->opregion;
++	struct drm_connector *connector;
++	acpi_handle handle;
++	struct acpi_device *acpi_dev, *acpi_cdev, *acpi_video_bus = NULL;
++	unsigned long long device_id;
++	acpi_status status;
++	int i = 0;
++
++	handle = DEVICE_ACPI_HANDLE(&dev->pdev->dev);
++	if (!handle || ACPI_FAILURE(acpi_bus_get_device(handle, &acpi_dev)))
++		return;
++
++	if (acpi_is_video_device(acpi_dev))
++		acpi_video_bus = acpi_dev;
++	else {
++		list_for_each_entry(acpi_cdev, &acpi_dev->children, node) {
++			if (acpi_is_video_device(acpi_cdev)) {
++				acpi_video_bus = acpi_cdev;
++				break;
++			}
++		}
++	}
++
++	if (!acpi_video_bus) {
++		printk(KERN_WARNING "No ACPI video bus found\n");
++		return;
++	}
++
++	list_for_each_entry(acpi_cdev, &acpi_video_bus->children, node) {
++		if (i >= 8) {
++			dev_printk (KERN_ERR, &dev->pdev->dev,
++				    "More than 8 outputs detected\n");
++			return;
++		}
++		status =
++			acpi_evaluate_integer(acpi_cdev->handle, "_ADR",
++						NULL, &device_id);
++		if (ACPI_SUCCESS(status)) {
++			if (!device_id)
++				goto blind_set;
++			opregion->acpi->didl[i] = (u32)(device_id & 0x0f0f);
++			i++;
++		}
++	}
++
++end:
++	/* If fewer than 8 outputs, the list must be null terminated */
++	if (i < 8)
++		opregion->acpi->didl[i] = 0;
++	return;
++
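++/* The firmware supplied no usable _ADR device ids, so synthesize the
++ * DIDL entries from the KMS connector types instead.
++ */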
++blind_set:
++	i = 0;
++	list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
++		int output_type = ACPI_OTHER_OUTPUT;
++		if (i >= 8) {
++			dev_printk (KERN_ERR, &dev->pdev->dev,
++				    "More than 8 outputs detected\n");
++			return;
++		}
++		switch (connector->connector_type) {
++		case DRM_MODE_CONNECTOR_VGA:
++		case DRM_MODE_CONNECTOR_DVIA:
++			output_type = ACPI_VGA_OUTPUT;
++			break;
++		case DRM_MODE_CONNECTOR_Composite:
++		case DRM_MODE_CONNECTOR_SVIDEO:
++		case DRM_MODE_CONNECTOR_Component:
++		case DRM_MODE_CONNECTOR_9PinDIN:
++			output_type = ACPI_TV_OUTPUT;
++			break;
++		case DRM_MODE_CONNECTOR_DVII:
++		case DRM_MODE_CONNECTOR_DVID:
++		case DRM_MODE_CONNECTOR_DisplayPort:
++		case DRM_MODE_CONNECTOR_HDMIA:
++		case DRM_MODE_CONNECTOR_HDMIB:
++			output_type = ACPI_DIGITAL_OUTPUT;
++			break;
++		case DRM_MODE_CONNECTOR_LVDS:
++			output_type = ACPI_LVDS_OUTPUT;
++			break;
++		}
++		opregion->acpi->didl[i] |= (1<<31) | output_type | i;
++		i++;
++	}
++	goto end;
++}
++
++void intel_opregion_init(struct drm_device *dev)
++{
++	struct drm_i915_private *dev_priv = dev->dev_private;
++	struct intel_opregion *opregion = &dev_priv->opregion;
++
++	if (!opregion->header)
++		return;
++
++	if (opregion->acpi) {
++		if (drm_core_check_feature(dev, DRIVER_MODESET))
++			intel_didl_outputs(dev);
++
++		/* Notify BIOS we are ready to handle ACPI video ext notifs.
++		 * Right now, all the events are handled by the ACPI video module.
++		 * We don't actually need to do anything with them. */
++		opregion->acpi->csts = 0;
++		opregion->acpi->drdy = 1;
++
++		system_opregion = opregion;
++		register_acpi_notifier(&intel_opregion_notifier);
++	}
++
++	if (opregion->asle)
++		intel_opregion_enable_asle(dev);
++}
++
++void intel_opregion_fini(struct drm_device *dev)
++{
++	struct drm_i915_private *dev_priv = dev->dev_private;
++	struct intel_opregion *opregion = &dev_priv->opregion;
++
++	if (!opregion->header)
++		return;
++
++	if (opregion->acpi) {
++		opregion->acpi->drdy = 0;
++
++		system_opregion = NULL;
++		unregister_acpi_notifier(&intel_opregion_notifier);
++	}
++
++	/* just clear all opregion memory pointers now */
++	iounmap(opregion->header);
++	opregion->header = NULL;
++	opregion->acpi = NULL;
++	opregion->swsci = NULL;
++	opregion->asle = NULL;
++	opregion->vbt = NULL;
++}
++#endif
++
++int intel_opregion_setup(struct drm_device *dev)
++{
++	struct drm_i915_private *dev_priv = dev->dev_private;
++	struct intel_opregion *opregion = &dev_priv->opregion;
++	void *base;
++	u32 asls, mboxes;
++	int err = 0;
++
++	pci_read_config_dword(dev->pdev, PCI_ASLS, &asls);
++	DRM_DEBUG_DRIVER("graphic opregion physical addr: 0x%x\n", asls);
++	if (asls == 0) {
++		DRM_DEBUG_DRIVER("ACPI OpRegion not supported!\n");
++		return -ENOTSUPP;
++	}
++
++	base = ioremap(asls, OPREGION_SIZE);
++	if (!base)
++		return -ENOMEM;
++
++	if (memcmp(base, OPREGION_SIGNATURE, 16)) {
++		DRM_DEBUG_DRIVER("opregion signature mismatch\n");
++		err = -EINVAL;
++		goto err_out;
++	}
++	opregion->header = base;
++	opregion->vbt = base + OPREGION_VBT_OFFSET;
++
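++	/* Only hook up the mailboxes the header advertises as present. */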
++	mboxes = opregion->header->mboxes;
++	if (mboxes & MBOX_ACPI) {
++		DRM_DEBUG_DRIVER("Public ACPI methods supported\n");
++		opregion->acpi = base + OPREGION_ACPI_OFFSET;
++	}
++
++	if (mboxes & MBOX_SWSCI) {
++		DRM_DEBUG_DRIVER("SWSCI supported\n");
++		opregion->swsci = base + OPREGION_SWSCI_OFFSET;
++	}
++	if (mboxes & MBOX_ASLE) {
++		DRM_DEBUG_DRIVER("ASLE supported\n");
++		opregion->asle = base + OPREGION_ASLE_OFFSET;
++	}
++
++	return 0;
++
++err_out:
++	iounmap(base);
++	return err;
++}
+diff --git a/drivers/gpu/drm/i915/intel_overlay.c b/drivers/gpu/drm/i915/intel_overlay.c
+index 1d306a4..5b513ea 100644
+--- a/drivers/gpu/drm/i915/intel_overlay.c
++++ b/drivers/gpu/drm/i915/intel_overlay.c
+@@ -170,57 +170,143 @@ struct overlay_registers {
+     u16 RESERVEDG[0x100 / 2 - N_HORIZ_UV_TAPS * N_PHASES];
+ };
+ 
+-/* overlay flip addr flag */
+-#define OFC_UPDATE		0x1
+-
+-#define OVERLAY_NONPHYSICAL(dev) (IS_G33(dev) || IS_I965G(dev))
+-#define OVERLAY_EXISTS(dev) (!IS_G4X(dev) && !IS_IRONLAKE(dev) && !IS_GEN6(dev))
+-
++struct intel_overlay {
++	struct drm_device *dev;
++	struct intel_crtc *crtc;
++	struct drm_i915_gem_object *vid_bo;
++	struct drm_i915_gem_object *old_vid_bo;
++	int active;
++	int pfit_active;
++	u32 pfit_vscale_ratio; /* shifted-point number, (1<<12) == 1.0 */
++	u32 color_key;
++	u32 brightness, contrast, saturation;
++	u32 old_xscale, old_yscale;
++	/* register access */
++	u32 flip_addr;
++	struct drm_i915_gem_object *reg_bo;
++	/* flip handling */
++	uint32_t last_flip_req;
++	void (*flip_tail)(struct intel_overlay *);
++};
+ 
+-static struct overlay_registers *intel_overlay_map_regs_atomic(struct intel_overlay *overlay)
++static struct overlay_registers *
++intel_overlay_map_regs(struct intel_overlay *overlay)
+ {
+         drm_i915_private_t *dev_priv = overlay->dev->dev_private;
+ 	struct overlay_registers *regs;
+ 
+-	/* no recursive mappings */
+-	BUG_ON(overlay->virt_addr);
++	if (OVERLAY_NEEDS_PHYSICAL(overlay->dev))
++		regs = overlay->reg_bo->phys_obj->handle->vaddr;
++	else
++		regs = io_mapping_map_wc(dev_priv->mm.gtt_mapping,
++					 overlay->reg_bo->gtt_offset);
+ 
+-	if (OVERLAY_NONPHYSICAL(overlay->dev)) {
+-		regs = io_mapping_map_atomic_wc(dev_priv->mm.gtt_mapping,
+-						overlay->reg_bo->gtt_offset,
+-						KM_USER0);
++	return regs;
++}
+ 
+-		if (!regs) {
+-			DRM_ERROR("failed to map overlay regs in GTT\n");
+-			return NULL;
+-		}
+-	} else
+-		regs = overlay->reg_bo->phys_obj->handle->vaddr;
++static void intel_overlay_unmap_regs(struct intel_overlay *overlay,
++				     struct overlay_registers *regs)
++{
++	if (!OVERLAY_NEEDS_PHYSICAL(overlay->dev))
++		io_mapping_unmap(regs);
++}
+ 
+-	return overlay->virt_addr = regs;
++static int intel_overlay_do_wait_request(struct intel_overlay *overlay,
++					 struct drm_i915_gem_request *request,
++					 bool interruptible,
++					 void (*tail)(struct intel_overlay *))
++{
++	struct drm_device *dev = overlay->dev;
++	drm_i915_private_t *dev_priv = dev->dev_private;
++	int ret;
++
++	BUG_ON(overlay->last_flip_req);
++	overlay->last_flip_req =
++		i915_add_request(dev, NULL, request, &dev_priv->render_ring);
++	if (overlay->last_flip_req == 0)
++		return -ENOMEM;
++
++	overlay->flip_tail = tail;
++	ret = i915_do_wait_request(dev,
++				   overlay->last_flip_req, true,
++				   &dev_priv->render_ring);
++	if (ret)
++		return ret;
++
++	overlay->last_flip_req = 0;
++	return 0;
+ }
+ 
+-static void intel_overlay_unmap_regs_atomic(struct intel_overlay *overlay)
++/* Workaround for i830 bug where pipe A must be enabled to change control regs */
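++/*
++ * Returns 1 if pipe A had to be forced on (the caller is expected to
++ * balance this with i830_deactivate_pipe_a once it is done with the
++ * overlay registers), or 0 if the pipe was already usable.
++ */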
++static int
++i830_activate_pipe_a(struct drm_device *dev)
+ {
+-	if (OVERLAY_NONPHYSICAL(overlay->dev))
+-		io_mapping_unmap_atomic(overlay->virt_addr, KM_USER0);
++	drm_i915_private_t *dev_priv = dev->dev_private;
++	struct intel_crtc *crtc;
++	struct drm_crtc_helper_funcs *crtc_funcs;
++	struct drm_display_mode vesa_640x480 = {
++		DRM_MODE("640x480", DRM_MODE_TYPE_DRIVER, 25175, 640, 656,
++			 752, 800, 0, 480, 489, 492, 525, 0,
++			 DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC)
++	}, *mode;
++
++	crtc = to_intel_crtc(dev_priv->pipe_to_crtc_mapping[0]);
++	if (crtc->dpms_mode == DRM_MODE_DPMS_ON)
++		return 0;
+ 
+-	overlay->virt_addr = NULL;
++	/* most i8xx have pipe a forced on, so don't trust dpms mode */
++	if (I915_READ(PIPEACONF) & PIPECONF_ENABLE)
++		return 0;
+ 
+-	return;
++	crtc_funcs = crtc->base.helper_private;
++	if (crtc_funcs->dpms == NULL)
++		return 0;
++
++	DRM_DEBUG_DRIVER("Enabling pipe A in order to enable overlay\n");
++
++	mode = drm_mode_duplicate(dev, &vesa_640x480);
++	drm_mode_set_crtcinfo(mode, CRTC_INTERLACE_HALVE_V);
++	if (!drm_crtc_helper_set_mode(&crtc->base, mode,
++				       crtc->base.x, crtc->base.y,
++				       crtc->base.fb))
++		return 0;
++
++	crtc_funcs->dpms(&crtc->base, DRM_MODE_DPMS_ON);
++	return 1;
++}
++
++static void
++i830_deactivate_pipe_a(struct drm_device *dev)
++{
++	drm_i915_private_t *dev_priv = dev->dev_private;
++	struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[0];
++	struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private;
++
++	crtc_funcs->dpms(crtc, DRM_MODE_DPMS_OFF);
+ }
+ 
+ /* overlay needs to be disabled in OCMD reg */
+ static int intel_overlay_on(struct intel_overlay *overlay)
+ {
+ 	struct drm_device *dev = overlay->dev;
++	struct drm_i915_gem_request *request;
++	int pipe_a_quirk = 0;
+ 	int ret;
+-	drm_i915_private_t *dev_priv = dev->dev_private;
+ 
+ 	BUG_ON(overlay->active);
+-
+ 	overlay->active = 1;
+-	overlay->hw_wedged = NEEDS_WAIT_FOR_FLIP;
++
++	if (IS_I830(dev)) {
++		pipe_a_quirk = i830_activate_pipe_a(dev);
++		if (pipe_a_quirk < 0)
++			return pipe_a_quirk;
++	}
++
++	request = kzalloc(sizeof(*request), GFP_KERNEL);
++	if (request == NULL) {
++		ret = -ENOMEM;
++		goto out;
++	}
+ 
+ 	BEGIN_LP_RING(4);
+ 	OUT_RING(MI_OVERLAY_FLIP | MI_OVERLAY_ON);
+@@ -229,32 +315,30 @@ static int intel_overlay_on(struct intel_overlay *overlay)
+ 	OUT_RING(MI_NOOP);
+ 	ADVANCE_LP_RING();
+ 
+-	overlay->last_flip_req =
+-		i915_add_request(dev, NULL, 0, &dev_priv->render_ring);
+-	if (overlay->last_flip_req == 0)
+-		return -ENOMEM;
+-
+-	ret = i915_do_wait_request(dev,
+-			overlay->last_flip_req, 1, &dev_priv->render_ring);
+-	if (ret != 0)
+-		return ret;
++	ret = intel_overlay_do_wait_request(overlay, request, true, NULL);
++out:
++	if (pipe_a_quirk)
++		i830_deactivate_pipe_a(dev);
+ 
+-	overlay->hw_wedged = 0;
+-	overlay->last_flip_req = 0;
+-	return 0;
++	return ret;
+ }
+ 
+ /* overlay needs to be enabled in OCMD reg */
+-static void intel_overlay_continue(struct intel_overlay *overlay,
+-			    bool load_polyphase_filter)
++static int intel_overlay_continue(struct intel_overlay *overlay,
++				  bool load_polyphase_filter)
+ {
+ 	struct drm_device *dev = overlay->dev;
+         drm_i915_private_t *dev_priv = dev->dev_private;
++	struct drm_i915_gem_request *request;
+ 	u32 flip_addr = overlay->flip_addr;
+ 	u32 tmp;
+ 
+ 	BUG_ON(!overlay->active);
+ 
++	request = kzalloc(sizeof(*request), GFP_KERNEL);
++	if (request == NULL)
++		return -ENOMEM;
++
+ 	if (load_polyphase_filter)
+ 		flip_addr |= OFC_UPDATE;
+ 
+@@ -269,220 +353,132 @@ static void intel_overlay_continue(struct intel_overlay *overlay,
+         ADVANCE_LP_RING();
+ 
+ 	overlay->last_flip_req =
+-		i915_add_request(dev, NULL, 0, &dev_priv->render_ring);
++		i915_add_request(dev, NULL, request, &dev_priv->render_ring);
++	return 0;
+ }
+ 
+-static int intel_overlay_wait_flip(struct intel_overlay *overlay)
++static void intel_overlay_release_old_vid_tail(struct intel_overlay *overlay)
+ {
+-	struct drm_device *dev = overlay->dev;
+-        drm_i915_private_t *dev_priv = dev->dev_private;
+-	int ret;
+-	u32 tmp;
+-
+-	if (overlay->last_flip_req != 0) {
+-		ret = i915_do_wait_request(dev, overlay->last_flip_req,
+-				1, &dev_priv->render_ring);
+-		if (ret == 0) {
+-			overlay->last_flip_req = 0;
+-
+-			tmp = I915_READ(ISR);
++	struct drm_gem_object *obj = &overlay->old_vid_bo->base;
+ 
+-			if (!(tmp & I915_OVERLAY_PLANE_FLIP_PENDING_INTERRUPT))
+-				return 0;
+-		}
+-	}
++	i915_gem_object_unpin(obj);
++	drm_gem_object_unreference(obj);
+ 
+-	/* synchronous slowpath */
+-	overlay->hw_wedged = RELEASE_OLD_VID;
++	overlay->old_vid_bo = NULL;
++}
+ 
+-	BEGIN_LP_RING(2);
+-        OUT_RING(MI_WAIT_FOR_EVENT | MI_WAIT_FOR_OVERLAY_FLIP);
+-        OUT_RING(MI_NOOP);
+-        ADVANCE_LP_RING();
++static void intel_overlay_off_tail(struct intel_overlay *overlay)
++{
++	struct drm_gem_object *obj;
+ 
+-	overlay->last_flip_req =
+-		i915_add_request(dev, NULL, 0, &dev_priv->render_ring);
+-	if (overlay->last_flip_req == 0)
+-		return -ENOMEM;
++	/* never have the overlay hw on without showing a frame */
++	BUG_ON(!overlay->vid_bo);
++	obj = &overlay->vid_bo->base;
+ 
+-	ret = i915_do_wait_request(dev, overlay->last_flip_req,
+-			1, &dev_priv->render_ring);
+-	if (ret != 0)
+-		return ret;
++	i915_gem_object_unpin(obj);
++	drm_gem_object_unreference(obj);
++	overlay->vid_bo = NULL;
+ 
+-	overlay->hw_wedged = 0;
+-	overlay->last_flip_req = 0;
+-	return 0;
++	overlay->crtc->overlay = NULL;
++	overlay->crtc = NULL;
++	overlay->active = 0;
+ }
+ 
+ /* overlay needs to be disabled in OCMD reg */
+-static int intel_overlay_off(struct intel_overlay *overlay)
++static int intel_overlay_off(struct intel_overlay *overlay,
++			     bool interruptible)
+ {
+-	u32 flip_addr = overlay->flip_addr;
+ 	struct drm_device *dev = overlay->dev;
+-	drm_i915_private_t *dev_priv = dev->dev_private;
+-	int ret;
++	u32 flip_addr = overlay->flip_addr;
++	struct drm_i915_gem_request *request;
+ 
+ 	BUG_ON(!overlay->active);
+ 
++	request = kzalloc(sizeof(*request), GFP_KERNEL);
++	if (request == NULL)
++		return -ENOMEM;
++
+ 	/* According to intel docs the overlay hw may hang (when switching
+ 	 * off) without loading the filter coeffs. It is however unclear whether
+ 	 * this applies to the disabling of the overlay or to the switching off
+ 	 * of the hw. Do it in both cases */
+ 	flip_addr |= OFC_UPDATE;
+ 
++	BEGIN_LP_RING(6);
+ 	/* wait for overlay to go idle */
+-	overlay->hw_wedged = SWITCH_OFF_STAGE_1;
+-
+-	BEGIN_LP_RING(4);
+ 	OUT_RING(MI_OVERLAY_FLIP | MI_OVERLAY_CONTINUE);
+ 	OUT_RING(flip_addr);
+-        OUT_RING(MI_WAIT_FOR_EVENT | MI_WAIT_FOR_OVERLAY_FLIP);
+-        OUT_RING(MI_NOOP);
+-        ADVANCE_LP_RING();
+-
+-	overlay->last_flip_req =
+-		i915_add_request(dev, NULL, 0, &dev_priv->render_ring);
+-	if (overlay->last_flip_req == 0)
+-		return -ENOMEM;
+-
+-	ret = i915_do_wait_request(dev, overlay->last_flip_req,
+-			1, &dev_priv->render_ring);
+-	if (ret != 0)
+-		return ret;
+-
++	OUT_RING(MI_WAIT_FOR_EVENT | MI_WAIT_FOR_OVERLAY_FLIP);
+ 	/* turn overlay off */
+-	overlay->hw_wedged = SWITCH_OFF_STAGE_2;
+-
+-	BEGIN_LP_RING(4);
+-        OUT_RING(MI_OVERLAY_FLIP | MI_OVERLAY_OFF);
++	OUT_RING(MI_OVERLAY_FLIP | MI_OVERLAY_OFF);
+ 	OUT_RING(flip_addr);
+-        OUT_RING(MI_WAIT_FOR_EVENT | MI_WAIT_FOR_OVERLAY_FLIP);
+-        OUT_RING(MI_NOOP);
++	OUT_RING(MI_WAIT_FOR_EVENT | MI_WAIT_FOR_OVERLAY_FLIP);
+ 	ADVANCE_LP_RING();
+ 
+-	overlay->last_flip_req =
+-		i915_add_request(dev, NULL, 0, &dev_priv->render_ring);
+-	if (overlay->last_flip_req == 0)
+-		return -ENOMEM;
+-
+-	ret = i915_do_wait_request(dev, overlay->last_flip_req,
+-			1, &dev_priv->render_ring);
+-	if (ret != 0)
+-		return ret;
+-
+-	overlay->hw_wedged = 0;
+-	overlay->last_flip_req = 0;
+-	return ret;
+-}
+-
+-static void intel_overlay_off_tail(struct intel_overlay *overlay)
+-{
+-	struct drm_gem_object *obj;
+-
+-	/* never have the overlay hw on without showing a frame */
+-	BUG_ON(!overlay->vid_bo);
+-	obj = &overlay->vid_bo->base;
+-
+-	i915_gem_object_unpin(obj);
+-	drm_gem_object_unreference(obj);
+-	overlay->vid_bo = NULL;
+-
+-	overlay->crtc->overlay = NULL;
+-	overlay->crtc = NULL;
+-	overlay->active = 0;
++	return intel_overlay_do_wait_request(overlay, request, interruptible,
++					     intel_overlay_off_tail);
+ }
+ 
+ /* recover from an interruption due to a signal
+  * We have to be careful not to repeat work forever and make forward progress. */
+-int intel_overlay_recover_from_interrupt(struct intel_overlay *overlay,
+-					 int interruptible)
++static int intel_overlay_recover_from_interrupt(struct intel_overlay *overlay,
++						bool interruptible)
+ {
+ 	struct drm_device *dev = overlay->dev;
+-	struct drm_gem_object *obj;
+ 	drm_i915_private_t *dev_priv = dev->dev_private;
+-	u32 flip_addr;
+ 	int ret;
+ 
+-	if (overlay->hw_wedged == HW_WEDGED)
+-		return -EIO;
+-
+-	if (overlay->last_flip_req == 0) {
+-		overlay->last_flip_req =
+-			i915_add_request(dev, NULL, 0, &dev_priv->render_ring);
+-		if (overlay->last_flip_req == 0)
+-			return -ENOMEM;
+-	}
++	if (overlay->last_flip_req == 0)
++		return 0;
+ 
+ 	ret = i915_do_wait_request(dev, overlay->last_flip_req,
+-			interruptible, &dev_priv->render_ring);
+-	if (ret != 0)
++				   interruptible, &dev_priv->render_ring);
++	if (ret)
+ 		return ret;
+ 
+-	switch (overlay->hw_wedged) {
+-		case RELEASE_OLD_VID:
+-			obj = &overlay->old_vid_bo->base;
+-			i915_gem_object_unpin(obj);
+-			drm_gem_object_unreference(obj);
+-			overlay->old_vid_bo = NULL;
+-			break;
+-		case SWITCH_OFF_STAGE_1:
+-			flip_addr = overlay->flip_addr;
+-			flip_addr |= OFC_UPDATE;
+-
+-			overlay->hw_wedged = SWITCH_OFF_STAGE_2;
+-
+-			BEGIN_LP_RING(4);
+-			OUT_RING(MI_OVERLAY_FLIP | MI_OVERLAY_OFF);
+-			OUT_RING(flip_addr);
+-			OUT_RING(MI_WAIT_FOR_EVENT | MI_WAIT_FOR_OVERLAY_FLIP);
+-			OUT_RING(MI_NOOP);
+-			ADVANCE_LP_RING();
+-
+-			overlay->last_flip_req = i915_add_request(dev, NULL,
+-					0, &dev_priv->render_ring);
+-			if (overlay->last_flip_req == 0)
+-				return -ENOMEM;
+-
+-			ret = i915_do_wait_request(dev, overlay->last_flip_req,
+-					interruptible, &dev_priv->render_ring);
+-			if (ret != 0)
+-				return ret;
+-
+-		case SWITCH_OFF_STAGE_2:
+-			intel_overlay_off_tail(overlay);
+-			break;
+-		default:
+-			BUG_ON(overlay->hw_wedged != NEEDS_WAIT_FOR_FLIP);
+-	}
++	if (overlay->flip_tail)
++		overlay->flip_tail(overlay);
+ 
+-	overlay->hw_wedged = 0;
+ 	overlay->last_flip_req = 0;
+ 	return 0;
+ }
+ 
+ /* Wait for pending overlay flip and release old frame.
+  * Needs to be called before the overlay registers are changed
+- * via intel_overlay_(un)map_regs_atomic */
++ * via intel_overlay_(un)map_regs
++ */
+ static int intel_overlay_release_old_vid(struct intel_overlay *overlay)
+ {
++	struct drm_device *dev = overlay->dev;
++	drm_i915_private_t *dev_priv = dev->dev_private;
+ 	int ret;
+-	struct drm_gem_object *obj;
+ 
+-	/* only wait if there is actually an old frame to release to
+-	 * guarantee forward progress */
++	/* Only wait if there is actually an old frame to release to
++	 * guarantee forward progress.
++	 */
+ 	if (!overlay->old_vid_bo)
+ 		return 0;
+ 
+-	ret = intel_overlay_wait_flip(overlay);
+-	if (ret != 0)
+-		return ret;
++	if (I915_READ(ISR) & I915_OVERLAY_PLANE_FLIP_PENDING_INTERRUPT) {
++		struct drm_i915_gem_request *request;
+ 
+-	obj = &overlay->old_vid_bo->base;
+-	i915_gem_object_unpin(obj);
+-	drm_gem_object_unreference(obj);
+-	overlay->old_vid_bo = NULL;
++		/* synchronous slowpath */
++		request = kzalloc(sizeof(*request), GFP_KERNEL);
++		if (request == NULL)
++			return -ENOMEM;
++
++		BEGIN_LP_RING(2);
++		OUT_RING(MI_WAIT_FOR_EVENT | MI_WAIT_FOR_OVERLAY_FLIP);
++		OUT_RING(MI_NOOP);
++		ADVANCE_LP_RING();
++
++		ret = intel_overlay_do_wait_request(overlay, request, true,
++						    intel_overlay_release_old_vid_tail);
++		if (ret)
++			return ret;
++	}
+ 
++	intel_overlay_release_old_vid_tail(overlay);
+ 	return 0;
+ }
+ 
+@@ -506,65 +502,65 @@ struct put_image_params {
+ static int packed_depth_bytes(u32 format)
+ {
+ 	switch (format & I915_OVERLAY_DEPTH_MASK) {
+-		case I915_OVERLAY_YUV422:
+-			return 4;
+-		case I915_OVERLAY_YUV411:
+-			/* return 6; not implemented */
+-		default:
+-			return -EINVAL;
++	case I915_OVERLAY_YUV422:
++		return 4;
++	case I915_OVERLAY_YUV411:
++		/* return 6; not implemented */
++	default:
++		return -EINVAL;
+ 	}
+ }
+ 
+ static int packed_width_bytes(u32 format, short width)
+ {
+ 	switch (format & I915_OVERLAY_DEPTH_MASK) {
+-		case I915_OVERLAY_YUV422:
+-			return width << 1;
+-		default:
+-			return -EINVAL;
++	case I915_OVERLAY_YUV422:
++		return width << 1;
++	default:
++		return -EINVAL;
+ 	}
+ }
+ 
+ static int uv_hsubsampling(u32 format)
+ {
+ 	switch (format & I915_OVERLAY_DEPTH_MASK) {
+-		case I915_OVERLAY_YUV422:
+-		case I915_OVERLAY_YUV420:
+-			return 2;
+-		case I915_OVERLAY_YUV411:
+-		case I915_OVERLAY_YUV410:
+-			return 4;
+-		default:
+-			return -EINVAL;
++	case I915_OVERLAY_YUV422:
++	case I915_OVERLAY_YUV420:
++		return 2;
++	case I915_OVERLAY_YUV411:
++	case I915_OVERLAY_YUV410:
++		return 4;
++	default:
++		return -EINVAL;
+ 	}
+ }
+ 
+ static int uv_vsubsampling(u32 format)
+ {
+ 	switch (format & I915_OVERLAY_DEPTH_MASK) {
+-		case I915_OVERLAY_YUV420:
+-		case I915_OVERLAY_YUV410:
+-			return 2;
+-		case I915_OVERLAY_YUV422:
+-		case I915_OVERLAY_YUV411:
+-			return 1;
+-		default:
+-			return -EINVAL;
++	case I915_OVERLAY_YUV420:
++	case I915_OVERLAY_YUV410:
++		return 2;
++	case I915_OVERLAY_YUV422:
++	case I915_OVERLAY_YUV411:
++		return 1;
++	default:
++		return -EINVAL;
+ 	}
+ }
+ 
+ static u32 calc_swidthsw(struct drm_device *dev, u32 offset, u32 width)
+ {
+ 	u32 mask, shift, ret;
+-	if (IS_I9XX(dev)) {
+-		mask = 0x3f;
+-		shift = 6;
+-	} else {
++	if (IS_GEN2(dev)) {
+ 		mask = 0x1f;
+ 		shift = 5;
++	} else {
++		mask = 0x3f;
++		shift = 6;
+ 	}
+ 	ret = ((offset + width + mask) >> shift) - (offset >> shift);
+-	if (IS_I9XX(dev))
++	if (!IS_GEN2(dev))
+ 		ret <<= 1;
+ 	ret -=1;
+ 	return ret << 2;
+@@ -587,7 +583,9 @@ static const u16 y_static_hcoeffs[N_HORIZ_Y_TAPS * N_PHASES] = {
+ 	0x3020, 0xb340, 0x1fb8, 0x34a0, 0xb060,
+ 	0x3020, 0xb240, 0x1fe0, 0x32e0, 0xb040,
+ 	0x3020, 0xb140, 0x1ff8, 0x3160, 0xb020,
+-	0xb000, 0x3000, 0x0800, 0x3000, 0xb000};
++	0xb000, 0x3000, 0x0800, 0x3000, 0xb000
++};
++
+ static const u16 uv_static_hcoeffs[N_HORIZ_UV_TAPS * N_PHASES] = {
+ 	0x3000, 0x1800, 0x1800, 0xb000, 0x18d0, 0x2e60,
+ 	0xb000, 0x1990, 0x2ce0, 0xb020, 0x1a68, 0x2b40,
+@@ -597,7 +595,8 @@ static const u16 uv_static_hcoeffs[N_HORIZ_UV_TAPS * N_PHASES] = {
+ 	0xb100, 0x1eb8, 0x3620, 0xb100, 0x1f18, 0x34a0,
+ 	0xb100, 0x1f68, 0x3360, 0xb0e0, 0x1fa8, 0x3240,
+ 	0xb0c0, 0x1fe0, 0x3140, 0xb060, 0x1ff0, 0x30a0,
+-	0x3000, 0x0800, 0x3000};
++	0x3000, 0x0800, 0x3000
++};
+ 
+ static void update_polyphase_filter(struct overlay_registers *regs)
+ {
+@@ -630,29 +629,31 @@ static bool update_scaling_factors(struct intel_overlay *overlay,
+ 		yscale = 1 << FP_SHIFT;
+ 
+ 	/*if (params->format & I915_OVERLAY_YUV_PLANAR) {*/
+-		xscale_UV = xscale/uv_hscale;
+-		yscale_UV = yscale/uv_vscale;
+-		/* make the Y scale to UV scale ratio an exact multiply */
+-		xscale = xscale_UV * uv_hscale;
+-		yscale = yscale_UV * uv_vscale;
++	xscale_UV = xscale/uv_hscale;
++	yscale_UV = yscale/uv_vscale;
++	/* make the Y scale to UV scale ratio an exact multiple */
++	xscale = xscale_UV * uv_hscale;
++	yscale = yscale_UV * uv_vscale;
+ 	/*} else {
+-		xscale_UV = 0;
+-		yscale_UV = 0;
+-	}*/
++	  xscale_UV = 0;
++	  yscale_UV = 0;
++	  }*/
+ 
+ 	if (xscale != overlay->old_xscale || yscale != overlay->old_yscale)
+ 		scale_changed = true;
+ 	overlay->old_xscale = xscale;
+ 	overlay->old_yscale = yscale;
+ 
+-	regs->YRGBSCALE = ((yscale & FRACT_MASK) << 20)
+-		| ((xscale >> FP_SHIFT) << 16)
+-		| ((xscale & FRACT_MASK) << 3);
+-	regs->UVSCALE = ((yscale_UV & FRACT_MASK) << 20)
+-		| ((xscale_UV >> FP_SHIFT) << 16)
+-		| ((xscale_UV & FRACT_MASK) << 3);
+-	regs->UVSCALEV = ((yscale >> FP_SHIFT) << 16)
+-		| ((yscale_UV >> FP_SHIFT) << 0);
++	regs->YRGBSCALE = (((yscale & FRACT_MASK) << 20) |
++			   ((xscale >> FP_SHIFT)  << 16) |
++			   ((xscale & FRACT_MASK) << 3));
++
++	regs->UVSCALE = (((yscale_UV & FRACT_MASK) << 20) |
++			 ((xscale_UV >> FP_SHIFT)  << 16) |
++			 ((xscale_UV & FRACT_MASK) << 3));
++
++	regs->UVSCALEV = ((((yscale    >> FP_SHIFT) << 16) |
++			   ((yscale_UV >> FP_SHIFT) << 0)));
+ 
+ 	if (scale_changed)
+ 		update_polyphase_filter(regs);
+@@ -664,22 +665,28 @@ static void update_colorkey(struct intel_overlay *overlay,
+ 			    struct overlay_registers *regs)
+ {
+ 	u32 key = overlay->color_key;
++
+ 	switch (overlay->crtc->base.fb->bits_per_pixel) {
+-		case 8:
+-			regs->DCLRKV = 0;
+-			regs->DCLRKM = CLK_RGB8I_MASK | DST_KEY_ENABLE;
+-		case 16:
+-			if (overlay->crtc->base.fb->depth == 15) {
+-				regs->DCLRKV = RGB15_TO_COLORKEY(key);
+-				regs->DCLRKM = CLK_RGB15_MASK | DST_KEY_ENABLE;
+-			} else {
+-				regs->DCLRKV = RGB16_TO_COLORKEY(key);
+-				regs->DCLRKM = CLK_RGB16_MASK | DST_KEY_ENABLE;
+-			}
+-		case 24:
+-		case 32:
+-			regs->DCLRKV = key;
+-			regs->DCLRKM = CLK_RGB24_MASK | DST_KEY_ENABLE;
++	case 8:
++		regs->DCLRKV = 0;
++		regs->DCLRKM = CLK_RGB8I_MASK | DST_KEY_ENABLE;
++		break;
++
++	case 16:
++		if (overlay->crtc->base.fb->depth == 15) {
++			regs->DCLRKV = RGB15_TO_COLORKEY(key);
++			regs->DCLRKM = CLK_RGB15_MASK | DST_KEY_ENABLE;
++		} else {
++			regs->DCLRKV = RGB16_TO_COLORKEY(key);
++			regs->DCLRKM = CLK_RGB16_MASK | DST_KEY_ENABLE;
++		}
++		break;
++
++	case 24:
++	case 32:
++		regs->DCLRKV = key;
++		regs->DCLRKM = CLK_RGB24_MASK | DST_KEY_ENABLE;
++		break;
+ 	}
+ }
+ 
+@@ -689,48 +696,48 @@ static u32 overlay_cmd_reg(struct put_image_params *params)
+ 
+ 	if (params->format & I915_OVERLAY_YUV_PLANAR) {
+ 		switch (params->format & I915_OVERLAY_DEPTH_MASK) {
+-			case I915_OVERLAY_YUV422:
+-				cmd |= OCMD_YUV_422_PLANAR;
+-				break;
+-			case I915_OVERLAY_YUV420:
+-				cmd |= OCMD_YUV_420_PLANAR;
+-				break;
+-			case I915_OVERLAY_YUV411:
+-			case I915_OVERLAY_YUV410:
+-				cmd |= OCMD_YUV_410_PLANAR;
+-				break;
++		case I915_OVERLAY_YUV422:
++			cmd |= OCMD_YUV_422_PLANAR;
++			break;
++		case I915_OVERLAY_YUV420:
++			cmd |= OCMD_YUV_420_PLANAR;
++			break;
++		case I915_OVERLAY_YUV411:
++		case I915_OVERLAY_YUV410:
++			cmd |= OCMD_YUV_410_PLANAR;
++			break;
+ 		}
+ 	} else { /* YUV packed */
+ 		switch (params->format & I915_OVERLAY_DEPTH_MASK) {
+-			case I915_OVERLAY_YUV422:
+-				cmd |= OCMD_YUV_422_PACKED;
+-				break;
+-			case I915_OVERLAY_YUV411:
+-				cmd |= OCMD_YUV_411_PACKED;
+-				break;
++		case I915_OVERLAY_YUV422:
++			cmd |= OCMD_YUV_422_PACKED;
++			break;
++		case I915_OVERLAY_YUV411:
++			cmd |= OCMD_YUV_411_PACKED;
++			break;
+ 		}
+ 
+ 		switch (params->format & I915_OVERLAY_SWAP_MASK) {
+-			case I915_OVERLAY_NO_SWAP:
+-				break;
+-			case I915_OVERLAY_UV_SWAP:
+-				cmd |= OCMD_UV_SWAP;
+-				break;
+-			case I915_OVERLAY_Y_SWAP:
+-				cmd |= OCMD_Y_SWAP;
+-				break;
+-			case I915_OVERLAY_Y_AND_UV_SWAP:
+-				cmd |= OCMD_Y_AND_UV_SWAP;
+-				break;
++		case I915_OVERLAY_NO_SWAP:
++			break;
++		case I915_OVERLAY_UV_SWAP:
++			cmd |= OCMD_UV_SWAP;
++			break;
++		case I915_OVERLAY_Y_SWAP:
++			cmd |= OCMD_Y_SWAP;
++			break;
++		case I915_OVERLAY_Y_AND_UV_SWAP:
++			cmd |= OCMD_Y_AND_UV_SWAP;
++			break;
+ 		}
+ 	}
+ 
+ 	return cmd;
+ }
+ 
+-int intel_overlay_do_put_image(struct intel_overlay *overlay,
+-			       struct drm_gem_object *new_bo,
+-			       struct put_image_params *params)
++static int intel_overlay_do_put_image(struct intel_overlay *overlay,
++				      struct drm_gem_object *new_bo,
++				      struct put_image_params *params)
+ {
+ 	int ret, tmp_width;
+ 	struct overlay_registers *regs;
+@@ -755,24 +762,24 @@ int intel_overlay_do_put_image(struct intel_overlay *overlay,
+ 		goto out_unpin;
+ 
+ 	if (!overlay->active) {
+-		regs = intel_overlay_map_regs_atomic(overlay);
++		regs = intel_overlay_map_regs(overlay);
+ 		if (!regs) {
+ 			ret = -ENOMEM;
+ 			goto out_unpin;
+ 		}
+ 		regs->OCONFIG = OCONF_CC_OUT_8BIT;
+-		if (IS_I965GM(overlay->dev))
++		if (IS_GEN4(overlay->dev))
+ 			regs->OCONFIG |= OCONF_CSC_MODE_BT709;
+ 		regs->OCONFIG |= overlay->crtc->pipe == 0 ?
+ 			OCONF_PIPE_A : OCONF_PIPE_B;
+-		intel_overlay_unmap_regs_atomic(overlay);
++		intel_overlay_unmap_regs(overlay, regs);
+ 
+ 		ret = intel_overlay_on(overlay);
+ 		if (ret != 0)
+ 			goto out_unpin;
+ 	}
+ 
+-	regs = intel_overlay_map_regs_atomic(overlay);
++	regs = intel_overlay_map_regs(overlay);
+ 	if (!regs) {
+ 		ret = -ENOMEM;
+ 		goto out_unpin;
+@@ -788,7 +795,7 @@ int intel_overlay_do_put_image(struct intel_overlay *overlay,
+ 
+ 	regs->SWIDTH = params->src_w;
+ 	regs->SWIDTHSW = calc_swidthsw(overlay->dev,
+-			params->offset_Y, tmp_width);
++				       params->offset_Y, tmp_width);
+ 	regs->SHEIGHT = params->src_h;
+ 	regs->OBUF_0Y = bo_priv->gtt_offset + params-> offset_Y;
+ 	regs->OSTRIDE = params->stride_Y;
+@@ -799,9 +806,9 @@ int intel_overlay_do_put_image(struct intel_overlay *overlay,
+ 		u32 tmp_U, tmp_V;
+ 		regs->SWIDTH |= (params->src_w/uv_hscale) << 16;
+ 		tmp_U = calc_swidthsw(overlay->dev, params->offset_U,
+-				params->src_w/uv_hscale);
++				      params->src_w/uv_hscale);
+ 		tmp_V = calc_swidthsw(overlay->dev, params->offset_V,
+-				params->src_w/uv_hscale);
++				      params->src_w/uv_hscale);
+ 		regs->SWIDTHSW |= max_t(u32, tmp_U, tmp_V) << 16;
+ 		regs->SHEIGHT |= (params->src_h/uv_vscale) << 16;
+ 		regs->OBUF_0U = bo_priv->gtt_offset + params->offset_U;
+@@ -815,9 +822,11 @@ int intel_overlay_do_put_image(struct intel_overlay *overlay,
+ 
+ 	regs->OCMD = overlay_cmd_reg(params);
+ 
+-	intel_overlay_unmap_regs_atomic(overlay);
++	intel_overlay_unmap_regs(overlay, regs);
+ 
+-	intel_overlay_continue(overlay, scale_changed);
++	ret = intel_overlay_continue(overlay, scale_changed);
++	if (ret)
++		goto out_unpin;
+ 
+ 	overlay->old_vid_bo = overlay->vid_bo;
+ 	overlay->vid_bo = to_intel_bo(new_bo);
+@@ -829,20 +838,19 @@ out_unpin:
+ 	return ret;
+ }
+ 
+-int intel_overlay_switch_off(struct intel_overlay *overlay)
++int intel_overlay_switch_off(struct intel_overlay *overlay,
++			     bool interruptible)
+ {
+-	int ret;
+ 	struct overlay_registers *regs;
+ 	struct drm_device *dev = overlay->dev;
++	int ret;
+ 
+ 	BUG_ON(!mutex_is_locked(&dev->struct_mutex));
+ 	BUG_ON(!mutex_is_locked(&dev->mode_config.mutex));
+ 
+-	if (overlay->hw_wedged) {
+-		ret = intel_overlay_recover_from_interrupt(overlay, 1);
+-		if (ret != 0)
+-			return ret;
+-	}
++	ret = intel_overlay_recover_from_interrupt(overlay, interruptible);
++	if (ret != 0)
++		return ret;
+ 
+ 	if (!overlay->active)
+ 		return 0;
+@@ -851,33 +859,29 @@ int intel_overlay_switch_off(struct intel_overlay *overlay)
+ 	if (ret != 0)
+ 		return ret;
+ 
+-	regs = intel_overlay_map_regs_atomic(overlay);
++	regs = intel_overlay_map_regs(overlay);
+ 	regs->OCMD = 0;
+-	intel_overlay_unmap_regs_atomic(overlay);
++	intel_overlay_unmap_regs(overlay, regs);
+ 
+-	ret = intel_overlay_off(overlay);
++	ret = intel_overlay_off(overlay, interruptible);
+ 	if (ret != 0)
+ 		return ret;
+ 
+ 	intel_overlay_off_tail(overlay);
+-
+ 	return 0;
+ }
+ 
+ static int check_overlay_possible_on_crtc(struct intel_overlay *overlay,
+ 					  struct intel_crtc *crtc)
+ {
+-        drm_i915_private_t *dev_priv = overlay->dev->dev_private;
+-	u32 pipeconf;
+-	int pipeconf_reg = (crtc->pipe == 0) ? PIPEACONF : PIPEBCONF;
++	drm_i915_private_t *dev_priv = overlay->dev->dev_private;
+ 
+-	if (!crtc->base.enabled || crtc->dpms_mode != DRM_MODE_DPMS_ON)
++	if (!crtc->active)
+ 		return -EINVAL;
+ 
+-	pipeconf = I915_READ(pipeconf_reg);
+-
+ 	/* can't use the overlay with double wide pipe */
+-	if (!IS_I965G(overlay->dev) && pipeconf & PIPEACONF_DOUBLE_WIDE)
++	if (INTEL_INFO(overlay->dev)->gen < 4 &&
++	    (I915_READ(PIPECONF(crtc->pipe)) & (PIPECONF_DOUBLE_WIDE | PIPECONF_ENABLE)) != PIPECONF_ENABLE)
+ 		return -EINVAL;
+ 
+ 	return 0;
+@@ -886,20 +890,22 @@ static int check_overlay_possible_on_crtc(struct intel_overlay *overlay,
+ static void update_pfit_vscale_ratio(struct intel_overlay *overlay)
+ {
+ 	struct drm_device *dev = overlay->dev;
+-        drm_i915_private_t *dev_priv = dev->dev_private;
+-	u32 ratio;
++	drm_i915_private_t *dev_priv = dev->dev_private;
+ 	u32 pfit_control = I915_READ(PFIT_CONTROL);
++	u32 ratio;
+ 
+ 	/* XXX: This is not the same logic as in the xorg driver, but more in
+-	 * line with the intel documentation for the i965 */
+-	if (!IS_I965G(dev) && (pfit_control & VERT_AUTO_SCALE)) {
+-		ratio = I915_READ(PFIT_AUTO_RATIOS) >> PFIT_VERT_SCALE_SHIFT;
+-	} else { /* on i965 use the PGM reg to read out the autoscaler values */
+-		ratio = I915_READ(PFIT_PGM_RATIOS);
+-		if (IS_I965G(dev))
+-			ratio >>= PFIT_VERT_SCALE_SHIFT_965;
++	 * line with the intel documentation for the i965
++	 */
++	if (INTEL_INFO(dev)->gen >= 4) {
++		/* on i965 use the PGM reg to read out the autoscaler values */
++		ratio = I915_READ(PFIT_PGM_RATIOS) >> PFIT_VERT_SCALE_SHIFT_965;
++	} else {
++		if (pfit_control & VERT_AUTO_SCALE)
++			ratio = I915_READ(PFIT_AUTO_RATIOS);
+ 		else
+-			ratio >>= PFIT_VERT_SCALE_SHIFT;
++			ratio = I915_READ(PFIT_PGM_RATIOS);
++		ratio >>= PFIT_VERT_SCALE_SHIFT;
+ 	}
+ 
+ 	overlay->pfit_vscale_ratio = ratio;
+@@ -910,12 +916,10 @@ static int check_overlay_dst(struct intel_overlay *overlay,
+ {
+ 	struct drm_display_mode *mode = &overlay->crtc->base.mode;
+ 
+-	if ((rec->dst_x < mode->crtc_hdisplay)
+-	    && (rec->dst_x + rec->dst_width
+-		    <= mode->crtc_hdisplay)
+-	    && (rec->dst_y < mode->crtc_vdisplay)
+-	    && (rec->dst_y + rec->dst_height
+-		    <= mode->crtc_vdisplay))
++	if (rec->dst_x < mode->crtc_hdisplay &&
++	    rec->dst_x + rec->dst_width <= mode->crtc_hdisplay &&
++	    rec->dst_y < mode->crtc_vdisplay &&
++	    rec->dst_y + rec->dst_height <= mode->crtc_vdisplay)
+ 		return 0;
+ 	else
+ 		return -EINVAL;
+@@ -940,53 +944,59 @@ static int check_overlay_src(struct drm_device *dev,
+ 			     struct drm_intel_overlay_put_image *rec,
+ 			     struct drm_gem_object *new_bo)
+ {
+-	u32 stride_mask;
+-	int depth;
+ 	int uv_hscale = uv_hsubsampling(rec->flags);
+ 	int uv_vscale = uv_vsubsampling(rec->flags);
+-	size_t tmp;
++	u32 stride_mask;
++	int depth;
++	u32 tmp;
+ 
+ 	/* check src dimensions */
+ 	if (IS_845G(dev) || IS_I830(dev)) {
+-		if (rec->src_height > IMAGE_MAX_HEIGHT_LEGACY
+-		    || rec->src_width > IMAGE_MAX_WIDTH_LEGACY)
++		if (rec->src_height > IMAGE_MAX_HEIGHT_LEGACY ||
++		    rec->src_width  > IMAGE_MAX_WIDTH_LEGACY)
+ 			return -EINVAL;
+ 	} else {
+-		if (rec->src_height > IMAGE_MAX_HEIGHT
+-		    || rec->src_width > IMAGE_MAX_WIDTH)
++		if (rec->src_height > IMAGE_MAX_HEIGHT ||
++		    rec->src_width  > IMAGE_MAX_WIDTH)
+ 			return -EINVAL;
+ 	}
++
+ 	/* better safe than sorry, use 4 as the maximal subsampling ratio */
+-	if (rec->src_height < N_VERT_Y_TAPS*4
+-	    || rec->src_width < N_HORIZ_Y_TAPS*4)
++	if (rec->src_height < N_VERT_Y_TAPS*4 ||
++	    rec->src_width  < N_HORIZ_Y_TAPS*4)
+ 		return -EINVAL;
+ 
+ 	/* check alignment constraints */
+ 	switch (rec->flags & I915_OVERLAY_TYPE_MASK) {
+-		case I915_OVERLAY_RGB:
+-			/* not implemented */
++	case I915_OVERLAY_RGB:
++		/* not implemented */
++		return -EINVAL;
++
++	case I915_OVERLAY_YUV_PACKED:
++		if (uv_vscale != 1)
+ 			return -EINVAL;
+-		case I915_OVERLAY_YUV_PACKED:
+-			depth = packed_depth_bytes(rec->flags);
+-			if (uv_vscale != 1)
+-				return -EINVAL;
+-			if (depth < 0)
+-				return depth;
+-			/* ignore UV planes */
+-			rec->stride_UV = 0;
+-			rec->offset_U = 0;
+-			rec->offset_V = 0;
+-			/* check pixel alignment */
+-			if (rec->offset_Y % depth)
+-				return -EINVAL;
+-			break;
+-		case I915_OVERLAY_YUV_PLANAR:
+-			if (uv_vscale < 0 || uv_hscale < 0)
+-				return -EINVAL;
+-			/* no offset restrictions for planar formats */
+-			break;
+-		default:
++
++		depth = packed_depth_bytes(rec->flags);
++		if (depth < 0)
++			return depth;
++
++		/* ignore UV planes */
++		rec->stride_UV = 0;
++		rec->offset_U = 0;
++		rec->offset_V = 0;
++		/* check pixel alignment */
++		if (rec->offset_Y % depth)
+ 			return -EINVAL;
++		break;
++
++	case I915_OVERLAY_YUV_PLANAR:
++		if (uv_vscale < 0 || uv_hscale < 0)
++			return -EINVAL;
++		/* no offset restrictions for planar formats */
++		break;
++
++	default:
++		return -EINVAL;
+ 	}
+ 
+ 	if (rec->src_width % uv_hscale)
+@@ -1000,47 +1010,74 @@ static int check_overlay_src(struct drm_device *dev,
+ 
+ 	if (rec->stride_Y & stride_mask || rec->stride_UV & stride_mask)
+ 		return -EINVAL;
+-	if (IS_I965G(dev) && rec->stride_Y < 512)
++	if (IS_GEN4(dev) && rec->stride_Y < 512)
+ 		return -EINVAL;
+ 
+ 	tmp = (rec->flags & I915_OVERLAY_TYPE_MASK) == I915_OVERLAY_YUV_PLANAR ?
+-		4 : 8;
+-	if (rec->stride_Y > tmp*1024 || rec->stride_UV > 2*1024)
++		4096 : 8192;
++	if (rec->stride_Y > tmp || rec->stride_UV > 2*1024)
+ 		return -EINVAL;
+ 
+ 	/* check buffer dimensions */
+ 	switch (rec->flags & I915_OVERLAY_TYPE_MASK) {
+-		case I915_OVERLAY_RGB:
+-		case I915_OVERLAY_YUV_PACKED:
+-			/* always 4 Y values per depth pixels */
+-			if (packed_width_bytes(rec->flags, rec->src_width)
+-					> rec->stride_Y)
+-				return -EINVAL;
+-
+-			tmp = rec->stride_Y*rec->src_height;
+-			if (rec->offset_Y + tmp > new_bo->size)
+-				return -EINVAL;
+-			break;
+-		case I915_OVERLAY_YUV_PLANAR:
+-			if (rec->src_width > rec->stride_Y)
+-				return -EINVAL;
+-			if (rec->src_width/uv_hscale > rec->stride_UV)
+-				return -EINVAL;
+-
+-			tmp = rec->stride_Y*rec->src_height;
+-			if (rec->offset_Y + tmp > new_bo->size)
+-				return -EINVAL;
+-			tmp = rec->stride_UV*rec->src_height;
+-			tmp /= uv_vscale;
+-			if (rec->offset_U + tmp > new_bo->size
+-			    || rec->offset_V + tmp > new_bo->size)
+-				return -EINVAL;
+-			break;
++	case I915_OVERLAY_RGB:
++	case I915_OVERLAY_YUV_PACKED:
++		/* always 4 Y values per depth pixels */
++		if (packed_width_bytes(rec->flags, rec->src_width) > rec->stride_Y)
++			return -EINVAL;
++
++		tmp = rec->stride_Y*rec->src_height;
++		if (rec->offset_Y + tmp > new_bo->size)
++			return -EINVAL;
++		break;
++
++	case I915_OVERLAY_YUV_PLANAR:
++		if (rec->src_width > rec->stride_Y)
++			return -EINVAL;
++		if (rec->src_width/uv_hscale > rec->stride_UV)
++			return -EINVAL;
++
++		tmp = rec->stride_Y * rec->src_height;
++		if (rec->offset_Y + tmp > new_bo->size)
++			return -EINVAL;
++
++		tmp = rec->stride_UV * (rec->src_height / uv_vscale);
++		if (rec->offset_U + tmp > new_bo->size ||
++		    rec->offset_V + tmp > new_bo->size)
++			return -EINVAL;
++		break;
+ 	}
+ 
+ 	return 0;
+ }
+ 
++/**
++ * Return the pipe currently connected to the panel fitter,
++ * or -1 if the panel fitter is not present or not in use
++ */
++static int intel_panel_fitter_pipe(struct drm_device *dev)
++{
++	struct drm_i915_private *dev_priv = dev->dev_private;
++	u32  pfit_control;
++
++	/* i830 doesn't have a panel fitter */
++	if (IS_I830(dev))
++		return -1;
++
++	pfit_control = I915_READ(PFIT_CONTROL);
++
++	/* See if the panel fitter is in use */
++	if ((pfit_control & PFIT_ENABLE) == 0)
++		return -1;
++
++	/* 965 can place panel fitter on either pipe */
++	if (IS_GEN4(dev))
++		return (pfit_control >> 29) & 0x3;
++
++	/* older chips can only use pipe 1 */
++	return 1;
++}
++
+ int intel_overlay_put_image(struct drm_device *dev, void *data,
+                             struct drm_file *file_priv)
+ {
+@@ -1068,7 +1105,7 @@ int intel_overlay_put_image(struct drm_device *dev, void *data,
+ 		mutex_lock(&dev->mode_config.mutex);
+ 		mutex_lock(&dev->struct_mutex);
+ 
+-		ret = intel_overlay_switch_off(overlay);
++		ret = intel_overlay_switch_off(overlay, true);
+ 
+ 		mutex_unlock(&dev->struct_mutex);
+ 		mutex_unlock(&dev->mode_config.mutex);
+@@ -1081,7 +1118,7 @@ int intel_overlay_put_image(struct drm_device *dev, void *data,
+ 		return -ENOMEM;
+ 
+ 	drmmode_obj = drm_mode_object_find(dev, put_image_rec->crtc_id,
+-                        DRM_MODE_OBJECT_CRTC);
++					   DRM_MODE_OBJECT_CRTC);
+ 	if (!drmmode_obj) {
+ 		ret = -ENOENT;
+ 		goto out_free;
+@@ -1089,7 +1126,7 @@ int intel_overlay_put_image(struct drm_device *dev, void *data,
+ 	crtc = to_intel_crtc(obj_to_crtc(drmmode_obj));
+ 
+ 	new_bo = drm_gem_object_lookup(dev, file_priv,
+-			put_image_rec->bo_handle);
++				       put_image_rec->bo_handle);
+ 	if (!new_bo) {
+ 		ret = -ENOENT;
+ 		goto out_free;
+@@ -1098,15 +1135,13 @@ int intel_overlay_put_image(struct drm_device *dev, void *data,
+ 	mutex_lock(&dev->mode_config.mutex);
+ 	mutex_lock(&dev->struct_mutex);
+ 
+-	if (overlay->hw_wedged) {
+-		ret = intel_overlay_recover_from_interrupt(overlay, 1);
+-		if (ret != 0)
+-			goto out_unlock;
+-	}
++	ret = intel_overlay_recover_from_interrupt(overlay, true);
++	if (ret != 0)
++		goto out_unlock;
+ 
+ 	if (overlay->crtc != crtc) {
+ 		struct drm_display_mode *mode = &crtc->base.mode;
+-		ret = intel_overlay_switch_off(overlay);
++		ret = intel_overlay_switch_off(overlay, true);
+ 		if (ret != 0)
+ 			goto out_unlock;
+ 
+@@ -1117,9 +1152,9 @@ int intel_overlay_put_image(struct drm_device *dev, void *data,
+ 		overlay->crtc = crtc;
+ 		crtc->overlay = overlay;
+ 
+-		if (intel_panel_fitter_pipe(dev) == crtc->pipe
+-		    /* and line to wide, i.e. one-line-mode */
+-		    && mode->hdisplay > 1024) {
++		/* line too wide, i.e. one-line-mode */
++		if (mode->hdisplay > 1024 &&
++		    intel_panel_fitter_pipe(dev) == crtc->pipe) {
+ 			overlay->pfit_active = 1;
+ 			update_pfit_vscale_ratio(overlay);
+ 		} else
+@@ -1132,10 +1167,10 @@ int intel_overlay_put_image(struct drm_device *dev, void *data,
+ 
+ 	if (overlay->pfit_active) {
+ 		params->dst_y = ((((u32)put_image_rec->dst_y) << 12) /
+-			overlay->pfit_vscale_ratio);
++				 overlay->pfit_vscale_ratio);
+ 		/* shifting right rounds downwards, so add 1 */
+ 		params->dst_h = ((((u32)put_image_rec->dst_height) << 12) /
+-			overlay->pfit_vscale_ratio) + 1;
++				 overlay->pfit_vscale_ratio) + 1;
+ 	} else {
+ 		params->dst_y = put_image_rec->dst_y;
+ 		params->dst_h = put_image_rec->dst_height;
+@@ -1147,8 +1182,8 @@ int intel_overlay_put_image(struct drm_device *dev, void *data,
+ 	params->src_h = put_image_rec->src_height;
+ 	params->src_scan_w = put_image_rec->src_scan_width;
+ 	params->src_scan_h = put_image_rec->src_scan_height;
+-	if (params->src_scan_h > params->src_h
+-	    || params->src_scan_w > params->src_w) {
++	if (params->src_scan_h > params->src_h ||
++	    params->src_scan_w > params->src_w) {
+ 		ret = -EINVAL;
+ 		goto out_unlock;
+ 	}
+@@ -1204,7 +1239,7 @@ static bool check_gamma_bounds(u32 gamma1, u32 gamma2)
+ 		return false;
+ 
+ 	for (i = 0; i < 3; i++) {
+-		if (((gamma1 >> i * 8) & 0xff) >= ((gamma2 >> i*8) & 0xff))
++		if (((gamma1 >> i*8) & 0xff) >= ((gamma2 >> i*8) & 0xff))
+ 			return false;
+ 	}
+ 
+@@ -1225,16 +1260,18 @@ static bool check_gamma5_errata(u32 gamma5)
+ 
+ static int check_gamma(struct drm_intel_overlay_attrs *attrs)
+ {
+-	if (!check_gamma_bounds(0, attrs->gamma0)
+-	    || !check_gamma_bounds(attrs->gamma0, attrs->gamma1)
+-	    || !check_gamma_bounds(attrs->gamma1, attrs->gamma2)
+-	    || !check_gamma_bounds(attrs->gamma2, attrs->gamma3)
+-	    || !check_gamma_bounds(attrs->gamma3, attrs->gamma4)
+-	    || !check_gamma_bounds(attrs->gamma4, attrs->gamma5)
+-	    || !check_gamma_bounds(attrs->gamma5, 0x00ffffff))
++	if (!check_gamma_bounds(0, attrs->gamma0) ||
++	    !check_gamma_bounds(attrs->gamma0, attrs->gamma1) ||
++	    !check_gamma_bounds(attrs->gamma1, attrs->gamma2) ||
++	    !check_gamma_bounds(attrs->gamma2, attrs->gamma3) ||
++	    !check_gamma_bounds(attrs->gamma3, attrs->gamma4) ||
++	    !check_gamma_bounds(attrs->gamma4, attrs->gamma5) ||
++	    !check_gamma_bounds(attrs->gamma5, 0x00ffffff))
+ 		return -EINVAL;
++
+ 	if (!check_gamma5_errata(attrs->gamma5))
+ 		return -EINVAL;
++
+ 	return 0;
+ }
+ 
+@@ -1261,13 +1298,14 @@ int intel_overlay_attrs(struct drm_device *dev, void *data,
+ 	mutex_lock(&dev->mode_config.mutex);
+ 	mutex_lock(&dev->struct_mutex);
+ 
++	ret = -EINVAL;
+ 	if (!(attrs->flags & I915_OVERLAY_UPDATE_ATTRS)) {
+-		attrs->color_key = overlay->color_key;
++		attrs->color_key  = overlay->color_key;
+ 		attrs->brightness = overlay->brightness;
+-		attrs->contrast = overlay->contrast;
++		attrs->contrast   = overlay->contrast;
+ 		attrs->saturation = overlay->saturation;
+ 
+-		if (IS_I9XX(dev)) {
++		if (!IS_GEN2(dev)) {
+ 			attrs->gamma0 = I915_READ(OGAMC0);
+ 			attrs->gamma1 = I915_READ(OGAMC1);
+ 			attrs->gamma2 = I915_READ(OGAMC2);
+@@ -1275,29 +1313,20 @@ int intel_overlay_attrs(struct drm_device *dev, void *data,
+ 			attrs->gamma4 = I915_READ(OGAMC4);
+ 			attrs->gamma5 = I915_READ(OGAMC5);
+ 		}
+-		ret = 0;
+ 	} else {
+-		overlay->color_key = attrs->color_key;
+-		if (attrs->brightness >= -128 && attrs->brightness <= 127) {
+-			overlay->brightness = attrs->brightness;
+-		} else {
+-			ret = -EINVAL;
++		if (attrs->brightness < -128 || attrs->brightness > 127)
+ 			goto out_unlock;
+-		}
+-		if (attrs->contrast <= 255) {
+-			overlay->contrast = attrs->contrast;
+-		} else {
+-			ret = -EINVAL;
++		if (attrs->contrast > 255)
+ 			goto out_unlock;
+-		}
+-		if (attrs->saturation <= 1023) {
+-			overlay->saturation = attrs->saturation;
+-		} else {
+-			ret = -EINVAL;
++		if (attrs->saturation > 1023)
+ 			goto out_unlock;
+-		}
+ 
+-		regs = intel_overlay_map_regs_atomic(overlay);
++		overlay->color_key  = attrs->color_key;
++		overlay->brightness = attrs->brightness;
++		overlay->contrast   = attrs->contrast;
++		overlay->saturation = attrs->saturation;
++
++		regs = intel_overlay_map_regs(overlay);
+ 		if (!regs) {
+ 			ret = -ENOMEM;
+ 			goto out_unlock;
+@@ -1305,13 +1334,11 @@ int intel_overlay_attrs(struct drm_device *dev, void *data,
+ 
+ 		update_reg_attrs(overlay, regs);
+ 
+-		intel_overlay_unmap_regs_atomic(overlay);
++		intel_overlay_unmap_regs(overlay, regs);
+ 
+ 		if (attrs->flags & I915_OVERLAY_UPDATE_GAMMA) {
+-			if (!IS_I9XX(dev)) {
+-				ret = -EINVAL;
++			if (IS_GEN2(dev))
+ 				goto out_unlock;
+-			}
+ 
+ 			if (overlay->active) {
+ 				ret = -EBUSY;
+@@ -1319,7 +1346,7 @@ int intel_overlay_attrs(struct drm_device *dev, void *data,
+ 			}
+ 
+ 			ret = check_gamma(attrs);
+-			if (ret != 0)
++			if (ret)
+ 				goto out_unlock;
+ 
+ 			I915_WRITE(OGAMC0, attrs->gamma0);
+@@ -1329,9 +1356,9 @@ int intel_overlay_attrs(struct drm_device *dev, void *data,
+ 			I915_WRITE(OGAMC4, attrs->gamma4);
+ 			I915_WRITE(OGAMC5, attrs->gamma5);
+ 		}
+-		ret = 0;
+ 	}
+ 
++	ret = 0;
+ out_unlock:
+ 	mutex_unlock(&dev->struct_mutex);
+ 	mutex_unlock(&dev->mode_config.mutex);
+@@ -1347,7 +1374,7 @@ void intel_setup_overlay(struct drm_device *dev)
+ 	struct overlay_registers *regs;
+ 	int ret;
+ 
+-	if (!OVERLAY_EXISTS(dev))
++	if (!HAS_OVERLAY(dev))
+ 		return;
+ 
+ 	overlay = kzalloc(sizeof(struct intel_overlay), GFP_KERNEL);
+@@ -1360,22 +1387,28 @@ void intel_setup_overlay(struct drm_device *dev)
+ 		goto out_free;
+ 	overlay->reg_bo = to_intel_bo(reg_bo);
+ 
+-	if (OVERLAY_NONPHYSICAL(dev)) {
+-		ret = i915_gem_object_pin(reg_bo, PAGE_SIZE);
+-		if (ret) {
+-                        DRM_ERROR("failed to pin overlay register bo\n");
+-                        goto out_free_bo;
+-                }
+-		overlay->flip_addr = overlay->reg_bo->gtt_offset;
+-	} else {
++	if (OVERLAY_NEEDS_PHYSICAL(dev)) {
+ 		ret = i915_gem_attach_phys_object(dev, reg_bo,
+ 						  I915_GEM_PHYS_OVERLAY_REGS,
+-						  0);
++						  PAGE_SIZE);
+                 if (ret) {
+                         DRM_ERROR("failed to attach phys overlay regs\n");
+                         goto out_free_bo;
+                 }
+ 		overlay->flip_addr = overlay->reg_bo->phys_obj->handle->busaddr;
++	} else {
++		ret = i915_gem_object_pin(reg_bo, PAGE_SIZE);
++		if (ret) {
++                        DRM_ERROR("failed to pin overlay register bo\n");
++                        goto out_free_bo;
++                }
++		overlay->flip_addr = overlay->reg_bo->gtt_offset;
++
++		ret = i915_gem_object_set_to_gtt_domain(reg_bo, true);
++		if (ret) {
++                        DRM_ERROR("failed to move overlay register bo into the GTT\n");
++                        goto out_unpin_bo;
++                }
+ 	}
+ 
+ 	/* init all values */
+@@ -1384,21 +1417,22 @@ void intel_setup_overlay(struct drm_device *dev)
+ 	overlay->contrast = 75;
+ 	overlay->saturation = 146;
+ 
+-	regs = intel_overlay_map_regs_atomic(overlay);
++	regs = intel_overlay_map_regs(overlay);
+ 	if (!regs)
+ 		goto out_free_bo;
+ 
+ 	memset(regs, 0, sizeof(struct overlay_registers));
+ 	update_polyphase_filter(regs);
+-
+ 	update_reg_attrs(overlay, regs);
+ 
+-	intel_overlay_unmap_regs_atomic(overlay);
++	intel_overlay_unmap_regs(overlay, regs);
+ 
+ 	dev_priv->overlay = overlay;
+ 	DRM_INFO("initialized overlay support\n");
+ 	return;
+ 
++out_unpin_bo:
++	i915_gem_object_unpin(reg_bo);
+ out_free_bo:
+ 	drm_gem_object_unreference(reg_bo);
+ out_free:
+@@ -1408,18 +1442,23 @@ out_free:
+ 
+ void intel_cleanup_overlay(struct drm_device *dev)
+ {
+-        drm_i915_private_t *dev_priv = dev->dev_private;
++	drm_i915_private_t *dev_priv = dev->dev_private;
+ 
+-	if (dev_priv->overlay) {
+-		/* The bo's should be free'd by the generic code already.
+-		 * Furthermore modesetting teardown happens beforehand so the
+-		 * hardware should be off already */
+-		BUG_ON(dev_priv->overlay->active);
++	if (!dev_priv->overlay)
++		return;
+ 
+-		kfree(dev_priv->overlay);
+-	}
++	/* The bo's should be free'd by the generic code already.
++	 * Furthermore modesetting teardown happens beforehand so the
++	 * hardware should be off already */
++	BUG_ON(dev_priv->overlay->active);
++
++	drm_gem_object_unreference_unlocked(&dev_priv->overlay->reg_bo->base);
++	kfree(dev_priv->overlay);
+ }
+ 
++#ifdef CONFIG_DEBUG_FS
++#include <linux/seq_file.h>
++
+ struct intel_overlay_error_state {
+ 	struct overlay_registers regs;
+ 	unsigned long base;
+@@ -1427,6 +1466,30 @@ struct intel_overlay_error_state {
+ 	u32 isr;
+ };
+ 
++static struct overlay_registers *
++intel_overlay_map_regs_atomic(struct intel_overlay *overlay)
++{
++	drm_i915_private_t *dev_priv = overlay->dev->dev_private;
++	struct overlay_registers *regs;
++
++	if (OVERLAY_NEEDS_PHYSICAL(overlay->dev))
++		regs = overlay->reg_bo->phys_obj->handle->vaddr;
++	else
++		regs = io_mapping_map_atomic_wc(dev_priv->mm.gtt_mapping,
++						overlay->reg_bo->gtt_offset,
++						KM_USER0);
++
++	return regs;
++}
++
++static void intel_overlay_unmap_regs_atomic(struct intel_overlay *overlay,
++					    struct overlay_registers *regs)
++{
++	if (!OVERLAY_NEEDS_PHYSICAL(overlay->dev))
++		io_mapping_unmap_atomic(regs, KM_USER0);
++}
++
++
+ struct intel_overlay_error_state *
+ intel_overlay_capture_error_state(struct drm_device *dev)
+ {
+@@ -1444,17 +1507,17 @@ intel_overlay_capture_error_state(struct drm_device *dev)
+ 
+ 	error->dovsta = I915_READ(DOVSTA);
+ 	error->isr = I915_READ(ISR);
+-	if (OVERLAY_NONPHYSICAL(overlay->dev))
+-		error->base = (long) overlay->reg_bo->gtt_offset;
+-	else
++	if (OVERLAY_NEEDS_PHYSICAL(overlay->dev))
+ 		error->base = (long) overlay->reg_bo->phys_obj->handle->vaddr;
++	else
++		error->base = (long) overlay->reg_bo->gtt_offset;
+ 
+ 	regs = intel_overlay_map_regs_atomic(overlay);
+ 	if (!regs)
+ 		goto err;
+ 
+ 	memcpy_fromio(&error->regs, regs, sizeof(struct overlay_registers));
+-	intel_overlay_unmap_regs_atomic(overlay);
++	intel_overlay_unmap_regs_atomic(overlay, regs);
+ 
+ 	return error;
+ 
+@@ -1515,3 +1578,4 @@ intel_overlay_print_error_state(struct seq_file *m, struct intel_overlay_error_s
+ 	P(UVSCALEV);
+ #undef P
+ }
++#endif
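A condensed sketch (not part of the patch above) of the request/wait pattern the overlay rework introduces: requests are preallocated and a flip_tail callback replaces the old hw_wedged recovery states. The helper name below is hypothetical and the ring emission is elided; only functions that appear in the hunks above are used.

static int overlay_emit_and_wait(struct intel_overlay *overlay,
				 bool interruptible)
{
	struct drm_i915_gem_request *request;

	/* The request is allocated up front, so the wait path has no
	 * hidden -ENOMEM failure after commands have been emitted. */
	request = kzalloc(sizeof(*request), GFP_KERNEL);
	if (request == NULL)
		return -ENOMEM;

	/* ... emit the MI_OVERLAY_* commands on the LP ring here ... */

	/* The tail callback runs once the request completes, replacing
	 * the old hw_wedged recovery states; compare intel_overlay_off(). */
	return intel_overlay_do_wait_request(overlay, request, interruptible,
					     intel_overlay_off_tail);
}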
+diff --git a/drivers/gpu/drm/i915/intel_panel.c b/drivers/gpu/drm/i915/intel_panel.c
+index e7f5299..92ff8f3 100644
+--- a/drivers/gpu/drm/i915/intel_panel.c
++++ b/drivers/gpu/drm/i915/intel_panel.c
+@@ -30,6 +30,8 @@
+ 
+ #include "intel_drv.h"
+ 
++#define PCI_LBPC 0xf4 /* legacy/combination backlight modes */
++
+ void
+ intel_fixed_panel_mode(struct drm_display_mode *fixed_mode,
+ 		       struct drm_display_mode *adjusted_mode)
+@@ -109,3 +111,110 @@ done:
+ 	dev_priv->pch_pf_pos = (x << 16) | y;
+ 	dev_priv->pch_pf_size = (width << 16) | height;
+ }
++
++static int is_backlight_combination_mode(struct drm_device *dev)
++{
++	struct drm_i915_private *dev_priv = dev->dev_private;
++
++	if (INTEL_INFO(dev)->gen >= 4)
++		return I915_READ(BLC_PWM_CTL2) & BLM_COMBINATION_MODE;
++
++	if (IS_GEN2(dev))
++		return I915_READ(BLC_PWM_CTL) & BLM_LEGACY_MODE;
++
++	return 0;
++}
++
++u32 intel_panel_get_max_backlight(struct drm_device *dev)
++{
++	struct drm_i915_private *dev_priv = dev->dev_private;
++	u32 max;
++
++	if (HAS_PCH_SPLIT(dev)) {
++		max = I915_READ(BLC_PWM_PCH_CTL2) >> 16;
++	} else {
++		max = I915_READ(BLC_PWM_CTL);
++		if (IS_PINEVIEW(dev)) {
++			max >>= 17;
++		} else {
++			max >>= 16;
++			if (INTEL_INFO(dev)->gen < 4)
++				max &= ~1;
++		}
++
++		if (is_backlight_combination_mode(dev))
++			max *= 0xff;
++	}
++
++	if (max == 0) {
++		/* XXX add code here to query mode clock or hardware clock
++		 * and program max PWM appropriately.
++		 */
++		DRM_ERROR("fixme: max PWM is zero.\n");
++		max = 1;
++	}
++
++	DRM_DEBUG_DRIVER("max backlight PWM = %d\n", max);
++	return max;
++}
++
++u32 intel_panel_get_backlight(struct drm_device *dev)
++{
++	struct drm_i915_private *dev_priv = dev->dev_private;
++	u32 val;
++
++	if (HAS_PCH_SPLIT(dev)) {
++		val = I915_READ(BLC_PWM_CPU_CTL) & BACKLIGHT_DUTY_CYCLE_MASK;
++	} else {
++		val = I915_READ(BLC_PWM_CTL) & BACKLIGHT_DUTY_CYCLE_MASK;
++		if (IS_PINEVIEW(dev))
++			val >>= 1;
++
++		if (is_backlight_combination_mode(dev)) {
++			u8 lbpc;
++
++			val &= ~1;
++			pci_read_config_byte(dev->pdev, PCI_LBPC, &lbpc);
++			val *= lbpc;
++			val >>= 1;
++		}
++	}
++
++	DRM_DEBUG_DRIVER("get backlight PWM = %d\n", val);
++	return val;
++}
++
++static void intel_pch_panel_set_backlight(struct drm_device *dev, u32 level)
++{
++	struct drm_i915_private *dev_priv = dev->dev_private;
++	u32 val = I915_READ(BLC_PWM_CPU_CTL) & ~BACKLIGHT_DUTY_CYCLE_MASK;
++	I915_WRITE(BLC_PWM_CPU_CTL, val | level);
++}
++
++void intel_panel_set_backlight(struct drm_device *dev, u32 level)
++{
++	struct drm_i915_private *dev_priv = dev->dev_private;
++	u32 tmp;
++
++	DRM_DEBUG_DRIVER("set backlight PWM = %d\n", level);
++
++	if (HAS_PCH_SPLIT(dev))
++		return intel_pch_panel_set_backlight(dev, level);
++
++	if (is_backlight_combination_mode(dev)) {
++		u32 max = intel_panel_get_max_backlight(dev);
++		u8 lpbc;
++
++		lpbc = level * 0xfe / max + 1;
++		level /= lpbc;
++		pci_write_config_byte(dev->pdev, PCI_LBPC, lpbc);
++	}
++
++	tmp = I915_READ(BLC_PWM_CTL);
++	if (IS_PINEVIEW(dev)) {
++		tmp &= ~(BACKLIGHT_DUTY_CYCLE_MASK - 1);
++		level <<= 1;
++	} else
++		tmp &= ~BACKLIGHT_DUTY_CYCLE_MASK;
++	I915_WRITE(BLC_PWM_CTL, tmp | level);
++}
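A small sketch (not part of the patch) of the combination-mode split performed by intel_panel_set_backlight() above: the coarse factor lands in the PCI_LBPC config byte and the remainder in the BLC_PWM_CTL duty-cycle field. The helper name is illustrative only; the arithmetic mirrors the hunk above.

static void split_combined_level(u32 level, u32 max, u8 *lbpc, u32 *duty)
{
	/* Coarse scale factor, kept in 1..255 so the division below
	 * never sees zero; this is the value written to PCI_LBPC. */
	*lbpc = level * 0xfe / max + 1;
	/* Remaining fine-grained duty cycle for BLC_PWM_CTL. */
	*duty = level / *lbpc;
}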
+diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
+index cb3508f..b83306f 100644
+--- a/drivers/gpu/drm/i915/intel_ringbuffer.c
++++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
+@@ -32,6 +32,7 @@
+ #include "i915_drv.h"
+ #include "i915_drm.h"
+ #include "i915_trace.h"
++#include "intel_drv.h"
+ 
+ static u32 i915_gem_get_seqno(struct drm_device *dev)
+ {
+@@ -49,9 +50,9 @@ static u32 i915_gem_get_seqno(struct drm_device *dev)
+ 
+ static void
+ render_ring_flush(struct drm_device *dev,
+-		struct intel_ring_buffer *ring,
+-		u32	invalidate_domains,
+-		u32	flush_domains)
++		  struct intel_ring_buffer *ring,
++		  u32	invalidate_domains,
++		  u32	flush_domains)
+ {
+ 	drm_i915_private_t *dev_priv = dev->dev_private;
+ 	u32 cmd;
+@@ -97,7 +98,7 @@ render_ring_flush(struct drm_device *dev,
+ 		if ((invalidate_domains|flush_domains) &
+ 		    I915_GEM_DOMAIN_RENDER)
+ 			cmd &= ~MI_NO_WRITE_FLUSH;
+-		if (!IS_I965G(dev)) {
++		if (INTEL_INFO(dev)->gen < 4) {
+ 			/*
+ 			 * On the 965, the sampler cache always gets flushed
+ 			 * and this bit is reserved.
+@@ -118,38 +119,26 @@ render_ring_flush(struct drm_device *dev,
+ 	}
+ }
+ 
+-static unsigned int render_ring_get_head(struct drm_device *dev,
+-		struct intel_ring_buffer *ring)
+-{
+-	drm_i915_private_t *dev_priv = dev->dev_private;
+-	return I915_READ(PRB0_HEAD) & HEAD_ADDR;
+-}
+-
+-static unsigned int render_ring_get_tail(struct drm_device *dev,
+-		struct intel_ring_buffer *ring)
++static void ring_write_tail(struct drm_device *dev,
++			    struct intel_ring_buffer *ring,
++			    u32 value)
+ {
+ 	drm_i915_private_t *dev_priv = dev->dev_private;
+-	return I915_READ(PRB0_TAIL) & TAIL_ADDR;
++	I915_WRITE_TAIL(ring, value);
+ }
+ 
+-static unsigned int render_ring_get_active_head(struct drm_device *dev,
+-		struct intel_ring_buffer *ring)
++u32 intel_ring_get_active_head(struct drm_device *dev,
++			       struct intel_ring_buffer *ring)
+ {
+ 	drm_i915_private_t *dev_priv = dev->dev_private;
+-	u32 acthd_reg = IS_I965G(dev) ? ACTHD_I965 : ACTHD;
++	u32 acthd_reg = INTEL_INFO(dev)->gen >= 4 ?
++			RING_ACTHD(ring->mmio_base) : ACTHD;
+ 
+ 	return I915_READ(acthd_reg);
+ }
+ 
+-static void render_ring_advance_ring(struct drm_device *dev,
+-		struct intel_ring_buffer *ring)
+-{
+-	drm_i915_private_t *dev_priv = dev->dev_private;
+-	I915_WRITE(PRB0_TAIL, ring->tail);
+-}
+-
+ static int init_ring_common(struct drm_device *dev,
+-		struct intel_ring_buffer *ring)
++			    struct intel_ring_buffer *ring)
+ {
+ 	u32 head;
+ 	drm_i915_private_t *dev_priv = dev->dev_private;
+@@ -157,57 +146,57 @@ static int init_ring_common(struct drm_device *dev,
+ 	obj_priv = to_intel_bo(ring->gem_object);
+ 
+ 	/* Stop the ring if it's running. */
+-	I915_WRITE(ring->regs.ctl, 0);
+-	I915_WRITE(ring->regs.head, 0);
+-	I915_WRITE(ring->regs.tail, 0);
++	I915_WRITE_CTL(ring, 0);
++	I915_WRITE_HEAD(ring, 0);
++	ring->write_tail(dev, ring, 0);
+ 
+ 	/* Initialize the ring. */
+-	I915_WRITE(ring->regs.start, obj_priv->gtt_offset);
+-	head = ring->get_head(dev, ring);
++	I915_WRITE_START(ring, obj_priv->gtt_offset);
++	head = I915_READ_HEAD(ring) & HEAD_ADDR;
+ 
+ 	/* G45 ring initialization fails to reset head to zero */
+ 	if (head != 0) {
+ 		DRM_ERROR("%s head not reset to zero "
+ 				"ctl %08x head %08x tail %08x start %08x\n",
+ 				ring->name,
+-				I915_READ(ring->regs.ctl),
+-				I915_READ(ring->regs.head),
+-				I915_READ(ring->regs.tail),
+-				I915_READ(ring->regs.start));
++				I915_READ_CTL(ring),
++				I915_READ_HEAD(ring),
++				I915_READ_TAIL(ring),
++				I915_READ_START(ring));
+ 
+-		I915_WRITE(ring->regs.head, 0);
++		I915_WRITE_HEAD(ring, 0);
+ 
+ 		DRM_ERROR("%s head forced to zero "
+ 				"ctl %08x head %08x tail %08x start %08x\n",
+ 				ring->name,
+-				I915_READ(ring->regs.ctl),
+-				I915_READ(ring->regs.head),
+-				I915_READ(ring->regs.tail),
+-				I915_READ(ring->regs.start));
++				I915_READ_CTL(ring),
++				I915_READ_HEAD(ring),
++				I915_READ_TAIL(ring),
++				I915_READ_START(ring));
+ 	}
+ 
+-	I915_WRITE(ring->regs.ctl,
++	I915_WRITE_CTL(ring,
+ 			((ring->gem_object->size - PAGE_SIZE) & RING_NR_PAGES)
+-			| RING_NO_REPORT | RING_VALID);
++			| RING_REPORT_64K | RING_VALID);
+ 
+-	head = I915_READ(ring->regs.head) & HEAD_ADDR;
++	head = I915_READ_HEAD(ring) & HEAD_ADDR;
+ 	/* If the head is still not zero, the ring is dead */
+ 	if (head != 0) {
+ 		DRM_ERROR("%s initialization failed "
+ 				"ctl %08x head %08x tail %08x start %08x\n",
+ 				ring->name,
+-				I915_READ(ring->regs.ctl),
+-				I915_READ(ring->regs.head),
+-				I915_READ(ring->regs.tail),
+-				I915_READ(ring->regs.start));
++				I915_READ_CTL(ring),
++				I915_READ_HEAD(ring),
++				I915_READ_TAIL(ring),
++				I915_READ_START(ring));
+ 		return -EIO;
+ 	}
+ 
+ 	if (!drm_core_check_feature(dev, DRIVER_MODESET))
+ 		i915_kernel_lost_context(dev);
+ 	else {
+-		ring->head = ring->get_head(dev, ring);
+-		ring->tail = ring->get_tail(dev, ring);
++		ring->head = I915_READ_HEAD(ring) & HEAD_ADDR;
++		ring->tail = I915_READ_TAIL(ring) & TAIL_ADDR;
+ 		ring->space = ring->head - (ring->tail + 8);
+ 		if (ring->space < 0)
+ 			ring->space += ring->size;
+@@ -216,13 +205,13 @@ static int init_ring_common(struct drm_device *dev,
+ }
+ 
+ static int init_render_ring(struct drm_device *dev,
+-		struct intel_ring_buffer *ring)
++			    struct intel_ring_buffer *ring)
+ {
+ 	drm_i915_private_t *dev_priv = dev->dev_private;
+ 	int ret = init_ring_common(dev, ring);
+ 	int mode;
+ 
+-	if (IS_I9XX(dev) && !IS_GEN3(dev)) {
++	if (INTEL_INFO(dev)->gen > 3) {
+ 		mode = VS_TIMER_DISPATCH << 16 | VS_TIMER_DISPATCH;
+ 		if (IS_GEN6(dev))
+ 			mode |= MI_FLUSH_ENABLE << 16 | MI_FLUSH_ENABLE;
+@@ -250,9 +239,8 @@ do {									\
+  */
+ static u32
+ render_ring_add_request(struct drm_device *dev,
+-		struct intel_ring_buffer *ring,
+-		struct drm_file *file_priv,
+-		u32 flush_domains)
++			struct intel_ring_buffer *ring,
++			u32 flush_domains)
+ {
+ 	drm_i915_private_t *dev_priv = dev->dev_private;
+ 	u32 seqno;
+@@ -315,8 +303,8 @@ render_ring_add_request(struct drm_device *dev,
+ }
+ 
+ static u32
+-render_ring_get_gem_seqno(struct drm_device *dev,
+-		struct intel_ring_buffer *ring)
++render_ring_get_seqno(struct drm_device *dev,
++		      struct intel_ring_buffer *ring)
+ {
+ 	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
+ 	if (HAS_PIPE_CONTROL(dev))
+@@ -327,7 +315,7 @@ render_ring_get_gem_seqno(struct drm_device *dev,
+ 
+ static void
+ render_ring_get_user_irq(struct drm_device *dev,
+-		struct intel_ring_buffer *ring)
++			 struct intel_ring_buffer *ring)
+ {
+ 	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
+ 	unsigned long irqflags;
+@@ -344,7 +332,7 @@ render_ring_get_user_irq(struct drm_device *dev,
+ 
+ static void
+ render_ring_put_user_irq(struct drm_device *dev,
+-		struct intel_ring_buffer *ring)
++			 struct intel_ring_buffer *ring)
+ {
+ 	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
+ 	unsigned long irqflags;
+@@ -360,21 +348,23 @@ render_ring_put_user_irq(struct drm_device *dev,
+ 	spin_unlock_irqrestore(&dev_priv->user_irq_lock, irqflags);
+ }
+ 
+-static void render_setup_status_page(struct drm_device *dev,
+-	struct	intel_ring_buffer *ring)
++void intel_ring_setup_status_page(struct drm_device *dev,
++				  struct intel_ring_buffer *ring)
+ {
+ 	drm_i915_private_t *dev_priv = dev->dev_private;
+ 	if (IS_GEN6(dev)) {
+-		I915_WRITE(HWS_PGA_GEN6, ring->status_page.gfx_addr);
+-		I915_READ(HWS_PGA_GEN6); /* posting read */
++		I915_WRITE(RING_HWS_PGA_GEN6(ring->mmio_base),
++			   ring->status_page.gfx_addr);
++		I915_READ(RING_HWS_PGA_GEN6(ring->mmio_base)); /* posting read */
+ 	} else {
+-		I915_WRITE(HWS_PGA, ring->status_page.gfx_addr);
+-		I915_READ(HWS_PGA); /* posting read */
++		I915_WRITE(RING_HWS_PGA(ring->mmio_base),
++			   ring->status_page.gfx_addr);
++		I915_READ(RING_HWS_PGA(ring->mmio_base)); /* posting read */
+ 	}
+ 
+ }
+ 
+-void
++static void
+ bsd_ring_flush(struct drm_device *dev,
+ 		struct intel_ring_buffer *ring,
+ 		u32     invalidate_domains,
+@@ -386,45 +376,16 @@ bsd_ring_flush(struct drm_device *dev,
+ 	intel_ring_advance(dev, ring);
+ }
+ 
+-static inline unsigned int bsd_ring_get_head(struct drm_device *dev,
+-		struct intel_ring_buffer *ring)
+-{
+-	drm_i915_private_t *dev_priv = dev->dev_private;
+-	return I915_READ(BSD_RING_HEAD) & HEAD_ADDR;
+-}
+-
+-static inline unsigned int bsd_ring_get_tail(struct drm_device *dev,
+-		struct intel_ring_buffer *ring)
+-{
+-	drm_i915_private_t *dev_priv = dev->dev_private;
+-	return I915_READ(BSD_RING_TAIL) & TAIL_ADDR;
+-}
+-
+-static inline unsigned int bsd_ring_get_active_head(struct drm_device *dev,
+-		struct intel_ring_buffer *ring)
+-{
+-	drm_i915_private_t *dev_priv = dev->dev_private;
+-	return I915_READ(BSD_RING_ACTHD);
+-}
+-
+-static inline void bsd_ring_advance_ring(struct drm_device *dev,
+-		struct intel_ring_buffer *ring)
+-{
+-	drm_i915_private_t *dev_priv = dev->dev_private;
+-	I915_WRITE(BSD_RING_TAIL, ring->tail);
+-}
+-
+ static int init_bsd_ring(struct drm_device *dev,
+-		struct intel_ring_buffer *ring)
++			 struct intel_ring_buffer *ring)
+ {
+ 	return init_ring_common(dev, ring);
+ }
+ 
+ static u32
+-bsd_ring_add_request(struct drm_device *dev,
+-		struct intel_ring_buffer *ring,
+-		struct drm_file *file_priv,
+-		u32 flush_domains)
++ring_add_request(struct drm_device *dev,
++		 struct intel_ring_buffer *ring,
++		 u32 flush_domains)
+ {
+ 	u32 seqno;
+ 
+@@ -443,40 +404,32 @@ bsd_ring_add_request(struct drm_device *dev,
+ 	return seqno;
+ }
+ 
+-static void bsd_setup_status_page(struct drm_device *dev,
+-		struct  intel_ring_buffer *ring)
+-{
+-	drm_i915_private_t *dev_priv = dev->dev_private;
+-	I915_WRITE(BSD_HWS_PGA, ring->status_page.gfx_addr);
+-	I915_READ(BSD_HWS_PGA);
+-}
+-
+ static void
+ bsd_ring_get_user_irq(struct drm_device *dev,
+-		struct intel_ring_buffer *ring)
++		      struct intel_ring_buffer *ring)
+ {
+ 	/* do nothing */
+ }
+ static void
+ bsd_ring_put_user_irq(struct drm_device *dev,
+-		struct intel_ring_buffer *ring)
++		      struct intel_ring_buffer *ring)
+ {
+ 	/* do nothing */
+ }
+ 
+ static u32
+-bsd_ring_get_gem_seqno(struct drm_device *dev,
+-		struct intel_ring_buffer *ring)
++ring_status_page_get_seqno(struct drm_device *dev,
++			   struct intel_ring_buffer *ring)
+ {
+ 	return intel_read_status_page(ring, I915_GEM_HWS_INDEX);
+ }
+ 
+ static int
+-bsd_ring_dispatch_gem_execbuffer(struct drm_device *dev,
+-		struct intel_ring_buffer *ring,
+-		struct drm_i915_gem_execbuffer2 *exec,
+-		struct drm_clip_rect *cliprects,
+-		uint64_t exec_offset)
++ring_dispatch_gem_execbuffer(struct drm_device *dev,
++			     struct intel_ring_buffer *ring,
++			     struct drm_i915_gem_execbuffer2 *exec,
++			     struct drm_clip_rect *cliprects,
++			     uint64_t exec_offset)
+ {
+ 	uint32_t exec_start;
+ 	exec_start = (uint32_t) exec_offset + exec->batch_start_offset;
+@@ -488,13 +441,12 @@ bsd_ring_dispatch_gem_execbuffer(struct drm_device *dev,
+ 	return 0;
+ }
+ 
+-
+ static int
+ render_ring_dispatch_gem_execbuffer(struct drm_device *dev,
+-		struct intel_ring_buffer *ring,
+-		struct drm_i915_gem_execbuffer2 *exec,
+-		struct drm_clip_rect *cliprects,
+-		uint64_t exec_offset)
++				    struct intel_ring_buffer *ring,
++				    struct drm_i915_gem_execbuffer2 *exec,
++				    struct drm_clip_rect *cliprects,
++				    uint64_t exec_offset)
+ {
+ 	drm_i915_private_t *dev_priv = dev->dev_private;
+ 	int nbox = exec->num_cliprects;
+@@ -523,8 +475,8 @@ render_ring_dispatch_gem_execbuffer(struct drm_device *dev,
+ 			intel_ring_emit(dev, ring, exec_start + exec_len - 4);
+ 			intel_ring_emit(dev, ring, 0);
+ 		} else {
+-			intel_ring_begin(dev, ring, 4);
+-			if (IS_I965G(dev)) {
++			intel_ring_begin(dev, ring, 2);
++			if (INTEL_INFO(dev)->gen >= 4) {
+ 				intel_ring_emit(dev, ring,
+ 						MI_BATCH_BUFFER_START | (2 << 6)
+ 						| MI_BATCH_NON_SECURE_I965);
+@@ -539,7 +491,7 @@ render_ring_dispatch_gem_execbuffer(struct drm_device *dev,
+ 		intel_ring_advance(dev, ring);
+ 	}
+ 
+-	if (IS_G4X(dev) || IS_IRONLAKE(dev)) {
++	if (IS_G4X(dev) || IS_GEN5(dev)) {
+ 		intel_ring_begin(dev, ring, 2);
+ 		intel_ring_emit(dev, ring, MI_FLUSH |
+ 				MI_NO_WRITE_FLUSH |
+@@ -553,7 +505,7 @@ render_ring_dispatch_gem_execbuffer(struct drm_device *dev,
+ }
+ 
+ static void cleanup_status_page(struct drm_device *dev,
+-		struct intel_ring_buffer *ring)
++				struct intel_ring_buffer *ring)
+ {
+ 	drm_i915_private_t *dev_priv = dev->dev_private;
+ 	struct drm_gem_object *obj;
+@@ -573,7 +525,7 @@ static void cleanup_status_page(struct drm_device *dev,
+ }
+ 
+ static int init_status_page(struct drm_device *dev,
+-		struct intel_ring_buffer *ring)
++			    struct intel_ring_buffer *ring)
+ {
+ 	drm_i915_private_t *dev_priv = dev->dev_private;
+ 	struct drm_gem_object *obj;
+@@ -603,7 +555,7 @@ static int init_status_page(struct drm_device *dev,
+ 	ring->status_page.obj = obj;
+ 	memset(ring->status_page.page_addr, 0, PAGE_SIZE);
+ 
+-	ring->setup_status_page(dev, ring);
++	intel_ring_setup_status_page(dev, ring);
+ 	DRM_DEBUG_DRIVER("%s hws offset: 0x%08x\n",
+ 			ring->name, ring->status_page.gfx_addr);
+ 
+@@ -617,15 +569,18 @@ err:
+ 	return ret;
+ }
+ 
+-
+ int intel_init_ring_buffer(struct drm_device *dev,
+-		struct intel_ring_buffer *ring)
++			   struct intel_ring_buffer *ring)
+ {
++	struct drm_i915_private *dev_priv = dev->dev_private;
+ 	struct drm_i915_gem_object *obj_priv;
+ 	struct drm_gem_object *obj;
+ 	int ret;
+ 
+ 	ring->dev = dev;
++	INIT_LIST_HEAD(&ring->active_list);
++	INIT_LIST_HEAD(&ring->request_list);
++	INIT_LIST_HEAD(&ring->gpu_write_list);
+ 
+ 	if (I915_NEED_GFX_HWS(dev)) {
+ 		ret = init_status_page(dev, ring);
+@@ -642,7 +597,7 @@ int intel_init_ring_buffer(struct drm_device *dev,
+ 
+ 	ring->gem_object = obj;
+ 
+-	ret = i915_gem_object_pin(obj, ring->alignment);
++	ret = i915_gem_object_pin(obj, PAGE_SIZE);
+ 	if (ret)
+ 		goto err_unref;
+ 
+@@ -668,14 +623,12 @@ int intel_init_ring_buffer(struct drm_device *dev,
+ 	if (!drm_core_check_feature(dev, DRIVER_MODESET))
+ 		i915_kernel_lost_context(dev);
+ 	else {
+-		ring->head = ring->get_head(dev, ring);
+-		ring->tail = ring->get_tail(dev, ring);
++		ring->head = I915_READ_HEAD(ring) & HEAD_ADDR;
++		ring->tail = I915_READ_TAIL(ring) & TAIL_ADDR;
+ 		ring->space = ring->head - (ring->tail + 8);
+ 		if (ring->space < 0)
+ 			ring->space += ring->size;
+ 	}
+-	INIT_LIST_HEAD(&ring->active_list);
+-	INIT_LIST_HEAD(&ring->request_list);
+ 	return ret;
+ 
+ err_unmap:
+@@ -691,7 +644,7 @@ err_hws:
+ }
+ 
+ void intel_cleanup_ring_buffer(struct drm_device *dev,
+-		struct intel_ring_buffer *ring)
++			       struct intel_ring_buffer *ring)
+ {
+ 	if (ring->gem_object == NULL)
+ 		return;
+@@ -701,11 +654,15 @@ void intel_cleanup_ring_buffer(struct drm_device *dev,
+ 	i915_gem_object_unpin(ring->gem_object);
+ 	drm_gem_object_unreference(ring->gem_object);
+ 	ring->gem_object = NULL;
++
++	if (ring->cleanup)
++		ring->cleanup(ring);
++
+ 	cleanup_status_page(dev, ring);
+ }
+ 
+-int intel_wrap_ring_buffer(struct drm_device *dev,
+-		struct intel_ring_buffer *ring)
++static int intel_wrap_ring_buffer(struct drm_device *dev,
++				  struct intel_ring_buffer *ring)
+ {
+ 	unsigned int *virt;
+ 	int rem;
+@@ -731,14 +688,26 @@ int intel_wrap_ring_buffer(struct drm_device *dev,
+ }
+ 
+ int intel_wait_ring_buffer(struct drm_device *dev,
+-		struct intel_ring_buffer *ring, int n)
++			   struct intel_ring_buffer *ring, int n)
+ {
+ 	unsigned long end;
++	drm_i915_private_t *dev_priv = dev->dev_private;
++	u32 head;
++
++	head = intel_read_status_page(ring, 4);
++	if (head) {
++		ring->head = head & HEAD_ADDR;
++		ring->space = ring->head - (ring->tail + 8);
++		if (ring->space < 0)
++			ring->space += ring->size;
++		if (ring->space >= n)
++			return 0;
++	}
+ 
+ 	trace_i915_ring_wait_begin (dev);
+ 	end = jiffies + 3 * HZ;
+ 	do {
+-		ring->head = ring->get_head(dev, ring);
++		ring->head = I915_READ_HEAD(ring) & HEAD_ADDR;
+ 		ring->space = ring->head - (ring->tail + 8);
+ 		if (ring->space < 0)
+ 			ring->space += ring->size;
+@@ -753,14 +722,15 @@ int intel_wait_ring_buffer(struct drm_device *dev,
+ 				master_priv->sarea_priv->perf_boxes |= I915_BOX_WAIT;
+ 		}
+ 
+-		yield();
++		msleep(1);
+ 	} while (!time_after(jiffies, end));
+ 	trace_i915_ring_wait_end (dev);
+ 	return -EBUSY;
+ }
+ 
+ void intel_ring_begin(struct drm_device *dev,
+-		struct intel_ring_buffer *ring, int num_dwords)
++		      struct intel_ring_buffer *ring,
++		      int num_dwords)
+ {
+ 	int n = 4*num_dwords;
+ 	if (unlikely(ring->tail + n > ring->size))
+@@ -772,97 +742,287 @@ void intel_ring_begin(struct drm_device *dev,
+ }
+ 
+ void intel_ring_advance(struct drm_device *dev,
+-		struct intel_ring_buffer *ring)
++			struct intel_ring_buffer *ring)
+ {
+ 	ring->tail &= ring->size - 1;
+-	ring->advance_ring(dev, ring);
++	ring->write_tail(dev, ring, ring->tail);
+ }
+ 
+-void intel_fill_struct(struct drm_device *dev,
+-		struct intel_ring_buffer *ring,
+-		void *data,
+-		unsigned int len)
+-{
+-	unsigned int *virt = ring->virtual_start + ring->tail;
+-	BUG_ON((len&~(4-1)) != 0);
+-	intel_ring_begin(dev, ring, len/4);
+-	memcpy(virt, data, len);
+-	ring->tail += len;
+-	ring->tail &= ring->size - 1;
+-	ring->space -= len;
+-	intel_ring_advance(dev, ring);
+-}
+-
+-struct intel_ring_buffer render_ring = {
++static const struct intel_ring_buffer render_ring = {
+ 	.name			= "render ring",
+-	.regs                   = {
+-		.ctl = PRB0_CTL,
+-		.head = PRB0_HEAD,
+-		.tail = PRB0_TAIL,
+-		.start = PRB0_START
+-	},
+-	.ring_flag		= I915_EXEC_RENDER,
++	.id			= RING_RENDER,
++	.mmio_base		= RENDER_RING_BASE,
+ 	.size			= 32 * PAGE_SIZE,
+-	.alignment		= PAGE_SIZE,
+-	.virtual_start		= NULL,
+-	.dev			= NULL,
+-	.gem_object		= NULL,
+-	.head			= 0,
+-	.tail			= 0,
+-	.space			= 0,
+-	.user_irq_refcount	= 0,
+-	.irq_gem_seqno		= 0,
+-	.waiting_gem_seqno	= 0,
+-	.setup_status_page	= render_setup_status_page,
+ 	.init			= init_render_ring,
+-	.get_head		= render_ring_get_head,
+-	.get_tail		= render_ring_get_tail,
+-	.get_active_head	= render_ring_get_active_head,
+-	.advance_ring		= render_ring_advance_ring,
++	.write_tail		= ring_write_tail,
+ 	.flush			= render_ring_flush,
+ 	.add_request		= render_ring_add_request,
+-	.get_gem_seqno		= render_ring_get_gem_seqno,
++	.get_seqno		= render_ring_get_seqno,
+ 	.user_irq_get		= render_ring_get_user_irq,
+ 	.user_irq_put		= render_ring_put_user_irq,
+ 	.dispatch_gem_execbuffer = render_ring_dispatch_gem_execbuffer,
+-	.status_page		= {NULL, 0, NULL},
+-	.map			= {0,}
+ };
+ 
+ /* ring buffer for bit-stream decoder */
+ 
+-struct intel_ring_buffer bsd_ring = {
++static const struct intel_ring_buffer bsd_ring = {
+ 	.name                   = "bsd ring",
+-	.regs			= {
+-		.ctl = BSD_RING_CTL,
+-		.head = BSD_RING_HEAD,
+-		.tail = BSD_RING_TAIL,
+-		.start = BSD_RING_START
+-	},
+-	.ring_flag		= I915_EXEC_BSD,
++	.id			= RING_BSD,
++	.mmio_base		= BSD_RING_BASE,
+ 	.size			= 32 * PAGE_SIZE,
+-	.alignment		= PAGE_SIZE,
+-	.virtual_start		= NULL,
+-	.dev			= NULL,
+-	.gem_object		= NULL,
+-	.head			= 0,
+-	.tail			= 0,
+-	.space			= 0,
+-	.user_irq_refcount	= 0,
+-	.irq_gem_seqno		= 0,
+-	.waiting_gem_seqno	= 0,
+-	.setup_status_page	= bsd_setup_status_page,
+ 	.init			= init_bsd_ring,
+-	.get_head		= bsd_ring_get_head,
+-	.get_tail		= bsd_ring_get_tail,
+-	.get_active_head	= bsd_ring_get_active_head,
+-	.advance_ring		= bsd_ring_advance_ring,
++	.write_tail		= ring_write_tail,
+ 	.flush			= bsd_ring_flush,
+-	.add_request		= bsd_ring_add_request,
+-	.get_gem_seqno		= bsd_ring_get_gem_seqno,
++	.add_request		= ring_add_request,
++	.get_seqno		= ring_status_page_get_seqno,
+ 	.user_irq_get		= bsd_ring_get_user_irq,
+ 	.user_irq_put		= bsd_ring_put_user_irq,
+-	.dispatch_gem_execbuffer = bsd_ring_dispatch_gem_execbuffer,
+-	.status_page		= {NULL, 0, NULL},
+-	.map			= {0,}
++	.dispatch_gem_execbuffer = ring_dispatch_gem_execbuffer,
++};
++
++
++static void gen6_bsd_ring_write_tail(struct drm_device *dev,
++				     struct intel_ring_buffer *ring,
++				     u32 value)
++{
++       drm_i915_private_t *dev_priv = dev->dev_private;
++
++       /* Every tail move must follow the sequence below */
++       I915_WRITE(GEN6_BSD_SLEEP_PSMI_CONTROL,
++	       GEN6_BSD_SLEEP_PSMI_CONTROL_RC_ILDL_MESSAGE_MODIFY_MASK |
++	       GEN6_BSD_SLEEP_PSMI_CONTROL_RC_ILDL_MESSAGE_DISABLE);
++       I915_WRITE(GEN6_BSD_RNCID, 0x0);
++
++       if (wait_for((I915_READ(GEN6_BSD_SLEEP_PSMI_CONTROL) &
++                               GEN6_BSD_SLEEP_PSMI_CONTROL_IDLE_INDICATOR) == 0,
++                       50))
++               DRM_ERROR("timed out waiting for IDLE Indicator\n");
++
++       I915_WRITE_TAIL(ring, value);
++       I915_WRITE(GEN6_BSD_SLEEP_PSMI_CONTROL,
++	       GEN6_BSD_SLEEP_PSMI_CONTROL_RC_ILDL_MESSAGE_MODIFY_MASK |
++	       GEN6_BSD_SLEEP_PSMI_CONTROL_RC_ILDL_MESSAGE_ENABLE);
++}
++
++static void gen6_ring_flush(struct drm_device *dev,
++			    struct intel_ring_buffer *ring,
++			    u32 invalidate_domains,
++			    u32 flush_domains)
++{
++       intel_ring_begin(dev, ring, 4);
++       intel_ring_emit(dev, ring, MI_FLUSH_DW);
++       intel_ring_emit(dev, ring, 0);
++       intel_ring_emit(dev, ring, 0);
++       intel_ring_emit(dev, ring, 0);
++       intel_ring_advance(dev, ring);
++}
++
++static int
++gen6_ring_dispatch_gem_execbuffer(struct drm_device *dev,
++				  struct intel_ring_buffer *ring,
++				  struct drm_i915_gem_execbuffer2 *exec,
++				  struct drm_clip_rect *cliprects,
++				  uint64_t exec_offset)
++{
++       uint32_t exec_start;
++
++       exec_start = (uint32_t) exec_offset + exec->batch_start_offset;
++
++       intel_ring_begin(dev, ring, 2);
++       intel_ring_emit(dev, ring,
++		       MI_BATCH_BUFFER_START | MI_BATCH_NON_SECURE_I965);
++       /* bits 0-7 are the length on GEN6+ */
++       intel_ring_emit(dev, ring, exec_start);
++       intel_ring_advance(dev, ring);
++
++       return 0;
++}
++
++/* ring buffer for Video Codec for Gen6+ */
++static const struct intel_ring_buffer gen6_bsd_ring = {
++       .name			= "gen6 bsd ring",
++       .id			= RING_BSD,
++       .mmio_base		= GEN6_BSD_RING_BASE,
++       .size			= 32 * PAGE_SIZE,
++       .init			= init_bsd_ring,
++       .write_tail		= gen6_bsd_ring_write_tail,
++       .flush			= gen6_ring_flush,
++       .add_request		= ring_add_request,
++       .get_seqno		= ring_status_page_get_seqno,
++       .user_irq_get		= bsd_ring_get_user_irq,
++       .user_irq_put		= bsd_ring_put_user_irq,
++       .dispatch_gem_execbuffer	= gen6_ring_dispatch_gem_execbuffer,
++};
++
++/* Blitter support (SandyBridge+) */
++
++static void
++blt_ring_get_user_irq(struct drm_device *dev,
++		      struct intel_ring_buffer *ring)
++{
++	/* do nothing */
++}
++static void
++blt_ring_put_user_irq(struct drm_device *dev,
++		      struct intel_ring_buffer *ring)
++{
++	/* do nothing */
++}
++
++
++/* Workaround for some steppings of SNB:
++ * each time the BLT engine ring tail is moved,
++ * the first command in the ring to be parsed
++ * should be MI_BATCH_BUFFER_START.
++ */
++#define NEED_BLT_WORKAROUND(dev) \
++	(IS_GEN6(dev) && (dev->pdev->revision < 8))
++
++static inline struct drm_i915_gem_object *
++to_blt_workaround(struct intel_ring_buffer *ring)
++{
++	return ring->private;
++}
++
++static int blt_ring_init(struct drm_device *dev,
++			 struct intel_ring_buffer *ring)
++{
++	if (NEED_BLT_WORKAROUND(dev)) {
++		struct drm_i915_gem_object *obj;
++		u32 __iomem *ptr;
++		int ret;
++
++		obj = to_intel_bo(i915_gem_alloc_object(dev, 4096));
++		if (obj == NULL)
++			return -ENOMEM;
++
++		ret = i915_gem_object_pin(&obj->base, 4096);
++		if (ret) {
++			drm_gem_object_unreference(&obj->base);
++			return ret;
++		}
++
++		ptr = kmap(obj->pages[0]);
++		iowrite32(MI_BATCH_BUFFER_END, ptr);
++		iowrite32(MI_NOOP, ptr+1);
++		kunmap(obj->pages[0]);
++
++		ret = i915_gem_object_set_to_gtt_domain(&obj->base, false);
++		if (ret) {
++			i915_gem_object_unpin(&obj->base);
++			drm_gem_object_unreference(&obj->base);
++			return ret;
++		}
++
++		ring->private = obj;
++	}
++
++	return init_ring_common(dev, ring);
++}
++
++static void blt_ring_begin(struct drm_device *dev,
++			   struct intel_ring_buffer *ring,
++			   int num_dwords)
++{
++	if (ring->private) {
++		intel_ring_begin(dev, ring, num_dwords+2);
++		intel_ring_emit(dev, ring, MI_BATCH_BUFFER_START);
++		intel_ring_emit(dev, ring, to_blt_workaround(ring)->gtt_offset);
++	} else
++		intel_ring_begin(dev, ring, 4);
++}
++
++static void blt_ring_flush(struct drm_device *dev,
++			   struct intel_ring_buffer *ring,
++			   u32 invalidate_domains,
++			   u32 flush_domains)
++{
++	blt_ring_begin(dev, ring, 4);
++	intel_ring_emit(dev, ring, MI_FLUSH_DW);
++	intel_ring_emit(dev, ring, 0);
++	intel_ring_emit(dev, ring, 0);
++	intel_ring_emit(dev, ring, 0);
++	intel_ring_advance(dev, ring);
++}
++
++static u32
++blt_ring_add_request(struct drm_device *dev,
++		     struct intel_ring_buffer *ring,
++		     u32 flush_domains)
++{
++	u32 seqno = i915_gem_get_seqno(dev);
++
++	blt_ring_begin(dev, ring, 4);
++	intel_ring_emit(dev, ring, MI_STORE_DWORD_INDEX);
++	intel_ring_emit(dev, ring,
++			I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
++	intel_ring_emit(dev, ring, seqno);
++	intel_ring_emit(dev, ring, MI_USER_INTERRUPT);
++	intel_ring_advance(dev, ring);
++
++	DRM_DEBUG_DRIVER("%s %d\n", ring->name, seqno);
++	return seqno;
++}
++
++static void blt_ring_cleanup(struct intel_ring_buffer *ring)
++{
++	if (!ring->private)
++		return;
++
++	i915_gem_object_unpin(ring->private);
++	drm_gem_object_unreference(ring->private);
++	ring->private = NULL;
++}
++
++static const struct intel_ring_buffer gen6_blt_ring = {
++       .name			= "blt ring",
++       .id			= RING_BLT,
++       .mmio_base		= BLT_RING_BASE,
++       .size			= 32 * PAGE_SIZE,
++       .init			= blt_ring_init,
++       .write_tail		= ring_write_tail,
++       .flush			= blt_ring_flush,
++       .add_request		= blt_ring_add_request,
++       .get_seqno		= ring_status_page_get_seqno,
++       .user_irq_get		= blt_ring_get_user_irq,
++       .user_irq_put		= blt_ring_put_user_irq,
++       .dispatch_gem_execbuffer	= gen6_ring_dispatch_gem_execbuffer,
++       .cleanup			= blt_ring_cleanup,
+ };
++
++int intel_init_render_ring_buffer(struct drm_device *dev)
++{
++	drm_i915_private_t *dev_priv = dev->dev_private;
++
++	dev_priv->render_ring = render_ring;
++
++	if (!I915_NEED_GFX_HWS(dev)) {
++		dev_priv->render_ring.status_page.page_addr
++			= dev_priv->status_page_dmah->vaddr;
++		memset(dev_priv->render_ring.status_page.page_addr,
++				0, PAGE_SIZE);
++	}
++
++	return intel_init_ring_buffer(dev, &dev_priv->render_ring);
++}
++
++int intel_init_bsd_ring_buffer(struct drm_device *dev)
++{
++	drm_i915_private_t *dev_priv = dev->dev_private;
++
++	if (IS_GEN6(dev))
++		dev_priv->bsd_ring = gen6_bsd_ring;
++	else
++		dev_priv->bsd_ring = bsd_ring;
++
++	return intel_init_ring_buffer(dev, &dev_priv->bsd_ring);
++}
++
++int intel_init_blt_ring_buffer(struct drm_device *dev)
++{
++	drm_i915_private_t *dev_priv = dev->dev_private;
++
++	dev_priv->blt_ring = gen6_blt_ring;
++
++	return intel_init_ring_buffer(dev, &dev_priv->blt_ring);
++}
+diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.h b/drivers/gpu/drm/i915/intel_ringbuffer.h
+index 525e7d3..3126c26 100644
+--- a/drivers/gpu/drm/i915/intel_ringbuffer.h
++++ b/drivers/gpu/drm/i915/intel_ringbuffer.h
+@@ -7,25 +7,32 @@ struct  intel_hw_status_page {
+ 	struct		drm_gem_object *obj;
+ };
+ 
++#define I915_READ_TAIL(ring) I915_READ(RING_TAIL(ring->mmio_base))
++#define I915_WRITE_TAIL(ring, val) I915_WRITE(RING_TAIL(ring->mmio_base), val)
++#define I915_READ_START(ring) I915_READ(RING_START(ring->mmio_base))
++#define I915_WRITE_START(ring, val) I915_WRITE(RING_START(ring->mmio_base), val)
++#define I915_READ_HEAD(ring) I915_READ(RING_HEAD(ring->mmio_base))
++#define I915_WRITE_HEAD(ring, val) I915_WRITE(RING_HEAD(ring->mmio_base), val)
++#define I915_READ_CTL(ring) I915_READ(RING_CTL(ring->mmio_base))
++#define I915_WRITE_CTL(ring, val) I915_WRITE(RING_CTL(ring->mmio_base), val)
++
+ struct drm_i915_gem_execbuffer2;
+ struct  intel_ring_buffer {
+ 	const char	*name;
+-	struct		ring_regs {
+-			u32 ctl;
+-			u32 head;
+-			u32 tail;
+-			u32 start;
+-	} regs;
+-	unsigned int	ring_flag;
++	enum intel_ring_id {
++		RING_RENDER = 0x1,
++		RING_BSD = 0x2,
++		RING_BLT = 0x4,
++	} id;
++	u32		mmio_base;
+ 	unsigned long	size;
+-	unsigned int	alignment;
+ 	void		*virtual_start;
+ 	struct		drm_device *dev;
+ 	struct		drm_gem_object *gem_object;
+ 
+ 	unsigned int	head;
+ 	unsigned int	tail;
+-	unsigned int	space;
++	int		space;
+ 	struct intel_hw_status_page status_page;
+ 
+ 	u32		irq_gem_seqno;		/* last seq seem at irq time */
+@@ -35,35 +42,28 @@ struct  intel_ring_buffer {
+ 			struct intel_ring_buffer *ring);
+ 	void		(*user_irq_put)(struct drm_device *dev,
+ 			struct intel_ring_buffer *ring);
+-	void		(*setup_status_page)(struct drm_device *dev,
+-			struct	intel_ring_buffer *ring);
+ 
+ 	int		(*init)(struct drm_device *dev,
+ 			struct intel_ring_buffer *ring);
+ 
+-	unsigned int	(*get_head)(struct drm_device *dev,
+-			struct intel_ring_buffer *ring);
+-	unsigned int	(*get_tail)(struct drm_device *dev,
+-			struct intel_ring_buffer *ring);
+-	unsigned int	(*get_active_head)(struct drm_device *dev,
+-			struct intel_ring_buffer *ring);
+-	void		(*advance_ring)(struct drm_device *dev,
+-			struct intel_ring_buffer *ring);
++	void		(*write_tail)(struct drm_device *dev,
++				      struct intel_ring_buffer *ring,
++				      u32 value);
+ 	void		(*flush)(struct drm_device *dev,
+ 			struct intel_ring_buffer *ring,
+ 			u32	invalidate_domains,
+ 			u32	flush_domains);
+ 	u32		(*add_request)(struct drm_device *dev,
+ 			struct intel_ring_buffer *ring,
+-			struct drm_file *file_priv,
+ 			u32 flush_domains);
+-	u32		(*get_gem_seqno)(struct drm_device *dev,
+-			struct intel_ring_buffer *ring);
++	u32		(*get_seqno)(struct drm_device *dev,
++				     struct intel_ring_buffer *ring);
+ 	int		(*dispatch_gem_execbuffer)(struct drm_device *dev,
+ 			struct intel_ring_buffer *ring,
+ 			struct drm_i915_gem_execbuffer2 *exec,
+ 			struct drm_clip_rect *cliprects,
+ 			uint64_t exec_offset);
++	void		(*cleanup)(struct intel_ring_buffer *ring);
+ 
+ 	/**
+ 	 * List of objects currently involved in rendering from the
+@@ -83,8 +83,24 @@ struct  intel_ring_buffer {
+ 	 */
+ 	struct list_head request_list;
+ 
++	/**
++	 * List of objects currently pending a GPU write flush.
++	 *
++	 * All elements on this list will belong to either the
++	 * active_list or the flushing_list; last_rendering_seqno can
++	 * be used to differentiate between the two.
++	 */
++	struct list_head gpu_write_list;
++
++	/**
++	 * Do we have any not-yet-emitted requests outstanding?
++	 */
++	bool outstanding_lazy_request;
++
+ 	wait_queue_head_t irq_queue;
+ 	drm_local_map_t map;
++
++	void *private;
+ };
+ 
+ static inline u32
+@@ -96,15 +112,13 @@ intel_read_status_page(struct intel_ring_buffer *ring,
+ }
+ 
+ int intel_init_ring_buffer(struct drm_device *dev,
+-		struct intel_ring_buffer *ring);
++			   struct intel_ring_buffer *ring);
+ void intel_cleanup_ring_buffer(struct drm_device *dev,
+-		struct intel_ring_buffer *ring);
++			       struct intel_ring_buffer *ring);
+ int intel_wait_ring_buffer(struct drm_device *dev,
+-		struct intel_ring_buffer *ring, int n);
+-int intel_wrap_ring_buffer(struct drm_device *dev,
+-		struct intel_ring_buffer *ring);
++			   struct intel_ring_buffer *ring, int n);
+ void intel_ring_begin(struct drm_device *dev,
+-		struct intel_ring_buffer *ring, int n);
++		      struct intel_ring_buffer *ring, int n);
+ 
+ static inline void intel_ring_emit(struct drm_device *dev,
+ 				   struct intel_ring_buffer *ring,
+@@ -115,17 +129,19 @@ static inline void intel_ring_emit(struct drm_device *dev,
+ 	ring->tail += 4;
+ }
+ 
+-void intel_fill_struct(struct drm_device *dev,
+-		struct intel_ring_buffer *ring,
+-		void *data,
+-		unsigned int len);
+ void intel_ring_advance(struct drm_device *dev,
+ 		struct intel_ring_buffer *ring);
+ 
+ u32 intel_ring_get_seqno(struct drm_device *dev,
+ 		struct intel_ring_buffer *ring);
+ 
+-extern struct intel_ring_buffer render_ring;
+-extern struct intel_ring_buffer bsd_ring;
++int intel_init_render_ring_buffer(struct drm_device *dev);
++int intel_init_bsd_ring_buffer(struct drm_device *dev);
++int intel_init_blt_ring_buffer(struct drm_device *dev);
++
++u32 intel_ring_get_active_head(struct drm_device *dev,
++			       struct intel_ring_buffer *ring);
++void intel_ring_setup_status_page(struct drm_device *dev,
++				  struct intel_ring_buffer *ring);
+ 
+ #endif /* _INTEL_RINGBUFFER_H_ */
+diff --git a/drivers/gpu/drm/i915/intel_sdvo.c b/drivers/gpu/drm/i915/intel_sdvo.c
+index ee73e42..de158b7 100644
+--- a/drivers/gpu/drm/i915/intel_sdvo.c
++++ b/drivers/gpu/drm/i915/intel_sdvo.c
+@@ -65,8 +65,11 @@ static const char *tv_format_names[] = {
+ struct intel_sdvo {
+ 	struct intel_encoder base;
+ 
++	struct i2c_adapter *i2c;
+ 	u8 slave_addr;
+ 
++	struct i2c_adapter ddc;
++
+ 	/* Register for the SDVO device: SDVOB or SDVOC */
+ 	int sdvo_reg;
+ 
+@@ -104,34 +107,24 @@ struct intel_sdvo {
+ 	 * This is set if we treat the device as HDMI, instead of DVI.
+ 	 */
+ 	bool is_hdmi;
++	bool has_audio;
+ 
+ 	/**
+-	 * This is set if we detect output of sdvo device as LVDS.
++	 * This is set if we detect output of sdvo device as LVDS and
++	 * have a valid fixed mode to use with the panel.
+ 	 */
+ 	bool is_lvds;
+ 
+ 	/**
+-	 * This is sdvo flags for input timing.
+-	 */
+-	uint8_t sdvo_flags;
+-
+-	/**
+ 	 * This is sdvo fixed pannel mode pointer
+ 	 */
+ 	struct drm_display_mode *sdvo_lvds_fixed_mode;
+ 
+-	/*
+-	 * supported encoding mode, used to determine whether HDMI is
+-	 * supported
+-	 */
+-	struct intel_sdvo_encode encode;
+-
+ 	/* DDC bus used by this SDVO encoder */
+ 	uint8_t ddc_bus;
+ 
+-	/* Mac mini hack -- use the same DDC as the analog connector */
+-	struct i2c_adapter *analog_ddc_bus;
+-
++	/* Input timings for adjusted_mode */
++	struct intel_sdvo_dtd input_dtd;
+ };
+ 
+ struct intel_sdvo_connector {
+@@ -140,11 +133,15 @@ struct intel_sdvo_connector {
+ 	/* Mark the type of connector */
+ 	uint16_t output_flag;
+ 
++	int force_audio;
++
+ 	/* This contains all current supported TV format */
+ 	u8 tv_format_supported[TV_FORMAT_NUM];
+ 	int   format_supported_num;
+ 	struct drm_property *tv_format;
+ 
++	struct drm_property *force_audio_property;
++
+ 	/* add the property for the SDVO-TV */
+ 	struct drm_property *left;
+ 	struct drm_property *right;
+@@ -186,9 +183,15 @@ struct intel_sdvo_connector {
+ 	u32	cur_dot_crawl,	max_dot_crawl;
+ };
+ 
+-static struct intel_sdvo *enc_to_intel_sdvo(struct drm_encoder *encoder)
++static struct intel_sdvo *to_intel_sdvo(struct drm_encoder *encoder)
++{
++	return container_of(encoder, struct intel_sdvo, base.base);
++}
++
++static struct intel_sdvo *intel_attached_sdvo(struct drm_connector *connector)
+ {
+-	return container_of(enc_to_intel_encoder(encoder), struct intel_sdvo, base);
++	return container_of(intel_attached_encoder(connector),
++			    struct intel_sdvo, base);
+ }
+ 
+ static struct intel_sdvo_connector *to_intel_sdvo_connector(struct drm_connector *connector)
+@@ -213,7 +216,7 @@ intel_sdvo_create_enhance_property(struct intel_sdvo *intel_sdvo,
+  */
+ static void intel_sdvo_write_sdvox(struct intel_sdvo *intel_sdvo, u32 val)
+ {
+-	struct drm_device *dev = intel_sdvo->base.enc.dev;
++	struct drm_device *dev = intel_sdvo->base.base.dev;
+ 	struct drm_i915_private *dev_priv = dev->dev_private;
+ 	u32 bval = val, cval = val;
+ 	int i;
+@@ -245,49 +248,29 @@ static void intel_sdvo_write_sdvox(struct intel_sdvo *intel_sdvo, u32 val)
+ 
+ static bool intel_sdvo_read_byte(struct intel_sdvo *intel_sdvo, u8 addr, u8 *ch)
+ {
+-	u8 out_buf[2] = { addr, 0 };
+-	u8 buf[2];
+ 	struct i2c_msg msgs[] = {
+ 		{
+-			.addr = intel_sdvo->slave_addr >> 1,
++			.addr = intel_sdvo->slave_addr,
+ 			.flags = 0,
+ 			.len = 1,
+-			.buf = out_buf,
++			.buf = &addr,
+ 		},
+ 		{
+-			.addr = intel_sdvo->slave_addr >> 1,
++			.addr = intel_sdvo->slave_addr,
+ 			.flags = I2C_M_RD,
+ 			.len = 1,
+-			.buf = buf,
++			.buf = ch,
+ 		}
+ 	};
+ 	int ret;
+ 
+-	if ((ret = i2c_transfer(intel_sdvo->base.i2c_bus, msgs, 2)) == 2)
+-	{
+-		*ch = buf[0];
++	if ((ret = i2c_transfer(intel_sdvo->i2c, msgs, 2)) == 2)
+ 		return true;
+-	}
+ 
+ 	DRM_DEBUG_KMS("i2c transfer returned %d\n", ret);
+ 	return false;
+ }
+ 
+-static bool intel_sdvo_write_byte(struct intel_sdvo *intel_sdvo, int addr, u8 ch)
+-{
+-	u8 out_buf[2] = { addr, ch };
+-	struct i2c_msg msgs[] = {
+-		{
+-			.addr = intel_sdvo->slave_addr >> 1,
+-			.flags = 0,
+-			.len = 2,
+-			.buf = out_buf,
+-		}
+-	};
+-
+-	return i2c_transfer(intel_sdvo->base.i2c_bus, msgs, 1) == 1;
+-}
+-
+ #define SDVO_CMD_NAME_ENTRY(cmd) {cmd, #cmd}
+ /** Mapping of command numbers to names, for debug output */
+ static const struct _sdvo_cmd_name {
+@@ -432,22 +415,6 @@ static void intel_sdvo_debug_write(struct intel_sdvo *intel_sdvo, u8 cmd,
+ 	DRM_LOG_KMS("\n");
+ }
+ 
+-static bool intel_sdvo_write_cmd(struct intel_sdvo *intel_sdvo, u8 cmd,
+-				 const void *args, int args_len)
+-{
+-	int i;
+-
+-	intel_sdvo_debug_write(intel_sdvo, cmd, args, args_len);
+-
+-	for (i = 0; i < args_len; i++) {
+-		if (!intel_sdvo_write_byte(intel_sdvo, SDVO_I2C_ARG_0 - i,
+-					   ((u8*)args)[i]))
+-			return false;
+-	}
+-
+-	return intel_sdvo_write_byte(intel_sdvo, SDVO_I2C_OPCODE, cmd);
+-}
+-
+ static const char *cmd_status_names[] = {
+ 	"Power on",
+ 	"Success",
+@@ -458,54 +425,115 @@ static const char *cmd_status_names[] = {
+ 	"Scaling not supported"
+ };
+ 
+-static void intel_sdvo_debug_response(struct intel_sdvo *intel_sdvo,
+-				      void *response, int response_len,
+-				      u8 status)
++static bool intel_sdvo_write_cmd(struct intel_sdvo *intel_sdvo, u8 cmd,
++				 const void *args, int args_len)
+ {
+-	int i;
++	u8 buf[args_len*2 + 2], status;
++	struct i2c_msg msgs[args_len + 3];
++	int i, ret;
+ 
+-	DRM_DEBUG_KMS("%s: R: ", SDVO_NAME(intel_sdvo));
+-	for (i = 0; i < response_len; i++)
+-		DRM_LOG_KMS("%02X ", ((u8 *)response)[i]);
+-	for (; i < 8; i++)
+-		DRM_LOG_KMS("   ");
+-	if (status <= SDVO_CMD_STATUS_SCALING_NOT_SUPP)
+-		DRM_LOG_KMS("(%s)", cmd_status_names[status]);
+-	else
+-		DRM_LOG_KMS("(??? %d)", status);
+-	DRM_LOG_KMS("\n");
++	intel_sdvo_debug_write(intel_sdvo, cmd, args, args_len);
++
++	for (i = 0; i < args_len; i++) {
++		msgs[i].addr = intel_sdvo->slave_addr;
++		msgs[i].flags = 0;
++		msgs[i].len = 2;
++		msgs[i].buf = buf + 2 *i;
++		buf[2*i + 0] = SDVO_I2C_ARG_0 - i;
++		buf[2*i + 1] = ((u8*)args)[i];
++	}
++	msgs[i].addr = intel_sdvo->slave_addr;
++	msgs[i].flags = 0;
++	msgs[i].len = 2;
++	msgs[i].buf = buf + 2*i;
++	buf[2*i + 0] = SDVO_I2C_OPCODE;
++	buf[2*i + 1] = cmd;
++
++	/* the following two are to read the response */
++	status = SDVO_I2C_CMD_STATUS;
++	msgs[i+1].addr = intel_sdvo->slave_addr;
++	msgs[i+1].flags = 0;
++	msgs[i+1].len = 1;
++	msgs[i+1].buf = &status;
++
++	msgs[i+2].addr = intel_sdvo->slave_addr;
++	msgs[i+2].flags = I2C_M_RD;
++	msgs[i+2].len = 1;
++	msgs[i+2].buf = &status;
++
++	ret = i2c_transfer(intel_sdvo->i2c, msgs, i+3);
++	if (ret < 0) {
++		DRM_DEBUG_KMS("I2c transfer returned %d\n", ret);
++		return false;
++	}
++	if (ret != i+3) {
++		/* failure in I2C transfer */
++		DRM_DEBUG_KMS("I2c transfer returned %d/%d\n", ret, i+3);
++		return false;
++	}
++
++	i = 3;
++	while (status == SDVO_CMD_STATUS_PENDING && i--) {
++		if (!intel_sdvo_read_byte(intel_sdvo,
++					  SDVO_I2C_CMD_STATUS,
++					  &status))
++			return false;
++	}
++	if (status != SDVO_CMD_STATUS_SUCCESS) {
++		DRM_DEBUG_KMS("command returns response %s [%d]\n",
++			      status <= SDVO_CMD_STATUS_SCALING_NOT_SUPP ? cmd_status_names[status] : "???",
++			      status);
++		return false;
++	}
++
++	return true;
+ }
+ 
+ static bool intel_sdvo_read_response(struct intel_sdvo *intel_sdvo,
+ 				     void *response, int response_len)
+ {
+-	int i;
++	u8 retry = 5;
+ 	u8 status;
+-	u8 retry = 50;
+-
+-	while (retry--) {
+-		/* Read the command response */
+-		for (i = 0; i < response_len; i++) {
+-			if (!intel_sdvo_read_byte(intel_sdvo,
+-						  SDVO_I2C_RETURN_0 + i,
+-						  &((u8 *)response)[i]))
+-				return false;
+-		}
++	int i;
+ 
+-		/* read the return status */
+-		if (!intel_sdvo_read_byte(intel_sdvo, SDVO_I2C_CMD_STATUS,
++	/*
++	 * The documentation states that all commands will be
++	 * processed within 15µs, and that we need only poll
++	 * the status byte a maximum of 3 times in order for the
++	 * command to be complete.
++	 *
++	 * Check 5 times in case the hardware failed to read the docs.
++	 */
++	do {
++		if (!intel_sdvo_read_byte(intel_sdvo,
++					  SDVO_I2C_CMD_STATUS,
+ 					  &status))
+ 			return false;
++	} while (status == SDVO_CMD_STATUS_PENDING && --retry);
+ 
+-		intel_sdvo_debug_response(intel_sdvo, response, response_len,
+-					  status);
+-		if (status != SDVO_CMD_STATUS_PENDING)
+-			break;
++	DRM_DEBUG_KMS("%s: R: ", SDVO_NAME(intel_sdvo));
++	if (status <= SDVO_CMD_STATUS_SCALING_NOT_SUPP)
++		DRM_LOG_KMS("(%s)", cmd_status_names[status]);
++	else
++		DRM_LOG_KMS("(??? %d)", status);
+ 
+-		mdelay(50);
++	if (status != SDVO_CMD_STATUS_SUCCESS)
++		goto log_fail;
++
++	/* Read the command response */
++	for (i = 0; i < response_len; i++) {
++		if (!intel_sdvo_read_byte(intel_sdvo,
++					  SDVO_I2C_RETURN_0 + i,
++					  &((u8 *)response)[i]))
++			goto log_fail;
++		DRM_LOG_KMS(" %02X", ((u8 *)response)[i]);
+ 	}
++	DRM_LOG_KMS("\n");
++	return true;
+ 
+-	return status == SDVO_CMD_STATUS_SUCCESS;
++log_fail:
++	DRM_LOG_KMS("\n");
++	return false;
+ }
+ 
+ static int intel_sdvo_get_pixel_multiplier(struct drm_display_mode *mode)
+@@ -518,71 +546,17 @@ static int intel_sdvo_get_pixel_multiplier(struct drm_display_mode *mode)
+ 		return 4;
+ }
+ 
+-/**
+- * Try to read the response after issuie the DDC switch command. But it
+- * is noted that we must do the action of reading response and issuing DDC
+- * switch command in one I2C transaction. Otherwise when we try to start
+- * another I2C transaction after issuing the DDC bus switch, it will be
+- * switched to the internal SDVO register.
+- */
+-static void intel_sdvo_set_control_bus_switch(struct intel_sdvo *intel_sdvo,
+-					      u8 target)
++static bool intel_sdvo_set_control_bus_switch(struct intel_sdvo *intel_sdvo,
++					      u8 ddc_bus)
+ {
+-	u8 out_buf[2], cmd_buf[2], ret_value[2], ret;
+-	struct i2c_msg msgs[] = {
+-		{
+-			.addr = intel_sdvo->slave_addr >> 1,
+-			.flags = 0,
+-			.len = 2,
+-			.buf = out_buf,
+-		},
+-		/* the following two are to read the response */
+-		{
+-			.addr = intel_sdvo->slave_addr >> 1,
+-			.flags = 0,
+-			.len = 1,
+-			.buf = cmd_buf,
+-		},
+-		{
+-			.addr = intel_sdvo->slave_addr >> 1,
+-			.flags = I2C_M_RD,
+-			.len = 1,
+-			.buf = ret_value,
+-		},
+-	};
+-
+-	intel_sdvo_debug_write(intel_sdvo, SDVO_CMD_SET_CONTROL_BUS_SWITCH,
+-					&target, 1);
+-	/* write the DDC switch command argument */
+-	intel_sdvo_write_byte(intel_sdvo, SDVO_I2C_ARG_0, target);
+-
+-	out_buf[0] = SDVO_I2C_OPCODE;
+-	out_buf[1] = SDVO_CMD_SET_CONTROL_BUS_SWITCH;
+-	cmd_buf[0] = SDVO_I2C_CMD_STATUS;
+-	cmd_buf[1] = 0;
+-	ret_value[0] = 0;
+-	ret_value[1] = 0;
+-
+-	ret = i2c_transfer(intel_sdvo->base.i2c_bus, msgs, 3);
+-	if (ret != 3) {
+-		/* failure in I2C transfer */
+-		DRM_DEBUG_KMS("I2c transfer returned %d\n", ret);
+-		return;
+-	}
+-	if (ret_value[0] != SDVO_CMD_STATUS_SUCCESS) {
+-		DRM_DEBUG_KMS("DDC switch command returns response %d\n",
+-					ret_value[0]);
+-		return;
+-	}
+-	return;
++	return intel_sdvo_write_cmd(intel_sdvo,
++				    SDVO_CMD_SET_CONTROL_BUS_SWITCH,
++				    &ddc_bus, 1);
+ }
+ 
+ static bool intel_sdvo_set_value(struct intel_sdvo *intel_sdvo, u8 cmd, const void *data, int len)
+ {
+-	if (!intel_sdvo_write_cmd(intel_sdvo, cmd, data, len))
+-		return false;
+-
+-	return intel_sdvo_read_response(intel_sdvo, NULL, 0);
++	return intel_sdvo_write_cmd(intel_sdvo, cmd, data, len);
+ }
+ 
+ static bool
+@@ -819,17 +793,13 @@ static void intel_sdvo_get_mode_from_dtd(struct drm_display_mode * mode,
+ 		mode->flags |= DRM_MODE_FLAG_PVSYNC;
+ }
+ 
+-static bool intel_sdvo_get_supp_encode(struct intel_sdvo *intel_sdvo,
+-				       struct intel_sdvo_encode *encode)
++static bool intel_sdvo_check_supp_encode(struct intel_sdvo *intel_sdvo)
+ {
+-	if (intel_sdvo_get_value(intel_sdvo,
+-				  SDVO_CMD_GET_SUPP_ENCODE,
+-				  encode, sizeof(*encode)))
+-		return true;
++	struct intel_sdvo_encode encode;
+ 
+-	/* non-support means DVI */
+-	memset(encode, 0, sizeof(*encode));
+-	return false;
++	return intel_sdvo_get_value(intel_sdvo,
++				  SDVO_CMD_GET_SUPP_ENCODE,
++				  &encode, sizeof(encode));
+ }
+ 
+ static bool intel_sdvo_set_encode(struct intel_sdvo *intel_sdvo,
+@@ -874,115 +844,33 @@ static void intel_sdvo_dump_hdmi_buf(struct intel_sdvo *intel_sdvo)
+ }
+ #endif
+ 
+-static bool intel_sdvo_set_hdmi_buf(struct intel_sdvo *intel_sdvo,
+-				    int index,
+-				    uint8_t *data, int8_t size, uint8_t tx_rate)
+-{
+-    uint8_t set_buf_index[2];
+-
+-    set_buf_index[0] = index;
+-    set_buf_index[1] = 0;
+-
+-    if (!intel_sdvo_write_cmd(intel_sdvo, SDVO_CMD_SET_HBUF_INDEX,
+-			      set_buf_index, 2))
+-	    return false;
+-
+-    for (; size > 0; size -= 8) {
+-	if (!intel_sdvo_write_cmd(intel_sdvo, SDVO_CMD_SET_HBUF_DATA, data, 8))
+-		return false;
+-
+-	data += 8;
+-    }
+-
+-    return intel_sdvo_write_cmd(intel_sdvo, SDVO_CMD_SET_HBUF_TXRATE, &tx_rate, 1);
+-}
+-
+-static uint8_t intel_sdvo_calc_hbuf_csum(uint8_t *data, uint8_t size)
+-{
+-	uint8_t csum = 0;
+-	int i;
+-
+-	for (i = 0; i < size; i++)
+-		csum += data[i];
+-
+-	return 0x100 - csum;
+-}
+-
+-#define DIP_TYPE_AVI	0x82
+-#define DIP_VERSION_AVI	0x2
+-#define DIP_LEN_AVI	13
+-
+-struct dip_infoframe {
+-	uint8_t type;
+-	uint8_t version;
+-	uint8_t len;
+-	uint8_t checksum;
+-	union {
+-		struct {
+-			/* Packet Byte #1 */
+-			uint8_t S:2;
+-			uint8_t B:2;
+-			uint8_t A:1;
+-			uint8_t Y:2;
+-			uint8_t rsvd1:1;
+-			/* Packet Byte #2 */
+-			uint8_t R:4;
+-			uint8_t M:2;
+-			uint8_t C:2;
+-			/* Packet Byte #3 */
+-			uint8_t SC:2;
+-			uint8_t Q:2;
+-			uint8_t EC:3;
+-			uint8_t ITC:1;
+-			/* Packet Byte #4 */
+-			uint8_t VIC:7;
+-			uint8_t rsvd2:1;
+-			/* Packet Byte #5 */
+-			uint8_t PR:4;
+-			uint8_t rsvd3:4;
+-			/* Packet Byte #6~13 */
+-			uint16_t top_bar_end;
+-			uint16_t bottom_bar_start;
+-			uint16_t left_bar_end;
+-			uint16_t right_bar_start;
+-		} avi;
+-		struct {
+-			/* Packet Byte #1 */
+-			uint8_t channel_count:3;
+-			uint8_t rsvd1:1;
+-			uint8_t coding_type:4;
+-			/* Packet Byte #2 */
+-			uint8_t sample_size:2; /* SS0, SS1 */
+-			uint8_t sample_frequency:3;
+-			uint8_t rsvd2:3;
+-			/* Packet Byte #3 */
+-			uint8_t coding_type_private:5;
+-			uint8_t rsvd3:3;
+-			/* Packet Byte #4 */
+-			uint8_t channel_allocation;
+-			/* Packet Byte #5 */
+-			uint8_t rsvd4:3;
+-			uint8_t level_shift:4;
+-			uint8_t downmix_inhibit:1;
+-		} audio;
+-		uint8_t payload[28];
+-	} __attribute__ ((packed)) u;
+-} __attribute__((packed));
+-
+-static bool intel_sdvo_set_avi_infoframe(struct intel_sdvo *intel_sdvo,
+-					 struct drm_display_mode * mode)
++static bool intel_sdvo_set_avi_infoframe(struct intel_sdvo *intel_sdvo)
+ {
+ 	struct dip_infoframe avi_if = {
+ 		.type = DIP_TYPE_AVI,
+-		.version = DIP_VERSION_AVI,
++		.ver = DIP_VERSION_AVI,
+ 		.len = DIP_LEN_AVI,
+ 	};
++	uint8_t tx_rate = SDVO_HBUF_TX_VSYNC;
++	uint8_t set_buf_index[2] = { 1, 0 };
++	uint64_t *data = (uint64_t *)&avi_if;
++	unsigned i;
++
++	intel_dip_infoframe_csum(&avi_if);
++
++	if (!intel_sdvo_write_cmd(intel_sdvo, SDVO_CMD_SET_HBUF_INDEX,
++				  set_buf_index, 2))
++		return false;
+ 
+-	avi_if.checksum = intel_sdvo_calc_hbuf_csum((uint8_t *)&avi_if,
+-						    4 + avi_if.len);
+-	return intel_sdvo_set_hdmi_buf(intel_sdvo, 1, (uint8_t *)&avi_if,
+-				       4 + avi_if.len,
+-				       SDVO_HBUF_TX_VSYNC);
++	for (i = 0; i < sizeof(avi_if); i += 8) {
++		if (!intel_sdvo_write_cmd(intel_sdvo, SDVO_CMD_SET_HBUF_DATA,
++					  data, 8))
++			return false;
++		data++;
++	}
++
++	return intel_sdvo_write_cmd(intel_sdvo, SDVO_CMD_SET_HBUF_TXRATE,
++				    &tx_rate, 1);
+ }
+ 
+ static bool intel_sdvo_set_tv_format(struct intel_sdvo *intel_sdvo)
+@@ -1022,8 +910,6 @@ intel_sdvo_set_input_timings_for_mode(struct intel_sdvo *intel_sdvo,
+ 					struct drm_display_mode *mode,
+ 					struct drm_display_mode *adjusted_mode)
+ {
+-	struct intel_sdvo_dtd input_dtd;
+-
+ 	/* Reset the input timing to the screen. Assume always input 0. */
+ 	if (!intel_sdvo_set_target_input(intel_sdvo))
+ 		return false;
+@@ -1035,14 +921,12 @@ intel_sdvo_set_input_timings_for_mode(struct intel_sdvo *intel_sdvo,
+ 		return false;
+ 
+ 	if (!intel_sdvo_get_preferred_input_timing(intel_sdvo,
+-						   &input_dtd))
++						   &intel_sdvo->input_dtd))
+ 		return false;
+ 
+-	intel_sdvo_get_mode_from_dtd(adjusted_mode, &input_dtd);
+-	intel_sdvo->sdvo_flags = input_dtd.part2.sdvo_flags;
++	intel_sdvo_get_mode_from_dtd(adjusted_mode, &intel_sdvo->input_dtd);
+ 
+ 	drm_mode_set_crtcinfo(adjusted_mode, 0);
+-	mode->clock = adjusted_mode->clock;
+ 	return true;
+ }
+ 
+@@ -1050,7 +934,8 @@ static bool intel_sdvo_mode_fixup(struct drm_encoder *encoder,
+ 				  struct drm_display_mode *mode,
+ 				  struct drm_display_mode *adjusted_mode)
+ {
+-	struct intel_sdvo *intel_sdvo = enc_to_intel_sdvo(encoder);
++	struct intel_sdvo *intel_sdvo = to_intel_sdvo(encoder);
++	int multiplier;
+ 
+ 	/* We need to construct preferred input timings based on our
+ 	 * output timings.  To do that, we have to set the output
+@@ -1065,10 +950,8 @@ static bool intel_sdvo_mode_fixup(struct drm_encoder *encoder,
+ 							     mode,
+ 							     adjusted_mode);
+ 	} else if (intel_sdvo->is_lvds) {
+-		drm_mode_set_crtcinfo(intel_sdvo->sdvo_lvds_fixed_mode, 0);
+-
+ 		if (!intel_sdvo_set_output_timings_from_mode(intel_sdvo,
+-							    intel_sdvo->sdvo_lvds_fixed_mode))
++							     intel_sdvo->sdvo_lvds_fixed_mode))
+ 			return false;
+ 
+ 		(void) intel_sdvo_set_input_timings_for_mode(intel_sdvo,
+@@ -1077,9 +960,10 @@ static bool intel_sdvo_mode_fixup(struct drm_encoder *encoder,
+ 	}
+ 
+ 	/* Make the CRTC code factor in the SDVO pixel multiplier.  The
+-	 * SDVO device will be told of the multiplier during mode_set.
++	 * SDVO device will factor out the multiplier during mode_set.
+ 	 */
+-	adjusted_mode->clock *= intel_sdvo_get_pixel_multiplier(mode);
++	multiplier = intel_sdvo_get_pixel_multiplier(adjusted_mode);
++	intel_mode_set_pixel_multiplier(adjusted_mode, multiplier);
+ 
+ 	return true;
+ }
+@@ -1092,11 +976,12 @@ static void intel_sdvo_mode_set(struct drm_encoder *encoder,
+ 	struct drm_i915_private *dev_priv = dev->dev_private;
+ 	struct drm_crtc *crtc = encoder->crtc;
+ 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+-	struct intel_sdvo *intel_sdvo = enc_to_intel_sdvo(encoder);
+-	u32 sdvox = 0;
+-	int sdvo_pixel_multiply, rate;
++	struct intel_sdvo *intel_sdvo = to_intel_sdvo(encoder);
++	u32 sdvox;
+ 	struct intel_sdvo_in_out_map in_out;
+ 	struct intel_sdvo_dtd input_dtd;
++	int pixel_multiplier = intel_mode_get_pixel_multiplier(adjusted_mode);
++	int rate;
+ 
+ 	if (!mode)
+ 		return;
+@@ -1114,28 +999,23 @@ static void intel_sdvo_mode_set(struct drm_encoder *encoder,
+ 			     SDVO_CMD_SET_IN_OUT_MAP,
+ 			     &in_out, sizeof(in_out));
+ 
+-	if (intel_sdvo->is_hdmi) {
+-		if (!intel_sdvo_set_avi_infoframe(intel_sdvo, mode))
+-			return;
+-
+-		sdvox |= SDVO_AUDIO_ENABLE;
+-	}
++	/* Set the output timings to the screen */
++	if (!intel_sdvo_set_target_output(intel_sdvo,
++					  intel_sdvo->attached_output))
++		return;
+ 
+ 	/* We have tried to get input timing in mode_fixup, and filled into
+-	   adjusted_mode */
+-	intel_sdvo_get_dtd_from_mode(&input_dtd, adjusted_mode);
+-	if (intel_sdvo->is_tv || intel_sdvo->is_lvds)
+-		input_dtd.part2.sdvo_flags = intel_sdvo->sdvo_flags;
+-
+-	/* If it's a TV, we already set the output timing in mode_fixup.
+-	 * Otherwise, the output timing is equal to the input timing.
++	 * adjusted_mode.
+ 	 */
+-	if (!intel_sdvo->is_tv && !intel_sdvo->is_lvds) {
++	if (intel_sdvo->is_tv || intel_sdvo->is_lvds) {
++		input_dtd = intel_sdvo->input_dtd;
++	} else {
+ 		/* Set the output timing to the screen */
+ 		if (!intel_sdvo_set_target_output(intel_sdvo,
+ 						  intel_sdvo->attached_output))
+ 			return;
+ 
++		intel_sdvo_get_dtd_from_mode(&input_dtd, adjusted_mode);
+ 		(void) intel_sdvo_set_output_timing(intel_sdvo, &input_dtd);
+ 	}
+ 
+@@ -1143,31 +1023,18 @@ static void intel_sdvo_mode_set(struct drm_encoder *encoder,
+ 	if (!intel_sdvo_set_target_input(intel_sdvo))
+ 		return;
+ 
+-	if (intel_sdvo->is_tv) {
+-		if (!intel_sdvo_set_tv_format(intel_sdvo))
+-			return;
+-	}
++	if (intel_sdvo->is_hdmi &&
++	    !intel_sdvo_set_avi_infoframe(intel_sdvo))
++		return;
+ 
+-	/* We would like to use intel_sdvo_create_preferred_input_timing() to
+-	 * provide the device with a timing it can support, if it supports that
+-	 * feature.  However, presumably we would need to adjust the CRTC to
+-	 * output the preferred timing, and we don't support that currently.
+-	 */
+-#if 0
+-	success = intel_sdvo_create_preferred_input_timing(encoder, clock,
+-							   width, height);
+-	if (success) {
+-		struct intel_sdvo_dtd *input_dtd;
++	if (intel_sdvo->is_tv &&
++	    !intel_sdvo_set_tv_format(intel_sdvo))
++		return;
+ 
+-		intel_sdvo_get_preferred_input_timing(encoder, &input_dtd);
+-		intel_sdvo_set_input_timing(encoder, &input_dtd);
+-	}
+-#else
+ 	(void) intel_sdvo_set_input_timing(intel_sdvo, &input_dtd);
+-#endif
+ 
+-	sdvo_pixel_multiply = intel_sdvo_get_pixel_multiplier(mode);
+-	switch (sdvo_pixel_multiply) {
++	switch (pixel_multiplier) {
++	default:
+ 	case 1: rate = SDVO_CLOCK_RATE_MULT_1X; break;
+ 	case 2: rate = SDVO_CLOCK_RATE_MULT_2X; break;
+ 	case 4: rate = SDVO_CLOCK_RATE_MULT_4X; break;
+@@ -1176,14 +1043,14 @@ static void intel_sdvo_mode_set(struct drm_encoder *encoder,
+ 		return;
+ 
+ 	/* Set the SDVO control regs. */
+-	if (IS_I965G(dev)) {
+-		sdvox |= SDVO_BORDER_ENABLE;
++	if (INTEL_INFO(dev)->gen >= 4) {
++		sdvox = SDVO_BORDER_ENABLE;
+ 		if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC)
+ 			sdvox |= SDVO_VSYNC_ACTIVE_HIGH;
+ 		if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC)
+ 			sdvox |= SDVO_HSYNC_ACTIVE_HIGH;
+ 	} else {
+-		sdvox |= I915_READ(intel_sdvo->sdvo_reg);
++		sdvox = I915_READ(intel_sdvo->sdvo_reg);
+ 		switch (intel_sdvo->sdvo_reg) {
+ 		case SDVOB:
+ 			sdvox &= SDVOB_PRESERVE_MASK;
+@@ -1196,16 +1063,18 @@ static void intel_sdvo_mode_set(struct drm_encoder *encoder,
+ 	}
+ 	if (intel_crtc->pipe == 1)
+ 		sdvox |= SDVO_PIPE_B_SELECT;
++	if (intel_sdvo->has_audio)
++		sdvox |= SDVO_AUDIO_ENABLE;
+ 
+-	if (IS_I965G(dev)) {
++	if (INTEL_INFO(dev)->gen >= 4) {
+ 		/* done in crtc_mode_set as the dpll_md reg must be written early */
+ 	} else if (IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev)) {
+ 		/* done in crtc_mode_set as it lives inside the dpll register */
+ 	} else {
+-		sdvox |= (sdvo_pixel_multiply - 1) << SDVO_PORT_MULTIPLY_SHIFT;
++		sdvox |= (pixel_multiplier - 1) << SDVO_PORT_MULTIPLY_SHIFT;
+ 	}
+ 
+-	if (intel_sdvo->sdvo_flags & SDVO_NEED_TO_STALL)
++	if (input_dtd.part2.sdvo_flags & SDVO_NEED_TO_STALL)
+ 		sdvox |= SDVO_STALL_SELECT;
+ 	intel_sdvo_write_sdvox(intel_sdvo, sdvox);
+ }
+@@ -1214,7 +1083,7 @@ static void intel_sdvo_dpms(struct drm_encoder *encoder, int mode)
+ {
+ 	struct drm_device *dev = encoder->dev;
+ 	struct drm_i915_private *dev_priv = dev->dev_private;
+-	struct intel_sdvo *intel_sdvo = enc_to_intel_sdvo(encoder);
++	struct intel_sdvo *intel_sdvo = to_intel_sdvo(encoder);
+ 	struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc);
+ 	u32 temp;
+ 
+@@ -1260,8 +1129,7 @@ static void intel_sdvo_dpms(struct drm_encoder *encoder, int mode)
+ static int intel_sdvo_mode_valid(struct drm_connector *connector,
+ 				 struct drm_display_mode *mode)
+ {
+-	struct drm_encoder *encoder = intel_attached_encoder(connector);
+-	struct intel_sdvo *intel_sdvo = enc_to_intel_sdvo(encoder);
++	struct intel_sdvo *intel_sdvo = intel_attached_sdvo(connector);
+ 
+ 	if (mode->flags & DRM_MODE_FLAG_DBLSCAN)
+ 		return MODE_NO_DBLESCAN;
+@@ -1285,7 +1153,38 @@ static int intel_sdvo_mode_valid(struct drm_connector *connector,
+ 
+ static bool intel_sdvo_get_capabilities(struct intel_sdvo *intel_sdvo, struct intel_sdvo_caps *caps)
+ {
+-	return intel_sdvo_get_value(intel_sdvo, SDVO_CMD_GET_DEVICE_CAPS, caps, sizeof(*caps));
++	if (!intel_sdvo_get_value(intel_sdvo,
++				  SDVO_CMD_GET_DEVICE_CAPS,
++				  caps, sizeof(*caps)))
++		return false;
++
++	DRM_DEBUG_KMS("SDVO capabilities:\n"
++		      "  vendor_id: %d\n"
++		      "  device_id: %d\n"
++		      "  device_rev_id: %d\n"
++		      "  sdvo_version_major: %d\n"
++		      "  sdvo_version_minor: %d\n"
++		      "  sdvo_inputs_mask: %d\n"
++		      "  smooth_scaling: %d\n"
++		      "  sharp_scaling: %d\n"
++		      "  up_scaling: %d\n"
++		      "  down_scaling: %d\n"
++		      "  stall_support: %d\n"
++		      "  output_flags: %d\n",
++		      caps->vendor_id,
++		      caps->device_id,
++		      caps->device_rev_id,
++		      caps->sdvo_version_major,
++		      caps->sdvo_version_minor,
++		      caps->sdvo_inputs_mask,
++		      caps->smooth_scaling,
++		      caps->sharp_scaling,
++		      caps->up_scaling,
++		      caps->down_scaling,
++		      caps->stall_support,
++		      caps->output_flags);
++
++	return true;
+ }
+ 
+ /* No use! */
+@@ -1389,22 +1288,33 @@ intel_sdvo_multifunc_encoder(struct intel_sdvo *intel_sdvo)
+ 	return (caps > 1);
+ }
+ 
++static struct edid *
++intel_sdvo_get_edid(struct drm_connector *connector)
++{
++	struct intel_sdvo *sdvo = intel_attached_sdvo(connector);
++	return drm_get_edid(connector, &sdvo->ddc);
++}
++
+ static struct drm_connector *
+ intel_find_analog_connector(struct drm_device *dev)
+ {
+ 	struct drm_connector *connector;
+-	struct drm_encoder *encoder;
+-	struct intel_sdvo *intel_sdvo;
+-
+-	list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
+-		intel_sdvo = enc_to_intel_sdvo(encoder);
+-		if (intel_sdvo->base.type == INTEL_OUTPUT_ANALOG) {
+-			list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
+-				if (encoder == intel_attached_encoder(connector))
++	struct intel_sdvo *encoder;
++
++	list_for_each_entry(encoder,
++			    &dev->mode_config.encoder_list,
++			    base.base.head) {
++		if (encoder->base.type == INTEL_OUTPUT_ANALOG) {
++			list_for_each_entry(connector,
++					    &dev->mode_config.connector_list,
++					    head) {
++				if (&encoder->base ==
++				    intel_attached_encoder(connector))
+ 					return connector;
+ 			}
+ 		}
+ 	}
++
+ 	return NULL;
+ }
+ 
+@@ -1424,64 +1334,72 @@ intel_analog_is_connected(struct drm_device *dev)
+ 	return true;
+ }
+ 
++/* Mac mini hack -- use the same DDC as the analog connector */
++static struct edid *
++intel_sdvo_get_analog_edid(struct drm_connector *connector)
++{
++	struct drm_i915_private *dev_priv = connector->dev->dev_private;
++
++	if (!intel_analog_is_connected(connector->dev))
++		return NULL;
++
++	return drm_get_edid(connector, &dev_priv->gmbus[dev_priv->crt_ddc_pin].adapter);
++}
++
+ enum drm_connector_status
+ intel_sdvo_hdmi_sink_detect(struct drm_connector *connector)
+ {
+-	struct drm_encoder *encoder = intel_attached_encoder(connector);
+-	struct intel_sdvo *intel_sdvo = enc_to_intel_sdvo(encoder);
+-	struct intel_sdvo_connector *intel_sdvo_connector = to_intel_sdvo_connector(connector);
+-	enum drm_connector_status status = connector_status_connected;
+-	struct edid *edid = NULL;
++	struct intel_sdvo *intel_sdvo = intel_attached_sdvo(connector);
++	enum drm_connector_status status;
++	struct edid *edid;
+ 
+-	edid = drm_get_edid(connector, intel_sdvo->base.ddc_bus);
++	edid = intel_sdvo_get_edid(connector);
+ 
+-	/* This is only applied to SDVO cards with multiple outputs */
+ 	if (edid == NULL && intel_sdvo_multifunc_encoder(intel_sdvo)) {
+-		uint8_t saved_ddc, temp_ddc;
+-		saved_ddc = intel_sdvo->ddc_bus;
+-		temp_ddc = intel_sdvo->ddc_bus >> 1;
++		u8 ddc, saved_ddc = intel_sdvo->ddc_bus;
++
+ 		/*
+ 		 * Don't use the 1 as the argument of DDC bus switch to get
+ 		 * the EDID. It is used for SDVO SPD ROM.
+ 		 */
+-		while(temp_ddc > 1) {
+-			intel_sdvo->ddc_bus = temp_ddc;
+-			edid = drm_get_edid(connector, intel_sdvo->base.ddc_bus);
+-			if (edid) {
+-				/*
+-				 * When we can get the EDID, maybe it is the
+-				 * correct DDC bus. Update it.
+-				 */
+-				intel_sdvo->ddc_bus = temp_ddc;
++		for (ddc = intel_sdvo->ddc_bus >> 1; ddc > 1; ddc >>= 1) {
++			intel_sdvo->ddc_bus = ddc;
++			edid = intel_sdvo_get_edid(connector);
++			if (edid)
+ 				break;
+-			}
+-			temp_ddc >>= 1;
+ 		}
++		/*
++		 * If we found the EDID on the other bus,
++		 * assume that is the correct DDC bus.
++		 */
+ 		if (edid == NULL)
+ 			intel_sdvo->ddc_bus = saved_ddc;
+ 	}
+-	/* when there is no edid and no monitor is connected with VGA
+-	 * port, try to use the CRT ddc to read the EDID for DVI-connector
++
++	/*
++	 * When there is no EDID and no monitor is connected to the VGA
++	 * port, try to use the CRT DDC to read the EDID for the DVI connector.
+ 	 */
+-	if (edid == NULL && intel_sdvo->analog_ddc_bus &&
+-	    !intel_analog_is_connected(connector->dev))
+-		edid = drm_get_edid(connector, intel_sdvo->analog_ddc_bus);
++	if (edid == NULL)
++		edid = intel_sdvo_get_analog_edid(connector);
+ 
++	status = connector_status_unknown;
+ 	if (edid != NULL) {
+-		bool is_digital = !!(edid->input & DRM_EDID_INPUT_DIGITAL);
+-		bool need_digital = !!(intel_sdvo_connector->output_flag & SDVO_TMDS_MASK);
+-
+ 		/* DDC bus is shared, match EDID to connector type */
+-		if (is_digital && need_digital)
++		if (edid->input & DRM_EDID_INPUT_DIGITAL) {
++			status = connector_status_connected;
+ 			intel_sdvo->is_hdmi = drm_detect_hdmi_monitor(edid);
+-		else if (is_digital != need_digital)
+-			status = connector_status_disconnected;
+-
++			intel_sdvo->has_audio = drm_detect_monitor_audio(edid);
++		}
+ 		connector->display_info.raw_edid = NULL;
+-	} else
+-		status = connector_status_disconnected;
+-	
+-	kfree(edid);
++		kfree(edid);
++	}
++
++	if (status == connector_status_connected) {
++		struct intel_sdvo_connector *intel_sdvo_connector = to_intel_sdvo_connector(connector);
++		if (intel_sdvo_connector->force_audio)
++			intel_sdvo->has_audio = intel_sdvo_connector->force_audio > 0;
++	}
+ 
+ 	return status;
+ }
+@@ -1490,13 +1408,12 @@ static enum drm_connector_status
+ intel_sdvo_detect(struct drm_connector *connector, bool force)
+ {
+ 	uint16_t response;
+-	struct drm_encoder *encoder = intel_attached_encoder(connector);
+-	struct intel_sdvo *intel_sdvo = enc_to_intel_sdvo(encoder);
++	struct intel_sdvo *intel_sdvo = intel_attached_sdvo(connector);
+ 	struct intel_sdvo_connector *intel_sdvo_connector = to_intel_sdvo_connector(connector);
+ 	enum drm_connector_status ret;
+ 
+ 	if (!intel_sdvo_write_cmd(intel_sdvo,
+-			     SDVO_CMD_GET_ATTACHED_DISPLAYS, NULL, 0))
++				  SDVO_CMD_GET_ATTACHED_DISPLAYS, NULL, 0))
+ 		return connector_status_unknown;
+ 	if (intel_sdvo->is_tv) {
+ 		/* add 30ms delay when the output type is SDVO-TV */
+@@ -1505,7 +1422,9 @@ intel_sdvo_detect(struct drm_connector *connector, bool force)
+ 	if (!intel_sdvo_read_response(intel_sdvo, &response, 2))
+ 		return connector_status_unknown;
+ 
+-	DRM_DEBUG_KMS("SDVO response %d %d\n", response & 0xff, response >> 8);
++	DRM_DEBUG_KMS("SDVO response %d %d [%x]\n",
++		      response & 0xff, response >> 8,
++		      intel_sdvo_connector->output_flag);
+ 
+ 	if (response == 0)
+ 		return connector_status_disconnected;
+@@ -1538,12 +1457,10 @@ intel_sdvo_detect(struct drm_connector *connector, bool force)
+ 
+ static void intel_sdvo_get_ddc_modes(struct drm_connector *connector)
+ {
+-	struct drm_encoder *encoder = intel_attached_encoder(connector);
+-	struct intel_sdvo *intel_sdvo = enc_to_intel_sdvo(encoder);
+-	int num_modes;
++	struct edid *edid;
+ 
+ 	/* set the bus switch and get the modes */
+-	num_modes = intel_ddc_get_modes(connector, intel_sdvo->base.ddc_bus);
++	edid = intel_sdvo_get_edid(connector);
+ 
+ 	/*
+ 	 * Mac mini hack.  On this device, the DVI-I connector shares one DDC
+@@ -1551,12 +1468,14 @@ static void intel_sdvo_get_ddc_modes(struct drm_connector *connector)
+ 	 * DDC fails, check to see if the analog output is disconnected, in
+ 	 * which case we'll look there for the digital DDC data.
+ 	 */
+-	if (num_modes == 0 &&
+-	    intel_sdvo->analog_ddc_bus &&
+-	    !intel_analog_is_connected(connector->dev)) {
+-		/* Switch to the analog ddc bus and try that
+-		 */
+-		(void) intel_ddc_get_modes(connector, intel_sdvo->analog_ddc_bus);
++	if (edid == NULL)
++		edid = intel_sdvo_get_analog_edid(connector);
++
++	if (edid != NULL) {
++		drm_mode_connector_update_edid_property(connector, edid);
++		drm_add_edid_modes(connector, edid);
++		connector->display_info.raw_edid = NULL;
++		kfree(edid);
+ 	}
+ }
+ 
+@@ -1627,8 +1546,7 @@ struct drm_display_mode sdvo_tv_modes[] = {
+ 
+ static void intel_sdvo_get_tv_modes(struct drm_connector *connector)
+ {
+-	struct drm_encoder *encoder = intel_attached_encoder(connector);
+-	struct intel_sdvo *intel_sdvo = enc_to_intel_sdvo(encoder);
++	struct intel_sdvo *intel_sdvo = intel_attached_sdvo(connector);
+ 	struct intel_sdvo_sdtv_resolution_request tv_res;
+ 	uint32_t reply = 0, format_map = 0;
+ 	int i;
+@@ -1644,7 +1562,8 @@ static void intel_sdvo_get_tv_modes(struct drm_connector *connector)
+ 		return;
+ 
+ 	BUILD_BUG_ON(sizeof(tv_res) != 3);
+-	if (!intel_sdvo_write_cmd(intel_sdvo, SDVO_CMD_GET_SDTV_RESOLUTION_SUPPORT,
++	if (!intel_sdvo_write_cmd(intel_sdvo,
++				  SDVO_CMD_GET_SDTV_RESOLUTION_SUPPORT,
+ 				  &tv_res, sizeof(tv_res)))
+ 		return;
+ 	if (!intel_sdvo_read_response(intel_sdvo, &reply, 3))
+@@ -1662,8 +1581,7 @@ static void intel_sdvo_get_tv_modes(struct drm_connector *connector)
+ 
+ static void intel_sdvo_get_lvds_modes(struct drm_connector *connector)
+ {
+-	struct drm_encoder *encoder = intel_attached_encoder(connector);
+-	struct intel_sdvo *intel_sdvo = enc_to_intel_sdvo(encoder);
++	struct intel_sdvo *intel_sdvo = intel_attached_sdvo(connector);
+ 	struct drm_i915_private *dev_priv = connector->dev->dev_private;
+ 	struct drm_display_mode *newmode;
+ 
+@@ -1672,7 +1590,7 @@ static void intel_sdvo_get_lvds_modes(struct drm_connector *connector)
+ 	 * Assume that the preferred modes are
+ 	 * arranged in priority order.
+ 	 */
+-	intel_ddc_get_modes(connector, intel_sdvo->base.ddc_bus);
++	intel_ddc_get_modes(connector, intel_sdvo->i2c);
+ 	if (list_empty(&connector->probed_modes) == false)
+ 		goto end;
+ 
+@@ -1693,6 +1611,10 @@ end:
+ 		if (newmode->type & DRM_MODE_TYPE_PREFERRED) {
+ 			intel_sdvo->sdvo_lvds_fixed_mode =
+ 				drm_mode_duplicate(connector->dev, newmode);
++
++			drm_mode_set_crtcinfo(intel_sdvo->sdvo_lvds_fixed_mode,
++					      0);
++
+ 			intel_sdvo->is_lvds = true;
+ 			break;
+ 		}
+@@ -1775,8 +1697,7 @@ intel_sdvo_set_property(struct drm_connector *connector,
+ 			struct drm_property *property,
+ 			uint64_t val)
+ {
+-	struct drm_encoder *encoder = intel_attached_encoder(connector);
+-	struct intel_sdvo *intel_sdvo = enc_to_intel_sdvo(encoder);
++	struct intel_sdvo *intel_sdvo = intel_attached_sdvo(connector);
+ 	struct intel_sdvo_connector *intel_sdvo_connector = to_intel_sdvo_connector(connector);
+ 	uint16_t temp_value;
+ 	uint8_t cmd;
+@@ -1786,6 +1707,21 @@ intel_sdvo_set_property(struct drm_connector *connector,
+ 	if (ret)
+ 		return ret;
+ 
++	if (property == intel_sdvo_connector->force_audio_property) {
++		if (val == intel_sdvo_connector->force_audio)
++			return 0;
++
++		intel_sdvo_connector->force_audio = val;
++
++		if (val > 0 && intel_sdvo->has_audio)
++			return 0;
++		if (val < 0 && !intel_sdvo->has_audio)
++			return 0;
++
++		intel_sdvo->has_audio = val > 0;
++		goto done;
++	}
++
+ #define CHECK_PROPERTY(name, NAME) \
+ 	if (intel_sdvo_connector->name == property) { \
+ 		if (intel_sdvo_connector->cur_##name == temp_value) return 0; \
+@@ -1879,9 +1815,8 @@ set_value:
+ 
+ 
+ done:
+-	if (encoder->crtc) {
+-		struct drm_crtc *crtc = encoder->crtc;
+-
++	if (intel_sdvo->base.base.crtc) {
++		struct drm_crtc *crtc = intel_sdvo->base.base.crtc;
+ 		drm_crtc_helper_set_mode(crtc, &crtc->mode, crtc->x,
+ 					 crtc->y, crtc->fb);
+ 	}
+@@ -1909,20 +1844,18 @@ static const struct drm_connector_funcs intel_sdvo_connector_funcs = {
+ static const struct drm_connector_helper_funcs intel_sdvo_connector_helper_funcs = {
+ 	.get_modes = intel_sdvo_get_modes,
+ 	.mode_valid = intel_sdvo_mode_valid,
+-	.best_encoder = intel_attached_encoder,
++	.best_encoder = intel_best_encoder,
+ };
+ 
+ static void intel_sdvo_enc_destroy(struct drm_encoder *encoder)
+ {
+-	struct intel_sdvo *intel_sdvo = enc_to_intel_sdvo(encoder);
+-
+-	if (intel_sdvo->analog_ddc_bus)
+-		intel_i2c_destroy(intel_sdvo->analog_ddc_bus);
++	struct intel_sdvo *intel_sdvo = to_intel_sdvo(encoder);
+ 
+ 	if (intel_sdvo->sdvo_lvds_fixed_mode != NULL)
+ 		drm_mode_destroy(encoder->dev,
+ 				 intel_sdvo->sdvo_lvds_fixed_mode);
+ 
++	i2c_del_adapter(&intel_sdvo->ddc);
+ 	intel_encoder_destroy(encoder);
+ }
+ 
+@@ -1990,53 +1923,48 @@ intel_sdvo_select_ddc_bus(struct drm_i915_private *dev_priv,
+ 		intel_sdvo_guess_ddc_bus(sdvo);
+ }
+ 
+-static bool
+-intel_sdvo_get_digital_encoding_mode(struct intel_sdvo *intel_sdvo, int device)
++static void
++intel_sdvo_select_i2c_bus(struct drm_i915_private *dev_priv,
++			  struct intel_sdvo *sdvo, u32 reg)
+ {
+-	return intel_sdvo_set_target_output(intel_sdvo,
+-					    device == 0 ? SDVO_OUTPUT_TMDS0 : SDVO_OUTPUT_TMDS1) &&
+-		intel_sdvo_get_value(intel_sdvo, SDVO_CMD_GET_ENCODE,
+-				     &intel_sdvo->is_hdmi, 1);
+-}
++	struct sdvo_device_mapping *mapping;
++	u8 pin, speed;
+ 
+-static struct intel_sdvo *
+-intel_sdvo_chan_to_intel_sdvo(struct intel_i2c_chan *chan)
+-{
+-	struct drm_device *dev = chan->drm_dev;
+-	struct drm_encoder *encoder;
++	if (IS_SDVOB(reg))
++		mapping = &dev_priv->sdvo_mappings[0];
++	else
++		mapping = &dev_priv->sdvo_mappings[1];
+ 
+-	list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
+-		struct intel_sdvo *intel_sdvo = enc_to_intel_sdvo(encoder);
+-		if (intel_sdvo->base.ddc_bus == &chan->adapter)
+-			return intel_sdvo;
++	pin = GMBUS_PORT_DPB;
++	speed = GMBUS_RATE_1MHZ >> 8;
++	if (mapping->initialized) {
++		pin = mapping->i2c_pin;
++		speed = mapping->i2c_speed;
+ 	}
+ 
+-	return NULL;
++	sdvo->i2c = &dev_priv->gmbus[pin].adapter;
++	intel_gmbus_set_speed(sdvo->i2c, speed);
++	intel_gmbus_force_bit(sdvo->i2c, true);
+ }
+ 
+-static int intel_sdvo_master_xfer(struct i2c_adapter *i2c_adap,
+-				  struct i2c_msg msgs[], int num)
++static bool
++intel_sdvo_is_hdmi_connector(struct intel_sdvo *intel_sdvo, int device)
+ {
+-	struct intel_sdvo *intel_sdvo;
+-	struct i2c_algo_bit_data *algo_data;
+-	const struct i2c_algorithm *algo;
++	int is_hdmi;
+ 
+-	algo_data = (struct i2c_algo_bit_data *)i2c_adap->algo_data;
+-	intel_sdvo =
+-		intel_sdvo_chan_to_intel_sdvo((struct intel_i2c_chan *)
+-					      (algo_data->data));
+-	if (intel_sdvo == NULL)
+-		return -EINVAL;
++	if (!intel_sdvo_check_supp_encode(intel_sdvo))
++		return false;
+ 
+-	algo = intel_sdvo->base.i2c_bus->algo;
++	if (!intel_sdvo_set_target_output(intel_sdvo,
++					  device == 0 ? SDVO_OUTPUT_TMDS0 : SDVO_OUTPUT_TMDS1))
++		return false;
+ 
+-	intel_sdvo_set_control_bus_switch(intel_sdvo, intel_sdvo->ddc_bus);
+-	return algo->master_xfer(i2c_adap, msgs, num);
+-}
++	is_hdmi = 0;
++	if (!intel_sdvo_get_value(intel_sdvo, SDVO_CMD_GET_ENCODE, &is_hdmi, 1))
++		return false;
+ 
+-static struct i2c_algorithm intel_sdvo_i2c_bit_algo = {
+-	.master_xfer	= intel_sdvo_master_xfer,
+-};
++	return !!is_hdmi;
++}
+ 
+ static u8
+ intel_sdvo_get_slave_addr(struct drm_device *dev, int sdvo_reg)
+@@ -2076,26 +2004,44 @@ intel_sdvo_get_slave_addr(struct drm_device *dev, int sdvo_reg)
+ }
+ 
+ static void
+-intel_sdvo_connector_init(struct drm_encoder *encoder,
+-			  struct drm_connector *connector)
++intel_sdvo_connector_init(struct intel_sdvo_connector *connector,
++			  struct intel_sdvo *encoder)
+ {
+-	drm_connector_init(encoder->dev, connector, &intel_sdvo_connector_funcs,
+-			   connector->connector_type);
++	drm_connector_init(encoder->base.base.dev,
++			   &connector->base.base,
++			   &intel_sdvo_connector_funcs,
++			   connector->base.base.connector_type);
++
++	drm_connector_helper_add(&connector->base.base,
++				 &intel_sdvo_connector_helper_funcs);
++
++	connector->base.base.interlace_allowed = 0;
++	connector->base.base.doublescan_allowed = 0;
++	connector->base.base.display_info.subpixel_order = SubPixelHorizontalRGB;
+ 
+-	drm_connector_helper_add(connector, &intel_sdvo_connector_helper_funcs);
++	intel_connector_attach_encoder(&connector->base, &encoder->base);
++	drm_sysfs_connector_add(&connector->base.base);
++}
+ 
+-	connector->interlace_allowed = 0;
+-	connector->doublescan_allowed = 0;
+-	connector->display_info.subpixel_order = SubPixelHorizontalRGB;
++static void
++intel_sdvo_add_hdmi_properties(struct intel_sdvo_connector *connector)
++{
++	struct drm_device *dev = connector->base.base.dev;
+ 
+-	drm_mode_connector_attach_encoder(connector, encoder);
+-	drm_sysfs_connector_add(connector);
++	connector->force_audio_property =
++		drm_property_create(dev, DRM_MODE_PROP_RANGE, "force_audio", 2);
++	if (connector->force_audio_property) {
++		connector->force_audio_property->values[0] = -1;
++		connector->force_audio_property->values[1] = 1;
++		drm_connector_attach_property(&connector->base.base,
++					      connector->force_audio_property, 0);
++	}
+ }
+ 
+ static bool
+ intel_sdvo_dvi_init(struct intel_sdvo *intel_sdvo, int device)
+ {
+-	struct drm_encoder *encoder = &intel_sdvo->base.enc;
++	struct drm_encoder *encoder = &intel_sdvo->base.base;
+ 	struct drm_connector *connector;
+ 	struct intel_connector *intel_connector;
+ 	struct intel_sdvo_connector *intel_sdvo_connector;
+@@ -2118,19 +2064,20 @@ intel_sdvo_dvi_init(struct intel_sdvo *intel_sdvo, int device)
+ 	encoder->encoder_type = DRM_MODE_ENCODER_TMDS;
+ 	connector->connector_type = DRM_MODE_CONNECTOR_DVID;
+ 
+-	if (intel_sdvo_get_supp_encode(intel_sdvo, &intel_sdvo->encode)
+-		&& intel_sdvo_get_digital_encoding_mode(intel_sdvo, device)
+-		&& intel_sdvo->is_hdmi) {
++	if (intel_sdvo_is_hdmi_connector(intel_sdvo, device)) {
+ 		/* enable hdmi encoding mode if supported */
+ 		intel_sdvo_set_encode(intel_sdvo, SDVO_ENCODE_HDMI);
+ 		intel_sdvo_set_colorimetry(intel_sdvo,
+ 					   SDVO_COLORIMETRY_RGB256);
+ 		connector->connector_type = DRM_MODE_CONNECTOR_HDMIA;
++		intel_sdvo->is_hdmi = true;
+ 	}
+ 	intel_sdvo->base.clone_mask = ((1 << INTEL_SDVO_NON_TV_CLONE_BIT) |
+ 				       (1 << INTEL_ANALOG_CLONE_BIT));
+ 
+-	intel_sdvo_connector_init(encoder, connector);
++	intel_sdvo_connector_init(intel_sdvo_connector, intel_sdvo);
++
++	intel_sdvo_add_hdmi_properties(intel_sdvo_connector);
+ 
+ 	return true;
+ }
+@@ -2138,36 +2085,36 @@ intel_sdvo_dvi_init(struct intel_sdvo *intel_sdvo, int device)
+ static bool
+ intel_sdvo_tv_init(struct intel_sdvo *intel_sdvo, int type)
+ {
+-        struct drm_encoder *encoder = &intel_sdvo->base.enc;
+-        struct drm_connector *connector;
+-        struct intel_connector *intel_connector;
+-        struct intel_sdvo_connector *intel_sdvo_connector;
++	struct drm_encoder *encoder = &intel_sdvo->base.base;
++	struct drm_connector *connector;
++	struct intel_connector *intel_connector;
++	struct intel_sdvo_connector *intel_sdvo_connector;
+ 
+ 	intel_sdvo_connector = kzalloc(sizeof(struct intel_sdvo_connector), GFP_KERNEL);
+ 	if (!intel_sdvo_connector)
+ 		return false;
+ 
+ 	intel_connector = &intel_sdvo_connector->base;
+-        connector = &intel_connector->base;
+-        encoder->encoder_type = DRM_MODE_ENCODER_TVDAC;
+-        connector->connector_type = DRM_MODE_CONNECTOR_SVIDEO;
++	connector = &intel_connector->base;
++	encoder->encoder_type = DRM_MODE_ENCODER_TVDAC;
++	connector->connector_type = DRM_MODE_CONNECTOR_SVIDEO;
+ 
+-        intel_sdvo->controlled_output |= type;
+-        intel_sdvo_connector->output_flag = type;
++	intel_sdvo->controlled_output |= type;
++	intel_sdvo_connector->output_flag = type;
+ 
+-        intel_sdvo->is_tv = true;
+-        intel_sdvo->base.needs_tv_clock = true;
+-        intel_sdvo->base.clone_mask = 1 << INTEL_SDVO_TV_CLONE_BIT;
++	intel_sdvo->is_tv = true;
++	intel_sdvo->base.needs_tv_clock = true;
++	intel_sdvo->base.clone_mask = 1 << INTEL_SDVO_TV_CLONE_BIT;
+ 
+-        intel_sdvo_connector_init(encoder, connector);
++	intel_sdvo_connector_init(intel_sdvo_connector, intel_sdvo);
+ 
+-        if (!intel_sdvo_tv_create_property(intel_sdvo, intel_sdvo_connector, type))
++	if (!intel_sdvo_tv_create_property(intel_sdvo, intel_sdvo_connector, type))
+ 		goto err;
+ 
+-        if (!intel_sdvo_create_enhance_property(intel_sdvo, intel_sdvo_connector))
++	if (!intel_sdvo_create_enhance_property(intel_sdvo, intel_sdvo_connector))
+ 		goto err;
+ 
+-        return true;
++	return true;
+ 
+ err:
+ 	intel_sdvo_destroy(connector);
+@@ -2177,43 +2124,44 @@ err:
+ static bool
+ intel_sdvo_analog_init(struct intel_sdvo *intel_sdvo, int device)
+ {
+-        struct drm_encoder *encoder = &intel_sdvo->base.enc;
+-        struct drm_connector *connector;
+-        struct intel_connector *intel_connector;
+-        struct intel_sdvo_connector *intel_sdvo_connector;
++	struct drm_encoder *encoder = &intel_sdvo->base.base;
++	struct drm_connector *connector;
++	struct intel_connector *intel_connector;
++	struct intel_sdvo_connector *intel_sdvo_connector;
+ 
+ 	intel_sdvo_connector = kzalloc(sizeof(struct intel_sdvo_connector), GFP_KERNEL);
+ 	if (!intel_sdvo_connector)
+ 		return false;
+ 
+ 	intel_connector = &intel_sdvo_connector->base;
+-        connector = &intel_connector->base;
++	connector = &intel_connector->base;
+ 	connector->polled = DRM_CONNECTOR_POLL_CONNECT;
+-        encoder->encoder_type = DRM_MODE_ENCODER_DAC;
+-        connector->connector_type = DRM_MODE_CONNECTOR_VGA;
+-
+-        if (device == 0) {
+-                intel_sdvo->controlled_output |= SDVO_OUTPUT_RGB0;
+-                intel_sdvo_connector->output_flag = SDVO_OUTPUT_RGB0;
+-        } else if (device == 1) {
+-                intel_sdvo->controlled_output |= SDVO_OUTPUT_RGB1;
+-                intel_sdvo_connector->output_flag = SDVO_OUTPUT_RGB1;
+-        }
+-
+-        intel_sdvo->base.clone_mask = ((1 << INTEL_SDVO_NON_TV_CLONE_BIT) |
++	encoder->encoder_type = DRM_MODE_ENCODER_DAC;
++	connector->connector_type = DRM_MODE_CONNECTOR_VGA;
++
++	if (device == 0) {
++		intel_sdvo->controlled_output |= SDVO_OUTPUT_RGB0;
++		intel_sdvo_connector->output_flag = SDVO_OUTPUT_RGB0;
++	} else if (device == 1) {
++		intel_sdvo->controlled_output |= SDVO_OUTPUT_RGB1;
++		intel_sdvo_connector->output_flag = SDVO_OUTPUT_RGB1;
++	}
++
++	intel_sdvo->base.clone_mask = ((1 << INTEL_SDVO_NON_TV_CLONE_BIT) |
+ 				       (1 << INTEL_ANALOG_CLONE_BIT));
+ 
+-        intel_sdvo_connector_init(encoder, connector);
+-        return true;
++	intel_sdvo_connector_init(intel_sdvo_connector,
++				  intel_sdvo);
++	return true;
+ }
+ 
+ static bool
+ intel_sdvo_lvds_init(struct intel_sdvo *intel_sdvo, int device)
+ {
+-        struct drm_encoder *encoder = &intel_sdvo->base.enc;
+-        struct drm_connector *connector;
+-        struct intel_connector *intel_connector;
+-        struct intel_sdvo_connector *intel_sdvo_connector;
++	struct drm_encoder *encoder = &intel_sdvo->base.base;
++	struct drm_connector *connector;
++	struct intel_connector *intel_connector;
++	struct intel_sdvo_connector *intel_sdvo_connector;
+ 
+ 	intel_sdvo_connector = kzalloc(sizeof(struct intel_sdvo_connector), GFP_KERNEL);
+ 	if (!intel_sdvo_connector)
+@@ -2221,22 +2169,22 @@ intel_sdvo_lvds_init(struct intel_sdvo *intel_sdvo, int device)
+ 
+ 	intel_connector = &intel_sdvo_connector->base;
+ 	connector = &intel_connector->base;
+-        encoder->encoder_type = DRM_MODE_ENCODER_LVDS;
+-        connector->connector_type = DRM_MODE_CONNECTOR_LVDS;
+-
+-        if (device == 0) {
+-                intel_sdvo->controlled_output |= SDVO_OUTPUT_LVDS0;
+-                intel_sdvo_connector->output_flag = SDVO_OUTPUT_LVDS0;
+-        } else if (device == 1) {
+-                intel_sdvo->controlled_output |= SDVO_OUTPUT_LVDS1;
+-                intel_sdvo_connector->output_flag = SDVO_OUTPUT_LVDS1;
+-        }
+-
+-        intel_sdvo->base.clone_mask = ((1 << INTEL_ANALOG_CLONE_BIT) |
++	encoder->encoder_type = DRM_MODE_ENCODER_LVDS;
++	connector->connector_type = DRM_MODE_CONNECTOR_LVDS;
++
++	if (device == 0) {
++		intel_sdvo->controlled_output |= SDVO_OUTPUT_LVDS0;
++		intel_sdvo_connector->output_flag = SDVO_OUTPUT_LVDS0;
++	} else if (device == 1) {
++		intel_sdvo->controlled_output |= SDVO_OUTPUT_LVDS1;
++		intel_sdvo_connector->output_flag = SDVO_OUTPUT_LVDS1;
++	}
++
++	intel_sdvo->base.clone_mask = ((1 << INTEL_ANALOG_CLONE_BIT) |
+ 				       (1 << INTEL_SDVO_LVDS_CLONE_BIT));
+ 
+-        intel_sdvo_connector_init(encoder, connector);
+-        if (!intel_sdvo_create_enhance_property(intel_sdvo, intel_sdvo_connector))
++	intel_sdvo_connector_init(intel_sdvo_connector, intel_sdvo);
++	if (!intel_sdvo_create_enhance_property(intel_sdvo, intel_sdvo_connector))
+ 		goto err;
+ 
+ 	return true;
+@@ -2307,7 +2255,7 @@ static bool intel_sdvo_tv_create_property(struct intel_sdvo *intel_sdvo,
+ 					  struct intel_sdvo_connector *intel_sdvo_connector,
+ 					  int type)
+ {
+-	struct drm_device *dev = intel_sdvo->base.enc.dev;
++	struct drm_device *dev = intel_sdvo->base.base.dev;
+ 	struct intel_sdvo_tv_format format;
+ 	uint32_t format_map, i;
+ 
+@@ -2373,7 +2321,7 @@ intel_sdvo_create_enhance_property_tv(struct intel_sdvo *intel_sdvo,
+ 				      struct intel_sdvo_connector *intel_sdvo_connector,
+ 				      struct intel_sdvo_enhancements_reply enhancements)
+ {
+-	struct drm_device *dev = intel_sdvo->base.enc.dev;
++	struct drm_device *dev = intel_sdvo->base.base.dev;
+ 	struct drm_connector *connector = &intel_sdvo_connector->base.base;
+ 	uint16_t response, data_value[2];
+ 
+@@ -2502,7 +2450,7 @@ intel_sdvo_create_enhance_property_lvds(struct intel_sdvo *intel_sdvo,
+ 					struct intel_sdvo_connector *intel_sdvo_connector,
+ 					struct intel_sdvo_enhancements_reply enhancements)
+ {
+-	struct drm_device *dev = intel_sdvo->base.enc.dev;
++	struct drm_device *dev = intel_sdvo->base.base.dev;
+ 	struct drm_connector *connector = &intel_sdvo_connector->base.base;
+ 	uint16_t response, data_value[2];
+ 
+@@ -2535,7 +2483,43 @@ static bool intel_sdvo_create_enhance_property(struct intel_sdvo *intel_sdvo,
+ 		return intel_sdvo_create_enhance_property_lvds(intel_sdvo, intel_sdvo_connector, enhancements.reply);
+ 	else
+ 		return true;
++}
++
++static int intel_sdvo_ddc_proxy_xfer(struct i2c_adapter *adapter,
++				     struct i2c_msg *msgs,
++				     int num)
++{
++	struct intel_sdvo *sdvo = adapter->algo_data;
+ 
++	if (!intel_sdvo_set_control_bus_switch(sdvo, sdvo->ddc_bus))
++		return -EIO;
++
++	return sdvo->i2c->algo->master_xfer(sdvo->i2c, msgs, num);
++}
++
++static u32 intel_sdvo_ddc_proxy_func(struct i2c_adapter *adapter)
++{
++	struct intel_sdvo *sdvo = adapter->algo_data;
++	return sdvo->i2c->algo->functionality(sdvo->i2c);
++}
++
++static const struct i2c_algorithm intel_sdvo_ddc_proxy = {
++	.master_xfer	= intel_sdvo_ddc_proxy_xfer,
++	.functionality	= intel_sdvo_ddc_proxy_func
++};
++
++static bool
++intel_sdvo_init_ddc_proxy(struct intel_sdvo *sdvo,
++			  struct drm_device *dev)
++{
++	sdvo->ddc.owner = THIS_MODULE;
++	sdvo->ddc.class = I2C_CLASS_DDC;
++	snprintf(sdvo->ddc.name, I2C_NAME_SIZE, "SDVO DDC proxy");
++	sdvo->ddc.dev.parent = &dev->pdev->dev;
++	sdvo->ddc.algo_data = sdvo;
++	sdvo->ddc.algo = &intel_sdvo_ddc_proxy;
++
++	return i2c_add_adapter(&sdvo->ddc) == 0;
+ }
+ 
+ bool intel_sdvo_init(struct drm_device *dev, int sdvo_reg)
+@@ -2543,95 +2527,66 @@ bool intel_sdvo_init(struct drm_device *dev, int sdvo_reg)
+ 	struct drm_i915_private *dev_priv = dev->dev_private;
+ 	struct intel_encoder *intel_encoder;
+ 	struct intel_sdvo *intel_sdvo;
+-	u8 ch[0x40];
+ 	int i;
+-	u32 i2c_reg, ddc_reg, analog_ddc_reg;
+ 
+ 	intel_sdvo = kzalloc(sizeof(struct intel_sdvo), GFP_KERNEL);
+ 	if (!intel_sdvo)
+ 		return false;
+ 
++	if (!intel_sdvo_init_ddc_proxy(intel_sdvo, dev)) {
++		kfree(intel_sdvo);
++		return false;
++	}
++
+ 	intel_sdvo->sdvo_reg = sdvo_reg;
+ 
+ 	intel_encoder = &intel_sdvo->base;
+ 	intel_encoder->type = INTEL_OUTPUT_SDVO;
++	/* encoder type will be decided later */
++	drm_encoder_init(dev, &intel_encoder->base, &intel_sdvo_enc_funcs, 0);
+ 
+-	if (HAS_PCH_SPLIT(dev)) {
+-		i2c_reg = PCH_GPIOE;
+-		ddc_reg = PCH_GPIOE;
+-		analog_ddc_reg = PCH_GPIOA;
+-	} else {
+-		i2c_reg = GPIOE;
+-		ddc_reg = GPIOE;
+-		analog_ddc_reg = GPIOA;
+-	}
+-
+-	/* setup the DDC bus. */
+-	if (IS_SDVOB(sdvo_reg))
+-		intel_encoder->i2c_bus = intel_i2c_create(dev, i2c_reg, "SDVOCTRL_E for SDVOB");
+-	else
+-		intel_encoder->i2c_bus = intel_i2c_create(dev, i2c_reg, "SDVOCTRL_E for SDVOC");
+-
+-	if (!intel_encoder->i2c_bus)
+-		goto err_inteloutput;
+-
+-	intel_sdvo->slave_addr = intel_sdvo_get_slave_addr(dev, sdvo_reg);
+-
+-	/* Save the bit-banging i2c functionality for use by the DDC wrapper */
+-	intel_sdvo_i2c_bit_algo.functionality = intel_encoder->i2c_bus->algo->functionality;
++	intel_sdvo->slave_addr = intel_sdvo_get_slave_addr(dev, sdvo_reg) >> 1;
++	intel_sdvo_select_i2c_bus(dev_priv, intel_sdvo, sdvo_reg);
+ 
+ 	/* Read the regs to test if we can talk to the device */
+ 	for (i = 0; i < 0x40; i++) {
+-		if (!intel_sdvo_read_byte(intel_sdvo, i, &ch[i])) {
++		u8 byte;
++
++		if (!intel_sdvo_read_byte(intel_sdvo, i, &byte)) {
+ 			DRM_DEBUG_KMS("No SDVO device found on SDVO%c\n",
+ 				      IS_SDVOB(sdvo_reg) ? 'B' : 'C');
+-			goto err_i2c;
++			goto err;
+ 		}
+ 	}
+ 
+-	/* setup the DDC bus. */
+-	if (IS_SDVOB(sdvo_reg)) {
+-		intel_encoder->ddc_bus = intel_i2c_create(dev, ddc_reg, "SDVOB DDC BUS");
+-		intel_sdvo->analog_ddc_bus = intel_i2c_create(dev, analog_ddc_reg,
+-						"SDVOB/VGA DDC BUS");
++	if (IS_SDVOB(sdvo_reg))
+ 		dev_priv->hotplug_supported_mask |= SDVOB_HOTPLUG_INT_STATUS;
+-	} else {
+-		intel_encoder->ddc_bus = intel_i2c_create(dev, ddc_reg, "SDVOC DDC BUS");
+-		intel_sdvo->analog_ddc_bus = intel_i2c_create(dev, analog_ddc_reg,
+-						"SDVOC/VGA DDC BUS");
++	else
+ 		dev_priv->hotplug_supported_mask |= SDVOC_HOTPLUG_INT_STATUS;
+-	}
+-	if (intel_encoder->ddc_bus == NULL || intel_sdvo->analog_ddc_bus == NULL)
+-		goto err_i2c;
+ 
+-	/* Wrap with our custom algo which switches to DDC mode */
+-	intel_encoder->ddc_bus->algo = &intel_sdvo_i2c_bit_algo;
+-
+-	/* encoder type will be decided later */
+-	drm_encoder_init(dev, &intel_encoder->enc, &intel_sdvo_enc_funcs, 0);
+-	drm_encoder_helper_add(&intel_encoder->enc, &intel_sdvo_helper_funcs);
++	drm_encoder_helper_add(&intel_encoder->base, &intel_sdvo_helper_funcs);
+ 
+ 	/* In default case sdvo lvds is false */
+ 	if (!intel_sdvo_get_capabilities(intel_sdvo, &intel_sdvo->caps))
+-		goto err_enc;
++		goto err;
+ 
+ 	if (intel_sdvo_output_setup(intel_sdvo,
+ 				    intel_sdvo->caps.output_flags) != true) {
+ 		DRM_DEBUG_KMS("SDVO output failed to setup on SDVO%c\n",
+ 			      IS_SDVOB(sdvo_reg) ? 'B' : 'C');
+-		goto err_enc;
++		goto err;
+ 	}
+ 
+ 	intel_sdvo_select_ddc_bus(dev_priv, intel_sdvo, sdvo_reg);
+ 
+ 	/* Set the input timing to the screen. Assume always input 0. */
+ 	if (!intel_sdvo_set_target_input(intel_sdvo))
+-		goto err_enc;
++		goto err;
+ 
+ 	if (!intel_sdvo_get_input_pixel_clock_range(intel_sdvo,
+ 						    &intel_sdvo->pixel_clock_min,
+ 						    &intel_sdvo->pixel_clock_max))
+-		goto err_enc;
++		goto err;
+ 
+ 	DRM_DEBUG_KMS("%s device VID/DID: %02X:%02X.%02X, "
+ 			"clock range %dMHz - %dMHz, "
+@@ -2651,16 +2606,9 @@ bool intel_sdvo_init(struct drm_device *dev, int sdvo_reg)
+ 			(SDVO_OUTPUT_TMDS1 | SDVO_OUTPUT_RGB1) ? 'Y' : 'N');
+ 	return true;
+ 
+-err_enc:
+-	drm_encoder_cleanup(&intel_encoder->enc);
+-err_i2c:
+-	if (intel_sdvo->analog_ddc_bus != NULL)
+-		intel_i2c_destroy(intel_sdvo->analog_ddc_bus);
+-	if (intel_encoder->ddc_bus != NULL)
+-		intel_i2c_destroy(intel_encoder->ddc_bus);
+-	if (intel_encoder->i2c_bus != NULL)
+-		intel_i2c_destroy(intel_encoder->i2c_bus);
+-err_inteloutput:
++err:
++	drm_encoder_cleanup(&intel_encoder->base);
++	i2c_del_adapter(&intel_sdvo->ddc);
+ 	kfree(intel_sdvo);
+ 
+ 	return false;
+diff --git a/drivers/gpu/drm/i915/intel_tv.c b/drivers/gpu/drm/i915/intel_tv.c
+index 4a117e3..2f76819 100644
+--- a/drivers/gpu/drm/i915/intel_tv.c
++++ b/drivers/gpu/drm/i915/intel_tv.c
+@@ -48,7 +48,7 @@ struct intel_tv {
+ 	struct intel_encoder base;
+ 
+ 	int type;
+-	char *tv_format;
++	const char *tv_format;
+ 	int margin[4];
+ 	u32 save_TV_H_CTL_1;
+ 	u32 save_TV_H_CTL_2;
+@@ -350,7 +350,7 @@ static const struct video_levels component_levels = {
+ 
+ 
+ struct tv_mode {
+-	char *name;
++	const char *name;
+ 	int clock;
+ 	int refresh; /* in millihertz (for precision) */
+ 	u32 oversample;
+@@ -900,7 +900,14 @@ static const struct tv_mode tv_modes[] = {
+ 
+ static struct intel_tv *enc_to_intel_tv(struct drm_encoder *encoder)
+ {
+-	return container_of(enc_to_intel_encoder(encoder), struct intel_tv, base);
++	return container_of(encoder, struct intel_tv, base.base);
++}
++
++static struct intel_tv *intel_attached_tv(struct drm_connector *connector)
++{
++	return container_of(intel_attached_encoder(connector),
++			    struct intel_tv,
++			    base);
+ }
+ 
+ static void
+@@ -922,7 +929,7 @@ intel_tv_dpms(struct drm_encoder *encoder, int mode)
+ }
+ 
+ static const struct tv_mode *
+-intel_tv_mode_lookup (char *tv_format)
++intel_tv_mode_lookup(const char *tv_format)
+ {
+ 	int i;
+ 
+@@ -936,22 +943,23 @@ intel_tv_mode_lookup (char *tv_format)
+ }
+ 
+ static const struct tv_mode *
+-intel_tv_mode_find (struct intel_tv *intel_tv)
++intel_tv_mode_find(struct intel_tv *intel_tv)
+ {
+ 	return intel_tv_mode_lookup(intel_tv->tv_format);
+ }
+ 
+ static enum drm_mode_status
+-intel_tv_mode_valid(struct drm_connector *connector, struct drm_display_mode *mode)
++intel_tv_mode_valid(struct drm_connector *connector,
++		    struct drm_display_mode *mode)
+ {
+-	struct drm_encoder *encoder = intel_attached_encoder(connector);
+-	struct intel_tv *intel_tv = enc_to_intel_tv(encoder);
++	struct intel_tv *intel_tv = intel_attached_tv(connector);
+ 	const struct tv_mode *tv_mode = intel_tv_mode_find(intel_tv);
+ 
+ 	/* Ensure TV refresh is close to desired refresh */
+ 	if (tv_mode && abs(tv_mode->refresh - drm_mode_vrefresh(mode) * 1000)
+ 				< 1000)
+ 		return MODE_OK;
++
+ 	return MODE_CLOCK_RANGE;
+ }
+ 
+@@ -1131,7 +1139,7 @@ intel_tv_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode,
+ 			   color_conversion->av);
+ 	}
+ 
+-	if (IS_I965G(dev))
++	if (INTEL_INFO(dev)->gen >= 4)
+ 		I915_WRITE(TV_CLR_KNOBS, 0x00404000);
+ 	else
+ 		I915_WRITE(TV_CLR_KNOBS, 0x00606000);
+@@ -1157,12 +1165,12 @@ intel_tv_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode,
+ 		I915_WRITE(dspbase_reg, I915_READ(dspbase_reg));
+ 
+ 		/* Wait for vblank for the disable to take effect */
+-		if (!IS_I9XX(dev))
++		if (IS_GEN2(dev))
+ 			intel_wait_for_vblank(dev, intel_crtc->pipe);
+ 
+-		I915_WRITE(pipeconf_reg, pipeconf & ~PIPEACONF_ENABLE);
++		I915_WRITE(pipeconf_reg, pipeconf & ~PIPECONF_ENABLE);
+ 		/* Wait for vblank for the disable to take effect. */
+-		intel_wait_for_vblank(dev, intel_crtc->pipe);
++		intel_wait_for_pipe_off(dev, intel_crtc->pipe);
+ 
+ 		/* Filter ctl must be set before TV_WIN_SIZE */
+ 		I915_WRITE(TV_FILTER_CTL_1, TV_AUTO_SCALE);
+@@ -1196,7 +1204,7 @@ intel_tv_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode,
+ 		I915_WRITE(TV_V_LUMA_0 + (i<<2), tv_mode->filter_table[j++]);
+ 	for (i = 0; i < 43; i++)
+ 		I915_WRITE(TV_V_CHROMA_0 + (i<<2), tv_mode->filter_table[j++]);
+-	I915_WRITE(TV_DAC, 0);
++	I915_WRITE(TV_DAC, I915_READ(TV_DAC) & TV_DAC_SAVE);
+ 	I915_WRITE(TV_CTL, tv_ctl);
+ }
+ 
+@@ -1228,15 +1236,13 @@ static const struct drm_display_mode reported_modes[] = {
+ static int
+ intel_tv_detect_type (struct intel_tv *intel_tv)
+ {
+-	struct drm_encoder *encoder = &intel_tv->base.enc;
++	struct drm_encoder *encoder = &intel_tv->base.base;
+ 	struct drm_device *dev = encoder->dev;
+ 	struct drm_i915_private *dev_priv = dev->dev_private;
+ 	unsigned long irqflags;
+ 	u32 tv_ctl, save_tv_ctl;
+ 	u32 tv_dac, save_tv_dac;
+-	int type = DRM_MODE_CONNECTOR_Unknown;
+-
+-	tv_dac = I915_READ(TV_DAC);
++	int type;
+ 
+ 	/* Disable TV interrupts around load detect or we'll recurse */
+ 	spin_lock_irqsave(&dev_priv->user_irq_lock, irqflags);
+@@ -1244,19 +1250,14 @@ intel_tv_detect_type (struct intel_tv *intel_tv)
+ 			      PIPE_HOTPLUG_TV_INTERRUPT_ENABLE);
+ 	spin_unlock_irqrestore(&dev_priv->user_irq_lock, irqflags);
+ 
+-	/*
+-	 * Detect TV by polling)
+-	 */
+-	save_tv_dac = tv_dac;
+-	tv_ctl = I915_READ(TV_CTL);
+-	save_tv_ctl = tv_ctl;
+-	tv_ctl &= ~TV_ENC_ENABLE;
+-	tv_ctl &= ~TV_TEST_MODE_MASK;
++	save_tv_dac = tv_dac = I915_READ(TV_DAC);
++	save_tv_ctl = tv_ctl = I915_READ(TV_CTL);
++
++	/* Poll for TV detection */
++	tv_ctl &= ~(TV_ENC_ENABLE | TV_TEST_MODE_MASK);
+ 	tv_ctl |= TV_TEST_MODE_MONITOR_DETECT;
+-	tv_dac &= ~TVDAC_SENSE_MASK;
+-	tv_dac &= ~DAC_A_MASK;
+-	tv_dac &= ~DAC_B_MASK;
+-	tv_dac &= ~DAC_C_MASK;
++
++	tv_dac &= ~(TVDAC_SENSE_MASK | DAC_A_MASK | DAC_B_MASK | DAC_C_MASK);
+ 	tv_dac |= (TVDAC_STATE_CHG_EN |
+ 		   TVDAC_A_SENSE_CTL |
+ 		   TVDAC_B_SENSE_CTL |
+@@ -1265,37 +1266,40 @@ intel_tv_detect_type (struct intel_tv *intel_tv)
+ 		   DAC_A_0_7_V |
+ 		   DAC_B_0_7_V |
+ 		   DAC_C_0_7_V);
++
+ 	I915_WRITE(TV_CTL, tv_ctl);
+ 	I915_WRITE(TV_DAC, tv_dac);
+ 	POSTING_READ(TV_DAC);
+-	msleep(20);
+ 
+-	tv_dac = I915_READ(TV_DAC);
+-	I915_WRITE(TV_DAC, save_tv_dac);
+-	I915_WRITE(TV_CTL, save_tv_ctl);
+-	POSTING_READ(TV_CTL);
+-	msleep(20);
++	intel_wait_for_vblank(intel_tv->base.base.dev,
++			      to_intel_crtc(intel_tv->base.base.crtc)->pipe);
+ 
+-	/*
+-	 *  A B C
+-	 *  0 1 1 Composite
+-	 *  1 0 X svideo
+-	 *  0 0 0 Component
+-	 */
+-	if ((tv_dac & TVDAC_SENSE_MASK) == (TVDAC_B_SENSE | TVDAC_C_SENSE)) {
+-		DRM_DEBUG_KMS("Detected Composite TV connection\n");
+-		type = DRM_MODE_CONNECTOR_Composite;
+-	} else if ((tv_dac & (TVDAC_A_SENSE|TVDAC_B_SENSE)) == TVDAC_A_SENSE) {
+-		DRM_DEBUG_KMS("Detected S-Video TV connection\n");
+-		type = DRM_MODE_CONNECTOR_SVIDEO;
+-	} else if ((tv_dac & TVDAC_SENSE_MASK) == 0) {
+-		DRM_DEBUG_KMS("Detected Component TV connection\n");
+-		type = DRM_MODE_CONNECTOR_Component;
+-	} else {
+-		DRM_DEBUG_KMS("No TV connection detected\n");
+-		type = -1;
++	type = -1;
++	if (wait_for((tv_dac = I915_READ(TV_DAC)) & TVDAC_STATE_CHG, 20) == 0) {
++		DRM_DEBUG_KMS("TV detected: %x, %x\n", tv_ctl, tv_dac);
++		/*
++		 *  A B C
++		 *  0 1 1 Composite
++		 *  1 0 X svideo
++		 *  0 0 0 Component
++		 */
++		if ((tv_dac & TVDAC_SENSE_MASK) == (TVDAC_B_SENSE | TVDAC_C_SENSE)) {
++			DRM_DEBUG_KMS("Detected Composite TV connection\n");
++			type = DRM_MODE_CONNECTOR_Composite;
++		} else if ((tv_dac & (TVDAC_A_SENSE|TVDAC_B_SENSE)) == TVDAC_A_SENSE) {
++			DRM_DEBUG_KMS("Detected S-Video TV connection\n");
++			type = DRM_MODE_CONNECTOR_SVIDEO;
++		} else if ((tv_dac & TVDAC_SENSE_MASK) == 0) {
++			DRM_DEBUG_KMS("Detected Component TV connection\n");
++			type = DRM_MODE_CONNECTOR_Component;
++		} else {
++			DRM_DEBUG_KMS("Unrecognised TV connection\n");
++		}
+ 	}
+ 
++	I915_WRITE(TV_DAC, save_tv_dac & ~TVDAC_STATE_CHG_EN);
++	I915_WRITE(TV_CTL, save_tv_ctl);
++
+ 	/* Restore interrupt config */
+ 	spin_lock_irqsave(&dev_priv->user_irq_lock, irqflags);
+ 	i915_enable_pipestat(dev_priv, 0, PIPE_HOTPLUG_INTERRUPT_ENABLE |
+@@ -1311,8 +1315,7 @@ intel_tv_detect_type (struct intel_tv *intel_tv)
+  */
+ static void intel_tv_find_better_format(struct drm_connector *connector)
+ {
+-	struct drm_encoder *encoder = intel_attached_encoder(connector);
+-	struct intel_tv *intel_tv = enc_to_intel_tv(encoder);
++	struct intel_tv *intel_tv = intel_attached_tv(connector);
+ 	const struct tv_mode *tv_mode = intel_tv_mode_find(intel_tv);
+ 	int i;
+ 
+@@ -1344,14 +1347,13 @@ static enum drm_connector_status
+ intel_tv_detect(struct drm_connector *connector, bool force)
+ {
+ 	struct drm_display_mode mode;
+-	struct drm_encoder *encoder = intel_attached_encoder(connector);
+-	struct intel_tv *intel_tv = enc_to_intel_tv(encoder);
++	struct intel_tv *intel_tv = intel_attached_tv(connector);
+ 	int type;
+ 
+ 	mode = reported_modes[0];
+ 	drm_mode_set_crtcinfo(&mode, CRTC_INTERLACE_HALVE_V);
+ 
+-	if (encoder->crtc && encoder->crtc->enabled) {
++	if (intel_tv->base.base.crtc && intel_tv->base.base.crtc->enabled) {
+ 		type = intel_tv_detect_type(intel_tv);
+ 	} else if (force) {
+ 		struct drm_crtc *crtc;
+@@ -1375,11 +1377,10 @@ intel_tv_detect(struct drm_connector *connector, bool force)
+ 	return connector_status_connected;
+ }
+ 
+-static struct input_res {
+-	char *name;
++static const struct input_res {
++	const char *name;
+ 	int w, h;
+-} input_res_table[] =
+-{
++} input_res_table[] = {
+ 	{"640x480", 640, 480},
+ 	{"800x600", 800, 600},
+ 	{"1024x768", 1024, 768},
+@@ -1396,8 +1397,7 @@ static void
+ intel_tv_chose_preferred_modes(struct drm_connector *connector,
+ 			       struct drm_display_mode *mode_ptr)
+ {
+-	struct drm_encoder *encoder = intel_attached_encoder(connector);
+-	struct intel_tv *intel_tv = enc_to_intel_tv(encoder);
++	struct intel_tv *intel_tv = intel_attached_tv(connector);
+ 	const struct tv_mode *tv_mode = intel_tv_mode_find(intel_tv);
+ 
+ 	if (tv_mode->nbr_end < 480 && mode_ptr->vdisplay == 480)
+@@ -1422,15 +1422,14 @@ static int
+ intel_tv_get_modes(struct drm_connector *connector)
+ {
+ 	struct drm_display_mode *mode_ptr;
+-	struct drm_encoder *encoder = intel_attached_encoder(connector);
+-	struct intel_tv *intel_tv = enc_to_intel_tv(encoder);
++	struct intel_tv *intel_tv = intel_attached_tv(connector);
+ 	const struct tv_mode *tv_mode = intel_tv_mode_find(intel_tv);
+ 	int j, count = 0;
+ 	u64 tmp;
+ 
+ 	for (j = 0; j < ARRAY_SIZE(input_res_table);
+ 	     j++) {
+-		struct input_res *input = &input_res_table[j];
++		const struct input_res *input = &input_res_table[j];
+ 		unsigned int hactive_s = input->w;
+ 		unsigned int vactive_s = input->h;
+ 
+@@ -1488,9 +1487,8 @@ intel_tv_set_property(struct drm_connector *connector, struct drm_property *prop
+ 		      uint64_t val)
+ {
+ 	struct drm_device *dev = connector->dev;
+-	struct drm_encoder *encoder = intel_attached_encoder(connector);
+-	struct intel_tv *intel_tv = enc_to_intel_tv(encoder);
+-	struct drm_crtc *crtc = encoder->crtc;
++	struct intel_tv *intel_tv = intel_attached_tv(connector);
++	struct drm_crtc *crtc = intel_tv->base.base.crtc;
+ 	int ret = 0;
+ 	bool changed = false;
+ 
+@@ -1555,7 +1553,7 @@ static const struct drm_connector_funcs intel_tv_connector_funcs = {
+ static const struct drm_connector_helper_funcs intel_tv_connector_helper_funcs = {
+ 	.mode_valid = intel_tv_mode_valid,
+ 	.get_modes = intel_tv_get_modes,
+-	.best_encoder = intel_attached_encoder,
++	.best_encoder = intel_best_encoder,
+ };
+ 
+ static const struct drm_encoder_funcs intel_tv_enc_funcs = {
+@@ -1607,7 +1605,7 @@ intel_tv_init(struct drm_device *dev)
+ 	struct intel_encoder *intel_encoder;
+ 	struct intel_connector *intel_connector;
+ 	u32 tv_dac_on, tv_dac_off, save_tv_dac;
+-	char **tv_format_names;
++	char *tv_format_names[ARRAY_SIZE(tv_modes)];
+ 	int i, initial_mode = 0;
+ 
+ 	if ((I915_READ(TV_CTL) & TV_FUSE_STATE_MASK) == TV_FUSE_STATE_DISABLED)
+@@ -1661,15 +1659,15 @@ intel_tv_init(struct drm_device *dev)
+ 	drm_connector_init(dev, connector, &intel_tv_connector_funcs,
+ 			   DRM_MODE_CONNECTOR_SVIDEO);
+ 
+-	drm_encoder_init(dev, &intel_encoder->enc, &intel_tv_enc_funcs,
++	drm_encoder_init(dev, &intel_encoder->base, &intel_tv_enc_funcs,
+ 			 DRM_MODE_ENCODER_TVDAC);
+ 
+-	drm_mode_connector_attach_encoder(&intel_connector->base, &intel_encoder->enc);
++	intel_connector_attach_encoder(intel_connector, intel_encoder);
+ 	intel_encoder->type = INTEL_OUTPUT_TVOUT;
+ 	intel_encoder->crtc_mask = (1 << 0) | (1 << 1);
+ 	intel_encoder->clone_mask = (1 << INTEL_TV_CLONE_BIT);
+-	intel_encoder->enc.possible_crtcs = ((1 << 0) | (1 << 1));
+-	intel_encoder->enc.possible_clones = (1 << INTEL_OUTPUT_TVOUT);
++	intel_encoder->base.possible_crtcs = ((1 << 0) | (1 << 1));
++	intel_encoder->base.possible_clones = (1 << INTEL_OUTPUT_TVOUT);
+ 	intel_tv->type = DRM_MODE_CONNECTOR_Unknown;
+ 
+ 	/* BIOS margin values */
+@@ -1678,21 +1676,19 @@ intel_tv_init(struct drm_device *dev)
+ 	intel_tv->margin[TV_MARGIN_RIGHT] = 46;
+ 	intel_tv->margin[TV_MARGIN_BOTTOM] = 37;
+ 
+-	intel_tv->tv_format = kstrdup(tv_modes[initial_mode].name, GFP_KERNEL);
++	intel_tv->tv_format = tv_modes[initial_mode].name;
+ 
+-	drm_encoder_helper_add(&intel_encoder->enc, &intel_tv_helper_funcs);
++	drm_encoder_helper_add(&intel_encoder->base, &intel_tv_helper_funcs);
+ 	drm_connector_helper_add(connector, &intel_tv_connector_helper_funcs);
+ 	connector->interlace_allowed = false;
+ 	connector->doublescan_allowed = false;
+ 
+ 	/* Create TV properties then attach current values */
+-	tv_format_names = kmalloc(sizeof(char *) * ARRAY_SIZE(tv_modes),
+-				  GFP_KERNEL);
+-	if (!tv_format_names)
+-		goto out;
+ 	for (i = 0; i < ARRAY_SIZE(tv_modes); i++)
+-		tv_format_names[i] = tv_modes[i].name;
+-	drm_mode_create_tv_properties(dev, ARRAY_SIZE(tv_modes), tv_format_names);
++		tv_format_names[i] = (char *)tv_modes[i].name;
++	drm_mode_create_tv_properties(dev,
++				      ARRAY_SIZE(tv_modes),
++				      tv_format_names);
+ 
+ 	drm_connector_attach_property(connector, dev->mode_config.tv_mode_property,
+ 				   initial_mode);
+@@ -1708,6 +1704,5 @@ intel_tv_init(struct drm_device *dev)
+ 	drm_connector_attach_property(connector,
+ 				   dev->mode_config.tv_bottom_margin_property,
+ 				   intel_tv->margin[TV_MARGIN_BOTTOM]);
+-out:
+ 	drm_sysfs_connector_add(connector);
+ }
+diff --git a/include/drm/drm_crtc.h b/include/drm/drm_crtc.h
+index 3e5a51a..a4c66f6 100644
+--- a/include/drm/drm_crtc.h
++++ b/include/drm/drm_crtc.h
+@@ -762,6 +762,7 @@ extern int drm_mode_gamma_get_ioctl(struct drm_device *dev,
+ extern int drm_mode_gamma_set_ioctl(struct drm_device *dev,
+ 				    void *data, struct drm_file *file_priv);
+ extern bool drm_detect_hdmi_monitor(struct edid *edid);
++extern bool drm_detect_monitor_audio(struct edid *edid);
+ extern int drm_mode_page_flip_ioctl(struct drm_device *dev,
+ 				    void *data, struct drm_file *file_priv);
+ extern struct drm_display_mode *drm_cvt_mode(struct drm_device *dev,
+diff --git a/include/drm/drm_dp_helper.h b/include/drm/drm_dp_helper.h
+index a49e791..83a389e 100644
+--- a/include/drm/drm_dp_helper.h
++++ b/include/drm/drm_dp_helper.h
+@@ -23,6 +23,9 @@
+ #ifndef _DRM_DP_HELPER_H_
+ #define _DRM_DP_HELPER_H_
+ 
++#include <linux/types.h>
++#include <linux/i2c.h>
++
+ /* From the VESA DisplayPort spec */
+ 
+ #define AUX_NATIVE_WRITE	0x8
+diff --git a/include/drm/i915_drm.h b/include/drm/i915_drm.h
+index e41c74f..8c641be 100644
+--- a/include/drm/i915_drm.h
++++ b/include/drm/i915_drm.h
+@@ -286,6 +286,7 @@ typedef struct drm_i915_irq_wait {
+ #define I915_PARAM_HAS_PAGEFLIPPING	 8
+ #define I915_PARAM_HAS_EXECBUF2          9
+ #define I915_PARAM_HAS_BSD		 10
++#define I915_PARAM_HAS_BLT		 11
+ 
+ typedef struct drm_i915_getparam {
+ 	int param;
+@@ -627,8 +628,11 @@ struct drm_i915_gem_execbuffer2 {
+ 	__u32 num_cliprects;
+ 	/** This is a struct drm_clip_rect *cliprects */
+ 	__u64 cliprects_ptr;
++#define I915_EXEC_RING_MASK              (7<<0)
++#define I915_EXEC_DEFAULT                (0<<0)
+ #define I915_EXEC_RENDER                 (1<<0)
+-#define I915_EXEC_BSD                    (1<<1)
++#define I915_EXEC_BSD                    (2<<0)
++#define I915_EXEC_BLT                    (3<<0)
+ 	__u64 flags;
+ 	__u64 rsvd1;
+ 	__u64 rsvd2;
+diff --git a/include/drm/intel-gtt.h b/include/drm/intel-gtt.h
+new file mode 100644
+index 0000000..d3c8194
+--- /dev/null
++++ b/include/drm/intel-gtt.h
+@@ -0,0 +1,18 @@
++/* Common header for intel-gtt.ko and i915.ko */
++
++#ifndef _DRM_INTEL_GTT_H
++#define	_DRM_INTEL_GTT_H
++struct intel_gtt {
++	/* Number of stolen gtt entries at the beginning. */
++	unsigned int gtt_stolen_entries;
++	/* Total number of gtt entries. */
++	unsigned int gtt_total_entries;
++	/* Part of the gtt that is mappable by the cpu, for those chips where
++	 * this is not the full gtt. */
++	unsigned int gtt_mappable_entries;
++};
++
++struct intel_gtt *intel_gtt_get(void);
++
++#endif
++
diff --git a/drm-intel-big-hammer.patch b/drm-intel-big-hammer.patch
index 63dc016b1..0d7f7f08d 100644
--- a/drm-intel-big-hammer.patch
+++ b/drm-intel-big-hammer.patch
@@ -1,16 +1,16 @@
 diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
-index 37427e4..08af9db 100644
+index 6da2c6d..f508b86 100644
 --- a/drivers/gpu/drm/i915/i915_gem.c
 +++ b/drivers/gpu/drm/i915/i915_gem.c
-@@ -2553,6 +2553,11 @@ i915_gem_execbuffer(struct drm_device *dev, void *data,
- 
- 	mutex_lock(&dev->struct_mutex);
+@@ -3738,6 +3738,11 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
+ 	if (ret)
+ 		goto pre_mutex_err;
  
 +	/* We don't get the flushing right for these chipsets, use the
-+	 * big hamer for now to avoid random crashiness. */
++	 * big hammer for now to avoid random crashiness. */
 +	if (IS_I85X(dev) || IS_I865G(dev))
 +		wbinvd();
 +
- 	i915_verify_inactive(dev, __FILE__, __LINE__);
- 
- 	if (dev_priv->mm.wedged) {
+ 	if (dev_priv->mm.suspended) {
+ 		mutex_unlock(&dev->struct_mutex);
+ 		ret = -EBUSY;
diff --git a/drm-intel-make-lvds-work.patch b/drm-intel-make-lvds-work.patch
index 5ca0152da..6c089b89e 100644
--- a/drm-intel-make-lvds-work.patch
+++ b/drm-intel-make-lvds-work.patch
@@ -1,19 +1,20 @@
-diff -up linux-2.6.33.noarch/drivers/gpu/drm/i915/intel_display.c.orig linux-2.6.33.noarch/drivers/gpu/drm/i915/intel_display.c
---- linux-2.6.33.noarch/drivers/gpu/drm/i915/intel_display.c.orig	2010-03-31 16:59:39.901995671 -0400
-+++ linux-2.6.33.noarch/drivers/gpu/drm/i915/intel_display.c	2010-03-31 17:01:05.416996744 -0400
-@@ -3757,7 +3757,6 @@ struct drm_crtc *intel_get_load_detect_p
- void intel_release_load_detect_pipe(struct intel_encoder *intel_encoder, int dpms_mode)
+diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
+index 0cece04..63bbb4b 100644
+--- a/drivers/gpu/drm/i915/intel_display.c
++++ b/drivers/gpu/drm/i915/intel_display.c
+@@ -4580,7 +4580,6 @@ void intel_release_load_detect_pipe(struct intel_encoder *intel_encoder,
+ 				    struct drm_connector *connector, int dpms_mode)
  {
- 	struct drm_encoder *encoder = &intel_encoder->enc;
+ 	struct drm_encoder *encoder = &intel_encoder->base;
 -	struct drm_device *dev = encoder->dev;
  	struct drm_crtc *crtc = encoder->crtc;
  	struct drm_encoder_helper_funcs *encoder_funcs = encoder->helper_private;
  	struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private;
-@@ -3767,7 +3766,6 @@ void intel_release_load_detect_pipe(stru
- 		intel_encoder->base.encoder = NULL;
+@@ -4590,7 +4589,6 @@ void intel_release_load_detect_pipe(struct intel_encoder *intel_encoder,
+ 		connector->encoder = NULL;
  		intel_encoder->load_detect_temp = false;
  		crtc->enabled = drm_helper_crtc_in_use(crtc);
 -		drm_helper_disable_unused_functions(dev);
  	}
  
- 	/* Switch crtc and output back off if necessary */
+ 	/* Switch crtc and encoder back off if necessary */
diff --git a/kernel.spec b/kernel.spec
index 8f9f84fb5..d73047cfc 100644
--- a/kernel.spec
+++ b/kernel.spec
@@ -51,7 +51,7 @@ Summary: The Linux kernel
 # For non-released -rc kernels, this will be prepended with "0.", so
 # for example a 3 here will become 0.3
 #
-%global baserelease 2
+%global baserelease 3
 %global fedora_build %{baserelease}
 
 # base_sublevel is the kernel version we're starting with and patching
@@ -128,7 +128,7 @@ Summary: The Linux kernel
 %define doc_build_fail true
 %endif
 
-%define rawhide_skip_docs 1
+%define rawhide_skip_docs 0
 %if 0%{?rawhide_skip_docs}
 %define with_doc 0
 %define doc_build_fail true
@@ -149,7 +149,7 @@ Summary: The Linux kernel
 # Set debugbuildsenabled to 1 for production (build separate debug kernels)
 #  and 0 for rawhide (all kernels are debug kernels).
 # See also 'make debug' and 'make release'.
-%define debugbuildsenabled 0
+%define debugbuildsenabled 1
 
 # Want to build a vanilla kernel build without any non-upstream patches?
 %define with_vanilla %{?_with_vanilla: 1} %{?!_with_vanilla: 0}
@@ -649,12 +649,10 @@ Patch1555: fix_xen_guest_on_old_EC2.patch
 
 # nouveau + drm fixes
 Patch1810: drm-nouveau-updates.patch
+Patch1811: drm-intel-2.6.37-rc2.patch
 Patch1819: drm-intel-big-hammer.patch
-# intel drm is all merged upstream
-Patch1824: drm-intel-next.patch
 # make sure the lvds comes back on lid open
 Patch1825: drm-intel-make-lvds-work.patch
-Patch1826: drm-i915-reprogram-power-monitoring-registers-on-resume.patch
 Patch1900: linux-2.6-intel-iommu-igfx.patch
 
 # linux1394 git patches
@@ -1269,10 +1267,9 @@ ApplyPatch fix_xen_guest_on_old_EC2.patch
 ApplyOptionalPatch drm-nouveau-updates.patch
 
 # Intel DRM
-ApplyOptionalPatch drm-intel-next.patch
+ApplyPatch drm-intel-2.6.37-rc2.patch
 ApplyPatch drm-intel-big-hammer.patch
 ApplyPatch drm-intel-make-lvds-work.patch
-ApplyPatch drm-i915-reprogram-power-monitoring-registers-on-resume.patch
 ApplyPatch linux-2.6-intel-iommu-igfx.patch
 
 # linux1394 git patches
@@ -1957,6 +1954,11 @@ fi
 #                 ||     ||
 
 %changelog
+* Tue Nov 16 2010 Kyle McMartin <kyle@redhat.com> 2.6.36-3
+- Rebase drm/intel to 2.6.37-rc2+edp_fixes, hopefully to sort out most of
+  the issues folks with eDP are having.
+- Switch to release builds and turn on debugging flavours.
+
 * Mon Nov 15 2010 Kyle McMartin <kyle@redhat.com>
 - rhbz#651019: pull in support for MBA3.
 

From b466142253c8c57f1aaf9629a0017b1d96fe37e1 Mon Sep 17 00:00:00 2001
From: Kyle McMartin <kyle@mcmartin.ca>
Date: Tue, 16 Nov 2010 02:15:50 -0500
Subject: [PATCH 16/56] disable parallel doc builds
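
The htmldocs/mandocs targets are not reliably parallel-safe and the build
frequently falls over once the job count gets much above 8, so drop
%{?_smp_mflags} from the make htmldocs mandocs invocation. The
|| %{doc_build_fail} fallback stays in place; only the documentation step
loses parallelism.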

---
 kernel.spec | 8 ++++++--
 1 file changed, 6 insertions(+), 2 deletions(-)

diff --git a/kernel.spec b/kernel.spec
index d73047cfc..c1b9cb9ed 100644
--- a/kernel.spec
+++ b/kernel.spec
@@ -51,7 +51,7 @@ Summary: The Linux kernel
 # For non-released -rc kernels, this will be prepended with "0.", so
 # for example a 3 here will become 0.3
 #
-%global baserelease 3
+%global baserelease 4
 %global fedora_build %{baserelease}
 
 # base_sublevel is the kernel version we're starting with and patching
@@ -1657,7 +1657,8 @@ BuildKernel %make_target %kernel_image smp
 
 %if %{with_doc}
 # Make the HTML and man pages.
-make %{?_smp_mflags} htmldocs mandocs || %{doc_build_fail}
+#  %{?_smp_mflags} frequently fails when j>8
+make htmldocs mandocs || %{doc_build_fail}
 
 # sometimes non-world-readable files sneak into the kernel source tree
 chmod -R a=rX Documentation
@@ -1954,6 +1955,9 @@ fi
 #                 ||     ||
 
 %changelog
+* Tue Nov 16 2010 Kyle McMartin <kyle@redhat.com> 2.6.36-4
+- Disable parallel doc builds, they fail. Constantly.
+
 * Tue Nov 16 2010 Kyle McMartin <kyle@redhat.com> 2.6.36-3
 - Rebase drm/intel to 2.6.37-rc2+edp_fixes, hopefully to sort out most of
   the issues folks with eDP are having.

From ca5c04e7a4ace7fa606c40a162432f3a186a7ffa Mon Sep 17 00:00:00 2001
From: Kyle McMartin <kyle@mcmartin.ca>
Date: Tue, 16 Nov 2010 20:50:04 -0500
Subject: [PATCH 17/56] add MacBookAir3,1 11" id to add-macbookair3-ids.patch

Reported by Jurgen Kramer <gtmkramer@xs4all.nl> in
<1289936190.2460.27.camel@paragon.slim>.
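
This adds a second quirk entry, SND_PCI_QUIRK(0x10de, 0xcb89,
"MacBookAir 3,1", CS420X_MBP55), to the cs420x_cfg_tbl[] hunk of
add-macbookair3-ids.patch, next to the 0x10de/0x0d94 entry already there,
so the 11" model picks up the same CS420X_MBP55 setup.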
---
 add-macbookair3-ids.patch | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)

diff --git a/add-macbookair3-ids.patch b/add-macbookair3-ids.patch
index e817c078e..b512eeaa2 100644
--- a/add-macbookair3-ids.patch
+++ b/add-macbookair3-ids.patch
@@ -253,10 +253,11 @@ diff -uNrp kernel-2.6.35.fc14.orig/drivers/video/backlight/mbp_nvidia_bl.c kerne
 diff -uNrp kernel-2.6.35.fc14.orig/sound/pci/hda/patch_cirrus.c kernel-2.6.35.fc14.new/sound/pci/hda/patch_cirrus.c
 --- kernel-2.6.35.fc14.orig/sound/pci/hda/patch_cirrus.c	2010-11-12 12:35:49.005815268 +0100
 +++ kernel-2.6.35.fc14.new/sound/pci/hda/patch_cirrus.c	2010-11-12 12:48:40.379542432 +0100
-@@ -1139,6 +1139,7 @@ static const char *cs420x_models[CS420X_
+@@ -1139,6 +1139,8 @@ static const char *cs420x_models[CS420X_
  static struct snd_pci_quirk cs420x_cfg_tbl[] = {
  	SND_PCI_QUIRK(0x10de, 0xcb79, "MacBookPro 5,5", CS420X_MBP55),
  	SND_PCI_QUIRK(0x8086, 0x7270, "IMac 27 Inch", CS420X_IMAC27),
++	SND_PCI_QUIRK(0x10de, 0xcb89, "MacBookAir 3,1", CS420X_MBP55),
 +	SND_PCI_QUIRK(0x10de, 0x0d94, "MacBookAir 3,1(2)", CS420X_MBP55),
  	{} /* terminator */
  };

From c4edb7ed468704f63b0d840d6254da4f4562d93c Mon Sep 17 00:00:00 2001
From: Kyle McMartin <kyle@mcmartin.ca>
Date: Wed, 17 Nov 2010 00:37:01 -0500
Subject: [PATCH 18/56] make vmlinuz/System.map root-only by default
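
Add %attr(600,root,root) to the vmlinuz and System.map entries in the
common %files template so both are installed readable by root only;
anyone who really needs them without root can chmod 644 them afterwards.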

---
 kernel.spec | 8 ++++++--
 1 file changed, 6 insertions(+), 2 deletions(-)

diff --git a/kernel.spec b/kernel.spec
index c1b9cb9ed..e70b1ba50 100644
--- a/kernel.spec
+++ b/kernel.spec
@@ -1896,8 +1896,8 @@ fi
 %if %{1}\
 %{expand:%%files %{?2}}\
 %defattr(-,root,root)\
-/%{image_install_path}/%{?-k:%{-k*}}%{!?-k:vmlinuz}-%{KVERREL}%{?2:.%{2}}\
-/boot/System.map-%{KVERREL}%{?2:.%{2}}\
+%attr(600,root,root) /%{image_install_path}/%{?-k:%{-k*}}%{!?-k:vmlinuz}-%{KVERREL}%{?2:.%{2}}\
+%attr(600,root,root) /boot/System.map-%{KVERREL}%{?2:.%{2}}\
 /boot/config-%{KVERREL}%{?2:.%{2}}\
 %dir /lib/modules/%{KVERREL}%{?2:.%{2}}\
 /lib/modules/%{KVERREL}%{?2:.%{2}}/kernel\
@@ -1955,6 +1955,10 @@ fi
 #                 ||     ||
 
 %changelog
+* Wed Nov 17 2010 Kyle McMartin <kyle@redhat.com>
+- Make vmlinuz/System.map root read-write only by default. You can just
+  chmod 644 them later if you (unlikely) need them without root.
+
 * Tue Nov 16 2010 Kyle McMartin <kyle@redhat.com> 2.6.36-4
 - Disable parallel doc builds, they fail. Constantly.
 

From 16ab22b5320d6df5f4eaae3ed419d2f9c713b284 Mon Sep 17 00:00:00 2001
From: Kyle McMartin <kyle@mcmartin.ca>
Date: Wed, 17 Nov 2010 16:48:10 -0500
Subject: [PATCH 19/56] disable drm-intel rebase
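
Comment out the ApplyPatch lines for drm-intel-2.6.37-rc2.patch,
drm-intel-big-hammer.patch and drm-intel-make-lvds-work.patch until the
rebase can be fixed; linux-2.6-intel-iommu-igfx.patch is still applied.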

---
 kernel.spec | 11 +++++++----
 1 file changed, 7 insertions(+), 4 deletions(-)

diff --git a/kernel.spec b/kernel.spec
index e70b1ba50..d85d38073 100644
--- a/kernel.spec
+++ b/kernel.spec
@@ -51,7 +51,7 @@ Summary: The Linux kernel
 # For non-released -rc kernels, this will be prepended with "0.", so
 # for example a 3 here will become 0.3
 #
-%global baserelease 4
+%global baserelease 5
 %global fedora_build %{baserelease}
 
 # base_sublevel is the kernel version we're starting with and patching
@@ -1267,9 +1267,9 @@ ApplyPatch fix_xen_guest_on_old_EC2.patch
 ApplyOptionalPatch drm-nouveau-updates.patch
 
 # Intel DRM
-ApplyPatch drm-intel-2.6.37-rc2.patch
-ApplyPatch drm-intel-big-hammer.patch
-ApplyPatch drm-intel-make-lvds-work.patch
+#ApplyPatch drm-intel-2.6.37-rc2.patch
+#ApplyPatch drm-intel-big-hammer.patch
+#ApplyPatch drm-intel-make-lvds-work.patch
 ApplyPatch linux-2.6-intel-iommu-igfx.patch
 
 # linux1394 git patches
@@ -1955,6 +1955,9 @@ fi
 #                 ||     ||
 
 %changelog
+* Wed Nov 17 2010 Kyle McMartin <kyle@redhat.com> 2.6.36-5
+- Disable drm/intel rebase until it can be fixed.
+
 * Wed Nov 17 2010 Kyle McMartin <kyle@redhat.com>
 - Make vmlinuz/System.map root read-write only by default. You can just
   chmod 644 them later if you (unlikely) need them without root.

From 49d23722dfbdeca9a8cb66ba87104e31bb98e62f Mon Sep 17 00:00:00 2001
From: Kyle McMartin <kyle@mcmartin.ca>
Date: Sat, 20 Nov 2010 08:28:57 -0500
Subject: [PATCH 20/56] fix BUG when using xt_SECMARK
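
Pull in secmark-do-not-return-early-if-there-was-no-error.patch (Eric
Paris, requested by eparis@): secmark_tg_check() was returning early
whenever checkentry_selinux() came back with err <= 0, i.e. also on
success, so the rest of the SECMARK setup never ran and using the target
hit a kernel BUG. With the fix the early return only triggers on a real
error:

	err = checkentry_selinux(info);
	if (err)
		return err;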

---
 kernel.spec                                   |  8 +++++
 ...t-return-early-if-there-was-no-error.patch | 33 +++++++++++++++++++
 2 files changed, 41 insertions(+)
 create mode 100644 secmark-do-not-return-early-if-there-was-no-error.patch

diff --git a/kernel.spec b/kernel.spec
index d85d38073..3fb5865b5 100644
--- a/kernel.spec
+++ b/kernel.spec
@@ -724,6 +724,8 @@ Patch12303: dmar-disable-when-ricoh-multifunction.patch
 
 Patch12305: xhci_hcd-suspend-resume.patch
 
+Patch12306: secmark-do-not-return-early-if-there-was-no-error.patch
+
 %endif
 
 BuildRoot: %{_tmppath}/kernel-%{KVERREL}-root
@@ -1342,6 +1344,8 @@ ApplyPatch dmar-disable-when-ricoh-multifunction.patch
 
 ApplyPatch xhci_hcd-suspend-resume.patch
 
+ApplyPatch secmark-do-not-return-early-if-there-was-no-error.patch
+
 # END OF PATCH APPLICATIONS
 
 %endif
@@ -1955,6 +1959,10 @@ fi
 #                 ||     ||
 
 %changelog
+* Sat Nov 20 2010 Kyle McMartin <kyle@redhat.com>
+- secmark-do-not-return-early-if-there-was-no-error.patch: requested
+  by eparis@. (Fixes a BUG when using secmark.)
+
 * Wed Nov 17 2010 Kyle McMartin <kyle@redhat.com> 2.6.36-5
 - Disable drm/intel rebase until it can be fixed.
 
diff --git a/secmark-do-not-return-early-if-there-was-no-error.patch b/secmark-do-not-return-early-if-there-was-no-error.patch
new file mode 100644
index 000000000..6515bd043
--- /dev/null
+++ b/secmark-do-not-return-early-if-there-was-no-error.patch
@@ -0,0 +1,33 @@
+From 15714f7b58011cf3948cab2988abea560240c74f Mon Sep 17 00:00:00 2001
+From: Eric Paris <eparis@redhat.com>
+Date: Tue, 12 Oct 2010 11:40:08 -0400
+Subject: [PATCH] secmark: do not return early if there was no error
+
+Commit 4a5a5c73 attempted to pass decent error messages back to userspace for
+netfilter errors.  In xt_SECMARK.c however the patch screwed up and returned
+on 0 (aka no error) early and didn't finish setting up secmark.  This results
+in a kernel BUG if you use SECMARK.
+
+Signed-off-by: Eric Paris <eparis@redhat.com>
+Acked-by: Paul Moore <paul.moore@hp.com>
+Signed-off-by: James Morris <jmorris@namei.org>
+---
+ net/netfilter/xt_SECMARK.c |    2 +-
+ 1 files changed, 1 insertions(+), 1 deletions(-)
+
+diff --git a/net/netfilter/xt_SECMARK.c b/net/netfilter/xt_SECMARK.c
+index 23b2d6c..364ad16 100644
+--- a/net/netfilter/xt_SECMARK.c
++++ b/net/netfilter/xt_SECMARK.c
+@@ -101,7 +101,7 @@ static int secmark_tg_check(const struct xt_tgchk_param *par)
+ 	switch (info->mode) {
+ 	case SECMARK_MODE_SEL:
+ 		err = checkentry_selinux(info);
+-		if (err <= 0)
++		if (err)
+ 			return err;
+ 		break;
+ 
+-- 
+1.7.3.2
+

From 1ef87abd797fabe88b6e403388d4541779862f07 Mon Sep 17 00:00:00 2001
From: Chuck Ebbert <cebbert@redhat.com>
Date: Sat, 20 Nov 2010 09:36:07 -0500
Subject: [PATCH 21/56] Linux 2.6.36.1-rc1

---
 kernel.spec | 9 ++++++---
 sources     | 1 +
 2 files changed, 7 insertions(+), 3 deletions(-)

diff --git a/kernel.spec b/kernel.spec
index d85d38073..385f7f375 100644
--- a/kernel.spec
+++ b/kernel.spec
@@ -51,7 +51,7 @@ Summary: The Linux kernel
 # For non-released -rc kernels, this will be prepended with "0.", so
 # for example a 3 here will become 0.3
 #
-%global baserelease 5
+%global baserelease 6
 %global fedora_build %{baserelease}
 
 # base_sublevel is the kernel version we're starting with and patching
@@ -63,9 +63,9 @@ Summary: The Linux kernel
 %if 0%{?released_kernel}
 
 # Do we have a -stable update to apply?
-%define stable_update 0
+%define stable_update 1
 # Is it a -stable RC?
-%define stable_rc 0
+%define stable_rc 1
 # Set rpm version accordingly
 %if 0%{?stable_update}
 %define stablerev .%{stable_update}
@@ -1955,6 +1955,9 @@ fi
 #                 ||     ||
 
 %changelog
+* Sat Nov 20 2010 Chuck Ebbert <cebbert@redhat.com> 2.6.36.1-6.rc1
+- Linux 2.6.36.1-rc1
+
 * Wed Nov 17 2010 Kyle McMartin <kyle@redhat.com> 2.6.36-5
 - Disable drm/intel rebase until it can be fixed.
 
diff --git a/sources b/sources
index a51803471..d35ceb4a3 100644
--- a/sources
+++ b/sources
@@ -1 +1,2 @@
 61f3739a73afb6914cb007f37fb09b62  linux-2.6.36.tar.bz2
+1278010cbfefa16acba402d8b0829b66  patch-2.6.36.1-rc1.bz2

From b7e5e1b1d1a429e402a1c22a9a1b1dfea3c608df Mon Sep 17 00:00:00 2001
From: Kyle McMartin <kyle@mcmartin.ca>
Date: Sat, 20 Nov 2010 20:53:02 -0500
Subject: [PATCH 22/56] Allow debuginfo to be multiply-installed

Patch from aris@ on fedora-kernel.
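
The kernel source directory used throughout the spec moves from
linux-%{kversion}.%{_target_cpu} to linux-%{KVERREL}, which should give
each build a path unique to its release so the debuginfo packages from
different builds stop conflicting with each other.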
---
 kernel.spec | 17 +++++++++++------
 1 file changed, 11 insertions(+), 6 deletions(-)

diff --git a/kernel.spec b/kernel.spec
index fb3f4e66e..dd67ea681 100644
--- a/kernel.spec
+++ b/kernel.spec
@@ -1084,17 +1084,17 @@ else
 fi
 
 # Now build the fedora kernel tree.
-if [ -d linux-%{kversion}.%{_target_cpu} ]; then
+if [ -d linux-%{KVERREL} ]; then
   # Just in case we ctrl-c'd a prep already
   rm -rf deleteme.%{_target_cpu}
   # Move away the stale away, and delete in background.
-  mv linux-%{kversion}.%{_target_cpu} deleteme.%{_target_cpu}
+  mv linux-%{KVERREL} deleteme.%{_target_cpu}
   rm -rf deleteme.%{_target_cpu} &
 fi
 
-cp -rl vanilla-%{vanillaversion} linux-%{kversion}.%{_target_cpu}
+cp -rl vanilla-%{vanillaversion} linux-%{KVERREL}
 
-cd linux-%{kversion}.%{_target_cpu}
+cd linux-%{KVERREL}
 
 # released_kernel with possible stable updates
 %if 0%{?stable_base}
@@ -1630,7 +1630,7 @@ rm -rf $RPM_BUILD_ROOT
 mkdir -p $RPM_BUILD_ROOT/boot
 mkdir -p $RPM_BUILD_ROOT%{_libexecdir}
 
-cd linux-%{kversion}.%{_target_cpu}
+cd linux-%{KVERREL}
 
 %if %{with_debug}
 BuildKernel %make_target %kernel_image debug
@@ -1696,7 +1696,7 @@ find Documentation -type d | xargs chmod u+w
 
 %install
 
-cd linux-%{kversion}.%{_target_cpu}
+cd linux-%{KVERREL}
 
 %if %{with_doc}
 docdir=$RPM_BUILD_ROOT%{_datadir}/doc/kernel-doc-%{rpmversion}
@@ -1959,6 +1959,11 @@ fi
 #                 ||     ||
 
 %changelog
+* Sat Nov 20 2010 Kyle McMartin <kyle@redhat.com>
+- Merge patch from Aris to allow kernel-debuginfo to be multiply-installed
+  (means we had to move the build dir, kind of a bummer, but I verified
+   that a -gitN to -gitN+1 worked.)
+
 * Sat Nov 20 2010 Chuck Ebbert <cebbert@redhat.com> 2.6.36.1-6.rc1
 - Linux 2.6.36.1-rc1
 - Comment out upstreamed patches:

From a9179c05e5fe8a661f8856cfeb09298e9079ecf9 Mon Sep 17 00:00:00 2001
From: Kyle McMartin <kyle@mcmartin.ca>
Date: Mon, 22 Nov 2010 08:19:35 -0500
Subject: [PATCH 23/56] make vmlinuz world readable
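
Drop the %attr(600,root,root) on the installed kernel image again so
vmlinuz goes back to being world readable; System.map keeps its
root-only mode.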

---
 kernel.spec | 7 +++++--
 1 file changed, 5 insertions(+), 2 deletions(-)

diff --git a/kernel.spec b/kernel.spec
index dd67ea681..ebe9ea95d 100644
--- a/kernel.spec
+++ b/kernel.spec
@@ -51,7 +51,7 @@ Summary: The Linux kernel
 # For non-released -rc kernels, this will be prepended with "0.", so
 # for example a 3 here will become 0.3
 #
-%global baserelease 6
+%global baserelease 7
 %global fedora_build %{baserelease}
 
 # base_sublevel is the kernel version we're starting with and patching
@@ -1900,7 +1900,7 @@ fi
 %if %{1}\
 %{expand:%%files %{?2}}\
 %defattr(-,root,root)\
-%attr(600,root,root) /%{image_install_path}/%{?-k:%{-k*}}%{!?-k:vmlinuz}-%{KVERREL}%{?2:.%{2}}\
+/%{image_install_path}/%{?-k:%{-k*}}%{!?-k:vmlinuz}-%{KVERREL}%{?2:.%{2}}\
 %attr(600,root,root) /boot/System.map-%{KVERREL}%{?2:.%{2}}\
 /boot/config-%{KVERREL}%{?2:.%{2}}\
 %dir /lib/modules/%{KVERREL}%{?2:.%{2}}\
@@ -1959,6 +1959,9 @@ fi
 #                 ||     ||
 
 %changelog
+* Mon Nov 22 2010 Kyle McMartin <kyle@redhat.com> 2.6.36.1-7.rc1
+- Make vmlinuz world readable again.
+
 * Sat Nov 20 2010 Kyle McMartin <kyle@redhat.com>
 - Merge patch from Aris to allow kernel-debuginfo to be multiply-installed
   (means we had to move the build dir, kind of a bummer, but I verified

From 61b68ccc4db016a38ac82116625f80125d93d198 Mon Sep 17 00:00:00 2001
From: Kyle McMartin <kyle@mcmartin.ca>
Date: Mon, 22 Nov 2010 14:03:11 -0500
Subject: [PATCH 24/56] TTY: restore tty_ldisc_wait_idle

---
 kernel.spec                           |  10 ++-
 tty-restore-tty_ldisc_wait_idle.patch | 117 ++++++++++++++++++++++++++
 2 files changed, 126 insertions(+), 1 deletion(-)
 create mode 100644 tty-restore-tty_ldisc_wait_idle.patch

diff --git a/kernel.spec b/kernel.spec
index ebe9ea95d..319981471 100644
--- a/kernel.spec
+++ b/kernel.spec
@@ -51,7 +51,7 @@ Summary: The Linux kernel
 # For non-released -rc kernels, this will be prepended with "0.", so
 # for example a 3 here will become 0.3
 #
-%global baserelease 7
+%global baserelease 8
 %global fedora_build %{baserelease}
 
 # base_sublevel is the kernel version we're starting with and patching
@@ -726,6 +726,8 @@ Patch12305: xhci_hcd-suspend-resume.patch
 
 Patch12306: secmark-do-not-return-early-if-there-was-no-error.patch
 
+Patch12307: tty-restore-tty_ldisc_wait_idle.patch
+
 %endif
 
 BuildRoot: %{_tmppath}/kernel-%{KVERREL}-root
@@ -1346,6 +1348,8 @@ ApplyPatch xhci_hcd-suspend-resume.patch
 
 #ApplyPatch secmark-do-not-return-early-if-there-was-no-error.patch
 
+ApplyPatch tty-restore-tty_ldisc_wait_idle.patch
+
 # END OF PATCH APPLICATIONS
 
 %endif
@@ -1959,6 +1963,10 @@ fi
 #                 ||     ||
 
 %changelog
+* Mon Nov 22 2010 Kyle McMartin <kyle@redhat.com> 2.6.36.1-8.rc1
+- Merge 100eeae2 (TTY: restore tty_ldisc_wait_idle) which should fix the WARN
+  in tty_open in rawhide.
+
 * Mon Nov 22 2010 Kyle McMartin <kyle@redhat.com> 2.6.36.1-7.rc1
 - Make vmlinuz world readable again.
 
diff --git a/tty-restore-tty_ldisc_wait_idle.patch b/tty-restore-tty_ldisc_wait_idle.patch
new file mode 100644
index 000000000..3e784dd57
--- /dev/null
+++ b/tty-restore-tty_ldisc_wait_idle.patch
@@ -0,0 +1,117 @@
+From 4d458f558d5b904f14080b073b549d18c9503f93 Mon Sep 17 00:00:00 2001
+From: Jiri Slaby <jslaby@suse.cz>
+Date: Sun, 31 Oct 2010 23:17:51 +0100
+Subject: TTY: restore tty_ldisc_wait_idle
+
+It was removed in 65b770468e98 (tty-ldisc: turn ldisc user count into
+a proper refcount), but we need to wait for last user to quit the
+ldisc before we close it in tty_set_ldisc.
+
+Otherwise weird things start to happen. There might be processes
+waiting in tty_read->n_tty_read on tty->read_wait for input to appear
+and at that moment, a change of ldisc is fatal. n_tty_close is called,
+it frees read_buf and the waiting process is still in the middle of
+reading and goes nuts after it is woken.
+
+Previously we prevented close to happen when others are in ldisc ops
+by tty_ldisc_wait_idle in tty_set_ldisc. But the commit above removed
+that. So revoke the change and test whether there is 1 user (=we), and
+allow the close then.
+
+We can do that without ldisc/tty locks, because nobody else can open
+the device due to TTY_LDISC_CHANGING bit set, so we in fact wait for
+everybody to leave.
+
+I don't understand why tty_ldisc_lock would be needed either when the
+counter is an atomic variable, so this is a lockless
+tty_ldisc_wait_idle.
+
+On the other hand, if we fail to wait (timeout or signal), we have to
+reenable the halted ldiscs, so we take ldisc lock and reuse the setup
+path at the end of tty_set_ldisc.
+
+Signed-off-by: Jiri Slaby <jslaby@suse.cz>
+Acked-by: Linus Torvalds <torvalds@linux-foundation.org>
+Tested-by: Sebastian Andrzej Siewior <bigeasy@breakpoint.cc>
+LKML-Reference: <20101031104136.GA511@Chamillionaire.breakpoint.cc>
+LKML-Reference: <1287669539-22644-1-git-send-email-jslaby@suse.cz>
+Cc: Alan Cox <alan@linux.intel.com>
+Cc: stable@kernel.org [32, 33, 36]
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+---
+ drivers/char/tty_ldisc.c |   29 +++++++++++++++++++++++++++++
+ 1 files changed, 29 insertions(+), 0 deletions(-)
+
+diff --git a/drivers/char/tty_ldisc.c b/drivers/char/tty_ldisc.c
+index 412f977..5bbf33a 100644
+--- a/drivers/char/tty_ldisc.c
++++ b/drivers/char/tty_ldisc.c
+@@ -47,6 +47,7 @@
+ 
+ static DEFINE_SPINLOCK(tty_ldisc_lock);
+ static DECLARE_WAIT_QUEUE_HEAD(tty_ldisc_wait);
++static DECLARE_WAIT_QUEUE_HEAD(tty_ldisc_idle);
+ /* Line disc dispatch table */
+ static struct tty_ldisc_ops *tty_ldiscs[NR_LDISCS];
+ 
+@@ -83,6 +84,7 @@ static void put_ldisc(struct tty_ldisc *ld)
+ 		return;
+ 	}
+ 	local_irq_restore(flags);
++	wake_up(&tty_ldisc_idle);
+ }
+ 
+ /**
+@@ -531,6 +533,23 @@ static int tty_ldisc_halt(struct tty_struct *tty)
+ }
+ 
+ /**
++ *	tty_ldisc_wait_idle	-	wait for the ldisc to become idle
++ *	@tty: tty to wait for
++ *
++ *	Wait for the line discipline to become idle. The discipline must
++ *	have been halted for this to guarantee it remains idle.
++ */
++static int tty_ldisc_wait_idle(struct tty_struct *tty)
++{
++	int ret;
++	ret = wait_event_interruptible_timeout(tty_ldisc_idle,
++			atomic_read(&tty->ldisc->users) == 1, 5 * HZ);
++	if (ret < 0)
++		return ret;
++	return ret > 0 ? 0 : -EBUSY;
++}
++
++/**
+  *	tty_set_ldisc		-	set line discipline
+  *	@tty: the terminal to set
+  *	@ldisc: the line discipline
+@@ -634,8 +653,17 @@ int tty_set_ldisc(struct tty_struct *tty, int ldisc)
+ 
+ 	flush_scheduled_work();
+ 
++	retval = tty_ldisc_wait_idle(tty);
++
+ 	tty_lock();
+ 	mutex_lock(&tty->ldisc_mutex);
++
++	/* handle wait idle failure locked */
++	if (retval) {
++		tty_ldisc_put(new_ldisc);
++		goto enable;
++	}
++
+ 	if (test_bit(TTY_HUPPED, &tty->flags)) {
+ 		/* We were raced by the hangup method. It will have stomped
+ 		   the ldisc data and closed the ldisc down */
+@@ -669,6 +697,7 @@ int tty_set_ldisc(struct tty_struct *tty, int ldisc)
+ 
+ 	tty_ldisc_put(o_ldisc);
+ 
++enable:
+ 	/*
+ 	 *	Allow ldisc referencing to occur again
+ 	 */
+-- 
+1.7.3.2
+
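
The fix above boils down to a classic wait-for-idle refcount pattern: the closer blocks until it holds the only remaining reference before it tears the ldisc down. A minimal user-space sketch of that pattern follows; struct obj, obj_put() and obj_wait_idle() are made-up names for illustration, not kernel API, and the timeout/signal handling of the real helper is omitted.

#include <pthread.h>

struct obj {
        int users;                      /* refcount; the closer holds one */
        pthread_mutex_t lock;
        pthread_cond_t idle;
};

static void obj_put(struct obj *o)
{
        pthread_mutex_lock(&o->lock);
        if (--o->users == 1)            /* only the closer's reference remains */
                pthread_cond_signal(&o->idle);
        pthread_mutex_unlock(&o->lock);
}

static void obj_wait_idle(struct obj *o)
{
        pthread_mutex_lock(&o->lock);
        while (o->users != 1)           /* wait until we are the last user */
                pthread_cond_wait(&o->idle, &o->lock);
        pthread_mutex_unlock(&o->lock);
}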

From ae441b0e3169188eb6f3b4a5520e3e4b5e1ef5ff Mon Sep 17 00:00:00 2001
From: Kyle McMartin <kyle@mcmartin.ca>
Date: Mon, 22 Nov 2010 14:45:44 -0500
Subject: [PATCH 25/56] fix incorrect reporting of whether nx is disabled or
 not

Thanks to Kees Cook for noticing.
Message-ID: <20101121070342.GE4617@outflux.net>
---
 linux-2.6-i386-nx-emulation.patch | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/linux-2.6-i386-nx-emulation.patch b/linux-2.6-i386-nx-emulation.patch
index e36bb5ecc..094c5b845 100644
--- a/linux-2.6-i386-nx-emulation.patch
+++ b/linux-2.6-i386-nx-emulation.patch
@@ -384,7 +384,7 @@
  void __init x86_report_nx(void)
  {
  	if (!cpu_has_nx) {
-+		if (disable_nx)
++		if (!disable_nx)
 +			printk(KERN_INFO "Using x86 segment limits to approximate NX protection\n");
 +		else
 +

From ebb38001c4b9f787e8e74e183d470aa3834f370f Mon Sep 17 00:00:00 2001
From: Kyle McMartin <kyle@mcmartin.ca>
Date: Mon, 22 Nov 2010 15:58:40 -0500
Subject: [PATCH 26/56] Linux 2.6.36.1

---
 .gitignore                                    |  1 +
 kernel.spec                                   | 15 ++++-----
 ...t-return-early-if-there-was-no-error.patch | 33 -------------------
 sources                                       |  2 +-
 4 files changed, 9 insertions(+), 42 deletions(-)
 delete mode 100644 secmark-do-not-return-early-if-there-was-no-error.patch

diff --git a/.gitignore b/.gitignore
index c2fd41347..688b3b38b 100644
--- a/.gitignore
+++ b/.gitignore
@@ -3,3 +3,4 @@ patch-*.bz2
 clog
 *.rpm
 kernel-2.6.*/
+/patch-2.6.36.1.bz2
diff --git a/kernel.spec b/kernel.spec
index 319981471..507bc7a01 100644
--- a/kernel.spec
+++ b/kernel.spec
@@ -51,7 +51,7 @@ Summary: The Linux kernel
 # For non-released -rc kernels, this will be prepended with "0.", so
 # for example a 3 here will become 0.3
 #
-%global baserelease 8
+%global baserelease 9
 %global fedora_build %{baserelease}
 
 # base_sublevel is the kernel version we're starting with and patching
@@ -65,7 +65,7 @@ Summary: The Linux kernel
 # Do we have a -stable update to apply?
 %define stable_update 1
 # Is it a -stable RC?
-%define stable_rc 1
+%define stable_rc 0
 # Set rpm version accordingly
 %if 0%{?stable_update}
 %define stablerev .%{stable_update}
@@ -82,9 +82,9 @@ Summary: The Linux kernel
 # The next upstream release sublevel (base_sublevel+1)
 %define upstream_sublevel %(echo $((%{base_sublevel} + 1)))
 # The rc snapshot level
-%define rcrev 8
+%define rcrev 0
 # The git snapshot level
-%define gitrev 5
+%define gitrev 0
 # Set rpm version accordingly
 %define rpmversion 2.6.%{upstream_sublevel}
 %endif
@@ -724,8 +724,6 @@ Patch12303: dmar-disable-when-ricoh-multifunction.patch
 
 Patch12305: xhci_hcd-suspend-resume.patch
 
-Patch12306: secmark-do-not-return-early-if-there-was-no-error.patch
-
 Patch12307: tty-restore-tty_ldisc_wait_idle.patch
 
 %endif
@@ -1346,8 +1344,6 @@ ApplyPatch dmar-disable-when-ricoh-multifunction.patch
 
 ApplyPatch xhci_hcd-suspend-resume.patch
 
-#ApplyPatch secmark-do-not-return-early-if-there-was-no-error.patch
-
 ApplyPatch tty-restore-tty_ldisc_wait_idle.patch
 
 # END OF PATCH APPLICATIONS
@@ -1963,6 +1959,9 @@ fi
 #                 ||     ||
 
 %changelog
+* Mon Nov 22 2010 Kyle McMartin <kyle@redhat.com> 2.6.36.1-9
+- Linux stable 2.6.36.1
+
 * Mon Nov 22 2010 Kyle McMartin <kyle@redhat.com> 2.6.36.1-8.rc1
 - Merge 100eeae2 (TTY: restore tty_ldisc_wait_idle) which should fix the WARN
   in tty_open in rawhide.
diff --git a/secmark-do-not-return-early-if-there-was-no-error.patch b/secmark-do-not-return-early-if-there-was-no-error.patch
deleted file mode 100644
index 6515bd043..000000000
--- a/secmark-do-not-return-early-if-there-was-no-error.patch
+++ /dev/null
@@ -1,33 +0,0 @@
-From 15714f7b58011cf3948cab2988abea560240c74f Mon Sep 17 00:00:00 2001
-From: Eric Paris <eparis@redhat.com>
-Date: Tue, 12 Oct 2010 11:40:08 -0400
-Subject: [PATCH] secmark: do not return early if there was no error
-
-Commit 4a5a5c73 attempted to pass decent error messages back to userspace for
-netfilter errors.  In xt_SECMARK.c however the patch screwed up and returned
-on 0 (aka no error) early and didn't finish setting up secmark.  This results
-in a kernel BUG if you use SECMARK.
-
-Signed-off-by: Eric Paris <eparis@redhat.com>
-Acked-by: Paul Moore <paul.moore@hp.com>
-Signed-off-by: James Morris <jmorris@namei.org>
----
- net/netfilter/xt_SECMARK.c |    2 +-
- 1 files changed, 1 insertions(+), 1 deletions(-)
-
-diff --git a/net/netfilter/xt_SECMARK.c b/net/netfilter/xt_SECMARK.c
-index 23b2d6c..364ad16 100644
---- a/net/netfilter/xt_SECMARK.c
-+++ b/net/netfilter/xt_SECMARK.c
-@@ -101,7 +101,7 @@ static int secmark_tg_check(const struct xt_tgchk_param *par)
- 	switch (info->mode) {
- 	case SECMARK_MODE_SEL:
- 		err = checkentry_selinux(info);
--		if (err <= 0)
-+		if (err)
- 			return err;
- 		break;
- 
--- 
-1.7.3.2
-
diff --git a/sources b/sources
index d35ceb4a3..de818e4f3 100644
--- a/sources
+++ b/sources
@@ -1,2 +1,2 @@
 61f3739a73afb6914cb007f37fb09b62  linux-2.6.36.tar.bz2
-1278010cbfefa16acba402d8b0829b66  patch-2.6.36.1-rc1.bz2
+dd38a6caf08df2822f93541ee95aed7d  patch-2.6.36.1.bz2

From 15589b25c9171d800bd0faf570d6b48da1e1270f Mon Sep 17 00:00:00 2001
From: Kyle McMartin <kyle@mcmartin.ca>
Date: Mon, 22 Nov 2010 18:15:07 -0500
Subject: [PATCH 27/56] add a debugging patch around the tty_reopen WARN_ON

---
 debug-tty-print-dev-name.patch | 17 +++++++++++++++++
 kernel.spec                    |  8 ++++++++
 2 files changed, 25 insertions(+)
 create mode 100644 debug-tty-print-dev-name.patch

diff --git a/debug-tty-print-dev-name.patch b/debug-tty-print-dev-name.patch
new file mode 100644
index 000000000..720f6632d
--- /dev/null
+++ b/debug-tty-print-dev-name.patch
@@ -0,0 +1,17 @@
+diff --git a/drivers/char/tty_io.c b/drivers/char/tty_io.c
+index 613c852..09c86d2 100644
+--- a/drivers/char/tty_io.c
++++ b/drivers/char/tty_io.c
+@@ -1322,7 +1322,11 @@ static int tty_reopen(struct tty_struct *tty)
+ 	tty->driver = driver; /* N.B. why do this every time?? */
+ 
+ 	mutex_lock(&tty->ldisc_mutex);
+-	WARN_ON(!test_bit(TTY_LDISC, &tty->flags));
++	if (!test_bit(TTY_LDISC, &tty->flags)) {
++		printk("%s: !test_bit(TTY_LDISC, &tty->flags) dev=%s\n",
++			__func, tty->name);
++		WARN_ON(1);
++	}
+ 	mutex_unlock(&tty->ldisc_mutex);
+ 
+ 	return 0;
diff --git a/kernel.spec b/kernel.spec
index 507bc7a01..adcef1b77 100644
--- a/kernel.spec
+++ b/kernel.spec
@@ -609,6 +609,8 @@ Patch202: linux-2.6-debug-taint-vm.patch
 Patch203: linux-2.6-debug-vm-would-have-oomkilled.patch
 Patch204: linux-2.6-debug-always-inline-kzalloc.patch
 
+Patch210: debug-tty-print-dev-name.patch
+
 Patch380: linux-2.6-defaults-pci_no_msi.patch
 Patch381: linux-2.6-defaults-pci_use_crs.patch
 Patch383: linux-2.6-defaults-aspm.patch
@@ -1201,6 +1203,8 @@ ApplyPatch linux-2.6-debug-taint-vm.patch
 ###FIX###ApplyPatch linux-2.6-debug-vm-would-have-oomkilled.patch
 ApplyPatch linux-2.6-debug-always-inline-kzalloc.patch
 
+ApplyPatch debug-tty-print-dev-name.patch
+
 #
 # PCI
 #
@@ -1959,6 +1963,10 @@ fi
 #                 ||     ||
 
 %changelog
+* Mon Nov 22 2010 Kyle McMartin <kyle@redhat.com>
+- Add a debugging patch to help track down which tty is being
+  poked by plymouth.
+
 * Mon Nov 22 2010 Kyle McMartin <kyle@redhat.com> 2.6.36.1-9
 - Linux stable 2.6.36.1
 

From 6b5e37dd4b06e1ffb3f40d888660f3635f96b6ba Mon Sep 17 00:00:00 2001
From: Kyle McMartin <kyle@mcmartin.ca>
Date: Tue, 23 Nov 2010 09:50:52 -0500
Subject: [PATCH 28/56] fix i8k inline asm to avoid miscompilation with newer
 gcc

---
 fix-i8k-inline-asm.patch | 84 ++++++++++++++++++++++++++++++++++++++++
 kernel.spec              |  8 ++++
 2 files changed, 92 insertions(+)
 create mode 100644 fix-i8k-inline-asm.patch

diff --git a/fix-i8k-inline-asm.patch b/fix-i8k-inline-asm.patch
new file mode 100644
index 000000000..87fadc014
--- /dev/null
+++ b/fix-i8k-inline-asm.patch
@@ -0,0 +1,84 @@
+commit 22d3243de86bc92d874abb7c5b185d5c47aba323
+Author: Jim Bos <jim876@xs4all.nl>
+Date:   Mon Nov 15 21:22:37 2010 +0100
+
+    Fix gcc 4.5.1 miscompiling drivers/char/i8k.c (again)
+    
+    The fix in commit 6b4e81db2552 ("i8k: Tell gcc that *regs gets
+    clobbered") to work around the gcc miscompiling i8k.c to add "+m
+    (*regs)" caused register pressure problems and a build failure.
+    
+    Changing the 'asm' statement to 'asm volatile' instead should prevent
+    that and works around the gcc bug as well, so we can remove the "+m".
+    
+    [ Background on the gcc bug: a memory clobber fails to mark the function
+      the asm resides in as non-pure (aka "__attribute__((const))"), so if
+      the function does nothing else that triggers the non-pure logic, gcc
+      will think that that function has no side effects at all. As a result,
+      callers will be mis-compiled.
+    
+      Adding the "+m" made gcc see that it's not a pure function, and so
+      does "asm volatile". The problem was never really the need to mark
+      "*regs" as changed, since the memory clobber did that part - the
+      problem was just a bug in the gcc "pure" function analysis  - Linus ]
+    
+    Signed-off-by: Jim Bos <jim876@xs4all.nl>
+    Acked-by: Jakub Jelinek <jakub@redhat.com>
+    Cc: Andi Kleen <andi@firstfloor.org>
+    Cc: Andreas Schwab <schwab@linux-m68k.org>
+    Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+
+commit 6b4e81db2552bad04100e7d5ddeed7e848f53b48
+Author: Jim Bos <jim876@xs4all.nl>
+Date:   Sat Nov 13 12:13:53 2010 +0100
+
+    i8k: Tell gcc that *regs gets clobbered
+    
+    More recent GCC caused the i8k driver to stop working, on Slackware
+    compiler was upgraded from gcc-4.4.4 to gcc-4.5.1 after which it didn't
+    work anymore, meaning the driver didn't load or gave total nonsensical
+    output.
+    
+    As it turned out the asm(..) statement forgot to mention it modifies the
+    *regs variable.
+    
+    Credits to Andi Kleen and Andreas Schwab for providing the fix.
+    
+    Signed-off-by: Jim Bos <jim876@xs4all.nl>
+    Cc: Andi Kleen <andi@firstfloor.org>
+    Cc: Andreas Schwab <schwab@linux-m68k.org>
+    Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+
+---
+diff --git a/drivers/char/i8k.c b/drivers/char/i8k.c
+index 3bc0eef..d72433f 100644
+--- a/drivers/char/i8k.c
++++ b/drivers/char/i8k.c
+@@ -120,7 +120,7 @@ static int i8k_smm(struct smm_regs *regs)
+ 	int eax = regs->eax;
+ 
+ #if defined(CONFIG_X86_64)
+-	asm("pushq %%rax\n\t"
++	asm volatile("pushq %%rax\n\t"
+ 		"movl 0(%%rax),%%edx\n\t"
+ 		"pushq %%rdx\n\t"
+ 		"movl 4(%%rax),%%ebx\n\t"
+@@ -146,7 +146,7 @@ static int i8k_smm(struct smm_regs *regs)
+ 		:    "a"(regs)
+ 		:    "%ebx", "%ecx", "%edx", "%esi", "%edi", "memory");
+ #else
+-	asm("pushl %%eax\n\t"
++	asm volatile("pushl %%eax\n\t"
+ 	    "movl 0(%%eax),%%edx\n\t"
+ 	    "push %%edx\n\t"
+ 	    "movl 4(%%eax),%%ebx\n\t"
+@@ -167,7 +167,8 @@ static int i8k_smm(struct smm_regs *regs)
+ 	    "movl %%edx,0(%%eax)\n\t"
+ 	    "lahf\n\t"
+ 	    "shrl $8,%%eax\n\t"
+-	    "andl $1,%%eax\n":"=a"(rc)
++	    "andl $1,%%eax\n"
++	    :"=a"(rc)
+ 	    :    "a"(regs)
+ 	    :    "%ebx", "%ecx", "%edx", "%esi", "%edi", "memory");
+ #endif
diff --git a/kernel.spec b/kernel.spec
index adcef1b77..8b08076ea 100644
--- a/kernel.spec
+++ b/kernel.spec
@@ -728,6 +728,8 @@ Patch12305: xhci_hcd-suspend-resume.patch
 
 Patch12307: tty-restore-tty_ldisc_wait_idle.patch
 
+Patch12308: fix-i8k-inline-asm.patch
+
 %endif
 
 BuildRoot: %{_tmppath}/kernel-%{KVERREL}-root
@@ -1350,6 +1352,8 @@ ApplyPatch xhci_hcd-suspend-resume.patch
 
 ApplyPatch tty-restore-tty_ldisc_wait_idle.patch
 
+ApplyPatch fix-i8k-inline-asm.patch
+
 # END OF PATCH APPLICATIONS
 
 %endif
@@ -1963,6 +1967,10 @@ fi
 #                 ||     ||
 
 %changelog
+* Tue Nov 23 2010 Kyle McMartin <kyle@redhat.com>
+- fix-i8k-inline-asm.patch: backport gcc miscompilation fix from git
+  [22d3243d, 6b4e81db] (rhbz#647677)
+
 * Mon Nov 22 2010 Kyle McMartin <kyle@redhat.com>
 - Add a debugging patch to help track down which tty is being
   poked by plymouth.
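
The gcc problem described in the i8k backport above can be shown with a standalone stub (illustrative only, not the driver code): an inline asm whose only visible effect is its output operand can make the surrounding function look pure, so callers may be miscompiled; marking the asm volatile tells the compiler it has side effects it cannot see. smm_call_stub() is a made-up name and the "nop" merely stands in for the real SMM call sequence.

/* x86 only; a sketch of the constraint/clobber shape used by i8k_smm() */
static inline int smm_call_stub(int arg)
{
        int rc;

        asm volatile("nop"              /* stands in for the real out/in sequence */
                     : "=a" (rc)
                     : "a" (arg)
                     : "memory");
        return rc;
}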

From 79bb6459c7ef296ddc40393eb4da4bcffd3329d9 Mon Sep 17 00:00:00 2001
From: Kyle McMartin <kyle@mcmartin.ca>
Date: Tue, 23 Nov 2010 10:06:02 -0500
Subject: [PATCH 29/56] initialize struct memory to zero in ipc compat
 (CVE-2010-4073)

---
 ipc-zero-struct-memory-for-compat-fns.patch | 73 +++++++++++++++++++++
 kernel.spec                                 |  8 +++
 2 files changed, 81 insertions(+)
 create mode 100644 ipc-zero-struct-memory-for-compat-fns.patch

diff --git a/ipc-zero-struct-memory-for-compat-fns.patch b/ipc-zero-struct-memory-for-compat-fns.patch
new file mode 100644
index 000000000..b682c7df0
--- /dev/null
+++ b/ipc-zero-struct-memory-for-compat-fns.patch
@@ -0,0 +1,73 @@
+From: Dan Rosenberg <drosenberg@vsecurity.com>
+Date: Wed, 27 Oct 2010 22:34:17 +0000 (-0700)
+Subject: ipc: initialize structure memory to zero for compat functions
+X-Git-Tag: v2.6.37-rc1~85^2~50
+X-Git-Url: http://git.kernel.org/?p=linux%2Fkernel%2Fgit%2Ftorvalds%2Flinux-2.6.git;a=commitdiff_plain;h=03145beb455cf5c20a761e8451e30b8a74ba58d9
+
+ipc: initialize structure memory to zero for compat functions
+
+This takes care of leaking uninitialized kernel stack memory to
+userspace from non-zeroed fields in structs in compat ipc functions.
+
+Signed-off-by: Dan Rosenberg <drosenberg@vsecurity.com>
+Cc: Manfred Spraul <manfred@colorfullife.com>
+Cc: Arnd Bergmann <arnd@arndb.de>
+Cc: <stable@kernel.org>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+---
+
+diff --git a/ipc/compat.c b/ipc/compat.c
+index 9dc2c7d..845a287 100644
+--- a/ipc/compat.c
++++ b/ipc/compat.c
+@@ -241,6 +241,8 @@ long compat_sys_semctl(int first, int second, int third, void __user *uptr)
+ 	struct semid64_ds __user *up64;
+ 	int version = compat_ipc_parse_version(&third);
+ 
++	memset(&s64, 0, sizeof(s64));
++
+ 	if (!uptr)
+ 		return -EINVAL;
+ 	if (get_user(pad, (u32 __user *) uptr))
+@@ -421,6 +423,8 @@ long compat_sys_msgctl(int first, int second, void __user *uptr)
+ 	int version = compat_ipc_parse_version(&second);
+ 	void __user *p;
+ 
++	memset(&m64, 0, sizeof(m64));
++
+ 	switch (second & (~IPC_64)) {
+ 	case IPC_INFO:
+ 	case IPC_RMID:
+@@ -594,6 +598,8 @@ long compat_sys_shmctl(int first, int second, void __user *uptr)
+ 	int err, err2;
+ 	int version = compat_ipc_parse_version(&second);
+ 
++	memset(&s64, 0, sizeof(s64));
++
+ 	switch (second & (~IPC_64)) {
+ 	case IPC_RMID:
+ 	case SHM_LOCK:
+diff --git a/ipc/compat_mq.c b/ipc/compat_mq.c
+index d8d1e9f..380ea4f 100644
+--- a/ipc/compat_mq.c
++++ b/ipc/compat_mq.c
+@@ -53,6 +53,9 @@ asmlinkage long compat_sys_mq_open(const char __user *u_name,
+ 	void __user *p = NULL;
+ 	if (u_attr && oflag & O_CREAT) {
+ 		struct mq_attr attr;
++
++		memset(&attr, 0, sizeof(attr));
++
+ 		p = compat_alloc_user_space(sizeof(attr));
+ 		if (get_compat_mq_attr(&attr, u_attr) ||
+ 		    copy_to_user(p, &attr, sizeof(attr)))
+@@ -127,6 +130,8 @@ asmlinkage long compat_sys_mq_getsetattr(mqd_t mqdes,
+ 	struct mq_attr __user *p = compat_alloc_user_space(2 * sizeof(*p));
+ 	long ret;
+ 
++	memset(&mqstat, 0, sizeof(mqstat));
++
+ 	if (u_mqstat) {
+ 		if (get_compat_mq_attr(&mqstat, u_mqstat) ||
+ 		    copy_to_user(p, &mqstat, sizeof(mqstat)))
diff --git a/kernel.spec b/kernel.spec
index 8b08076ea..d3188d470 100644
--- a/kernel.spec
+++ b/kernel.spec
@@ -730,6 +730,8 @@ Patch12307: tty-restore-tty_ldisc_wait_idle.patch
 
 Patch12308: fix-i8k-inline-asm.patch
 
+Patch12400: ipc-zero-struct-memory-for-compat-fns.patch
+
 %endif
 
 BuildRoot: %{_tmppath}/kernel-%{KVERREL}-root
@@ -1354,6 +1356,9 @@ ApplyPatch tty-restore-tty_ldisc_wait_idle.patch
 
 ApplyPatch fix-i8k-inline-asm.patch
 
+# rhbz#648658 (CVE-2010-4073)
+ApplyPatch ipc-zero-struct-memory-for-compat-fns.patch
+
 # END OF PATCH APPLICATIONS
 
 %endif
@@ -1967,6 +1972,9 @@ fi
 #                 ||     ||
 
 %changelog
+* Tue Nov 23 2010 Kyle McMartin <kyle@redhat.com>
+- zero struct memory in ipc compat (CVE-2010-4073) (#648658)
+
 * Tue Nov 23 2010 Kyle McMartin <kyle@redhat.com>
 - fix-i8k-inline-asm.patch: backport gcc miscompilation fix from git
   [22d3243d, 6b4e81db] (rhbz#647677)
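
The rule this fix (and the shm fix that follows) applies, as a minimal kernel-style sketch: zero any stack structure before copying it to user space, so struct padding and fields the code forgets to set cannot leak old stack contents. struct example_info and fill_example_info() below are made-up names, not actual ipc/ code.

#include <linux/errno.h>
#include <linux/string.h>
#include <linux/uaccess.h>

struct example_info {                   /* hypothetical structure */
        int  id;
        long size;
        /* implicit padding or an unset field would otherwise leak */
};

static long fill_example_info(void __user *uptr, int id, long size)
{
        struct example_info out;

        memset(&out, 0, sizeof(out));   /* clear padding and unset fields */
        out.id = id;
        out.size = size;

        if (copy_to_user(uptr, &out, sizeof(out)))
                return -EFAULT;
        return 0;
}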

From 205e1d9754dc714ba93939501079ea6bedde3371 Mon Sep 17 00:00:00 2001
From: Kyle McMartin <kyle@mcmartin.ca>
Date: Tue, 23 Nov 2010 10:55:43 -0500
Subject: [PATCH 30/56] zero struct memory in ipc shm (CVE-2010-4072)

---
 ipc-shm-fix-information-leak-to-user.patch | 30 ++++++++++++++++++++++
 kernel.spec                                |  5 ++++
 2 files changed, 35 insertions(+)
 create mode 100644 ipc-shm-fix-information-leak-to-user.patch

diff --git a/ipc-shm-fix-information-leak-to-user.patch b/ipc-shm-fix-information-leak-to-user.patch
new file mode 100644
index 000000000..b23ad439d
--- /dev/null
+++ b/ipc-shm-fix-information-leak-to-user.patch
@@ -0,0 +1,30 @@
+From: Vasiliy Kulikov <segooon@gmail.com>
+Date: Sat, 30 Oct 2010 14:22:49 +0000 (+0400)
+Subject: ipc: shm: fix information leak to userland
+X-Git-Tag: v2.6.37-rc1~24
+X-Git-Url: http://git.kernel.org/?p=linux%2Fkernel%2Fgit%2Ftorvalds%2Flinux-2.6.git;a=commitdiff_plain;h=3af54c9bd9e6f14f896aac1bb0e8405ae0bc7a44
+
+ipc: shm: fix information leak to userland
+
+The shmid_ds structure is copied to userland with shm_unused{,2,3}
+fields unitialized.  It leads to leaking of contents of kernel stack
+memory.
+
+Signed-off-by: Vasiliy Kulikov <segooon@gmail.com>
+Acked-by: Al Viro <viro@ZenIV.linux.org.uk>
+Cc: stable@kernel.org
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+---
+
+diff --git a/ipc/shm.c b/ipc/shm.c
+index fd658a1..7d3bb22 100644
+--- a/ipc/shm.c
++++ b/ipc/shm.c
+@@ -479,6 +479,7 @@ static inline unsigned long copy_shmid_to_user(void __user *buf, struct shmid64_
+ 	    {
+ 		struct shmid_ds out;
+ 
++		memset(&out, 0, sizeof(out));
+ 		ipc64_perm_to_ipc_perm(&in->shm_perm, &out.shm_perm);
+ 		out.shm_segsz	= in->shm_segsz;
+ 		out.shm_atime	= in->shm_atime;
diff --git a/kernel.spec b/kernel.spec
index d3188d470..2684a58be 100644
--- a/kernel.spec
+++ b/kernel.spec
@@ -731,6 +731,7 @@ Patch12307: tty-restore-tty_ldisc_wait_idle.patch
 Patch12308: fix-i8k-inline-asm.patch
 
 Patch12400: ipc-zero-struct-memory-for-compat-fns.patch
+Patch12401: ipc-shm-fix-information-leak-to-user.patch
 
 %endif
 
@@ -1359,6 +1360,9 @@ ApplyPatch fix-i8k-inline-asm.patch
 # rhbz#648658 (CVE-2010-4073)
 ApplyPatch ipc-zero-struct-memory-for-compat-fns.patch
 
+# rhbz#648656 (CVE-2010-4072)
+ApplyPatch ipc-shm-fix-information-leak-to-user.patch
+
 # END OF PATCH APPLICATIONS
 
 %endif
@@ -1974,6 +1978,7 @@ fi
 %changelog
 * Tue Nov 23 2010 Kyle McMartin <kyle@redhat.com>
 - zero struct memory in ipc compat (CVE-2010-4073) (#648658)
+- zero struct memory in ipc shm (CVE-2010-4072) (#648656)
 
 * Tue Nov 23 2010 Kyle McMartin <kyle@redhat.com>
 - fix-i8k-inline-asm.patch: backport gcc miscompilation fix from git

From ab76abd31e8abb08af4a749f543a16ce44a861b7 Mon Sep 17 00:00:00 2001
From: Kyle McMartin <kyle@mcmartin.ca>
Date: Tue, 23 Nov 2010 11:08:44 -0500
Subject: [PATCH 31/56] fix logic error in INET_DIAG bytecode auditing
 (CVE-2010-3880)

---
 ...-we-run-the-same-bytecode-we-audited.patch | 105 ++++++++++++++++++
 kernel.spec                                   |   6 +
 2 files changed, 111 insertions(+)
 create mode 100644 inet_diag-make-sure-we-run-the-same-bytecode-we-audited.patch

diff --git a/inet_diag-make-sure-we-run-the-same-bytecode-we-audited.patch b/inet_diag-make-sure-we-run-the-same-bytecode-we-audited.patch
new file mode 100644
index 000000000..bb1693fdd
--- /dev/null
+++ b/inet_diag-make-sure-we-run-the-same-bytecode-we-audited.patch
@@ -0,0 +1,105 @@
+From 83962729dfeb1586d2909ae9b5bab204786a9afc Mon Sep 17 00:00:00 2001
+From: Nelson Elhage <nelhage@ksplice.com>
+Date: Wed, 3 Nov 2010 16:35:41 +0000
+Subject: inet_diag: Make sure we actually run the same bytecode we audited.
+
+We were using nlmsg_find_attr() to look up the bytecode by attribute when
+auditing, but then just using the first attribute when actually running
+bytecode. So, if we received a message with two attribute elements, where only
+the second had type INET_DIAG_REQ_BYTECODE, we would validate and run different
+bytecode strings.
+
+Fix this by consistently using nlmsg_find_attr everywhere.
+
+Signed-off-by: Nelson Elhage <nelhage@ksplice.com>
+Signed-off-by: Thomas Graf <tgraf@infradead.org>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+---
+ net/ipv4/inet_diag.c |   27 ++++++++++++++++-----------
+ 1 files changed, 16 insertions(+), 11 deletions(-)
+
+diff --git a/net/ipv4/inet_diag.c b/net/ipv4/inet_diag.c
+index e5fa2dd..7403b9b 100644
+--- a/net/ipv4/inet_diag.c
++++ b/net/ipv4/inet_diag.c
+@@ -490,9 +490,11 @@ static int inet_csk_diag_dump(struct sock *sk,
+ {
+ 	struct inet_diag_req *r = NLMSG_DATA(cb->nlh);
+ 
+-	if (cb->nlh->nlmsg_len > 4 + NLMSG_SPACE(sizeof(*r))) {
++	if (nlmsg_attrlen(cb->nlh, sizeof(*r))) {
+ 		struct inet_diag_entry entry;
+-		struct rtattr *bc = (struct rtattr *)(r + 1);
++		const struct nlattr *bc = nlmsg_find_attr(cb->nlh,
++							  sizeof(*r),
++							  INET_DIAG_REQ_BYTECODE);
+ 		struct inet_sock *inet = inet_sk(sk);
+ 
+ 		entry.family = sk->sk_family;
+@@ -512,7 +514,7 @@ static int inet_csk_diag_dump(struct sock *sk,
+ 		entry.dport = ntohs(inet->inet_dport);
+ 		entry.userlocks = sk->sk_userlocks;
+ 
+-		if (!inet_diag_bc_run(RTA_DATA(bc), RTA_PAYLOAD(bc), &entry))
++		if (!inet_diag_bc_run(nla_data(bc), nla_len(bc), &entry))
+ 			return 0;
+ 	}
+ 
+@@ -527,9 +529,11 @@ static int inet_twsk_diag_dump(struct inet_timewait_sock *tw,
+ {
+ 	struct inet_diag_req *r = NLMSG_DATA(cb->nlh);
+ 
+-	if (cb->nlh->nlmsg_len > 4 + NLMSG_SPACE(sizeof(*r))) {
++	if (nlmsg_attrlen(cb->nlh, sizeof(*r))) {
+ 		struct inet_diag_entry entry;
+-		struct rtattr *bc = (struct rtattr *)(r + 1);
++		const struct nlattr *bc = nlmsg_find_attr(cb->nlh,
++							  sizeof(*r),
++							  INET_DIAG_REQ_BYTECODE);
+ 
+ 		entry.family = tw->tw_family;
+ #if defined(CONFIG_IPV6) || defined (CONFIG_IPV6_MODULE)
+@@ -548,7 +552,7 @@ static int inet_twsk_diag_dump(struct inet_timewait_sock *tw,
+ 		entry.dport = ntohs(tw->tw_dport);
+ 		entry.userlocks = 0;
+ 
+-		if (!inet_diag_bc_run(RTA_DATA(bc), RTA_PAYLOAD(bc), &entry))
++		if (!inet_diag_bc_run(nla_data(bc), nla_len(bc), &entry))
+ 			return 0;
+ 	}
+ 
+@@ -618,7 +622,7 @@ static int inet_diag_dump_reqs(struct sk_buff *skb, struct sock *sk,
+ 	struct inet_diag_req *r = NLMSG_DATA(cb->nlh);
+ 	struct inet_connection_sock *icsk = inet_csk(sk);
+ 	struct listen_sock *lopt;
+-	struct rtattr *bc = NULL;
++	const struct nlattr *bc = NULL;
+ 	struct inet_sock *inet = inet_sk(sk);
+ 	int j, s_j;
+ 	int reqnum, s_reqnum;
+@@ -638,8 +642,9 @@ static int inet_diag_dump_reqs(struct sk_buff *skb, struct sock *sk,
+ 	if (!lopt || !lopt->qlen)
+ 		goto out;
+ 
+-	if (cb->nlh->nlmsg_len > 4 + NLMSG_SPACE(sizeof(*r))) {
+-		bc = (struct rtattr *)(r + 1);
++	if (nlmsg_attrlen(cb->nlh, sizeof(*r))) {
++		bc = nlmsg_find_attr(cb->nlh, sizeof(*r),
++				     INET_DIAG_REQ_BYTECODE);
+ 		entry.sport = inet->inet_num;
+ 		entry.userlocks = sk->sk_userlocks;
+ 	}
+@@ -672,8 +677,8 @@ static int inet_diag_dump_reqs(struct sk_buff *skb, struct sock *sk,
+ 					&ireq->rmt_addr;
+ 				entry.dport = ntohs(ireq->rmt_port);
+ 
+-				if (!inet_diag_bc_run(RTA_DATA(bc),
+-						    RTA_PAYLOAD(bc), &entry))
++				if (!inet_diag_bc_run(nla_data(bc),
++						      nla_len(bc), &entry))
+ 					continue;
+ 			}
+ 
+-- 
+1.7.3.2
+
diff --git a/kernel.spec b/kernel.spec
index 2684a58be..b9326982b 100644
--- a/kernel.spec
+++ b/kernel.spec
@@ -733,6 +733,8 @@ Patch12308: fix-i8k-inline-asm.patch
 Patch12400: ipc-zero-struct-memory-for-compat-fns.patch
 Patch12401: ipc-shm-fix-information-leak-to-user.patch
 
+Patch12405: inet_diag-make-sure-we-run-the-same-bytecode-we-audited.patch
+
 %endif
 
 BuildRoot: %{_tmppath}/kernel-%{KVERREL}-root
@@ -1363,6 +1365,9 @@ ApplyPatch ipc-zero-struct-memory-for-compat-fns.patch
 # rhbz#648656 (CVE-2010-4072)
 ApplyPatch ipc-shm-fix-information-leak-to-user.patch
 
+# rhbz#651264 (CVE-2010-3880)
+ApplyPatch inet_diag-make-sure-we-run-the-same-bytecode-we-audited.patch
+
 # END OF PATCH APPLICATIONS
 
 %endif
@@ -1979,6 +1984,7 @@ fi
 * Tue Nov 23 2010 Kyle McMartin <kyle@redhat.com>
 - zero struct memory in ipc compat (CVE-2010-4073) (#648658)
 - zero struct memory in ipc shm (CVE-2010-4072) (#648656)
+- fix logic error in INET_DIAG bytecode auditing (CVE-2010-3880) (#651264)
 
 * Tue Nov 23 2010 Kyle McMartin <kyle@redhat.com>
 - fix-i8k-inline-asm.patch: backport gcc miscompilation fix from git
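
Reduced to a self-contained sketch with made-up types (the real code is the net/ipv4/inet_diag.c hunk above), the bug class is a validate/use mismatch: the validator located the bytecode attribute by type, while the runner blindly used the first attribute, so a message with a harmless first element could get unaudited bytecode executed. The fix is to look the element up the same way in both places and run exactly what was checked.

#include <stddef.h>

struct rec {
        int type;
        int value;
};

#define WANTED_TYPE 2

static const struct rec *find_by_type(const struct rec *r, size_t n, int type)
{
        size_t i;

        for (i = 0; i < n; i++)
                if (r[i].type == type)
                        return &r[i];
        return NULL;
}

static int run_checked(const struct rec *r, size_t n)
{
        const struct rec *checked = find_by_type(r, n, WANTED_TYPE);

        if (!checked)
                return -1;
        /* use the element that was validated, never blindly r[0] */
        return checked->value;
}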

From 18241e1172b8ac3a7f6c0225da7e3f7939eeb402 Mon Sep 17 00:00:00 2001
From: Kyle McMartin <kyle@mcmartin.ca>
Date: Tue, 23 Nov 2010 11:42:09 -0500
Subject: [PATCH 32/56] posix-cpu-timers: workaround to suppress the problems
 with mt exec (rhbz#656264)

---
 kernel.spec                                   |  7 +++
 ...nd-to-suppress-problems-with-mt-exec.patch | 60 +++++++++++++++++++
 2 files changed, 67 insertions(+)
 create mode 100644 posix-cpu-timers-workaround-to-suppress-problems-with-mt-exec.patch

diff --git a/kernel.spec b/kernel.spec
index b9326982b..735d918bd 100644
--- a/kernel.spec
+++ b/kernel.spec
@@ -735,6 +735,8 @@ Patch12401: ipc-shm-fix-information-leak-to-user.patch
 
 Patch12405: inet_diag-make-sure-we-run-the-same-bytecode-we-audited.patch
 
+Patch12406: posix-cpu-timers-workaround-to-suppress-problems-with-mt-exec.patch
+
 %endif
 
 BuildRoot: %{_tmppath}/kernel-%{KVERREL}-root
@@ -1368,6 +1370,9 @@ ApplyPatch ipc-shm-fix-information-leak-to-user.patch
 # rhbz#651264 (CVE-2010-3880)
 ApplyPatch inet_diag-make-sure-we-run-the-same-bytecode-we-audited.patch
 
+# rhbz#656264
+ApplyPatch posix-cpu-timers-workaround-to-suppress-problems-with-mt-exec.patch
+
 # END OF PATCH APPLICATIONS
 
 %endif
@@ -1985,6 +1990,8 @@ fi
 - zero struct memory in ipc compat (CVE-2010-4073) (#648658)
 - zero struct memory in ipc shm (CVE-2010-4072) (#648656)
 - fix logic error in INET_DIAG bytecode auditing (CVE-2010-3880) (#651264)
+- posix-cpu-timers: workaround to suppress the problems with mt exec
+  (rhbz#656264)
 
 * Tue Nov 23 2010 Kyle McMartin <kyle@redhat.com>
 - fix-i8k-inline-asm.patch: backport gcc miscompilation fix from git
diff --git a/posix-cpu-timers-workaround-to-suppress-problems-with-mt-exec.patch b/posix-cpu-timers-workaround-to-suppress-problems-with-mt-exec.patch
new file mode 100644
index 000000000..fbc4d7cce
--- /dev/null
+++ b/posix-cpu-timers-workaround-to-suppress-problems-with-mt-exec.patch
@@ -0,0 +1,60 @@
+From 78dfb59b62d1a7735cd28fc2783e58c122954fae Mon Sep 17 00:00:00 2001
+From: Oleg Nesterov <oleg@redhat.com>
+Date: Fri, 5 Nov 2010 16:53:42 +0100
+Subject: posix-cpu-timers: workaround to suppress the problems with mt exec
+
+posix-cpu-timers.c correctly assumes that the dying process does
+posix_cpu_timers_exit_group() and removes all !CPUCLOCK_PERTHREAD
+timers from signal->cpu_timers list.
+
+But, it also assumes that timer->it.cpu.task is always the group
+leader, and thus the dead ->task means the dead thread group.
+
+This is obviously not true after de_thread() changes the leader.
+After that almost every posix_cpu_timer_ method has problems.
+
+It is not simple to fix this bug correctly. First of all, I think
+that timer->it.cpu should use struct pid instead of task_struct.
+Also, the locking should be reworked completely. In particular,
+tasklist_lock should not be used at all. This all needs a lot of
+nontrivial and hard-to-test changes.
+
+Change __exit_signal() to do posix_cpu_timers_exit_group() when
+the old leader dies during exec. This is not the fix, just the
+temporary hack to hide the problem for 2.6.37 and stable. IOW,
+this is obviously wrong but this is what we currently have anyway:
+cpu timers do not work after mt exec.
+
+In theory this change adds another race. The exiting leader can
+detach the timers which were attached to the new leader. However,
+the window between de_thread() and release_task() is small, we
+can pretend that sys_timer_create() was called before de_thread().
+
+Signed-off-by: Oleg Nesterov <oleg@redhat.com>
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+---
+ kernel/exit.c |    8 ++++++++
+ 1 files changed, 8 insertions(+), 0 deletions(-)
+
+diff --git a/kernel/exit.c b/kernel/exit.c
+index 0312022..1eff9e4 100644
+--- a/kernel/exit.c
++++ b/kernel/exit.c
+@@ -95,6 +95,14 @@ static void __exit_signal(struct task_struct *tsk)
+ 		sig->tty = NULL;
+ 	} else {
+ 		/*
++		 * This can only happen if the caller is de_thread().
++		 * FIXME: this is the temporary hack, we should teach
++		 * posix-cpu-timers to handle this case correctly.
++		 */
++		if (unlikely(has_group_leader_pid(tsk)))
++			posix_cpu_timers_exit_group(tsk);
++
++		/*
+ 		 * If there is any task waiting for the group exit
+ 		 * then notify it:
+ 		 */
+-- 
+1.7.3.2
+

From 28f88fd9570cdd3db964e028ee9ebce3b4d8f75b Mon Sep 17 00:00:00 2001
From: Kyle McMartin <kyle@mcmartin.ca>
Date: Wed, 24 Nov 2010 06:54:32 -0500
Subject: [PATCH 33/56] fix graphics on hp 2530p which were broken due to pci
 crs fixes from upstream (korg#23542)

---
 kernel.spec                                   |  5 ++
 ...-alloc-pci-from-the-last-1M-below-4G.patch | 64 +++++++++++++++++++
 2 files changed, 69 insertions(+)
 create mode 100644 x86-never-alloc-pci-from-the-last-1M-below-4G.patch

diff --git a/kernel.spec b/kernel.spec
index 735d918bd..3beeafd73 100644
--- a/kernel.spec
+++ b/kernel.spec
@@ -715,6 +715,7 @@ Patch12204: linux-2.6-enable-more-pci-autosuspend.patch
 Patch12205: runtime_pm_fixups.patch
 
 Patch12225: pci-crs-fixes.patch
+Patch12226: x86-never-alloc-pci-from-the-last-1M-below-4G.patch
 
 Patch12300: btusb-macbookpro-7-1.patch
 Patch12301: btusb-macbookpro-6-2.patch
@@ -1344,6 +1345,7 @@ ApplyPatch runtime_pm_fixups.patch
 # PCI patches to fix problems with _CRS
 # ( from linux-pci list )
 ApplyPatch pci-crs-fixes.patch
+ApplyPatch x86-never-alloc-pci-from-the-last-1M-below-4G.patch
 
 ApplyPatch btusb-macbookpro-7-1.patch
 ApplyPatch btusb-macbookpro-6-2.patch
@@ -1986,6 +1988,9 @@ fi
 #                 ||     ||
 
 %changelog
+* Wed Nov 24 2010 Kyle McMartin <kyle@redhat.com>
+- Fix graphics on HP 2530p (korg#23542)
+
 * Tue Nov 23 2010 Kyle McMartin <kyle@redhat.com>
 - zero struct memory in ipc compat (CVE-2010-4073) (#648658)
 - zero struct memory in ipc shm (CVE-2010-4072) (#648656)
diff --git a/x86-never-alloc-pci-from-the-last-1M-below-4G.patch b/x86-never-alloc-pci-from-the-last-1M-below-4G.patch
new file mode 100644
index 000000000..11c8304e1
--- /dev/null
+++ b/x86-never-alloc-pci-from-the-last-1M-below-4G.patch
@@ -0,0 +1,64 @@
+commit 0dda4d7a8d071c58aa22268fd784869b28b5381b
+Author: Bjorn Helgaas <bjorn.helgaas@hp.com>
+Date:   Fri Nov 19 16:25:39 2010 -0700
+
+    x86/PCI: never allocate PCI space from the last 1M below 4G
+    
+    The last 1M before 4G contains the processor restart vector and usually
+    the system ROM.  We don't know the actual ROM size; I chose 1M because
+    that's how much Windows 7 appears to avoid.
+    
+    Without this check, we can allocate PCI space that will never work.  On
+    Matthew's HP 2530p, we put the Intel GTT "Flush Page" at the very last
+    page, which causes a spontaneous power-off:
+    
+      pci_root PNP0A08:00: host bridge window [mem 0xfee01000-0xffffffff]
+      fffff000-ffffffff : Intel Flush Page (assigned by intel-gtt)
+    
+    Reference: https://bugzilla.kernel.org/show_bug.cgi?id=23542
+    Reported-by: Matthew Garrett <mjg@redhat.com>
+    Signed-off-by: Bjorn Helgaas <bjorn.helgaas@hp.com>
+
+diff --git a/arch/x86/include/asm/e820.h b/arch/x86/include/asm/e820.h
+index 5be1542..c1e908f 100644
+--- a/arch/x86/include/asm/e820.h
++++ b/arch/x86/include/asm/e820.h
+@@ -72,6 +72,9 @@ struct e820map {
+ #define BIOS_BEGIN		0x000a0000
+ #define BIOS_END		0x00100000
+ 
++#define BIOS_ROM_BASE		0xfff00000
++#define BIOS_ROM_END		0x100000000ULL
++
+ #ifdef __KERNEL__
+ /* see comment in arch/x86/kernel/e820.c */
+ extern struct e820map e820;
+diff --git a/arch/x86/pci/i386.c b/arch/x86/pci/i386.c
+index c4bb261..6890241 100644
+--- a/arch/x86/pci/i386.c
++++ b/arch/x86/pci/i386.c
+@@ -65,8 +65,14 @@ pcibios_align_resource(void *data, const struct resource *res,
+ 			resource_size_t size, resource_size_t align)
+ {
+ 	struct pci_dev *dev = data;
+-	resource_size_t start = round_down(res->end - size + 1, align);
++	resource_size_t start, end = res->end;
+ 
++	/* Make sure we don't allocate from the last 1M before 4G */
++	if (res->flags & IORESOURCE_MEM) {
++		if (end >= BIOS_ROM_BASE && end < BIOS_ROM_END)
++			end = BIOS_ROM_BASE - 1;
++	}
++	start = round_down(end - size + 1, align);
+ 	if (res->flags & IORESOURCE_IO) {
+ 
+ 		/*
+@@ -80,6 +86,8 @@ pcibios_align_resource(void *data, const struct resource *res,
+ 	} else if (res->flags & IORESOURCE_MEM) {
+ 		if (start < BIOS_END)
+ 			start = res->end;	/* fail; no space */
++		if (start >= BIOS_ROM_BASE && start < BIOS_ROM_END)
++			start = ALIGN(BIOS_ROM_END, align);
+ 	}
+ 	return start;
+ }
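
The arithmetic the patch adds, as a small self-contained sketch (pick_start() and round_down_u64() are made-up helpers, not the pcibios code): clamp the candidate end below the last 1M under 4G, then round the start down to the requested alignment. With the host bridge window from the log above and a 4K page, this keeps the allocation well clear of 0xfffff000.

#include <stdint.h>

#define BIOS_ROM_BASE   0xfff00000ULL
#define BIOS_ROM_END    0x100000000ULL

/* align must be a power of two */
static uint64_t round_down_u64(uint64_t x, uint64_t align)
{
        return x & ~(align - 1);
}

/* assumes size <= window_end; edge cases are left out of the sketch */
static uint64_t pick_start(uint64_t window_end, uint64_t size, uint64_t align)
{
        uint64_t end = window_end;

        /* never hand out space from the last 1M below 4G */
        if (end >= BIOS_ROM_BASE && end < BIOS_ROM_END)
                end = BIOS_ROM_BASE - 1;

        return round_down_u64(end - size + 1, align);
}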

From f43af41c5a46912182c105f4bb4b50e5fabc1b2d Mon Sep 17 00:00:00 2001
From: Kyle McMartin <kyle@mcmartin.ca>
Date: Wed, 24 Nov 2010 11:47:22 -0500
Subject: [PATCH 34/56] __func -> __func__

---
 debug-tty-print-dev-name.patch | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/debug-tty-print-dev-name.patch b/debug-tty-print-dev-name.patch
index 720f6632d..7b2e036a0 100644
--- a/debug-tty-print-dev-name.patch
+++ b/debug-tty-print-dev-name.patch
@@ -9,7 +9,7 @@ index 613c852..09c86d2 100644
 -	WARN_ON(!test_bit(TTY_LDISC, &tty->flags));
 +	if (!test_bit(TTY_LDISC, &tty->flags)) {
 +		printk("%s: !test_bit(TTY_LDISC, &tty->flags) dev=%s\n",
-+			__func, tty->name);
++			__func__, tty->name);
 +		WARN_ON(1);
 +	}
  	mutex_unlock(&tty->ldisc_mutex);

From 1f26442712c394c62b90e80e27b07b978bfc9cb5 Mon Sep 17 00:00:00 2001
From: Kyle McMartin <kyle@mcmartin.ca>
Date: Wed, 24 Nov 2010 14:13:32 -0500
Subject: [PATCH 35/56] drm/radeon/kms: MC vram map needs to be >= pci aperture
 size (fdo#28402)

---
 kernel.spec                                   |  7 ++++
 ...vram-map-needs-to-be-gt-pci-aperture.patch | 32 +++++++++++++++++++
 2 files changed, 39 insertions(+)
 create mode 100644 radeon-mc-vram-map-needs-to-be-gt-pci-aperture.patch

diff --git a/kernel.spec b/kernel.spec
index 3beeafd73..92bfc3d93 100644
--- a/kernel.spec
+++ b/kernel.spec
@@ -657,6 +657,8 @@ Patch1819: drm-intel-big-hammer.patch
 Patch1825: drm-intel-make-lvds-work.patch
 Patch1900: linux-2.6-intel-iommu-igfx.patch
 
+Patch1920: radeon-mc-vram-map-needs-to-be-gt-pci-aperture.patch
+
 # linux1394 git patches
 Patch2200: linux-2.6-firewire-git-update.patch
 Patch2201: linux-2.6-firewire-git-pending.patch
@@ -1288,6 +1290,8 @@ ApplyOptionalPatch drm-nouveau-updates.patch
 #ApplyPatch drm-intel-make-lvds-work.patch
 ApplyPatch linux-2.6-intel-iommu-igfx.patch
 
+ApplyPatch radeon-mc-vram-map-needs-to-be-gt-pci-aperture.patch
+
 # linux1394 git patches
 #ApplyPatch linux-2.6-firewire-git-update.patch
 #ApplyOptionalPatch linux-2.6-firewire-git-pending.patch
@@ -1988,6 +1992,9 @@ fi
 #                 ||     ||
 
 %changelog
+* Wed Nov 24 2010 Kyle McMartin <kyle@redhat.com>
+- drm/radeon/kms: MC vram map needs to be >= pci aperture size (fdo#28402)
+
 * Wed Nov 24 2010 Kyle McMartin <kyle@redhat.com>
 - Fix graphics on HP 2530p (korg#23542)
 
diff --git a/radeon-mc-vram-map-needs-to-be-gt-pci-aperture.patch b/radeon-mc-vram-map-needs-to-be-gt-pci-aperture.patch
new file mode 100644
index 000000000..88fa35e2f
--- /dev/null
+++ b/radeon-mc-vram-map-needs-to-be-gt-pci-aperture.patch
@@ -0,0 +1,32 @@
+commit b7d8cce5b558e0c0aa6898c9865356481598b46d
+Author: Alex Deucher <alexdeucher@gmail.com>
+Date:   Mon Oct 25 19:44:00 2010 -0400
+
+    drm/radeon/kms: MC vram map needs to be >= pci aperture size
+    
+    The vram map in the radeon memory controller needs to be
+    >= the pci aperture size.  Fixes:
+    https://bugs.freedesktop.org/show_bug.cgi?id=28402
+    
+    The problematic cards in the above bug have 64 MB of vram,
+    but the pci aperture is 128 MB and the MC vram map was only
+    64 MB.  This can lead to hangs.
+    
+    Signed-off-by: Alex Deucher <alexdeucher@gmail.com>
+    Cc: stable@kernel.org
+    Signed-off-by: Dave Airlie <airlied@redhat.com>
+
+diff --git a/drivers/gpu/drm/radeon/r100.c b/drivers/gpu/drm/radeon/r100.c
+index 6112ac9..6d1540c 100644
+--- a/drivers/gpu/drm/radeon/r100.c
++++ b/drivers/gpu/drm/radeon/r100.c
+@@ -2270,6 +2270,9 @@ void r100_vram_init_sizes(struct radeon_device *rdev)
+ 		/* Fix for RN50, M6, M7 with 8/16/32(??) MBs of VRAM - 
+ 		 * Novell bug 204882 + along with lots of ubuntu ones
+ 		 */
++		if (rdev->mc.aper_size > config_aper_size)
++			config_aper_size = rdev->mc.aper_size;
++
+ 		if (config_aper_size > rdev->mc.real_vram_size)
+ 			rdev->mc.mc_vram_size = config_aper_size;
+ 		else
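
Restated as a sketch (mc_vram_map_size() is a made-up helper, not the radeon code): after the change the MC window is effectively the largest of the PCI aperture size, the configured aperture size, and the real VRAM size, so a 64 MB card behind a 128 MB aperture gets a 128 MB map instead of hanging.

static unsigned long mc_vram_map_size(unsigned long aper_size,
                                      unsigned long config_aper_size,
                                      unsigned long real_vram_size)
{
        /* the MC-visible window must cover at least the PCI aperture */
        if (aper_size > config_aper_size)
                config_aper_size = aper_size;

        return config_aper_size > real_vram_size ? config_aper_size
                                                 : real_vram_size;
}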

From a92338bddd9e14352f0f00a5addf242e1a1c54cf Mon Sep 17 00:00:00 2001
From: Kyle McMartin <kyle@mcmartin.ca>
Date: Wed, 24 Nov 2010 16:34:04 -0500
Subject: [PATCH 36/56] disable fscache for cifs (#656498)

---
 config-generic | 2 +-
 kernel.spec    | 3 +++
 2 files changed, 4 insertions(+), 1 deletion(-)

diff --git a/config-generic b/config-generic
index 67c580f6c..28597be1f 100644
--- a/config-generic
+++ b/config-generic
@@ -3460,7 +3460,7 @@ CONFIG_CIFS_EXPERIMENTAL=y
 CONFIG_CIFS_UPCALL=y
 CONFIG_CIFS_XATTR=y
 CONFIG_CIFS_POSIX=y
-CONFIG_CIFS_FSCACHE=y
+# CONFIG_CIFS_FSCACHE is not set
 CONFIG_CIFS_WEAK_PW_HASH=y
 # CONFIG_CIFS_DEBUG2 is not set
 CONFIG_CIFS_DFS_UPCALL=y
diff --git a/kernel.spec b/kernel.spec
index 92bfc3d93..dc2cfa81b 100644
--- a/kernel.spec
+++ b/kernel.spec
@@ -1992,6 +1992,9 @@ fi
 #                 ||     ||
 
 %changelog
+* Wed Nov 24 2010 Kyle McMartin <kyle@redhat.com>
+- Disable FSCACHE for CIFS until issues are addressed. (#656498)
+
 * Wed Nov 24 2010 Kyle McMartin <kyle@redhat.com>
 - drm/radeon/kms: MC vram map needs to be >= pci aperture size (fdo#28402)
 

From 275037eacd0253b6949f1a389a671d0c2af04179 Mon Sep 17 00:00:00 2001
From: Kyle McMartin <kyle@mcmartin.ca>
Date: Thu, 25 Nov 2010 10:02:15 -0500
Subject: [PATCH 37/56] and your little ldisc too

---
 debug-tty-print-dev-name.patch | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/debug-tty-print-dev-name.patch b/debug-tty-print-dev-name.patch
index 7b2e036a0..4b0b06633 100644
--- a/debug-tty-print-dev-name.patch
+++ b/debug-tty-print-dev-name.patch
@@ -8,8 +8,8 @@ index 613c852..09c86d2 100644
  	mutex_lock(&tty->ldisc_mutex);
 -	WARN_ON(!test_bit(TTY_LDISC, &tty->flags));
 +	if (!test_bit(TTY_LDISC, &tty->flags)) {
-+		printk("%s: !test_bit(TTY_LDISC, &tty->flags) dev=%s\n",
-+			__func__, tty->name);
++		printk("%s: !test_bit(TTY_LDISC, &tty->flags) dev=%s ldisc=%s\n",
++			__func__, tty->name, tty->ldisc ? tty->ldisc->name : NULL);
 +		WARN_ON(1);
 +	}
  	mutex_unlock(&tty->ldisc_mutex);

From e0db5a7a29754bc82ecc4212d3c5e31767245d95 Mon Sep 17 00:00:00 2001
From: kyle <kyle@mcmartin.ca>
Date: Thu, 25 Nov 2010 11:34:29 -0500
Subject: [PATCH 38/56] oops, it's ->ops->name

---
 debug-tty-print-dev-name.patch | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/debug-tty-print-dev-name.patch b/debug-tty-print-dev-name.patch
index 4b0b06633..507dfe5f1 100644
--- a/debug-tty-print-dev-name.patch
+++ b/debug-tty-print-dev-name.patch
@@ -9,7 +9,7 @@ index 613c852..09c86d2 100644
 -	WARN_ON(!test_bit(TTY_LDISC, &tty->flags));
 +	if (!test_bit(TTY_LDISC, &tty->flags)) {
 +		printk("%s: !test_bit(TTY_LDISC, &tty->flags) dev=%s ldisc=%s\n",
-+			__func__, tty->name, tty->ldisc ? tty->ldisc->name : NULL);
++			__func__, tty->name, tty->ldisc ? tty->ldisc->ops ? tty->ldisc->ops->name : NULL : NULL);
 +		WARN_ON(1);
 +	}
  	mutex_unlock(&tty->ldisc_mutex);

From 1fbeac414bb2911fc4730d1a9b7ef4c2a825623b Mon Sep 17 00:00:00 2001
From: kyle <kyle@mcmartin.ca>
Date: Thu, 25 Nov 2010 11:35:15 -0500
Subject: [PATCH 39/56] i loathe building debuginfo locally

---
 Makefile | 2 ++
 1 file changed, 2 insertions(+)

diff --git a/Makefile b/Makefile
index bb2c37b39..f925397d1 100644
--- a/Makefile
+++ b/Makefile
@@ -99,6 +99,8 @@ debug:
 	@perl -pi -e 's/^%define debugbuildsenabled 1/%define debugbuildsenabled 0/' kernel.spec
 	@perl -pi -e 's/^%define rawhide_skip_docs 0/%define rawhide_skip_docs 1/' kernel.spec
 
+nodebuginfo:
+	@perl -pi -e 's/^%define with_debuginfo %\{\?_without_debuginfo: 0\} %\{\?\!_without_debuginfo: 1\}/%define with_debuginfo %\{\?_without_debuginfo: 0\} %\{\?\!_without_debuginfo: 0\}/' kernel.spec
 nodebug: release
 	@perl -pi -e 's/^%define debugbuildsenabled 1/%define debugbuildsenabled 0/' kernel.spec
 release:

From e7fe04dcca1b617a2dc6da3918159a81963e6831 Mon Sep 17 00:00:00 2001
From: Kyle McMartin <kyle@mcmartin.ca>
Date: Fri, 26 Nov 2010 11:46:23 -0500
Subject: [PATCH 40/56] hda/realtek: handle unset external amp bits

---
 hda_realtek-handle-unset-external-amp-bits.patch | 12 ++++++++++++
 kernel.spec                                      |  8 ++++++++
 2 files changed, 20 insertions(+)
 create mode 100644 hda_realtek-handle-unset-external-amp-bits.patch

diff --git a/hda_realtek-handle-unset-external-amp-bits.patch b/hda_realtek-handle-unset-external-amp-bits.patch
new file mode 100644
index 000000000..8519fd32a
--- /dev/null
+++ b/hda_realtek-handle-unset-external-amp-bits.patch
@@ -0,0 +1,12 @@
+diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
+index 0ac6aed..53f503d 100644
+--- a/sound/pci/hda/patch_realtek.c
++++ b/sound/pci/hda/patch_realtek.c
+@@ -1614,6 +1614,7 @@ do_sku:
+ 		spec->init_amp = ALC_INIT_GPIO3;
+ 		break;
+ 	case 5:
++	default:
+ 		spec->init_amp = ALC_INIT_DEFAULT;
+ 		break;
+ 	}
diff --git a/kernel.spec b/kernel.spec
index dc2cfa81b..8b241d2b7 100644
--- a/kernel.spec
+++ b/kernel.spec
@@ -740,6 +740,8 @@ Patch12405: inet_diag-make-sure-we-run-the-same-bytecode-we-audited.patch
 
 Patch12406: posix-cpu-timers-workaround-to-suppress-problems-with-mt-exec.patch
 
+Patch12407: hda_realtek-handle-unset-external-amp-bits.patch
+
 %endif
 
 BuildRoot: %{_tmppath}/kernel-%{KVERREL}-root
@@ -1379,6 +1381,9 @@ ApplyPatch inet_diag-make-sure-we-run-the-same-bytecode-we-audited.patch
 # rhbz#656264
 ApplyPatch posix-cpu-timers-workaround-to-suppress-problems-with-mt-exec.patch
 
+# rhbz#657388
+ApplyPatch hda_realtek-handle-unset-external-amp-bits.patch
+
 # END OF PATCH APPLICATIONS
 
 %endif
@@ -1992,6 +1997,9 @@ fi
 #                 ||     ||
 
 %changelog
+* Fri Nov 26 2010 Kyle McMartin <kyle@redhat.com>
+- hda/realtek: handle unset external amp config (#657388)
+
 * Wed Nov 24 2010 Kyle McMartin <kyle@redhat.com>
 - Disable FSCACHE for CIFS until issues are addressed. (#656498)
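
What the one-line realtek change above does, as a self-contained sketch (pick_mode() and the enum are made-up, not the HDA code): without a default arm, an unexpected value decoded from the subsystem ID could leave the amp-init mode unset; unknown values now fall through with case 5 to the default setup.

enum init_mode { INIT_NONE, INIT_GPIO3, INIT_DEFAULT };

static enum init_mode pick_mode(int coded_value)
{
        enum init_mode mode = INIT_NONE;

        switch (coded_value) {
        case 3:
                mode = INIT_GPIO3;
                break;
        case 5:
        default:        /* unknown codes fall back to the default amp setup */
                mode = INIT_DEFAULT;
                break;
        }
        return mode;
}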
 

From 9a17bea44f336f738a74e1768cf1d93790d2c773 Mon Sep 17 00:00:00 2001
From: Kyle McMartin <kyle@mcmartin.ca>
Date: Fri, 26 Nov 2010 14:48:47 -0500
Subject: [PATCH 41/56] plug various tty/serial stack leaks

---
 kernel.spec                                   |  10 +
 ...nt-changeover-for-other-main-devices.patch | 980 ++++++++++++++++++
 tty-make-tiocgicount-a-handler.patch          | 218 ++++
 3 files changed, 1208 insertions(+)
 create mode 100644 tty-icount-changeover-for-other-main-devices.patch
 create mode 100644 tty-make-tiocgicount-a-handler.patch

diff --git a/kernel.spec b/kernel.spec
index 8b241d2b7..e8e3109f0 100644
--- a/kernel.spec
+++ b/kernel.spec
@@ -742,6 +742,9 @@ Patch12406: posix-cpu-timers-workaround-to-suppress-problems-with-mt-exec.patch
 
 Patch12407: hda_realtek-handle-unset-external-amp-bits.patch
 
+Patch12410: tty-make-tiocgicount-a-handler.patch
+Patch12411: tty-icount-changeover-for-other-main-devices.patch
+
 %endif
 
 BuildRoot: %{_tmppath}/kernel-%{KVERREL}-root
@@ -1384,6 +1387,10 @@ ApplyPatch posix-cpu-timers-workaround-to-suppress-problems-with-mt-exec.patch
 # rhbz#657388
 ApplyPatch hda_realtek-handle-unset-external-amp-bits.patch
 
+# CVE-2010-4077, CVE-2010-4075 (rhbz#648660, #648663)
+ApplyPatch tty-make-tiocgicount-a-handler.patch
+ApplyPatch tty-icount-changeover-for-other-main-devices.patch
+
 # END OF PATCH APPLICATIONS
 
 %endif
@@ -1997,6 +2004,9 @@ fi
 #                 ||     ||
 
 %changelog
+* Fri Nov 26 2010 Kyle McMartin <kyle@redhat.com>
+- Plug stack leaks in tty/serial drivers. (#648663, #648660)
+
 * Fri Nov 26 2010 Kyle McMartin <kyle@redhat.com>
 - hda/realtek: handle unset external amp config (#657388)
 
diff --git a/tty-icount-changeover-for-other-main-devices.patch b/tty-icount-changeover-for-other-main-devices.patch
new file mode 100644
index 000000000..9ea0e4566
--- /dev/null
+++ b/tty-icount-changeover-for-other-main-devices.patch
@@ -0,0 +1,980 @@
+From 9f13e0aa6e05b9b773f952a435afdc0b4d10e5dc Mon Sep 17 00:00:00 2001
+From: Alan Cox <alan@linux.intel.com>
+Date: Thu, 16 Sep 2010 18:21:52 +0100
+Subject: [PATCH 2/2] tty: icount changeover for other main devices
+
+Again basically cut and paste
+
+Convert the main driver set to use the hooks for GICOUNT
+
+Signed-off-by: Alan Cox <alan@linux.intel.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+---
+ arch/ia64/hp/sim/simserial.c      |   12 +------
+ drivers/char/amiserial.c          |   56 ++++++++++++++++-------------
+ drivers/char/cyclades.c           |   49 +++++++++++++------------
+ drivers/char/ip2/ip2main.c        |   72 ++++++++++++++++++++++---------------
+ drivers/char/mxser.c              |   62 ++++++++++++++++++--------------
+ drivers/char/nozomi.c             |   37 +++++++++----------
+ drivers/char/pcmcia/synclink_cs.c |   60 ++++++++++++++-----------------
+ drivers/char/synclink.c           |   73 +++++++++++++++++--------------------
+ drivers/char/synclink_gt.c        |   55 +++++++++++++++-------------
+ drivers/char/synclinkmp.c         |   61 ++++++++++++++-----------------
+ drivers/serial/68360serial.c      |   51 +++++++++++++-------------
+ net/bluetooth/rfcomm/tty.c        |    4 --
+ 12 files changed, 297 insertions(+), 295 deletions(-)
+
+diff --git a/arch/ia64/hp/sim/simserial.c b/arch/ia64/hp/sim/simserial.c
+index 1e8d71a..13633da 100644
+--- a/arch/ia64/hp/sim/simserial.c
++++ b/arch/ia64/hp/sim/simserial.c
+@@ -395,7 +395,7 @@ static int rs_ioctl(struct tty_struct *tty, struct file * file,
+ {
+ 	if ((cmd != TIOCGSERIAL) && (cmd != TIOCSSERIAL) &&
+ 	    (cmd != TIOCSERCONFIG) && (cmd != TIOCSERGSTRUCT) &&
+-	    (cmd != TIOCMIWAIT) && (cmd != TIOCGICOUNT)) {
++	    (cmd != TIOCMIWAIT)) {
+ 		if (tty->flags & (1 << TTY_IO_ERROR))
+ 		    return -EIO;
+ 	}
+@@ -433,16 +433,6 @@ static int rs_ioctl(struct tty_struct *tty, struct file * file,
+ 		case TIOCMIWAIT:
+ 			printk(KERN_INFO "rs_ioctl: TIOCMIWAIT: called\n");
+ 			return 0;
+-		/*
+-		 * Get counter of input serial line interrupts (DCD,RI,DSR,CTS)
+-		 * Return: write counters to the user passed counter struct
+-		 * NB: both 1->0 and 0->1 transitions are counted except for
+-		 *     RI where only 0->1 is counted.
+-		 */
+-		case TIOCGICOUNT:
+-			printk(KERN_INFO "rs_ioctl: TIOCGICOUNT called\n");
+-			return 0;
+-
+ 		case TIOCSERGWILD:
+ 		case TIOCSERSWILD:
+ 			/* "setserial -W" is called in Debian boot */
+diff --git a/drivers/char/amiserial.c b/drivers/char/amiserial.c
+index a11c8c9..b0a7046 100644
+--- a/drivers/char/amiserial.c
++++ b/drivers/char/amiserial.c
+@@ -1263,6 +1263,36 @@ static int rs_break(struct tty_struct *tty, int break_state)
+ 	return 0;
+ }
+ 
++/*
++ * Get counter of input serial line interrupts (DCD,RI,DSR,CTS)
++ * Return: write counters to the user passed counter struct
++ * NB: both 1->0 and 0->1 transitions are counted except for
++ *     RI where only 0->1 is counted.
++ */
++static int rs_get_icount(struct tty_struct *tty,
++				struct serial_icounter_struct *icount)
++{
++	struct async_struct *info = tty->driver_data;
++	struct async_icount cnow;
++	unsigned long flags;
++
++	local_irq_save(flags);
++	cnow = info->state->icount;
++	local_irq_restore(flags);
++	icount->cts = cnow.cts;
++	icount->dsr = cnow.dsr;
++	icount->rng = cnow.rng;
++	icount->dcd = cnow.dcd;
++	icount->rx = cnow.rx;
++	icount->tx = cnow.tx;
++	icount->frame = cnow.frame;
++	icount->overrun = cnow.overrun;
++	icount->parity = cnow.parity;
++	icount->brk = cnow.brk;
++	icount->buf_overrun = cnow.buf_overrun;
++
++	return 0;
++}
+ 
+ static int rs_ioctl(struct tty_struct *tty, struct file * file,
+ 		    unsigned int cmd, unsigned long arg)
+@@ -1332,31 +1362,6 @@ static int rs_ioctl(struct tty_struct *tty, struct file * file,
+ 			}
+ 			/* NOTREACHED */
+ 
+-		/* 
+-		 * Get counter of input serial line interrupts (DCD,RI,DSR,CTS)
+-		 * Return: write counters to the user passed counter struct
+-		 * NB: both 1->0 and 0->1 transitions are counted except for
+-		 *     RI where only 0->1 is counted.
+-		 */
+-		case TIOCGICOUNT:
+-			local_irq_save(flags);
+-			cnow = info->state->icount;
+-			local_irq_restore(flags);
+-			icount.cts = cnow.cts;
+-			icount.dsr = cnow.dsr;
+-			icount.rng = cnow.rng;
+-			icount.dcd = cnow.dcd;
+-			icount.rx = cnow.rx;
+-			icount.tx = cnow.tx;
+-			icount.frame = cnow.frame;
+-			icount.overrun = cnow.overrun;
+-			icount.parity = cnow.parity;
+-			icount.brk = cnow.brk;
+-			icount.buf_overrun = cnow.buf_overrun;
+-
+-			if (copy_to_user(argp, &icount, sizeof(icount)))
+-				return -EFAULT;
+-			return 0;
+ 		case TIOCSERGWILD:
+ 		case TIOCSERSWILD:
+ 			/* "setserial -W" is called in Debian boot */
+@@ -1958,6 +1963,7 @@ static const struct tty_operations serial_ops = {
+ 	.wait_until_sent = rs_wait_until_sent,
+ 	.tiocmget = rs_tiocmget,
+ 	.tiocmset = rs_tiocmset,
++	.get_icount = rs_get_icount,
+ 	.proc_fops = &rs_proc_fops,
+ };
+ 
+diff --git a/drivers/char/cyclades.c b/drivers/char/cyclades.c
+index 27aad94..4f152c2 100644
+--- a/drivers/char/cyclades.c
++++ b/drivers/char/cyclades.c
+@@ -2790,29 +2790,6 @@ cy_ioctl(struct tty_struct *tty, struct file *file,
+ 		 * NB: both 1->0 and 0->1 transitions are counted except for
+ 		 *     RI where only 0->1 is counted.
+ 		 */
+-	case TIOCGICOUNT: {
+-		struct serial_icounter_struct sic = { };
+-
+-		spin_lock_irqsave(&info->card->card_lock, flags);
+-		cnow = info->icount;
+-		spin_unlock_irqrestore(&info->card->card_lock, flags);
+-
+-		sic.cts = cnow.cts;
+-		sic.dsr = cnow.dsr;
+-		sic.rng = cnow.rng;
+-		sic.dcd = cnow.dcd;
+-		sic.rx = cnow.rx;
+-		sic.tx = cnow.tx;
+-		sic.frame = cnow.frame;
+-		sic.overrun = cnow.overrun;
+-		sic.parity = cnow.parity;
+-		sic.brk = cnow.brk;
+-		sic.buf_overrun = cnow.buf_overrun;
+-
+-		if (copy_to_user(argp, &sic, sizeof(sic)))
+-			ret_val = -EFAULT;
+-		break;
+-	}
+ 	default:
+ 		ret_val = -ENOIOCTLCMD;
+ 	}
+@@ -2823,6 +2800,31 @@ cy_ioctl(struct tty_struct *tty, struct file *file,
+ 	return ret_val;
+ }				/* cy_ioctl */
+ 
++static int cy_get_icount(struct tty_struct *tty,
++				struct serial_icounter_struct *sic)
++{
++	struct cyclades_port *info = tty->driver_data;
++	struct cyclades_icount cnow;	/* Used to snapshot */
++	unsigned long flags;
++
++	spin_lock_irqsave(&info->card->card_lock, flags);
++	cnow = info->icount;
++	spin_unlock_irqrestore(&info->card->card_lock, flags);
++
++	sic->cts = cnow.cts;
++	sic->dsr = cnow.dsr;
++	sic->rng = cnow.rng;
++	sic->dcd = cnow.dcd;
++	sic->rx = cnow.rx;
++	sic->tx = cnow.tx;
++	sic->frame = cnow.frame;
++	sic->overrun = cnow.overrun;
++	sic->parity = cnow.parity;
++	sic->brk = cnow.brk;
++	sic->buf_overrun = cnow.buf_overrun;
++	return 0;
++}
++
+ /*
+  * This routine allows the tty driver to be notified when
+  * device's termios settings have changed.  Note that a
+@@ -4084,6 +4086,7 @@ static const struct tty_operations cy_ops = {
+ 	.wait_until_sent = cy_wait_until_sent,
+ 	.tiocmget = cy_tiocmget,
+ 	.tiocmset = cy_tiocmset,
++	.get_icount = cy_get_icount,
+ 	.proc_fops = &cyclades_proc_fops,
+ };
+ 
+diff --git a/drivers/char/ip2/ip2main.c b/drivers/char/ip2/ip2main.c
+index d4b71e8..438272c 100644
+--- a/drivers/char/ip2/ip2main.c
++++ b/drivers/char/ip2/ip2main.c
+@@ -183,6 +183,8 @@ static void ip2_hangup(PTTY);
+ static int  ip2_tiocmget(struct tty_struct *tty, struct file *file);
+ static int  ip2_tiocmset(struct tty_struct *tty, struct file *file,
+ 			 unsigned int set, unsigned int clear);
++static int ip2_get_icount(struct tty_struct *tty,
++		struct serial_icounter_struct *icount);
+ 
+ static void set_irq(int, int);
+ static void ip2_interrupt_bh(struct work_struct *work);
+@@ -454,6 +456,7 @@ static const struct tty_operations ip2_ops = {
+ 	.hangup          = ip2_hangup,
+ 	.tiocmget	 = ip2_tiocmget,
+ 	.tiocmset	 = ip2_tiocmset,
++	.get_icount	 = ip2_get_icount,
+ 	.proc_fops	 = &ip2_proc_fops,
+ };
+ 
+@@ -2128,7 +2131,6 @@ ip2_ioctl ( PTTY tty, struct file *pFile, UINT cmd, ULONG arg )
+ 	i2ChanStrPtr pCh = DevTable[tty->index];
+ 	i2eBordStrPtr pB;
+ 	struct async_icount cprev, cnow;	/* kernel counter temps */
+-	struct serial_icounter_struct __user *p_cuser;
+ 	int rc = 0;
+ 	unsigned long flags;
+ 	void __user *argp = (void __user *)arg;
+@@ -2297,34 +2299,6 @@ ip2_ioctl ( PTTY tty, struct file *pFile, UINT cmd, ULONG arg )
+ 		break;
+ 
+ 	/*
+-	 * Get counter of input serial line interrupts (DCD,RI,DSR,CTS)
+-	 * Return: write counters to the user passed counter struct
+-	 * NB: both 1->0 and 0->1 transitions are counted except for RI where
+-	 * only 0->1 is counted. The controller is quite capable of counting
+-	 * both, but this done to preserve compatibility with the standard
+-	 * serial driver.
+-	 */
+-	case TIOCGICOUNT:
+-		ip2trace (CHANN, ITRC_IOCTL, 11, 1, rc );
+-
+-		write_lock_irqsave(&pB->read_fifo_spinlock, flags);
+-		cnow = pCh->icount;
+-		write_unlock_irqrestore(&pB->read_fifo_spinlock, flags);
+-		p_cuser = argp;
+-		rc = put_user(cnow.cts, &p_cuser->cts);
+-		rc = put_user(cnow.dsr, &p_cuser->dsr);
+-		rc = put_user(cnow.rng, &p_cuser->rng);
+-		rc = put_user(cnow.dcd, &p_cuser->dcd);
+-		rc = put_user(cnow.rx, &p_cuser->rx);
+-		rc = put_user(cnow.tx, &p_cuser->tx);
+-		rc = put_user(cnow.frame, &p_cuser->frame);
+-		rc = put_user(cnow.overrun, &p_cuser->overrun);
+-		rc = put_user(cnow.parity, &p_cuser->parity);
+-		rc = put_user(cnow.brk, &p_cuser->brk);
+-		rc = put_user(cnow.buf_overrun, &p_cuser->buf_overrun);
+-		break;
+-
+-	/*
+ 	 * The rest are not supported by this driver. By returning -ENOIOCTLCMD they
+ 	 * will be passed to the line discipline for it to handle.
+ 	 */
+@@ -2348,6 +2322,46 @@ ip2_ioctl ( PTTY tty, struct file *pFile, UINT cmd, ULONG arg )
+ 	return rc;
+ }
+ 
++static int ip2_get_icount(struct tty_struct *tty,
++		struct serial_icounter_struct *icount)
++{
++	i2ChanStrPtr pCh = DevTable[tty->index];
++	i2eBordStrPtr pB;
++	struct async_icount cnow;	/* kernel counter temp */
++	unsigned long flags;
++
++	if ( pCh == NULL )
++		return -ENODEV;
++
++	pB = pCh->pMyBord;
++
++	/*
++	 * Get counter of input serial line interrupts (DCD,RI,DSR,CTS)
++	 * Return: write counters to the user passed counter struct
++	 * NB: both 1->0 and 0->1 transitions are counted except for RI where
++	 * only 0->1 is counted. The controller is quite capable of counting
++	 * both, but this done to preserve compatibility with the standard
++	 * serial driver.
++	 */
++
++	write_lock_irqsave(&pB->read_fifo_spinlock, flags);
++	cnow = pCh->icount;
++	write_unlock_irqrestore(&pB->read_fifo_spinlock, flags);
++
++	icount->cts = cnow.cts;
++	icount->dsr = cnow.dsr;
++	icount->rng = cnow.rng;
++	icount->dcd = cnow.dcd;
++	icount->rx = cnow.rx;
++	icount->tx = cnow.tx;
++	icount->frame = cnow.frame;
++	icount->overrun = cnow.overrun;
++	icount->parity = cnow.parity;
++	icount->brk = cnow.brk;
++	icount->buf_overrun = cnow.buf_overrun;
++	return 0;
++}
++
+ /******************************************************************************/
+ /* Function:   GetSerialInfo()                                                */
+ /* Parameters: Pointer to channel structure                                   */
+diff --git a/drivers/char/mxser.c b/drivers/char/mxser.c
+index 3fc89da..b3704ae7 100644
+--- a/drivers/char/mxser.c
++++ b/drivers/char/mxser.c
+@@ -1700,7 +1700,7 @@ static int mxser_ioctl(struct tty_struct *tty, struct file *file,
+ 		return 0;
+ 	}
+ 
+-	if (cmd != TIOCGSERIAL && cmd != TIOCMIWAIT && cmd != TIOCGICOUNT &&
++	if (cmd != TIOCGSERIAL && cmd != TIOCMIWAIT &&
+ 			test_bit(TTY_IO_ERROR, &tty->flags))
+ 		return -EIO;
+ 
+@@ -1730,32 +1730,6 @@ static int mxser_ioctl(struct tty_struct *tty, struct file *file,
+ 
+ 		return wait_event_interruptible(info->port.delta_msr_wait,
+ 				mxser_cflags_changed(info, arg, &cnow));
+-	/*
+-	 * Get counter of input serial line interrupts (DCD,RI,DSR,CTS)
+-	 * Return: write counters to the user passed counter struct
+-	 * NB: both 1->0 and 0->1 transitions are counted except for
+-	 *     RI where only 0->1 is counted.
+-	 */
+-	case TIOCGICOUNT: {
+-		struct serial_icounter_struct icnt = { 0 };
+-		spin_lock_irqsave(&info->slock, flags);
+-		cnow = info->icount;
+-		spin_unlock_irqrestore(&info->slock, flags);
+-
+-		icnt.frame = cnow.frame;
+-		icnt.brk = cnow.brk;
+-		icnt.overrun = cnow.overrun;
+-		icnt.buf_overrun = cnow.buf_overrun;
+-		icnt.parity = cnow.parity;
+-		icnt.rx = cnow.rx;
+-		icnt.tx = cnow.tx;
+-		icnt.cts = cnow.cts;
+-		icnt.dsr = cnow.dsr;
+-		icnt.rng = cnow.rng;
+-		icnt.dcd = cnow.dcd;
+-
+-		return copy_to_user(argp, &icnt, sizeof(icnt)) ? -EFAULT : 0;
+-	}
+ 	case MOXA_HighSpeedOn:
+ 		return put_user(info->baud_base != 115200 ? 1 : 0, (int __user *)argp);
+ 	case MOXA_SDS_RSTICOUNTER:
+@@ -1828,6 +1802,39 @@ static int mxser_ioctl(struct tty_struct *tty, struct file *file,
+ 	return 0;
+ }
+ 
++	/*
++	 * Get counter of input serial line interrupts (DCD,RI,DSR,CTS)
++	 * Return: write counters to the user passed counter struct
++	 * NB: both 1->0 and 0->1 transitions are counted except for
++	 *     RI where only 0->1 is counted.
++	 */
++
++static int mxser_get_icount(struct tty_struct *tty,
++		struct serial_icounter_struct *icount)
++
++{
++	struct mxser_port *info = tty->driver_data;
++	struct async_icount cnow;
++	unsigned long flags;
++
++	spin_lock_irqsave(&info->slock, flags);
++	cnow = info->icount;
++	spin_unlock_irqrestore(&info->slock, flags);
++
++	icount->frame = cnow.frame;
++	icount->brk = cnow.brk;
++	icount->overrun = cnow.overrun;
++	icount->buf_overrun = cnow.buf_overrun;
++	icount->parity = cnow.parity;
++	icount->rx = cnow.rx;
++	icount->tx = cnow.tx;
++	icount->cts = cnow.cts;
++	icount->dsr = cnow.dsr;
++	icount->rng = cnow.rng;
++	icount->dcd = cnow.dcd;
++	return 0;
++}
++
+ static void mxser_stoprx(struct tty_struct *tty)
+ {
+ 	struct mxser_port *info = tty->driver_data;
+@@ -2326,6 +2333,7 @@ static const struct tty_operations mxser_ops = {
+ 	.wait_until_sent = mxser_wait_until_sent,
+ 	.tiocmget = mxser_tiocmget,
+ 	.tiocmset = mxser_tiocmset,
++	.get_icount = mxser_get_icount,
+ };
+ 
+ struct tty_port_operations mxser_port_ops = {
+diff --git a/drivers/char/nozomi.c b/drivers/char/nozomi.c
+index 817169c..dd3f9b1 100644
+--- a/drivers/char/nozomi.c
++++ b/drivers/char/nozomi.c
+@@ -1804,24 +1804,24 @@ static int ntty_cflags_changed(struct port *port, unsigned long flags,
+ 	return ret;
+ }
+ 
+-static int ntty_ioctl_tiocgicount(struct port *port, void __user *argp)
++static int ntty_tiocgicount(struct tty_struct *tty,
++				struct serial_icounter_struct *icount)
+ {
++	struct port *port = tty->driver_data;
+ 	const struct async_icount cnow = port->tty_icount;
+-	struct serial_icounter_struct icount;
+-
+-	icount.cts = cnow.cts;
+-	icount.dsr = cnow.dsr;
+-	icount.rng = cnow.rng;
+-	icount.dcd = cnow.dcd;
+-	icount.rx = cnow.rx;
+-	icount.tx = cnow.tx;
+-	icount.frame = cnow.frame;
+-	icount.overrun = cnow.overrun;
+-	icount.parity = cnow.parity;
+-	icount.brk = cnow.brk;
+-	icount.buf_overrun = cnow.buf_overrun;
+-
+-	return copy_to_user(argp, &icount, sizeof(icount)) ? -EFAULT : 0;
++
++	icount->cts = cnow.cts;
++	icount->dsr = cnow.dsr;
++	icount->rng = cnow.rng;
++	icount->dcd = cnow.dcd;
++	icount->rx = cnow.rx;
++	icount->tx = cnow.tx;
++	icount->frame = cnow.frame;
++	icount->overrun = cnow.overrun;
++	icount->parity = cnow.parity;
++	icount->brk = cnow.brk;
++	icount->buf_overrun = cnow.buf_overrun;
++	return 0;
+ }
+ 
+ static int ntty_ioctl(struct tty_struct *tty, struct file *file,
+@@ -1840,9 +1840,7 @@ static int ntty_ioctl(struct tty_struct *tty, struct file *file,
+ 		rval = wait_event_interruptible(port->tty_wait,
+ 				ntty_cflags_changed(port, arg, &cprev));
+ 		break;
+-	} case TIOCGICOUNT:
+-		rval = ntty_ioctl_tiocgicount(port, argp);
+-		break;
++	}
+ 	default:
+ 		DBG1("ERR: 0x%08X, %d", cmd, cmd);
+ 		break;
+@@ -1922,6 +1920,7 @@ static const struct tty_operations tty_ops = {
+ 	.chars_in_buffer = ntty_chars_in_buffer,
+ 	.tiocmget = ntty_tiocmget,
+ 	.tiocmset = ntty_tiocmset,
++	.get_icount = ntty_tiocgicount,
+ 	.install = ntty_install,
+ 	.cleanup = ntty_cleanup,
+ };
+diff --git a/drivers/char/pcmcia/synclink_cs.c b/drivers/char/pcmcia/synclink_cs.c
+index 45f9fad..7404809 100644
+--- a/drivers/char/pcmcia/synclink_cs.c
++++ b/drivers/char/pcmcia/synclink_cs.c
+@@ -2215,6 +2215,32 @@ static int mgslpc_break(struct tty_struct *tty, int break_state)
+ 	return 0;
+ }
+ 
++static int mgslpc_get_icount(struct tty_struct *tty,
++				struct serial_icounter_struct *icount)
++{
++	MGSLPC_INFO * info = (MGSLPC_INFO *)tty->driver_data;
++	struct mgsl_icount cnow;	/* kernel counter temps */
++	unsigned long flags;
++
++	spin_lock_irqsave(&info->lock,flags);
++	cnow = info->icount;
++	spin_unlock_irqrestore(&info->lock,flags);
++
++	icount->cts = cnow.cts;
++	icount->dsr = cnow.dsr;
++	icount->rng = cnow.rng;
++	icount->dcd = cnow.dcd;
++	icount->rx = cnow.rx;
++	icount->tx = cnow.tx;
++	icount->frame = cnow.frame;
++	icount->overrun = cnow.overrun;
++	icount->parity = cnow.parity;
++	icount->brk = cnow.brk;
++	icount->buf_overrun = cnow.buf_overrun;
++
++	return 0;
++}
++
+ /* Service an IOCTL request
+  *
+  * Arguments:
+@@ -2230,11 +2256,7 @@ static int mgslpc_ioctl(struct tty_struct *tty, struct file * file,
+ 			unsigned int cmd, unsigned long arg)
+ {
+ 	MGSLPC_INFO * info = (MGSLPC_INFO *)tty->driver_data;
+-	int error;
+-	struct mgsl_icount cnow;	/* kernel counter temps */
+-	struct serial_icounter_struct __user *p_cuser;	/* user space */
+ 	void __user *argp = (void __user *)arg;
+-	unsigned long flags;
+ 
+ 	if (debug_level >= DEBUG_LEVEL_INFO)
+ 		printk("%s(%d):mgslpc_ioctl %s cmd=%08X\n", __FILE__,__LINE__,
+@@ -2244,7 +2266,7 @@ static int mgslpc_ioctl(struct tty_struct *tty, struct file * file,
+ 		return -ENODEV;
+ 
+ 	if ((cmd != TIOCGSERIAL) && (cmd != TIOCSSERIAL) &&
+-	    (cmd != TIOCMIWAIT) && (cmd != TIOCGICOUNT)) {
++	    (cmd != TIOCMIWAIT)) {
+ 		if (tty->flags & (1 << TTY_IO_ERROR))
+ 		    return -EIO;
+ 	}
+@@ -2274,34 +2296,6 @@ static int mgslpc_ioctl(struct tty_struct *tty, struct file * file,
+ 		return wait_events(info, argp);
+ 	case TIOCMIWAIT:
+ 		return modem_input_wait(info,(int)arg);
+-	case TIOCGICOUNT:
+-		spin_lock_irqsave(&info->lock,flags);
+-		cnow = info->icount;
+-		spin_unlock_irqrestore(&info->lock,flags);
+-		p_cuser = argp;
+-		PUT_USER(error,cnow.cts, &p_cuser->cts);
+-		if (error) return error;
+-		PUT_USER(error,cnow.dsr, &p_cuser->dsr);
+-		if (error) return error;
+-		PUT_USER(error,cnow.rng, &p_cuser->rng);
+-		if (error) return error;
+-		PUT_USER(error,cnow.dcd, &p_cuser->dcd);
+-		if (error) return error;
+-		PUT_USER(error,cnow.rx, &p_cuser->rx);
+-		if (error) return error;
+-		PUT_USER(error,cnow.tx, &p_cuser->tx);
+-		if (error) return error;
+-		PUT_USER(error,cnow.frame, &p_cuser->frame);
+-		if (error) return error;
+-		PUT_USER(error,cnow.overrun, &p_cuser->overrun);
+-		if (error) return error;
+-		PUT_USER(error,cnow.parity, &p_cuser->parity);
+-		if (error) return error;
+-		PUT_USER(error,cnow.brk, &p_cuser->brk);
+-		if (error) return error;
+-		PUT_USER(error,cnow.buf_overrun, &p_cuser->buf_overrun);
+-		if (error) return error;
+-		return 0;
+ 	default:
+ 		return -ENOIOCTLCMD;
+ 	}
+diff --git a/drivers/char/synclink.c b/drivers/char/synclink.c
+index a2a5800..3a6824f 100644
+--- a/drivers/char/synclink.c
++++ b/drivers/char/synclink.c
+@@ -2925,6 +2925,38 @@ static int mgsl_break(struct tty_struct *tty, int break_state)
+ 	
+ }	/* end of mgsl_break() */
+ 
++/*
++ * Get counter of input serial line interrupts (DCD,RI,DSR,CTS)
++ * Return: write counters to the user passed counter struct
++ * NB: both 1->0 and 0->1 transitions are counted except for
++ *     RI where only 0->1 is counted.
++ */
++static int msgl_get_icount(struct tty_struct *tty,
++				struct serial_icounter_struct *icount)
++
++{
++	struct mgsl_struct * info = tty->driver_data;
++	struct mgsl_icount cnow;	/* kernel counter temps */
++	unsigned long flags;
++
++	spin_lock_irqsave(&info->irq_spinlock,flags);
++	cnow = info->icount;
++	spin_unlock_irqrestore(&info->irq_spinlock,flags);
++
++	icount->cts = cnow.cts;
++	icount->dsr = cnow.dsr;
++	icount->rng = cnow.rng;
++	icount->dcd = cnow.dcd;
++	icount->rx = cnow.rx;
++	icount->tx = cnow.tx;
++	icount->frame = cnow.frame;
++	icount->overrun = cnow.overrun;
++	icount->parity = cnow.parity;
++	icount->brk = cnow.brk;
++	icount->buf_overrun = cnow.buf_overrun;
++	return 0;
++}
++
+ /* mgsl_ioctl()	Service an IOCTL request
+  * 	
+  * Arguments:
+@@ -2949,7 +2981,7 @@ static int mgsl_ioctl(struct tty_struct *tty, struct file * file,
+ 		return -ENODEV;
+ 
+ 	if ((cmd != TIOCGSERIAL) && (cmd != TIOCSSERIAL) &&
+-	    (cmd != TIOCMIWAIT) && (cmd != TIOCGICOUNT)) {
++	    (cmd != TIOCMIWAIT)) {
+ 		if (tty->flags & (1 << TTY_IO_ERROR))
+ 		    return -EIO;
+ 	}
+@@ -2959,11 +2991,7 @@ static int mgsl_ioctl(struct tty_struct *tty, struct file * file,
+ 
+ static int mgsl_ioctl_common(struct mgsl_struct *info, unsigned int cmd, unsigned long arg)
+ {
+-	int error;
+-	struct mgsl_icount cnow;	/* kernel counter temps */
+ 	void __user *argp = (void __user *)arg;
+-	struct serial_icounter_struct __user *p_cuser;	/* user space */
+-	unsigned long flags;
+ 	
+ 	switch (cmd) {
+ 		case MGSL_IOCGPARAMS:
+@@ -2992,40 +3020,6 @@ static int mgsl_ioctl_common(struct mgsl_struct *info, unsigned int cmd, unsigne
+ 		case TIOCMIWAIT:
+ 			return modem_input_wait(info,(int)arg);
+ 
+-		/* 
+-		 * Get counter of input serial line interrupts (DCD,RI,DSR,CTS)
+-		 * Return: write counters to the user passed counter struct
+-		 * NB: both 1->0 and 0->1 transitions are counted except for
+-		 *     RI where only 0->1 is counted.
+-		 */
+-		case TIOCGICOUNT:
+-			spin_lock_irqsave(&info->irq_spinlock,flags);
+-			cnow = info->icount;
+-			spin_unlock_irqrestore(&info->irq_spinlock,flags);
+-			p_cuser = argp;
+-			PUT_USER(error,cnow.cts, &p_cuser->cts);
+-			if (error) return error;
+-			PUT_USER(error,cnow.dsr, &p_cuser->dsr);
+-			if (error) return error;
+-			PUT_USER(error,cnow.rng, &p_cuser->rng);
+-			if (error) return error;
+-			PUT_USER(error,cnow.dcd, &p_cuser->dcd);
+-			if (error) return error;
+-			PUT_USER(error,cnow.rx, &p_cuser->rx);
+-			if (error) return error;
+-			PUT_USER(error,cnow.tx, &p_cuser->tx);
+-			if (error) return error;
+-			PUT_USER(error,cnow.frame, &p_cuser->frame);
+-			if (error) return error;
+-			PUT_USER(error,cnow.overrun, &p_cuser->overrun);
+-			if (error) return error;
+-			PUT_USER(error,cnow.parity, &p_cuser->parity);
+-			if (error) return error;
+-			PUT_USER(error,cnow.brk, &p_cuser->brk);
+-			if (error) return error;
+-			PUT_USER(error,cnow.buf_overrun, &p_cuser->buf_overrun);
+-			if (error) return error;
+-			return 0;
+ 		default:
+ 			return -ENOIOCTLCMD;
+ 	}
+@@ -4328,6 +4322,7 @@ static const struct tty_operations mgsl_ops = {
+ 	.hangup = mgsl_hangup,
+ 	.tiocmget = tiocmget,
+ 	.tiocmset = tiocmset,
++	.get_icount = msgl_get_icount,
+ 	.proc_fops = &mgsl_proc_fops,
+ };
+ 
+diff --git a/drivers/char/synclink_gt.c b/drivers/char/synclink_gt.c
+index e63b830..1746d91 100644
+--- a/drivers/char/synclink_gt.c
++++ b/drivers/char/synclink_gt.c
+@@ -1032,9 +1032,6 @@ static int ioctl(struct tty_struct *tty, struct file *file,
+ 		 unsigned int cmd, unsigned long arg)
+ {
+ 	struct slgt_info *info = tty->driver_data;
+-	struct mgsl_icount cnow;	/* kernel counter temps */
+-	struct serial_icounter_struct __user *p_cuser;	/* user space */
+-	unsigned long flags;
+ 	void __user *argp = (void __user *)arg;
+ 	int ret;
+ 
+@@ -1043,7 +1040,7 @@ static int ioctl(struct tty_struct *tty, struct file *file,
+ 	DBGINFO(("%s ioctl() cmd=%08X\n", info->device_name, cmd));
+ 
+ 	if ((cmd != TIOCGSERIAL) && (cmd != TIOCSSERIAL) &&
+-	    (cmd != TIOCMIWAIT) && (cmd != TIOCGICOUNT)) {
++	    (cmd != TIOCMIWAIT)) {
+ 		if (tty->flags & (1 << TTY_IO_ERROR))
+ 		    return -EIO;
+ 	}
+@@ -1053,24 +1050,6 @@ static int ioctl(struct tty_struct *tty, struct file *file,
+ 		return wait_mgsl_event(info, argp);
+ 	case TIOCMIWAIT:
+ 		return modem_input_wait(info,(int)arg);
+-	case TIOCGICOUNT:
+-		spin_lock_irqsave(&info->lock,flags);
+-		cnow = info->icount;
+-		spin_unlock_irqrestore(&info->lock,flags);
+-		p_cuser = argp;
+-		if (put_user(cnow.cts, &p_cuser->cts) ||
+-		    put_user(cnow.dsr, &p_cuser->dsr) ||
+-		    put_user(cnow.rng, &p_cuser->rng) ||
+-		    put_user(cnow.dcd, &p_cuser->dcd) ||
+-		    put_user(cnow.rx, &p_cuser->rx) ||
+-		    put_user(cnow.tx, &p_cuser->tx) ||
+-		    put_user(cnow.frame, &p_cuser->frame) ||
+-		    put_user(cnow.overrun, &p_cuser->overrun) ||
+-		    put_user(cnow.parity, &p_cuser->parity) ||
+-		    put_user(cnow.brk, &p_cuser->brk) ||
+-		    put_user(cnow.buf_overrun, &p_cuser->buf_overrun))
+-			return -EFAULT;
+-		return 0;
+ 	case MGSL_IOCSGPIO:
+ 		return set_gpio(info, argp);
+ 	case MGSL_IOCGGPIO:
+@@ -1117,6 +1096,33 @@ static int ioctl(struct tty_struct *tty, struct file *file,
+ 	return ret;
+ }
+ 
++static int get_icount(struct tty_struct *tty,
++				struct serial_icounter_struct *icount)
++
++{
++	struct slgt_info *info = tty->driver_data;
++	struct mgsl_icount cnow;	/* kernel counter temps */
++	unsigned long flags;
++
++	spin_lock_irqsave(&info->lock,flags);
++	cnow = info->icount;
++	spin_unlock_irqrestore(&info->lock,flags);
++
++	icount->cts = cnow.cts;
++	icount->dsr = cnow.dsr;
++	icount->rng = cnow.rng;
++	icount->dcd = cnow.dcd;
++	icount->rx = cnow.rx;
++	icount->tx = cnow.tx;
++	icount->frame = cnow.frame;
++	icount->overrun = cnow.overrun;
++	icount->parity = cnow.parity;
++	icount->brk = cnow.brk;
++	icount->buf_overrun = cnow.buf_overrun;
++
++	return 0;
++}
++
+ /*
+  * support for 32 bit ioctl calls on 64 bit systems
+  */
+@@ -1206,10 +1212,6 @@ static long slgt_compat_ioctl(struct tty_struct *tty, struct file *file,
+ 	case MGSL_IOCSGPIO:
+ 	case MGSL_IOCGGPIO:
+ 	case MGSL_IOCWAITGPIO:
+-	case TIOCGICOUNT:
+-		rc = ioctl(tty, file, cmd, (unsigned long)(compat_ptr(arg)));
+-		break;
+-
+ 	case MGSL_IOCSTXIDLE:
+ 	case MGSL_IOCTXENABLE:
+ 	case MGSL_IOCRXENABLE:
+@@ -3642,6 +3644,7 @@ static const struct tty_operations ops = {
+ 	.hangup = hangup,
+ 	.tiocmget = tiocmget,
+ 	.tiocmset = tiocmset,
++	.get_icount = get_icount,
+ 	.proc_fops = &synclink_gt_proc_fops,
+ };
+ 
+diff --git a/drivers/char/synclinkmp.c b/drivers/char/synclinkmp.c
+index e56caf7..2f9eb4b 100644
+--- a/drivers/char/synclinkmp.c
++++ b/drivers/char/synclinkmp.c
+@@ -1258,10 +1258,6 @@ static int ioctl(struct tty_struct *tty, struct file *file,
+ 		 unsigned int cmd, unsigned long arg)
+ {
+ 	SLMP_INFO *info = tty->driver_data;
+-	int error;
+-	struct mgsl_icount cnow;	/* kernel counter temps */
+-	struct serial_icounter_struct __user *p_cuser;	/* user space */
+-	unsigned long flags;
+ 	void __user *argp = (void __user *)arg;
+ 
+ 	if (debug_level >= DEBUG_LEVEL_INFO)
+@@ -1272,7 +1268,7 @@ static int ioctl(struct tty_struct *tty, struct file *file,
+ 		return -ENODEV;
+ 
+ 	if ((cmd != TIOCGSERIAL) && (cmd != TIOCSSERIAL) &&
+-	    (cmd != TIOCMIWAIT) && (cmd != TIOCGICOUNT)) {
++	    (cmd != TIOCMIWAIT)) {
+ 		if (tty->flags & (1 << TTY_IO_ERROR))
+ 		    return -EIO;
+ 	}
+@@ -1310,40 +1306,38 @@ static int ioctl(struct tty_struct *tty, struct file *file,
+ 		 * NB: both 1->0 and 0->1 transitions are counted except for
+ 		 *     RI where only 0->1 is counted.
+ 		 */
+-	case TIOCGICOUNT:
+-		spin_lock_irqsave(&info->lock,flags);
+-		cnow = info->icount;
+-		spin_unlock_irqrestore(&info->lock,flags);
+-		p_cuser = argp;
+-		PUT_USER(error,cnow.cts, &p_cuser->cts);
+-		if (error) return error;
+-		PUT_USER(error,cnow.dsr, &p_cuser->dsr);
+-		if (error) return error;
+-		PUT_USER(error,cnow.rng, &p_cuser->rng);
+-		if (error) return error;
+-		PUT_USER(error,cnow.dcd, &p_cuser->dcd);
+-		if (error) return error;
+-		PUT_USER(error,cnow.rx, &p_cuser->rx);
+-		if (error) return error;
+-		PUT_USER(error,cnow.tx, &p_cuser->tx);
+-		if (error) return error;
+-		PUT_USER(error,cnow.frame, &p_cuser->frame);
+-		if (error) return error;
+-		PUT_USER(error,cnow.overrun, &p_cuser->overrun);
+-		if (error) return error;
+-		PUT_USER(error,cnow.parity, &p_cuser->parity);
+-		if (error) return error;
+-		PUT_USER(error,cnow.brk, &p_cuser->brk);
+-		if (error) return error;
+-		PUT_USER(error,cnow.buf_overrun, &p_cuser->buf_overrun);
+-		if (error) return error;
+-		return 0;
+ 	default:
+ 		return -ENOIOCTLCMD;
+ 	}
+ 	return 0;
+ }
+ 
++static int get_icount(struct tty_struct *tty,
++				struct serial_icounter_struct *icount)
++{
++	SLMP_INFO *info = tty->driver_data;
++	struct mgsl_icount cnow;	/* kernel counter temps */
++	unsigned long flags;
++
++	spin_lock_irqsave(&info->lock,flags);
++	cnow = info->icount;
++	spin_unlock_irqrestore(&info->lock,flags);
++
++	icount->cts = cnow.cts;
++	icount->dsr = cnow.dsr;
++	icount->rng = cnow.rng;
++	icount->dcd = cnow.dcd;
++	icount->rx = cnow.rx;
++	icount->tx = cnow.tx;
++	icount->frame = cnow.frame;
++	icount->overrun = cnow.overrun;
++	icount->parity = cnow.parity;
++	icount->brk = cnow.brk;
++	icount->buf_overrun = cnow.buf_overrun;
++
++	return 0;
++}
++
+ /*
+  * /proc fs routines....
+  */
+@@ -3909,6 +3903,7 @@ static const struct tty_operations ops = {
+ 	.hangup = hangup,
+ 	.tiocmget = tiocmget,
+ 	.tiocmset = tiocmset,
++	.get_icount = get_icount,
+ 	.proc_fops = &synclinkmp_proc_fops,
+ };
+ 
+diff --git a/drivers/serial/68360serial.c b/drivers/serial/68360serial.c
+index 0dff3bb..88b1335 100644
+--- a/drivers/serial/68360serial.c
++++ b/drivers/serial/68360serial.c
+@@ -1381,6 +1381,30 @@ static void send_break(ser_info_t *info, unsigned int duration)
+ }
+ 
+ 
++/*
++ * Get counter of input serial line interrupts (DCD,RI,DSR,CTS)
++ * Return: write counters to the user passed counter struct
++ * NB: both 1->0 and 0->1 transitions are counted except for
++ *     RI where only 0->1 is counted.
++ */
++static int rs_360_get_icount(struct tty_struct *tty,
++				struct serial_icounter_struct *icount)
++{
++	ser_info_t *info = (ser_info_t *)tty->driver_data;
++	struct async_icount cnow;
++
++	local_irq_disable();
++	cnow = info->state->icount;
++	local_irq_enable();
++
++	icount->cts = cnow.cts;
++	icount->dsr = cnow.dsr;
++	icount->rng = cnow.rng;
++	icount->dcd = cnow.dcd;
++
++	return 0;
++}
++
+ static int rs_360_ioctl(struct tty_struct *tty, struct file * file,
+ 		    unsigned int cmd, unsigned long arg)
+ {
+@@ -1394,7 +1418,7 @@ static int rs_360_ioctl(struct tty_struct *tty, struct file * file,
+ 	if (serial_paranoia_check(info, tty->name, "rs_ioctl"))
+ 		return -ENODEV;
+ 
+-	if ((cmd != TIOCMIWAIT) && (cmd != TIOCGICOUNT)) {
++	if (cmd != TIOCMIWAIT) {
+ 		if (tty->flags & (1 << TTY_IO_ERROR))
+ 		    return -EIO;
+ 	}
+@@ -1477,31 +1501,6 @@ static int rs_360_ioctl(struct tty_struct *tty, struct file * file,
+ 			return 0;
+ #endif
+ 
+-		/* 
+-		 * Get counter of input serial line interrupts (DCD,RI,DSR,CTS)
+-		 * Return: write counters to the user passed counter struct
+-		 * NB: both 1->0 and 0->1 transitions are counted except for
+-		 *     RI where only 0->1 is counted.
+-		 */
+-		case TIOCGICOUNT:
+-			local_irq_disable();
+-			cnow = info->state->icount;
+-			local_irq_enable();
+-			p_cuser = (struct serial_icounter_struct *) arg;
+-/* 			error = put_user(cnow.cts, &p_cuser->cts); */
+-/* 			if (error) return error; */
+-/* 			error = put_user(cnow.dsr, &p_cuser->dsr); */
+-/* 			if (error) return error; */
+-/* 			error = put_user(cnow.rng, &p_cuser->rng); */
+-/* 			if (error) return error; */
+-/* 			error = put_user(cnow.dcd, &p_cuser->dcd); */
+-/* 			if (error) return error; */
+-
+-			put_user(cnow.cts, &p_cuser->cts);
+-			put_user(cnow.dsr, &p_cuser->dsr);
+-			put_user(cnow.rng, &p_cuser->rng);
+-			put_user(cnow.dcd, &p_cuser->dcd);
+-			return 0;
+ 
+ 		default:
+ 			return -ENOIOCTLCMD;
+diff --git a/net/bluetooth/rfcomm/tty.c b/net/bluetooth/rfcomm/tty.c
+index befc3a5..84c2a4d 100644
+--- a/net/bluetooth/rfcomm/tty.c
++++ b/net/bluetooth/rfcomm/tty.c
+@@ -844,10 +844,6 @@ static int rfcomm_tty_ioctl(struct tty_struct *tty, struct file *filp, unsigned
+ 		BT_DBG("TIOCMIWAIT");
+ 		break;
+ 
+-	case TIOCGICOUNT:
+-		BT_DBG("TIOCGICOUNT");
+-		break;
+-
+ 	case TIOCGSERIAL:
+ 		BT_ERR("TIOCGSERIAL is not supported");
+ 		return -ENOIOCTLCMD;
+-- 
+1.7.3.2
+
diff --git a/tty-make-tiocgicount-a-handler.patch b/tty-make-tiocgicount-a-handler.patch
new file mode 100644
index 000000000..fb88aea7d
--- /dev/null
+++ b/tty-make-tiocgicount-a-handler.patch
@@ -0,0 +1,218 @@
+From e2047e3ffc61042a3228cd4228cd95ab0dad1d72 Mon Sep 17 00:00:00 2001
+From: Alan Cox <alan@linux.intel.com>
+Date: Thu, 16 Sep 2010 18:21:24 +0100
+Subject: [PATCH 1/2] tty: Make tiocgicount a handler
+
+Dan Rosenberg noted that various drivers return the struct with uncleared
+fields. Instead of spending forever trying to stomp all the drivers that
+get it wrong (and every new driver) do the job in one place.
+
+This first patch adds the needed operations and hooks them up, including
+the needed USB midlayer and serial core plumbing.
+
+Signed-off-by: Alan Cox <alan@linux.intel.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+---
+ drivers/char/tty_io.c           |   21 +++++++++++++++++++++
+ drivers/serial/serial_core.c    |   37 +++++++++++++++++--------------------
+ drivers/usb/serial/usb-serial.c |   13 +++++++++++++
+ include/linux/tty_driver.h      |    9 +++++++++
+ include/linux/usb/serial.h      |    2 ++
+ 5 files changed, 62 insertions(+), 20 deletions(-)
+
+diff --git a/drivers/char/tty_io.c b/drivers/char/tty_io.c
+index 613c852..68d7713 100644
+--- a/drivers/char/tty_io.c
++++ b/drivers/char/tty_io.c
+@@ -96,6 +96,7 @@
+ #include <linux/bitops.h>
+ #include <linux/delay.h>
+ #include <linux/seq_file.h>
++#include <linux/serial.h>
+ 
+ #include <linux/uaccess.h>
+ #include <asm/system.h>
+@@ -2502,6 +2503,20 @@ static int tty_tiocmset(struct tty_struct *tty, struct file *file, unsigned int
+ 	return tty->ops->tiocmset(tty, file, set, clear);
+ }
+ 
++static int tty_tiocgicount(struct tty_struct *tty, void __user *arg)
++{
++	int retval = -EINVAL;
++	struct serial_icounter_struct icount;
++	memset(&icount, 0, sizeof(icount));
++	if (tty->ops->get_icount)
++		retval = tty->ops->get_icount(tty, &icount);
++	if (retval != 0)
++		return retval;
++	if (copy_to_user(arg, &icount, sizeof(icount)))
++		return -EFAULT;
++	return 0;
++}
++
+ struct tty_struct *tty_pair_get_tty(struct tty_struct *tty)
+ {
+ 	if (tty->driver->type == TTY_DRIVER_TYPE_PTY &&
+@@ -2622,6 +2637,12 @@ long tty_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
+ 	case TIOCMBIC:
+ 	case TIOCMBIS:
+ 		return tty_tiocmset(tty, file, cmd, p);
++	case TIOCGICOUNT:
++		retval = tty_tiocgicount(tty, p);
++		/* For the moment allow fall through to the old method */
++        	if (retval != -EINVAL)
++			return retval;
++		break;
+ 	case TCFLSH:
+ 		switch (arg) {
+ 		case TCIFLUSH:
+diff --git a/drivers/serial/serial_core.c b/drivers/serial/serial_core.c
+index cd85112..2ee0aec 100644
+--- a/drivers/serial/serial_core.c
++++ b/drivers/serial/serial_core.c
+@@ -1074,10 +1074,10 @@ uart_wait_modem_status(struct uart_state *state, unsigned long arg)
+  * NB: both 1->0 and 0->1 transitions are counted except for
+  *     RI where only 0->1 is counted.
+  */
+-static int uart_get_count(struct uart_state *state,
+-			  struct serial_icounter_struct __user *icnt)
++static int uart_get_icount(struct tty_struct *tty,
++			  struct serial_icounter_struct *icount)
+ {
+-	struct serial_icounter_struct icount;
++	struct uart_state *state = tty->driver_data;
+ 	struct uart_icount cnow;
+ 	struct uart_port *uport = state->uart_port;
+ 
+@@ -1085,19 +1085,19 @@ static int uart_get_count(struct uart_state *state,
+ 	memcpy(&cnow, &uport->icount, sizeof(struct uart_icount));
+ 	spin_unlock_irq(&uport->lock);
+ 
+-	icount.cts         = cnow.cts;
+-	icount.dsr         = cnow.dsr;
+-	icount.rng         = cnow.rng;
+-	icount.dcd         = cnow.dcd;
+-	icount.rx          = cnow.rx;
+-	icount.tx          = cnow.tx;
+-	icount.frame       = cnow.frame;
+-	icount.overrun     = cnow.overrun;
+-	icount.parity      = cnow.parity;
+-	icount.brk         = cnow.brk;
+-	icount.buf_overrun = cnow.buf_overrun;
++	icount->cts         = cnow.cts;
++	icount->dsr         = cnow.dsr;
++	icount->rng         = cnow.rng;
++	icount->dcd         = cnow.dcd;
++	icount->rx          = cnow.rx;
++	icount->tx          = cnow.tx;
++	icount->frame       = cnow.frame;
++	icount->overrun     = cnow.overrun;
++	icount->parity      = cnow.parity;
++	icount->brk         = cnow.brk;
++	icount->buf_overrun = cnow.buf_overrun;
+ 
+-	return copy_to_user(icnt, &icount, sizeof(icount)) ? -EFAULT : 0;
++	return 0;
+ }
+ 
+ /*
+@@ -1150,10 +1150,6 @@ uart_ioctl(struct tty_struct *tty, struct file *filp, unsigned int cmd,
+ 	case TIOCMIWAIT:
+ 		ret = uart_wait_modem_status(state, arg);
+ 		break;
+-
+-	case TIOCGICOUNT:
+-		ret = uart_get_count(state, uarg);
+-		break;
+ 	}
+ 
+ 	if (ret != -ENOIOCTLCMD)
+@@ -2283,6 +2279,7 @@ static const struct tty_operations uart_ops = {
+ #endif
+ 	.tiocmget	= uart_tiocmget,
+ 	.tiocmset	= uart_tiocmset,
++	.get_icount	= uart_get_icount,
+ #ifdef CONFIG_CONSOLE_POLL
+ 	.poll_init	= uart_poll_init,
+ 	.poll_get_char	= uart_poll_get_char,
+diff --git a/drivers/usb/serial/usb-serial.c b/drivers/usb/serial/usb-serial.c
+index 7a2177c..e64da74 100644
+--- a/drivers/usb/serial/usb-serial.c
++++ b/drivers/usb/serial/usb-serial.c
+@@ -519,6 +519,18 @@ static int serial_tiocmset(struct tty_struct *tty, struct file *file,
+ 	return -EINVAL;
+ }
+ 
++static int serial_get_icount(struct tty_struct *tty,
++				struct serial_icounter_struct *icount)
++{
++	struct usb_serial_port *port = tty->driver_data;
++
++	dbg("%s - port %d", __func__, port->number);
++
++	if (port->serial->type->get_icount)
++		return port->serial->type->get_icount(tty, icount);
++	return -EINVAL;
++}
++
+ /*
+  * We would be calling tty_wakeup here, but unfortunately some line
+  * disciplines have an annoying habit of calling tty->write from
+@@ -1195,6 +1207,7 @@ static const struct tty_operations serial_ops = {
+ 	.chars_in_buffer =	serial_chars_in_buffer,
+ 	.tiocmget =		serial_tiocmget,
+ 	.tiocmset =		serial_tiocmset,
++	.get_icount = 		serial_get_icount,
+ 	.cleanup = 		serial_cleanup,
+ 	.install = 		serial_install,
+ 	.proc_fops =		&serial_proc_fops,
+diff --git a/include/linux/tty_driver.h b/include/linux/tty_driver.h
+index b086779..db2d227 100644
+--- a/include/linux/tty_driver.h
++++ b/include/linux/tty_driver.h
+@@ -224,6 +224,12 @@
+  *	unless the tty also has a valid tty->termiox pointer.
+  *
+  *	Optional: Called under the termios lock
++ *
++ * int (*get_icount)(struct tty_struct *tty, struct serial_icounter *icount);
++ *
++ *	Called when the device receives a TIOCGICOUNT ioctl. Passed a kernel
++ *	structure to complete. This method is optional and will only be called
++ *	if provided (otherwise EINVAL will be returned).
+  */
+ 
+ #include <linux/fs.h>
+@@ -232,6 +238,7 @@
+ 
+ struct tty_struct;
+ struct tty_driver;
++struct serial_icounter_struct;
+ 
+ struct tty_operations {
+ 	struct tty_struct * (*lookup)(struct tty_driver *driver,
+@@ -268,6 +275,8 @@ struct tty_operations {
+ 			unsigned int set, unsigned int clear);
+ 	int (*resize)(struct tty_struct *tty, struct winsize *ws);
+ 	int (*set_termiox)(struct tty_struct *tty, struct termiox *tnew);
++	int (*get_icount)(struct tty_struct *tty,
++				struct serial_icounter_struct *icount);
+ #ifdef CONFIG_CONSOLE_POLL
+ 	int (*poll_init)(struct tty_driver *driver, int line, char *options);
+ 	int (*poll_get_char)(struct tty_driver *driver, int line);
+diff --git a/include/linux/usb/serial.h b/include/linux/usb/serial.h
+index 55675b1..16d682f 100644
+--- a/include/linux/usb/serial.h
++++ b/include/linux/usb/serial.h
+@@ -271,6 +271,8 @@ struct usb_serial_driver {
+ 	int  (*tiocmget)(struct tty_struct *tty, struct file *file);
+ 	int  (*tiocmset)(struct tty_struct *tty, struct file *file,
+ 			 unsigned int set, unsigned int clear);
++	int  (*get_icount)(struct tty_struct *tty,
++			struct serial_icounter_struct *icount);
+ 	/* Called by the tty layer for port level work. There may or may not
+ 	   be an attached tty at this point */
+ 	void (*dtr_rts)(struct usb_serial_port *port, int on);
+-- 
+1.7.3.2
+

From 25202fe3cf8722b1f81bb83e40df136c720c8a22 Mon Sep 17 00:00:00 2001
From: Kyle McMartin <kyle@mcmartin.ca>
Date: Fri, 26 Nov 2010 15:21:31 -0500
Subject: [PATCH 42/56] quiet a build warning caused by the previous INET_DIAG
 change

---
 kernel.spec                                   |  5 ++++
 ...ake-nlmsg_find_attr-take-a-const-ptr.patch | 29 +++++++++++++++++++
 2 files changed, 34 insertions(+)
 create mode 100644 netlink-make-nlmsg_find_attr-take-a-const-ptr.patch

diff --git a/kernel.spec b/kernel.spec
index e8e3109f0..72152fb9f 100644
--- a/kernel.spec
+++ b/kernel.spec
@@ -737,6 +737,7 @@ Patch12400: ipc-zero-struct-memory-for-compat-fns.patch
 Patch12401: ipc-shm-fix-information-leak-to-user.patch
 
 Patch12405: inet_diag-make-sure-we-run-the-same-bytecode-we-audited.patch
+Patch12408: netlink-make-nlmsg_find_attr-take-a-const-ptr.patch
 
 Patch12406: posix-cpu-timers-workaround-to-suppress-problems-with-mt-exec.patch
 
@@ -1380,6 +1381,7 @@ ApplyPatch ipc-shm-fix-information-leak-to-user.patch
 
 # rhbz#651264 (CVE-2010-3880)
 ApplyPatch inet_diag-make-sure-we-run-the-same-bytecode-we-audited.patch
+ApplyPatch netlink-make-nlmsg_find_attr-take-a-const-ptr.patch
 
 # rhbz#656264
 ApplyPatch posix-cpu-timers-workaround-to-suppress-problems-with-mt-exec.patch
@@ -2004,6 +2006,9 @@ fi
 #                 ||     ||
 
 %changelog
+* Fri Nov 26 2010 Kyle McMartin <kyle@redhat.com>
+- Quiet a build warning the previous INET_DIAG fix caused.
+
 * Fri Nov 26 2010 Kyle McMartin <kyle@redhat.com>
 - Plug stack leaks in tty/serial drivers. (#648663, #648660)
 
diff --git a/netlink-make-nlmsg_find_attr-take-a-const-ptr.patch b/netlink-make-nlmsg_find_attr-take-a-const-ptr.patch
new file mode 100644
index 000000000..5b75ca400
--- /dev/null
+++ b/netlink-make-nlmsg_find_attr-take-a-const-ptr.patch
@@ -0,0 +1,29 @@
+From 38f1f0db010ac5b981ae06f1fe2fd64095ebb171 Mon Sep 17 00:00:00 2001
+From: Nelson Elhage <nelhage@ksplice.com>
+Date: Wed, 3 Nov 2010 16:35:40 +0000
+Subject: [PATCH] netlink: Make nlmsg_find_attr take a const nlmsghdr*.
+
+This will let us use it on a nlmsghdr stored inside a netlink_callback.
+
+Signed-off-by: Nelson Elhage <nelhage@ksplice.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+---
+ include/net/netlink.h |    2 +-
+ 1 files changed, 1 insertions(+), 1 deletions(-)
+
+diff --git a/include/net/netlink.h b/include/net/netlink.h
+index a63b219..c344646 100644
+--- a/include/net/netlink.h
++++ b/include/net/netlink.h
+@@ -384,7 +384,7 @@ static inline int nlmsg_parse(const struct nlmsghdr *nlh, int hdrlen,
+  *
+  * Returns the first attribute which matches the specified type.
+  */
+-static inline struct nlattr *nlmsg_find_attr(struct nlmsghdr *nlh,
++static inline struct nlattr *nlmsg_find_attr(const struct nlmsghdr *nlh,
+ 					     int hdrlen, int attrtype)
+ {
+ 	return nla_find(nlmsg_attrdata(nlh, hdrlen),
+-- 
+1.7.3.2
+

From 769514dd9989ba2d968bc7514740013e24a82bb8 Mon Sep 17 00:00:00 2001
From: Kyle McMartin <kyle@mcmartin.ca>
Date: Mon, 29 Nov 2010 09:19:24 -0500
Subject: [PATCH 43/56] tty fixes from git and tpm fix from f14

---
 kernel.spec                                   | 17 ++++
 tpm-autodetect-itpm-devices.patch             | 65 ++++++++++++++
 ...-allow-reopen-when-ldisc-is-changing.patch | 84 +++++++++++++++++++
 tty-ldisc-fix-open-flag-handling.patch        | 54 ++++++++++++
 tty-open-hangup-race-fixup.patch              | 76 +++++++++++++++++
 5 files changed, 296 insertions(+)
 create mode 100644 tpm-autodetect-itpm-devices.patch
 create mode 100644 tty-dont-allow-reopen-when-ldisc-is-changing.patch
 create mode 100644 tty-ldisc-fix-open-flag-handling.patch
 create mode 100644 tty-open-hangup-race-fixup.patch

diff --git a/kernel.spec b/kernel.spec
index 72152fb9f..f1357e774 100644
--- a/kernel.spec
+++ b/kernel.spec
@@ -746,6 +746,12 @@ Patch12407: hda_realtek-handle-unset-external-amp-bits.patch
 Patch12410: tty-make-tiocgicount-a-handler.patch
 Patch12411: tty-icount-changeover-for-other-main-devices.patch
 
+Patch12413: tpm-autodetect-itpm-devices.patch
+
+Patch12415: tty-dont-allow-reopen-when-ldisc-is-changing.patch
+Patch12416: tty-ldisc-fix-open-flag-handling.patch
+Patch12417: tty-open-hangup-race-fixup.patch
+
 %endif
 
 BuildRoot: %{_tmppath}/kernel-%{KVERREL}-root
@@ -1393,6 +1399,12 @@ ApplyPatch hda_realtek-handle-unset-external-amp-bits.patch
 ApplyPatch tty-make-tiocgicount-a-handler.patch
 ApplyPatch tty-icount-changeover-for-other-main-devices.patch
 
+ApplyPatch tpm-autodetect-itpm-devices.patch
+
+ApplyPatch tty-dont-allow-reopen-when-ldisc-is-changing.patch
+ApplyPatch tty-ldisc-fix-open-flag-handling.patch
+ApplyPatch tty-open-hangup-race-fixup.patch
+
 # END OF PATCH APPLICATIONS
 
 %endif
@@ -2006,6 +2018,11 @@ fi
 #                 ||     ||
 
 %changelog
+* Mon Nov 29 2010 Kyle McMartin <kyle@redhat.com>
+- tpm-autodetect-itpm-devices.patch: Auto-fix TPM issues on various
+  laptops which prevented suspend/resume. (#647132)
+- tty fixes from kernel-git (#630464)
+
 * Fri Nov 26 2010 Kyle McMartin <kyle@redhat.com>
 - Quiet a build warning the previous INET_DIAG fix caused.
 
diff --git a/tpm-autodetect-itpm-devices.patch b/tpm-autodetect-itpm-devices.patch
new file mode 100644
index 000000000..57b5d07ed
--- /dev/null
+++ b/tpm-autodetect-itpm-devices.patch
@@ -0,0 +1,65 @@
+commit 8cf5102c84dba60b2ea29b7e89f1a65100e20bb9
+Author: Matthew Garrett <mjg@redhat.com>
+Date:   Thu Oct 21 17:31:56 2010 -0400
+
+    tpm: Autodetect itpm devices
+    
+    Some Lenovos have TPMs that require a quirk to function correctly. This can
+    be autodetected by checking whether the device has a _HID of INTC0102. This
+    is an invalid PNPid, and as such is discarded by the pnp layer - however
+    it's still present in the ACPI code, so we can pull it out that way. This
+    means that the quirk won't be automatically applied on non-ACPI systems,
+    but without ACPI we don't have any way to identify the chip anyway so I
+    don't think that's a great concern.
+    
+    Signed-off-by: Matthew Garrett <mjg@redhat.com>
+
+diff --git a/drivers/char/tpm/tpm_tis.c b/drivers/char/tpm/tpm_tis.c
+index 1030f84..c17a305 100644
+--- a/drivers/char/tpm/tpm_tis.c
++++ b/drivers/char/tpm/tpm_tis.c
+@@ -25,6 +25,7 @@
+ #include <linux/slab.h>
+ #include <linux/interrupt.h>
+ #include <linux/wait.h>
++#include <linux/acpi.h>
+ #include "tpm.h"
+ 
+ #define TPM_HEADER_SIZE 10
+@@ -78,6 +79,26 @@ enum tis_defaults {
+ static LIST_HEAD(tis_chips);
+ static DEFINE_SPINLOCK(tis_lock);
+ 
++#ifdef CONFIG_ACPI
++static int is_itpm(struct pnp_dev *dev)
++{
++	struct acpi_device *acpi = pnp_acpi_device(dev);
++	struct acpi_hardware_id *id;
++
++	list_for_each_entry(id, &acpi->pnp.ids, list) {
++		if (!strcmp("INTC0102", id->id))
++			return 1;
++	}
++
++	return 0;
++}
++#else
++static int is_itpm(struct pnp_dev *dev)
++{
++	return 0;
++}
++#endif
++
+ static int check_locality(struct tpm_chip *chip, int l)
+ {
+ 	if ((ioread8(chip->vendor.iobase + TPM_ACCESS(l)) &
+@@ -472,6 +493,9 @@ static int tpm_tis_init(struct device *dev, resource_size_t start,
+ 		 "1.2 TPM (device-id 0x%X, rev-id %d)\n",
+ 		 vendor >> 16, ioread8(chip->vendor.iobase + TPM_RID(0)));
+ 
++	if (is_itpm(to_pnp_dev(dev)))
++		itpm = 1;
++
+ 	if (itpm)
+ 		dev_info(dev, "Intel iTPM workaround enabled\n");
+ 
diff --git a/tty-dont-allow-reopen-when-ldisc-is-changing.patch b/tty-dont-allow-reopen-when-ldisc-is-changing.patch
new file mode 100644
index 000000000..4b822d5f9
--- /dev/null
+++ b/tty-dont-allow-reopen-when-ldisc-is-changing.patch
@@ -0,0 +1,84 @@
+From jirislaby@gmail.com Thu Nov 25 12:16:42 2010
+From: Jiri Slaby <jslaby@suse.cz>
+Subject: [PATCH 1/1] TTY: don't allow reopen when ldisc is changing
+Date: Thu, 25 Nov 2010 18:16:23 +0100
+
+There are many WARNINGs like the following reported nowadays:
+WARNING: at drivers/tty/tty_io.c:1331 tty_open+0x2a2/0x49a()
+Hardware name: Latitude E6500
+Modules linked in:
+Pid: 1207, comm: plymouthd Not tainted 2.6.37-rc3-mmotm1123 #3
+Call Trace:
+ [<ffffffff8103b189>] warn_slowpath_common+0x80/0x98
+ [<ffffffff8103b1b6>] warn_slowpath_null+0x15/0x17
+ [<ffffffff8128a3ab>] tty_open+0x2a2/0x49a
+ [<ffffffff810fd53f>] chrdev_open+0x11d/0x146
+...
+
+This means tty_reopen is called without TTY_LDISC set. For further
+considerations, note tty_lock is held in tty_open. TTY_LDISC is cleared in:
+1) __tty_hangup from tty_ldisc_hangup to tty_ldisc_enable. During this
+section tty_lock is held.
+
+2) tty_release via tty_ldisc_release till the end of tty existence. If
+tty->count <= 1, tty_lock is taken, TTY_CLOSING bit set and then
+tty_ldisc_release called. tty_reopen checks TTY_CLOSING before checking
+TTY_LDISC.
+
+3) tty_set_ldisc from tty_ldisc_halt to tty_ldisc_enable. We:
+   * take tty_lock, set TTY_LDISC_CHANGING, put tty_lock
+   * call tty_ldisc_halt (clear TTY_LDISC), tty_lock is _not_ held
+   * do some other work
+   * take tty_lock, call tty_ldisc_enable (set TTY_LDISC), put
+     tty_lock
+
+So the only option I see is 3). The solution is to check
+TTY_LDISC_CHANGING along with TTY_CLOSING in tty_reopen.
+
+Nicely reproducible with two processes:
+while (1) {
+	fd = open("/dev/ttyS1", O_RDWR);
+	if (fd < 0) {
+		warn("open");
+		continue;
+	}
+	close(fd);
+}
+--------
+while (1) {
+        fd = open("/dev/ttyS1", O_RDWR);
+        ld1 = 0; ld2 = 2;
+        while (1) {
+                ioctl(fd, TIOCSETD, &ld1);
+                ioctl(fd, TIOCSETD, &ld2);
+        }
+        close(fd);
+}
+
+Signed-off-by: Jiri Slaby <jslaby@suse.cz>
+Reported-by: <Valdis.Kletnieks@vt.edu>
+Cc: Kyle McMartin <kyle@mcmartin.ca>
+Cc: Alan Cox <alan@lxorguk.ukuu.org.uk>
+---
+ drivers/tty/tty_io.c |    3 ++-
+ 1 files changed, 2 insertions(+), 1 deletions(-)
+
+diff --git a/drivers/tty/tty_io.c b/drivers/tty/tty_io.c
+index c05c5af..878f6d6 100644
+--- a/drivers/char/tty_io.c
++++ b/drivers/char/tty_io.c
+@@ -1310,7 +1310,8 @@ static int tty_reopen(struct tty_struct *tty)
+ {
+ 	struct tty_driver *driver = tty->driver;
+ 
+-	if (test_bit(TTY_CLOSING, &tty->flags))
++	if (test_bit(TTY_CLOSING, &tty->flags) ||
++			test_bit(TTY_LDISC_CHANGING, &tty->flags))
+ 		return -EIO;
+ 
+ 	if (driver->type == TTY_DRIVER_TYPE_PTY &&
+-- 
+1.7.3.1
+
+
+
diff --git a/tty-ldisc-fix-open-flag-handling.patch b/tty-ldisc-fix-open-flag-handling.patch
new file mode 100644
index 000000000..72c67a976
--- /dev/null
+++ b/tty-ldisc-fix-open-flag-handling.patch
@@ -0,0 +1,54 @@
+From linux-kernel-owner@vger.kernel.org Wed Nov 24 18:28:11 2010
+From:	Jiri Slaby <jslaby@suse.cz>
+Subject: [PATCH 1/2] TTY: ldisc, fix open flag handling
+Date:	Thu, 25 Nov 2010 00:27:54 +0100
+
+When a concrete ldisc open fails in tty_ldisc_open, we forget to clear
+TTY_LDISC_OPEN. This causes a false warning on the next ldisc open:
+WARNING: at drivers/char/tty_ldisc.c:445 tty_ldisc_open+0x26/0x38()
+Hardware name: System Product Name
+Modules linked in: ...
+Pid: 5251, comm: a.out Tainted: G        W  2.6.32-5-686 #1
+Call Trace:
+ [<c1030321>] ? warn_slowpath_common+0x5e/0x8a
+ [<c1030357>] ? warn_slowpath_null+0xa/0xc
+ [<c119311c>] ? tty_ldisc_open+0x26/0x38
+ [<c11936c5>] ? tty_set_ldisc+0x218/0x304
+...
+
+So clear the bit when failing...
+
+Introduced in c65c9bc3efa (tty: rewrite the ldisc locking) back in
+2.6.31-rc1.
+
+Signed-off-by: Jiri Slaby <jslaby@suse.cz>
+Cc: Alan Cox <alan@linux.intel.com>
+Reported-by: Sergey Lapin <slapin@ossfans.org>
+Tested-by: Sergey Lapin <slapin@ossfans.org>
+---
+ drivers/tty/tty_ldisc.c |    2 ++
+ 1 files changed, 2 insertions(+), 0 deletions(-)
+
+diff --git a/drivers/tty/tty_ldisc.c b/drivers/tty/tty_ldisc.c
+index d8e96b0..4214d58 100644
+--- a/drivers/char/tty_ldisc.c
++++ b/drivers/char/tty_ldisc.c
+@@ -454,6 +454,8 @@ static int tty_ldisc_open(struct tty_struct *tty, struct tty_ldisc *ld)
+                 /* BTM here locks versus a hangup event */
+ 		WARN_ON(!tty_locked());
+ 		ret = ld->ops->open(tty);
++		if (ret)
++			clear_bit(TTY_LDISC_OPEN, &tty->flags);
+ 		return ret;
+ 	}
+ 	return 0;
+-- 
+1.7.3.1
+
+
+--
+To unsubscribe from this list: send the line "unsubscribe linux-kernel" in
+the body of a message to majordomo@vger.kernel.org
+More majordomo info at  http://vger.kernel.org/majordomo-info.html
+Please read the FAQ at  http://www.tux.org/lkml/
+
diff --git a/tty-open-hangup-race-fixup.patch b/tty-open-hangup-race-fixup.patch
new file mode 100644
index 000000000..bfd29ecf7
--- /dev/null
+++ b/tty-open-hangup-race-fixup.patch
@@ -0,0 +1,76 @@
+From 9e88e8b9915b5e067507a087437d80e6a133d612 Mon Sep 17 00:00:00 2001
+From: Jiri Slaby <jslaby@suse.cz>
+Date: Sat, 27 Nov 2010 16:06:46 +0100
+Subject: [PATCH 1/1] TTY: open/hangup race fixup
+
+
+Signed-off-by: Jiri Slaby <jslaby@suse.cz>
+---
+ drivers/tty/tty_io.c |   10 +++++++++-
+ include/linux/tty.h  |    1 +
+ 2 files changed, 10 insertions(+), 1 deletions(-)
+
+diff --git a/drivers/tty/tty_io.c b/drivers/tty/tty_io.c
+index 878f6d6..35480dd 100644
+--- a/drivers/char/tty_io.c
++++ b/drivers/char/tty_io.c
+@@ -559,6 +559,9 @@ void __tty_hangup(struct tty_struct *tty)
+ 
+ 	tty_lock();
+ 
++	/* some functions below drop BTM, so we need this bit */
++	set_bit(TTY_HUPPING, &tty->flags);
++
+ 	/* inuse_filps is protected by the single tty lock,
+ 	   this really needs to change if we want to flush the
+ 	   workqueue with the lock held */
+@@ -578,6 +581,10 @@ void __tty_hangup(struct tty_struct *tty)
+ 	}
+ 	spin_unlock(&tty_files_lock);
+ 
++	/*
++	 * it drops BTM and thus races with reopen
++	 * we protect the race by TTY_HUPPING
++	 */
+ 	tty_ldisc_hangup(tty);
+ 
+ 	read_lock(&tasklist_lock);
+@@ -615,7 +622,6 @@ void __tty_hangup(struct tty_struct *tty)
+ 	tty->session = NULL;
+ 	tty->pgrp = NULL;
+ 	tty->ctrl_status = 0;
+-	set_bit(TTY_HUPPED, &tty->flags);
+ 	spin_unlock_irqrestore(&tty->ctrl_lock, flags);
+ 
+ 	/* Account for the p->signal references we killed */
+@@ -641,6 +647,7 @@ void __tty_hangup(struct tty_struct *tty)
+ 	 * can't yet guarantee all that.
+ 	 */
+ 	set_bit(TTY_HUPPED, &tty->flags);
++	clear_bit(TTY_HUPPING, &tty->flags);
+ 	tty_ldisc_enable(tty);
+ 
+ 	tty_unlock();
+@@ -1311,6 +1318,7 @@ static int tty_reopen(struct tty_struct *tty)
+ 	struct tty_driver *driver = tty->driver;
+ 
+ 	if (test_bit(TTY_CLOSING, &tty->flags) ||
++			test_bit(TTY_HUPPING, &tty->flags) ||
+ 			test_bit(TTY_LDISC_CHANGING, &tty->flags))
+ 		return -EIO;
+ 
+diff --git a/include/linux/tty.h b/include/linux/tty.h
+index 032d79f..54e4eaa 100644
+--- a/include/linux/tty.h
++++ b/include/linux/tty.h
+@@ -366,6 +366,7 @@ struct tty_file_private {
+ #define TTY_HUPPED 		18	/* Post driver->hangup() */
+ #define TTY_FLUSHING		19	/* Flushing to ldisc in progress */
+ #define TTY_FLUSHPENDING	20	/* Queued buffer flush pending */
++#define TTY_HUPPING 		21	/* ->hangup() in progress */
+ 
+ #define TTY_WRITE_FLUSH(tty) tty_write_flush((tty))
+ 
+-- 
+1.7.3.1
+

From 8c2ac42a63fb79ce9d5c6e6be713561b618f6858 Mon Sep 17 00:00:00 2001
From: Kyle McMartin <kyle@mcmartin.ca>
Date: Mon, 29 Nov 2010 09:21:37 -0500
Subject: [PATCH 44/56] bump baserelease to 2.6.36.1-10

---
 kernel.spec | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/kernel.spec b/kernel.spec
index f1357e774..2c0d62d71 100644
--- a/kernel.spec
+++ b/kernel.spec
@@ -51,7 +51,7 @@ Summary: The Linux kernel
 # For non-released -rc kernels, this will be prepended with "0.", so
 # for example a 3 here will become 0.3
 #
-%global baserelease 9
+%global baserelease 10
 %global fedora_build %{baserelease}
 
 # base_sublevel is the kernel version we're starting with and patching
@@ -2018,7 +2018,7 @@ fi
 #                 ||     ||
 
 %changelog
-* Mon Nov 29 2010 Kyle McMartin <kyle@redhat.com>
+* Mon Nov 29 2010 Kyle McMartin <kyle@redhat.com> 2.6.36.1-10
 - tpm-autodetect-itpm-devices.patch: Auto-fix TPM issues on various
   laptops which prevented suspend/resume. (#647132)
 - tty fixes from kernel-git (#630464)

From ed1195f0f7adad86cb7bef6b78e87e32a93e2e4c Mon Sep 17 00:00:00 2001
From: Kyle McMartin <kyle@mcmartin.ca>
Date: Mon, 29 Nov 2010 20:14:17 -0500
Subject: [PATCH 45/56] add a patch to log pnp resources

---
 kernel.spec                                  |  9 +++
 pnp-log-pnp-resources-as-we-do-for-pci.patch | 84 ++++++++++++++++++++
 2 files changed, 93 insertions(+)
 create mode 100644 pnp-log-pnp-resources-as-we-do-for-pci.patch

diff --git a/kernel.spec b/kernel.spec
index 2c0d62d71..c90b027a7 100644
--- a/kernel.spec
+++ b/kernel.spec
@@ -611,6 +611,8 @@ Patch204: linux-2.6-debug-always-inline-kzalloc.patch
 
 Patch210: debug-tty-print-dev-name.patch
 
+Patch300: pnp-log-pnp-resources-as-we-do-for-pci.patch
+
 Patch380: linux-2.6-defaults-pci_no_msi.patch
 Patch381: linux-2.6-defaults-pci_use_crs.patch
 Patch383: linux-2.6-defaults-aspm.patch
@@ -1238,6 +1240,9 @@ ApplyPatch linux-2.6-defaults-pci_use_crs.patch
 # enable ASPM by default on hardware we expect to work
 ApplyPatch linux-2.6-defaults-aspm.patch
 
+# helps debug resource conflicts [c1f3f281]
+ApplyPatch pnp-log-pnp-resources-as-we-do-for-pci.patch
+
 ApplyPatch ima-allow-it-to-be-completely-disabled-and-default-off.patch
 
 #
@@ -2018,6 +2023,10 @@ fi
 #                 ||     ||
 
 %changelog
+* Mon Nov 29 2010 Kyle McMartin <kyle@redhat.com>
+- PNP: log PNP resources, as we do for PCI [c1f3f281]
+  should help us debug resource conflicts (requested by bjorn.)
+
 * Mon Nov 29 2010 Kyle McMartin <kyle@redhat.com> 2.6.36.1-10
 - tpm-autodetect-itpm-devices.patch: Auto-fix TPM issues on various
   laptops which prevented suspend/resume. (#647132)
diff --git a/pnp-log-pnp-resources-as-we-do-for-pci.patch b/pnp-log-pnp-resources-as-we-do-for-pci.patch
new file mode 100644
index 000000000..e3b8a2e77
--- /dev/null
+++ b/pnp-log-pnp-resources-as-we-do-for-pci.patch
@@ -0,0 +1,84 @@
+From 2d28c74b75ef2900e7016eef29a5ab9a4174b1f0 Mon Sep 17 00:00:00 2001
+From: Bjorn Helgaas <bjorn.helgaas@hp.com>
+Date: Wed, 29 Sep 2010 12:24:23 -0600
+Subject: PNP: log PNP resources, as we do for PCI
+
+ACPI devices are often involved in address space conflicts with PCI devices,
+so I think it's worth logging the resources they use.  Otherwise we have to
+depend on lspnp or groping around in sysfs to find them.
+
+Signed-off-by: Bjorn Helgaas <bjorn.helgaas@hp.com>
+Signed-off-by: Len Brown <len.brown@intel.com>
+---
+ drivers/pnp/core.c     |    5 +++--
+ drivers/pnp/resource.c |   10 +++++-----
+ 2 files changed, 8 insertions(+), 7 deletions(-)
+
+diff --git a/drivers/pnp/core.c b/drivers/pnp/core.c
+index 88b3cde..53a8f33 100644
+--- a/drivers/pnp/core.c
++++ b/drivers/pnp/core.c
+@@ -194,8 +194,9 @@ int pnp_add_device(struct pnp_dev *dev)
+ 	for (id = dev->id; id; id = id->next)
+ 		len += scnprintf(buf + len, sizeof(buf) - len, " %s", id->id);
+ 
+-	pnp_dbg(&dev->dev, "%s device, IDs%s (%s)\n",
+-		dev->protocol->name, buf, dev->active ? "active" : "disabled");
++	dev_printk(KERN_DEBUG, &dev->dev, "%s device, IDs%s (%s)\n",
++		   dev->protocol->name, buf,
++		   dev->active ? "active" : "disabled");
+ 	return 0;
+ }
+ 
+diff --git a/drivers/pnp/resource.c b/drivers/pnp/resource.c
+index e3446ab..a925e6b 100644
+--- a/drivers/pnp/resource.c
++++ b/drivers/pnp/resource.c
+@@ -523,7 +523,7 @@ struct pnp_resource *pnp_add_irq_resource(struct pnp_dev *dev, int irq,
+ 	res->start = irq;
+ 	res->end = irq;
+ 
+-	pnp_dbg(&dev->dev, "  add %pr\n", res);
++	dev_printk(KERN_DEBUG, &dev->dev, "%pR\n", res);
+ 	return pnp_res;
+ }
+ 
+@@ -544,7 +544,7 @@ struct pnp_resource *pnp_add_dma_resource(struct pnp_dev *dev, int dma,
+ 	res->start = dma;
+ 	res->end = dma;
+ 
+-	pnp_dbg(&dev->dev, "  add %pr\n", res);
++	dev_printk(KERN_DEBUG, &dev->dev, "%pR\n", res);
+ 	return pnp_res;
+ }
+ 
+@@ -568,7 +568,7 @@ struct pnp_resource *pnp_add_io_resource(struct pnp_dev *dev,
+ 	res->start = start;
+ 	res->end = end;
+ 
+-	pnp_dbg(&dev->dev, "  add %pr\n", res);
++	dev_printk(KERN_DEBUG, &dev->dev, "%pR\n", res);
+ 	return pnp_res;
+ }
+ 
+@@ -592,7 +592,7 @@ struct pnp_resource *pnp_add_mem_resource(struct pnp_dev *dev,
+ 	res->start = start;
+ 	res->end = end;
+ 
+-	pnp_dbg(&dev->dev, "  add %pr\n", res);
++	dev_printk(KERN_DEBUG, &dev->dev, "%pR\n", res);
+ 	return pnp_res;
+ }
+ 
+@@ -616,7 +616,7 @@ struct pnp_resource *pnp_add_bus_resource(struct pnp_dev *dev,
+ 	res->start = start;
+ 	res->end = end;
+ 
+-	pnp_dbg(&dev->dev, "  add %pr\n", res);
++	dev_printk(KERN_DEBUG, &dev->dev, "%pR\n", res);
+ 	return pnp_res;
+ }
+ 
+-- 
+1.7.3.2
+

From 311bf8c0a909df19d46199a82607382d27fa48e8 Mon Sep 17 00:00:00 2001
From: Kyle McMartin <kyle@mcmartin.ca>
Date: Mon, 29 Nov 2010 21:07:53 -0500
Subject: [PATCH 46/56] un-rebase drm-intel-big-hammer & make-lvds-work, update
 would_have_oomkilled

---
 drm-intel-2.6.37-rc2.patch                    | 24423 ----------------
 drm-intel-big-hammer.patch                    |    16 +-
 drm-intel-make-lvds-work.patch                |    19 +-
 kernel.spec                                   |     7 +-
 linux-2.6-debug-vm-would-have-oomkilled.patch |    69 +-
 5 files changed, 57 insertions(+), 24477 deletions(-)
 delete mode 100644 drm-intel-2.6.37-rc2.patch

diff --git a/drm-intel-2.6.37-rc2.patch b/drm-intel-2.6.37-rc2.patch
deleted file mode 100644
index e2a9d28bc..000000000
--- a/drm-intel-2.6.37-rc2.patch
+++ /dev/null
@@ -1,24423 +0,0 @@
- drivers/char/agp/Makefile               |    1 +
- drivers/char/agp/intel-agp.c            |  201 +---
- drivers/char/agp/intel-agp.h            |   43 +-
- drivers/char/agp/intel-gtt.c            | 1614 ++++++++++-----------
- drivers/gpu/drm/drm_edid.c              |   92 +-
- drivers/gpu/drm/i915/Makefile           |    4 +-
- drivers/gpu/drm/i915/dvo_ch7017.c       |   66 +-
- drivers/gpu/drm/i915/dvo_ch7xxx.c       |   10 +-
- drivers/gpu/drm/i915/dvo_ivch.c         |   10 +-
- drivers/gpu/drm/i915/dvo_sil164.c       |   10 +-
- drivers/gpu/drm/i915/dvo_tfp410.c       |   10 +-
- drivers/gpu/drm/i915/i915_debugfs.c     |  337 ++++-
- drivers/gpu/drm/i915/i915_dma.c         |  360 ++----
- drivers/gpu/drm/i915/i915_drv.c         |  219 ++-
- drivers/gpu/drm/i915/i915_drv.h         |  272 +++--
- drivers/gpu/drm/i915/i915_gem.c         | 2292 +++++++++++++++---------------
- drivers/gpu/drm/i915/i915_gem_debug.c   |  148 ++-
- drivers/gpu/drm/i915/i915_gem_evict.c   |   72 +-
- drivers/gpu/drm/i915/i915_gem_tiling.c  |   54 +-
- drivers/gpu/drm/i915/i915_irq.c         |  259 ++--
- drivers/gpu/drm/i915/i915_reg.h         |  335 +++--
- drivers/gpu/drm/i915/i915_suspend.c     |   32 +-
- drivers/gpu/drm/i915/intel_acpi.c       |  286 ++++
- drivers/gpu/drm/i915/intel_bios.c       |  234 +++-
- drivers/gpu/drm/i915/intel_bios.h       |    6 +-
- drivers/gpu/drm/i915/intel_crt.c        |  127 +-
- drivers/gpu/drm/i915/intel_display.c    | 2374 ++++++++++++++++---------------
- drivers/gpu/drm/i915/intel_dp.c         |  658 ++++++---
- drivers/gpu/drm/i915/intel_drv.h        |  161 ++-
- drivers/gpu/drm/i915/intel_dvo.c        |   69 +-
- drivers/gpu/drm/i915/intel_fb.c         |   29 +-
- drivers/gpu/drm/i915/intel_hdmi.c       |  193 ++-
- drivers/gpu/drm/i915/intel_i2c.c        |  484 +++++--
- drivers/gpu/drm/i915/intel_lvds.c       |  445 +++---
- drivers/gpu/drm/i915/intel_modes.c      |   16 +-
- drivers/gpu/drm/i915/intel_opregion.c   |  517 +++++++
- drivers/gpu/drm/i915/intel_overlay.c    | 1004 +++++++------
- drivers/gpu/drm/i915/intel_panel.c      |  109 ++
- drivers/gpu/drm/i915/intel_ringbuffer.c |  580 +++++---
- drivers/gpu/drm/i915/intel_ringbuffer.h |   84 +-
- drivers/gpu/drm/i915/intel_sdvo.c       | 1076 +++++++--------
- drivers/gpu/drm/i915/intel_tv.c         |  165 +--
- include/drm/drm_crtc.h                  |    1 +
- include/drm/drm_dp_helper.h             |    3 +
- include/drm/i915_drm.h                  |    6 +-
- include/drm/intel-gtt.h                 |   18 +
- 46 files changed, 8590 insertions(+), 6496 deletions(-)
-
-diff --git a/drivers/char/agp/Makefile b/drivers/char/agp/Makefile
-index 627f542..8eb56e2 100644
---- a/drivers/char/agp/Makefile
-+++ b/drivers/char/agp/Makefile
-@@ -13,6 +13,7 @@ obj-$(CONFIG_AGP_HP_ZX1)	+= hp-agp.o
- obj-$(CONFIG_AGP_PARISC)	+= parisc-agp.o
- obj-$(CONFIG_AGP_I460)		+= i460-agp.o
- obj-$(CONFIG_AGP_INTEL)		+= intel-agp.o
-+obj-$(CONFIG_AGP_INTEL)		+= intel-gtt.o
- obj-$(CONFIG_AGP_NVIDIA)	+= nvidia-agp.o
- obj-$(CONFIG_AGP_SGI_TIOCA)	+= sgi-agp.o
- obj-$(CONFIG_AGP_SIS)		+= sis-agp.o
-diff --git a/drivers/char/agp/intel-agp.c b/drivers/char/agp/intel-agp.c
-index cd18493..e72f49d 100644
---- a/drivers/char/agp/intel-agp.c
-+++ b/drivers/char/agp/intel-agp.c
-@@ -12,9 +12,6 @@
- #include <asm/smp.h>
- #include "agp.h"
- #include "intel-agp.h"
--#include <linux/intel-gtt.h>
--
--#include "intel-gtt.c"
- 
- int intel_agp_enabled;
- EXPORT_SYMBOL(intel_agp_enabled);
-@@ -703,179 +700,37 @@ static const struct agp_bridge_driver intel_7505_driver = {
- 	.agp_type_to_mask_type  = agp_generic_type_to_mask_type,
- };
- 
--static int find_gmch(u16 device)
--{
--	struct pci_dev *gmch_device;
--
--	gmch_device = pci_get_device(PCI_VENDOR_ID_INTEL, device, NULL);
--	if (gmch_device && PCI_FUNC(gmch_device->devfn) != 0) {
--		gmch_device = pci_get_device(PCI_VENDOR_ID_INTEL,
--					     device, gmch_device);
--	}
--
--	if (!gmch_device)
--		return 0;
--
--	intel_private.pcidev = gmch_device;
--	return 1;
--}
--
- /* Table to describe Intel GMCH and AGP/PCIE GART drivers.  At least one of
-  * driver and gmch_driver must be non-null, and find_gmch will determine
-  * which one should be used if a gmch_chip_id is present.
-  */
--static const struct intel_driver_description {
-+static const struct intel_agp_driver_description {
- 	unsigned int chip_id;
--	unsigned int gmch_chip_id;
- 	char *name;
- 	const struct agp_bridge_driver *driver;
--	const struct agp_bridge_driver *gmch_driver;
- } intel_agp_chipsets[] = {
--	{ PCI_DEVICE_ID_INTEL_82443LX_0, 0, "440LX", &intel_generic_driver, NULL },
--	{ PCI_DEVICE_ID_INTEL_82443BX_0, 0, "440BX", &intel_generic_driver, NULL },
--	{ PCI_DEVICE_ID_INTEL_82443GX_0, 0, "440GX", &intel_generic_driver, NULL },
--	{ PCI_DEVICE_ID_INTEL_82810_MC1, PCI_DEVICE_ID_INTEL_82810_IG1, "i810",
--		NULL, &intel_810_driver },
--	{ PCI_DEVICE_ID_INTEL_82810_MC3, PCI_DEVICE_ID_INTEL_82810_IG3, "i810",
--		NULL, &intel_810_driver },
--	{ PCI_DEVICE_ID_INTEL_82810E_MC, PCI_DEVICE_ID_INTEL_82810E_IG, "i810",
--		NULL, &intel_810_driver },
--	{ PCI_DEVICE_ID_INTEL_82815_MC, PCI_DEVICE_ID_INTEL_82815_CGC, "i815",
--		&intel_815_driver, &intel_810_driver },
--	{ PCI_DEVICE_ID_INTEL_82820_HB, 0, "i820", &intel_820_driver, NULL },
--	{ PCI_DEVICE_ID_INTEL_82820_UP_HB, 0, "i820", &intel_820_driver, NULL },
--	{ PCI_DEVICE_ID_INTEL_82830_HB, PCI_DEVICE_ID_INTEL_82830_CGC, "830M",
--		&intel_830mp_driver, &intel_830_driver },
--	{ PCI_DEVICE_ID_INTEL_82840_HB, 0, "i840", &intel_840_driver, NULL },
--	{ PCI_DEVICE_ID_INTEL_82845_HB, 0, "845G", &intel_845_driver, NULL },
--	{ PCI_DEVICE_ID_INTEL_82845G_HB, PCI_DEVICE_ID_INTEL_82845G_IG, "830M",
--		&intel_845_driver, &intel_830_driver },
--	{ PCI_DEVICE_ID_INTEL_82850_HB, 0, "i850", &intel_850_driver, NULL },
--	{ PCI_DEVICE_ID_INTEL_82854_HB, PCI_DEVICE_ID_INTEL_82854_IG, "854",
--		&intel_845_driver, &intel_830_driver },
--	{ PCI_DEVICE_ID_INTEL_82855PM_HB, 0, "855PM", &intel_845_driver, NULL },
--	{ PCI_DEVICE_ID_INTEL_82855GM_HB, PCI_DEVICE_ID_INTEL_82855GM_IG, "855GM",
--		&intel_845_driver, &intel_830_driver },
--	{ PCI_DEVICE_ID_INTEL_82860_HB, 0, "i860", &intel_860_driver, NULL },
--	{ PCI_DEVICE_ID_INTEL_82865_HB, PCI_DEVICE_ID_INTEL_82865_IG, "865",
--		&intel_845_driver, &intel_830_driver },
--	{ PCI_DEVICE_ID_INTEL_82875_HB, 0, "i875", &intel_845_driver, NULL },
--	{ PCI_DEVICE_ID_INTEL_E7221_HB, PCI_DEVICE_ID_INTEL_E7221_IG, "E7221 (i915)",
--		NULL, &intel_915_driver },
--	{ PCI_DEVICE_ID_INTEL_82915G_HB, PCI_DEVICE_ID_INTEL_82915G_IG, "915G",
--		NULL, &intel_915_driver },
--	{ PCI_DEVICE_ID_INTEL_82915GM_HB, PCI_DEVICE_ID_INTEL_82915GM_IG, "915GM",
--		NULL, &intel_915_driver },
--	{ PCI_DEVICE_ID_INTEL_82945G_HB, PCI_DEVICE_ID_INTEL_82945G_IG, "945G",
--		NULL, &intel_915_driver },
--	{ PCI_DEVICE_ID_INTEL_82945GM_HB, PCI_DEVICE_ID_INTEL_82945GM_IG, "945GM",
--		NULL, &intel_915_driver },
--	{ PCI_DEVICE_ID_INTEL_82945GME_HB, PCI_DEVICE_ID_INTEL_82945GME_IG, "945GME",
--		NULL, &intel_915_driver },
--	{ PCI_DEVICE_ID_INTEL_82946GZ_HB, PCI_DEVICE_ID_INTEL_82946GZ_IG, "946GZ",
--		NULL, &intel_i965_driver },
--	{ PCI_DEVICE_ID_INTEL_82G35_HB, PCI_DEVICE_ID_INTEL_82G35_IG, "G35",
--		NULL, &intel_i965_driver },
--	{ PCI_DEVICE_ID_INTEL_82965Q_HB, PCI_DEVICE_ID_INTEL_82965Q_IG, "965Q",
--		NULL, &intel_i965_driver },
--	{ PCI_DEVICE_ID_INTEL_82965G_HB, PCI_DEVICE_ID_INTEL_82965G_IG, "965G",
--		NULL, &intel_i965_driver },
--	{ PCI_DEVICE_ID_INTEL_82965GM_HB, PCI_DEVICE_ID_INTEL_82965GM_IG, "965GM",
--		NULL, &intel_i965_driver },
--	{ PCI_DEVICE_ID_INTEL_82965GME_HB, PCI_DEVICE_ID_INTEL_82965GME_IG, "965GME/GLE",
--		NULL, &intel_i965_driver },
--	{ PCI_DEVICE_ID_INTEL_7505_0, 0, "E7505", &intel_7505_driver, NULL },
--	{ PCI_DEVICE_ID_INTEL_7205_0, 0, "E7205", &intel_7505_driver, NULL },
--	{ PCI_DEVICE_ID_INTEL_G33_HB, PCI_DEVICE_ID_INTEL_G33_IG, "G33",
--		NULL, &intel_g33_driver },
--	{ PCI_DEVICE_ID_INTEL_Q35_HB, PCI_DEVICE_ID_INTEL_Q35_IG, "Q35",
--		NULL, &intel_g33_driver },
--	{ PCI_DEVICE_ID_INTEL_Q33_HB, PCI_DEVICE_ID_INTEL_Q33_IG, "Q33",
--		NULL, &intel_g33_driver },
--	{ PCI_DEVICE_ID_INTEL_PINEVIEW_M_HB, PCI_DEVICE_ID_INTEL_PINEVIEW_M_IG, "GMA3150",
--		NULL, &intel_g33_driver },
--	{ PCI_DEVICE_ID_INTEL_PINEVIEW_HB, PCI_DEVICE_ID_INTEL_PINEVIEW_IG, "GMA3150",
--		NULL, &intel_g33_driver },
--	{ PCI_DEVICE_ID_INTEL_GM45_HB, PCI_DEVICE_ID_INTEL_GM45_IG,
--	    "GM45", NULL, &intel_i965_driver },
--	{ PCI_DEVICE_ID_INTEL_EAGLELAKE_HB, PCI_DEVICE_ID_INTEL_EAGLELAKE_IG,
--	    "Eaglelake", NULL, &intel_i965_driver },
--	{ PCI_DEVICE_ID_INTEL_Q45_HB, PCI_DEVICE_ID_INTEL_Q45_IG,
--	    "Q45/Q43", NULL, &intel_i965_driver },
--	{ PCI_DEVICE_ID_INTEL_G45_HB, PCI_DEVICE_ID_INTEL_G45_IG,
--	    "G45/G43", NULL, &intel_i965_driver },
--	{ PCI_DEVICE_ID_INTEL_B43_HB, PCI_DEVICE_ID_INTEL_B43_IG,
--	    "B43", NULL, &intel_i965_driver },
--	{ PCI_DEVICE_ID_INTEL_B43_1_HB, PCI_DEVICE_ID_INTEL_B43_1_IG,
--	    "B43", NULL, &intel_i965_driver },
--	{ PCI_DEVICE_ID_INTEL_G41_HB, PCI_DEVICE_ID_INTEL_G41_IG,
--	    "G41", NULL, &intel_i965_driver },
--	{ PCI_DEVICE_ID_INTEL_IRONLAKE_D_HB, PCI_DEVICE_ID_INTEL_IRONLAKE_D_IG,
--	    "HD Graphics", NULL, &intel_i965_driver },
--	{ PCI_DEVICE_ID_INTEL_IRONLAKE_M_HB, PCI_DEVICE_ID_INTEL_IRONLAKE_M_IG,
--	    "HD Graphics", NULL, &intel_i965_driver },
--	{ PCI_DEVICE_ID_INTEL_IRONLAKE_MA_HB, PCI_DEVICE_ID_INTEL_IRONLAKE_M_IG,
--	    "HD Graphics", NULL, &intel_i965_driver },
--	{ PCI_DEVICE_ID_INTEL_IRONLAKE_MC2_HB, PCI_DEVICE_ID_INTEL_IRONLAKE_M_IG,
--	    "HD Graphics", NULL, &intel_i965_driver },
--	{ PCI_DEVICE_ID_INTEL_SANDYBRIDGE_HB, PCI_DEVICE_ID_INTEL_SANDYBRIDGE_GT1_IG,
--	    "Sandybridge", NULL, &intel_gen6_driver },
--	{ PCI_DEVICE_ID_INTEL_SANDYBRIDGE_HB, PCI_DEVICE_ID_INTEL_SANDYBRIDGE_GT2_IG,
--	    "Sandybridge", NULL, &intel_gen6_driver },
--	{ PCI_DEVICE_ID_INTEL_SANDYBRIDGE_HB, PCI_DEVICE_ID_INTEL_SANDYBRIDGE_GT2_PLUS_IG,
--	    "Sandybridge", NULL, &intel_gen6_driver },
--	{ PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_HB, PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_GT1_IG,
--	    "Sandybridge", NULL, &intel_gen6_driver },
--	{ PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_HB, PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_GT2_IG,
--	    "Sandybridge", NULL, &intel_gen6_driver },
--	{ PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_HB, PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_GT2_PLUS_IG,
--	    "Sandybridge", NULL, &intel_gen6_driver },
--	{ PCI_DEVICE_ID_INTEL_SANDYBRIDGE_S_HB, PCI_DEVICE_ID_INTEL_SANDYBRIDGE_S_IG,
--	    "Sandybridge", NULL, &intel_gen6_driver },
--	{ 0, 0, NULL, NULL, NULL }
-+	{ PCI_DEVICE_ID_INTEL_82443LX_0, "440LX", &intel_generic_driver },
-+	{ PCI_DEVICE_ID_INTEL_82443BX_0, "440BX", &intel_generic_driver },
-+	{ PCI_DEVICE_ID_INTEL_82443GX_0, "440GX", &intel_generic_driver },
-+	{ PCI_DEVICE_ID_INTEL_82815_MC, "i815", &intel_815_driver },
-+	{ PCI_DEVICE_ID_INTEL_82820_HB, "i820", &intel_820_driver },
-+	{ PCI_DEVICE_ID_INTEL_82820_UP_HB, "i820", &intel_820_driver },
-+	{ PCI_DEVICE_ID_INTEL_82830_HB, "830M", &intel_830mp_driver },
-+	{ PCI_DEVICE_ID_INTEL_82840_HB, "i840", &intel_840_driver },
-+	{ PCI_DEVICE_ID_INTEL_82845_HB, "845G", &intel_845_driver },
-+	{ PCI_DEVICE_ID_INTEL_82845G_HB, "830M", &intel_845_driver },
-+	{ PCI_DEVICE_ID_INTEL_82850_HB, "i850", &intel_850_driver },
-+	{ PCI_DEVICE_ID_INTEL_82854_HB, "854", &intel_845_driver },
-+	{ PCI_DEVICE_ID_INTEL_82855PM_HB, "855PM", &intel_845_driver },
-+	{ PCI_DEVICE_ID_INTEL_82855GM_HB, "855GM", &intel_845_driver },
-+	{ PCI_DEVICE_ID_INTEL_82860_HB, "i860", &intel_860_driver },
-+	{ PCI_DEVICE_ID_INTEL_82865_HB, "865", &intel_845_driver },
-+	{ PCI_DEVICE_ID_INTEL_82875_HB, "i875", &intel_845_driver },
-+	{ PCI_DEVICE_ID_INTEL_7505_0, "E7505", &intel_7505_driver },
-+	{ PCI_DEVICE_ID_INTEL_7205_0, "E7205", &intel_7505_driver },
-+	{ 0, NULL, NULL }
- };
- 
--static int __devinit intel_gmch_probe(struct pci_dev *pdev,
--				      struct agp_bridge_data *bridge)
--{
--	int i, mask;
--
--	bridge->driver = NULL;
--
--	for (i = 0; intel_agp_chipsets[i].name != NULL; i++) {
--		if ((intel_agp_chipsets[i].gmch_chip_id != 0) &&
--			find_gmch(intel_agp_chipsets[i].gmch_chip_id)) {
--			bridge->driver =
--				intel_agp_chipsets[i].gmch_driver;
--			break;
--		}
--	}
--
--	if (!bridge->driver)
--		return 0;
--
--	bridge->dev_private_data = &intel_private;
--	bridge->dev = pdev;
--
--	dev_info(&pdev->dev, "Intel %s Chipset\n", intel_agp_chipsets[i].name);
--
--	if (bridge->driver->mask_memory == intel_gen6_mask_memory)
--		mask = 40;
--	else if (bridge->driver->mask_memory == intel_i965_mask_memory)
--		mask = 36;
--	else
--		mask = 32;
--
--	if (pci_set_dma_mask(intel_private.pcidev, DMA_BIT_MASK(mask)))
--		dev_err(&intel_private.pcidev->dev,
--			"set gfx device dma mask %d-bit failed!\n", mask);
--	else
--		pci_set_consistent_dma_mask(intel_private.pcidev,
--					    DMA_BIT_MASK(mask));
--
--	return 1;
--}
--
- static int __devinit agp_intel_probe(struct pci_dev *pdev,
- 				     const struct pci_device_id *ent)
- {
-@@ -905,7 +760,7 @@ static int __devinit agp_intel_probe(struct pci_dev *pdev,
- 		}
- 	}
- 
--	if (intel_agp_chipsets[i].name == NULL) {
-+	if (!bridge->driver) {
- 		if (cap_ptr)
- 			dev_warn(&pdev->dev, "unsupported Intel chipset [%04x/%04x]\n",
- 				 pdev->vendor, pdev->device);
-@@ -913,14 +768,6 @@ static int __devinit agp_intel_probe(struct pci_dev *pdev,
- 		return -ENODEV;
- 	}
- 
--	if (!bridge->driver) {
--		if (cap_ptr)
--			dev_warn(&pdev->dev, "can't find bridge device (chip_id: %04x)\n",
--			    	 intel_agp_chipsets[i].gmch_chip_id);
--		agp_put_bridge(bridge);
--		return -ENODEV;
--	}
--
- 	bridge->dev = pdev;
- 	bridge->dev_private_data = NULL;
- 
-@@ -972,8 +819,7 @@ static void __devexit agp_intel_remove(struct pci_dev *pdev)
- 
- 	agp_remove_bridge(bridge);
- 
--	if (intel_private.pcidev)
--		pci_dev_put(intel_private.pcidev);
-+	intel_gmch_remove(pdev);
- 
- 	agp_put_bridge(bridge);
- }
-@@ -1049,6 +895,7 @@ static struct pci_device_id agp_intel_pci_table[] = {
- 	ID(PCI_DEVICE_ID_INTEL_G45_HB),
- 	ID(PCI_DEVICE_ID_INTEL_G41_HB),
- 	ID(PCI_DEVICE_ID_INTEL_B43_HB),
-+	ID(PCI_DEVICE_ID_INTEL_B43_1_HB),
- 	ID(PCI_DEVICE_ID_INTEL_IRONLAKE_D_HB),
- 	ID(PCI_DEVICE_ID_INTEL_IRONLAKE_M_HB),
- 	ID(PCI_DEVICE_ID_INTEL_IRONLAKE_MA_HB),
-diff --git a/drivers/char/agp/intel-agp.h b/drivers/char/agp/intel-agp.h
-index d09b1ab..90539df 100644
---- a/drivers/char/agp/intel-agp.h
-+++ b/drivers/char/agp/intel-agp.h
-@@ -215,44 +215,7 @@
- #define PCI_DEVICE_ID_INTEL_SANDYBRIDGE_S_HB		0x0108  /* Server */
- #define PCI_DEVICE_ID_INTEL_SANDYBRIDGE_S_IG		0x010A
- 
--/* cover 915 and 945 variants */
--#define IS_I915 (agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_E7221_HB || \
--		 agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_82915G_HB || \
--		 agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_82915GM_HB || \
--		 agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_82945G_HB || \
--		 agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_82945GM_HB || \
--		 agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_82945GME_HB)
--
--#define IS_I965 (agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_82946GZ_HB || \
--		 agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_82G35_HB || \
--		 agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_82965Q_HB || \
--		 agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_82965G_HB || \
--		 agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_82965GM_HB || \
--		 agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_82965GME_HB)
--
--#define IS_G33 (agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_G33_HB || \
--		agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_Q35_HB || \
--		agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_Q33_HB || \
--		agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_PINEVIEW_M_HB || \
--		agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_PINEVIEW_HB)
--
--#define IS_PINEVIEW (agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_PINEVIEW_M_HB || \
--		agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_PINEVIEW_HB)
--
--#define IS_SNB (agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_SANDYBRIDGE_HB || \
--		agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_HB || \
--		agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_SANDYBRIDGE_S_HB)
--
--#define IS_G4X (agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_EAGLELAKE_HB || \
--		agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_Q45_HB || \
--		agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_G45_HB || \
--		agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_GM45_HB || \
--		agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_G41_HB || \
--		agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_B43_HB || \
--		agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_IRONLAKE_D_HB || \
--		agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_IRONLAKE_M_HB || \
--		agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_IRONLAKE_MA_HB || \
--		agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_IRONLAKE_MC2_HB || \
--		IS_SNB)
--
-+int intel_gmch_probe(struct pci_dev *pdev,
-+			       struct agp_bridge_data *bridge);
-+void intel_gmch_remove(struct pci_dev *pdev);
- #endif
-diff --git a/drivers/char/agp/intel-gtt.c b/drivers/char/agp/intel-gtt.c
-index 75e0a34..9272c38 100644
---- a/drivers/char/agp/intel-gtt.c
-+++ b/drivers/char/agp/intel-gtt.c
-@@ -15,6 +15,18 @@
-  * /fairy-tale-mode off
-  */
- 
-+#include <linux/module.h>
-+#include <linux/pci.h>
-+#include <linux/init.h>
-+#include <linux/kernel.h>
-+#include <linux/pagemap.h>
-+#include <linux/agp_backend.h>
-+#include <asm/smp.h>
-+#include "agp.h"
-+#include "intel-agp.h"
-+#include <linux/intel-gtt.h>
-+#include <drm/intel-gtt.h>
-+
- /*
-  * If we have Intel graphics, we're not going to have anything other than
-  * an Intel IOMMU. So make the correct use of the PCI DMA API contingent
-@@ -23,11 +35,12 @@
-  */
- #ifdef CONFIG_DMAR
- #define USE_PCI_DMA_API 1
-+#else
-+#define USE_PCI_DMA_API 0
- #endif
- 
- /* Max amount of stolen space, anything above will be returned to Linux */
- int intel_max_stolen = 32 * 1024 * 1024;
--EXPORT_SYMBOL(intel_max_stolen);
- 
- static const struct aper_size_info_fixed intel_i810_sizes[] =
- {
-@@ -55,32 +68,36 @@ static struct gatt_mask intel_i810_masks[] =
- #define INTEL_AGP_CACHED_MEMORY_LLC_MLC        3
- #define INTEL_AGP_CACHED_MEMORY_LLC_MLC_GFDT   4
- 
--static struct gatt_mask intel_gen6_masks[] =
--{
--	{.mask = I810_PTE_VALID | GEN6_PTE_UNCACHED,
--	 .type = INTEL_AGP_UNCACHED_MEMORY },
--	{.mask = I810_PTE_VALID | GEN6_PTE_LLC,
--         .type = INTEL_AGP_CACHED_MEMORY_LLC },
--	{.mask = I810_PTE_VALID | GEN6_PTE_LLC | GEN6_PTE_GFDT,
--         .type = INTEL_AGP_CACHED_MEMORY_LLC_GFDT },
--	{.mask = I810_PTE_VALID | GEN6_PTE_LLC_MLC,
--         .type = INTEL_AGP_CACHED_MEMORY_LLC_MLC },
--	{.mask = I810_PTE_VALID | GEN6_PTE_LLC_MLC | GEN6_PTE_GFDT,
--         .type = INTEL_AGP_CACHED_MEMORY_LLC_MLC_GFDT },
-+struct intel_gtt_driver {
-+	unsigned int gen : 8;
-+	unsigned int is_g33 : 1;
-+	unsigned int is_pineview : 1;
-+	unsigned int is_ironlake : 1;
-+	unsigned int dma_mask_size : 8;
-+	/* Chipset specific GTT setup */
-+	int (*setup)(void);
-+	/* This should undo anything done in ->setup() save the unmapping
-+	 * of the mmio register file, that's done in the generic code. */
-+	void (*cleanup)(void);
-+	void (*write_entry)(dma_addr_t addr, unsigned int entry, unsigned int flags);
-+	/* Flags is a more or less chipset specific opaque value.
-+	 * For chipsets that need to support old ums (non-gem) code, this
-+	 * needs to be identical to the various supported agp memory types! */
-+	bool (*check_flags)(unsigned int flags);
-+	void (*chipset_flush)(void);
- };
- 
- static struct _intel_private {
-+	struct intel_gtt base;
-+	const struct intel_gtt_driver *driver;
- 	struct pci_dev *pcidev;	/* device one */
-+	struct pci_dev *bridge_dev;
- 	u8 __iomem *registers;
-+	phys_addr_t gtt_bus_addr;
-+	phys_addr_t gma_bus_addr;
-+	phys_addr_t pte_bus_addr;
- 	u32 __iomem *gtt;		/* I915G */
- 	int num_dcache_entries;
--	/* gtt_entries is the number of gtt entries that are already mapped
--	 * to stolen memory.  Stolen memory is larger than the memory mapped
--	 * through gtt_entries, as it includes some reserved space for the BIOS
--	 * popup and for the GTT.
--	 */
--	int gtt_entries;			/* i830+ */
--	int gtt_total_size;
- 	union {
- 		void __iomem *i9xx_flush_page;
- 		void *i8xx_flush_page;
-@@ -88,23 +105,14 @@ static struct _intel_private {
- 	struct page *i8xx_page;
- 	struct resource ifp_resource;
- 	int resource_valid;
-+	struct page *scratch_page;
-+	dma_addr_t scratch_page_dma;
- } intel_private;
- 
--#ifdef USE_PCI_DMA_API
--static int intel_agp_map_page(struct page *page, dma_addr_t *ret)
--{
--	*ret = pci_map_page(intel_private.pcidev, page, 0,
--			    PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
--	if (pci_dma_mapping_error(intel_private.pcidev, *ret))
--		return -EINVAL;
--	return 0;
--}
--
--static void intel_agp_unmap_page(struct page *page, dma_addr_t dma)
--{
--	pci_unmap_page(intel_private.pcidev, dma,
--		       PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
--}
-+#define INTEL_GTT_GEN	intel_private.driver->gen
-+#define IS_G33		intel_private.driver->is_g33
-+#define IS_PINEVIEW	intel_private.driver->is_pineview
-+#define IS_IRONLAKE	intel_private.driver->is_ironlake
- 
- static void intel_agp_free_sglist(struct agp_memory *mem)
- {
-@@ -125,6 +133,9 @@ static int intel_agp_map_memory(struct agp_memory *mem)
- 	struct scatterlist *sg;
- 	int i;
- 
-+	if (mem->sg_list)
-+		return 0; /* already mapped (for e.g. resume */
-+
- 	DBG("try mapping %lu pages\n", (unsigned long)mem->page_count);
- 
- 	if (sg_alloc_table(&st, mem->page_count, GFP_KERNEL))
-@@ -156,70 +167,17 @@ static void intel_agp_unmap_memory(struct agp_memory *mem)
- 	intel_agp_free_sglist(mem);
- }
- 
--static void intel_agp_insert_sg_entries(struct agp_memory *mem,
--					off_t pg_start, int mask_type)
--{
--	struct scatterlist *sg;
--	int i, j;
--
--	j = pg_start;
--
--	WARN_ON(!mem->num_sg);
--
--	if (mem->num_sg == mem->page_count) {
--		for_each_sg(mem->sg_list, sg, mem->page_count, i) {
--			writel(agp_bridge->driver->mask_memory(agp_bridge,
--					sg_dma_address(sg), mask_type),
--					intel_private.gtt+j);
--			j++;
--		}
--	} else {
--		/* sg may merge pages, but we have to separate
--		 * per-page addr for GTT */
--		unsigned int len, m;
--
--		for_each_sg(mem->sg_list, sg, mem->num_sg, i) {
--			len = sg_dma_len(sg) / PAGE_SIZE;
--			for (m = 0; m < len; m++) {
--				writel(agp_bridge->driver->mask_memory(agp_bridge,
--								       sg_dma_address(sg) + m * PAGE_SIZE,
--								       mask_type),
--				       intel_private.gtt+j);
--				j++;
--			}
--		}
--	}
--	readl(intel_private.gtt+j-1);
--}
--
--#else
--
--static void intel_agp_insert_sg_entries(struct agp_memory *mem,
--					off_t pg_start, int mask_type)
--{
--	int i, j;
--
--	for (i = 0, j = pg_start; i < mem->page_count; i++, j++) {
--		writel(agp_bridge->driver->mask_memory(agp_bridge,
--				page_to_phys(mem->pages[i]), mask_type),
--		       intel_private.gtt+j);
--	}
--
--	readl(intel_private.gtt+j-1);
--}
--
--#endif
--
- static int intel_i810_fetch_size(void)
- {
- 	u32 smram_miscc;
- 	struct aper_size_info_fixed *values;
- 
--	pci_read_config_dword(agp_bridge->dev, I810_SMRAM_MISCC, &smram_miscc);
-+	pci_read_config_dword(intel_private.bridge_dev,
-+			      I810_SMRAM_MISCC, &smram_miscc);
- 	values = A_SIZE_FIX(agp_bridge->driver->aperture_sizes);
- 
- 	if ((smram_miscc & I810_GMS) == I810_GMS_DISABLE) {
--		dev_warn(&agp_bridge->dev->dev, "i810 is disabled\n");
-+		dev_warn(&intel_private.bridge_dev->dev, "i810 is disabled\n");
- 		return 0;
- 	}
- 	if ((smram_miscc & I810_GFX_MEM_WIN_SIZE) == I810_GFX_MEM_WIN_32M) {
-@@ -284,7 +242,7 @@ static void intel_i810_cleanup(void)
- 	iounmap(intel_private.registers);
- }
- 
--static void intel_i810_agp_enable(struct agp_bridge_data *bridge, u32 mode)
-+static void intel_fake_agp_enable(struct agp_bridge_data *bridge, u32 mode)
- {
- 	return;
- }
-@@ -319,34 +277,6 @@ static void i8xx_destroy_pages(struct page *page)
- 	atomic_dec(&agp_bridge->current_memory_agp);
- }
- 
--static int intel_i830_type_to_mask_type(struct agp_bridge_data *bridge,
--					int type)
--{
--	if (type < AGP_USER_TYPES)
--		return type;
--	else if (type == AGP_USER_CACHED_MEMORY)
--		return INTEL_AGP_CACHED_MEMORY;
--	else
--		return 0;
--}
--
--static int intel_gen6_type_to_mask_type(struct agp_bridge_data *bridge,
--					int type)
--{
--	unsigned int type_mask = type & ~AGP_USER_CACHED_MEMORY_GFDT;
--	unsigned int gfdt = type & AGP_USER_CACHED_MEMORY_GFDT;
--
--	if (type_mask == AGP_USER_UNCACHED_MEMORY)
--		return INTEL_AGP_UNCACHED_MEMORY;
--	else if (type_mask == AGP_USER_CACHED_MEMORY_LLC_MLC)
--		return gfdt ? INTEL_AGP_CACHED_MEMORY_LLC_MLC_GFDT :
--			      INTEL_AGP_CACHED_MEMORY_LLC_MLC;
--	else /* set 'normal'/'cached' to LLC by default */
--		return gfdt ? INTEL_AGP_CACHED_MEMORY_LLC_GFDT :
--			      INTEL_AGP_CACHED_MEMORY_LLC;
--}
--
--
- static int intel_i810_insert_entries(struct agp_memory *mem, off_t pg_start,
- 				int type)
- {
-@@ -514,8 +444,33 @@ static unsigned long intel_i810_mask_memory(struct agp_bridge_data *bridge,
- 	return addr | bridge->driver->masks[type].mask;
- }
- 
--static struct aper_size_info_fixed intel_i830_sizes[] =
-+static int intel_gtt_setup_scratch_page(void)
- {
-+	struct page *page;
-+	dma_addr_t dma_addr;
-+
-+	page = alloc_page(GFP_KERNEL | GFP_DMA32 | __GFP_ZERO);
-+	if (page == NULL)
-+		return -ENOMEM;
-+	get_page(page);
-+	set_pages_uc(page, 1);
-+
-+	if (USE_PCI_DMA_API && INTEL_GTT_GEN > 2) {
-+		dma_addr = pci_map_page(intel_private.pcidev, page, 0,
-+				    PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
-+		if (pci_dma_mapping_error(intel_private.pcidev, dma_addr))
-+			return -EINVAL;
-+
-+		intel_private.scratch_page_dma = dma_addr;
-+	} else
-+		intel_private.scratch_page_dma = page_to_phys(page);
-+
-+	intel_private.scratch_page = page;
-+
-+	return 0;
-+}
-+
-+static const struct aper_size_info_fixed const intel_fake_agp_sizes[] = {
- 	{128, 32768, 5},
- 	/* The 64M mode still requires a 128k gatt */
- 	{64, 16384, 5},
-@@ -523,102 +478,49 @@ static struct aper_size_info_fixed intel_i830_sizes[] =
- 	{512, 131072, 7},
- };
- 
--static void intel_i830_init_gtt_entries(void)
-+static unsigned int intel_gtt_stolen_entries(void)
- {
- 	u16 gmch_ctrl;
--	int gtt_entries = 0;
- 	u8 rdct;
- 	int local = 0;
- 	static const int ddt[4] = { 0, 16, 32, 64 };
--	int size; /* reserved space (in kb) at the top of stolen memory */
-+	unsigned int overhead_entries, stolen_entries;
-+	unsigned int stolen_size = 0;
- 
--	pci_read_config_word(agp_bridge->dev, I830_GMCH_CTRL, &gmch_ctrl);
-+	pci_read_config_word(intel_private.bridge_dev,
-+			     I830_GMCH_CTRL, &gmch_ctrl);
- 
--	if (IS_I965) {
--		u32 pgetbl_ctl;
--		pgetbl_ctl = readl(intel_private.registers+I810_PGETBL_CTL);
-+	if (INTEL_GTT_GEN > 4 || IS_PINEVIEW)
-+		overhead_entries = 0;
-+	else
-+		overhead_entries = intel_private.base.gtt_mappable_entries
-+			/ 1024;
- 
--		/* The 965 has a field telling us the size of the GTT,
--		 * which may be larger than what is necessary to map the
--		 * aperture.
--		 */
--		switch (pgetbl_ctl & I965_PGETBL_SIZE_MASK) {
--		case I965_PGETBL_SIZE_128KB:
--			size = 128;
--			break;
--		case I965_PGETBL_SIZE_256KB:
--			size = 256;
--			break;
--		case I965_PGETBL_SIZE_512KB:
--			size = 512;
--			break;
--		case I965_PGETBL_SIZE_1MB:
--			size = 1024;
--			break;
--		case I965_PGETBL_SIZE_2MB:
--			size = 2048;
--			break;
--		case I965_PGETBL_SIZE_1_5MB:
--			size = 1024 + 512;
--			break;
--		default:
--			dev_info(&intel_private.pcidev->dev,
--				 "unknown page table size, assuming 512KB\n");
--			size = 512;
--		}
--		size += 4; /* add in BIOS popup space */
--	} else if (IS_G33 && !IS_PINEVIEW) {
--	/* G33's GTT size defined in gmch_ctrl */
--		switch (gmch_ctrl & G33_PGETBL_SIZE_MASK) {
--		case G33_PGETBL_SIZE_1M:
--			size = 1024;
--			break;
--		case G33_PGETBL_SIZE_2M:
--			size = 2048;
--			break;
--		default:
--			dev_info(&agp_bridge->dev->dev,
--				 "unknown page table size 0x%x, assuming 512KB\n",
--				(gmch_ctrl & G33_PGETBL_SIZE_MASK));
--			size = 512;
--		}
--		size += 4;
--	} else if (IS_G4X || IS_PINEVIEW) {
--		/* On 4 series hardware, GTT stolen is separate from graphics
--		 * stolen, ignore it in stolen gtt entries counting.  However,
--		 * 4KB of the stolen memory doesn't get mapped to the GTT.
--		 */
--		size = 4;
--	} else {
--		/* On previous hardware, the GTT size was just what was
--		 * required to map the aperture.
--		 */
--		size = agp_bridge->driver->fetch_size() + 4;
--	}
-+	overhead_entries += 1; /* BIOS popup */
- 
--	if (agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_82830_HB ||
--	    agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_82845G_HB) {
-+	if (intel_private.bridge_dev->device == PCI_DEVICE_ID_INTEL_82830_HB ||
-+	    intel_private.bridge_dev->device == PCI_DEVICE_ID_INTEL_82845G_HB) {
- 		switch (gmch_ctrl & I830_GMCH_GMS_MASK) {
- 		case I830_GMCH_GMS_STOLEN_512:
--			gtt_entries = KB(512) - KB(size);
-+			stolen_size = KB(512);
- 			break;
- 		case I830_GMCH_GMS_STOLEN_1024:
--			gtt_entries = MB(1) - KB(size);
-+			stolen_size = MB(1);
- 			break;
- 		case I830_GMCH_GMS_STOLEN_8192:
--			gtt_entries = MB(8) - KB(size);
-+			stolen_size = MB(8);
- 			break;
- 		case I830_GMCH_GMS_LOCAL:
- 			rdct = readb(intel_private.registers+I830_RDRAM_CHANNEL_TYPE);
--			gtt_entries = (I830_RDRAM_ND(rdct) + 1) *
-+			stolen_size = (I830_RDRAM_ND(rdct) + 1) *
- 					MB(ddt[I830_RDRAM_DDT(rdct)]);
- 			local = 1;
- 			break;
- 		default:
--			gtt_entries = 0;
-+			stolen_size = 0;
- 			break;
- 		}
--	} else if (IS_SNB) {
-+	} else if (INTEL_GTT_GEN == 6) {
- 		/*
- 		 * SandyBridge has new memory control reg at 0x50.w
- 		 */
-@@ -626,149 +528,292 @@ static void intel_i830_init_gtt_entries(void)
- 		pci_read_config_word(intel_private.pcidev, SNB_GMCH_CTRL, &snb_gmch_ctl);
- 		switch (snb_gmch_ctl & SNB_GMCH_GMS_STOLEN_MASK) {
- 		case SNB_GMCH_GMS_STOLEN_32M:
--			gtt_entries = MB(32) - KB(size);
-+			stolen_size = MB(32);
- 			break;
- 		case SNB_GMCH_GMS_STOLEN_64M:
--			gtt_entries = MB(64) - KB(size);
-+			stolen_size = MB(64);
- 			break;
- 		case SNB_GMCH_GMS_STOLEN_96M:
--			gtt_entries = MB(96) - KB(size);
-+			stolen_size = MB(96);
- 			break;
- 		case SNB_GMCH_GMS_STOLEN_128M:
--			gtt_entries = MB(128) - KB(size);
-+			stolen_size = MB(128);
- 			break;
- 		case SNB_GMCH_GMS_STOLEN_160M:
--			gtt_entries = MB(160) - KB(size);
-+			stolen_size = MB(160);
- 			break;
- 		case SNB_GMCH_GMS_STOLEN_192M:
--			gtt_entries = MB(192) - KB(size);
-+			stolen_size = MB(192);
- 			break;
- 		case SNB_GMCH_GMS_STOLEN_224M:
--			gtt_entries = MB(224) - KB(size);
-+			stolen_size = MB(224);
- 			break;
- 		case SNB_GMCH_GMS_STOLEN_256M:
--			gtt_entries = MB(256) - KB(size);
-+			stolen_size = MB(256);
- 			break;
- 		case SNB_GMCH_GMS_STOLEN_288M:
--			gtt_entries = MB(288) - KB(size);
-+			stolen_size = MB(288);
- 			break;
- 		case SNB_GMCH_GMS_STOLEN_320M:
--			gtt_entries = MB(320) - KB(size);
-+			stolen_size = MB(320);
- 			break;
- 		case SNB_GMCH_GMS_STOLEN_352M:
--			gtt_entries = MB(352) - KB(size);
-+			stolen_size = MB(352);
- 			break;
- 		case SNB_GMCH_GMS_STOLEN_384M:
--			gtt_entries = MB(384) - KB(size);
-+			stolen_size = MB(384);
- 			break;
- 		case SNB_GMCH_GMS_STOLEN_416M:
--			gtt_entries = MB(416) - KB(size);
-+			stolen_size = MB(416);
- 			break;
- 		case SNB_GMCH_GMS_STOLEN_448M:
--			gtt_entries = MB(448) - KB(size);
-+			stolen_size = MB(448);
- 			break;
- 		case SNB_GMCH_GMS_STOLEN_480M:
--			gtt_entries = MB(480) - KB(size);
-+			stolen_size = MB(480);
- 			break;
- 		case SNB_GMCH_GMS_STOLEN_512M:
--			gtt_entries = MB(512) - KB(size);
-+			stolen_size = MB(512);
- 			break;
- 		}
- 	} else {
- 		switch (gmch_ctrl & I855_GMCH_GMS_MASK) {
- 		case I855_GMCH_GMS_STOLEN_1M:
--			gtt_entries = MB(1) - KB(size);
-+			stolen_size = MB(1);
- 			break;
- 		case I855_GMCH_GMS_STOLEN_4M:
--			gtt_entries = MB(4) - KB(size);
-+			stolen_size = MB(4);
- 			break;
- 		case I855_GMCH_GMS_STOLEN_8M:
--			gtt_entries = MB(8) - KB(size);
-+			stolen_size = MB(8);
- 			break;
- 		case I855_GMCH_GMS_STOLEN_16M:
--			gtt_entries = MB(16) - KB(size);
-+			stolen_size = MB(16);
- 			break;
- 		case I855_GMCH_GMS_STOLEN_32M:
--			gtt_entries = MB(32) - KB(size);
-+			stolen_size = MB(32);
- 			break;
- 		case I915_GMCH_GMS_STOLEN_48M:
--			/* Check it's really I915G */
--			if (IS_I915 || IS_I965 || IS_G33 || IS_G4X)
--				gtt_entries = MB(48) - KB(size);
--			else
--				gtt_entries = 0;
-+			stolen_size = MB(48);
- 			break;
- 		case I915_GMCH_GMS_STOLEN_64M:
--			/* Check it's really I915G */
--			if (IS_I915 || IS_I965 || IS_G33 || IS_G4X)
--				gtt_entries = MB(64) - KB(size);
--			else
--				gtt_entries = 0;
-+			stolen_size = MB(64);
- 			break;
- 		case G33_GMCH_GMS_STOLEN_128M:
--			if (IS_G33 || IS_I965 || IS_G4X)
--				gtt_entries = MB(128) - KB(size);
--			else
--				gtt_entries = 0;
-+			stolen_size = MB(128);
- 			break;
- 		case G33_GMCH_GMS_STOLEN_256M:
--			if (IS_G33 || IS_I965 || IS_G4X)
--				gtt_entries = MB(256) - KB(size);
--			else
--				gtt_entries = 0;
-+			stolen_size = MB(256);
- 			break;
- 		case INTEL_GMCH_GMS_STOLEN_96M:
--			if (IS_I965 || IS_G4X)
--				gtt_entries = MB(96) - KB(size);
--			else
--				gtt_entries = 0;
-+			stolen_size = MB(96);
- 			break;
- 		case INTEL_GMCH_GMS_STOLEN_160M:
--			if (IS_I965 || IS_G4X)
--				gtt_entries = MB(160) - KB(size);
--			else
--				gtt_entries = 0;
-+			stolen_size = MB(160);
- 			break;
- 		case INTEL_GMCH_GMS_STOLEN_224M:
--			if (IS_I965 || IS_G4X)
--				gtt_entries = MB(224) - KB(size);
--			else
--				gtt_entries = 0;
-+			stolen_size = MB(224);
- 			break;
- 		case INTEL_GMCH_GMS_STOLEN_352M:
--			if (IS_I965 || IS_G4X)
--				gtt_entries = MB(352) - KB(size);
--			else
--				gtt_entries = 0;
-+			stolen_size = MB(352);
- 			break;
- 		default:
--			gtt_entries = 0;
-+			stolen_size = 0;
- 			break;
- 		}
- 	}
--	if (!local && gtt_entries > intel_max_stolen) {
--		dev_info(&agp_bridge->dev->dev,
-+
-+	if (!local && stolen_size > intel_max_stolen) {
-+		dev_info(&intel_private.bridge_dev->dev,
- 			 "detected %dK stolen memory, trimming to %dK\n",
--			 gtt_entries / KB(1), intel_max_stolen / KB(1));
--		gtt_entries = intel_max_stolen / KB(4);
--	} else if (gtt_entries > 0) {
--		dev_info(&agp_bridge->dev->dev, "detected %dK %s memory\n",
--		       gtt_entries / KB(1), local ? "local" : "stolen");
--		gtt_entries /= KB(4);
-+			 stolen_size / KB(1), intel_max_stolen / KB(1));
-+		stolen_size = intel_max_stolen;
-+	} else if (stolen_size > 0) {
-+		dev_info(&intel_private.bridge_dev->dev, "detected %dK %s memory\n",
-+		       stolen_size / KB(1), local ? "local" : "stolen");
- 	} else {
--		dev_info(&agp_bridge->dev->dev,
-+		dev_info(&intel_private.bridge_dev->dev,
- 		       "no pre-allocated video memory detected\n");
--		gtt_entries = 0;
-+		stolen_size = 0;
-+	}
-+
-+	stolen_entries = stolen_size/KB(4) - overhead_entries;
-+
-+	return stolen_entries;
-+}
-+
-+static unsigned int intel_gtt_total_entries(void)
-+{
-+	int size;
-+
-+	if (IS_G33 || INTEL_GTT_GEN == 4 || INTEL_GTT_GEN == 5) {
-+		u32 pgetbl_ctl;
-+		pgetbl_ctl = readl(intel_private.registers+I810_PGETBL_CTL);
-+
-+		switch (pgetbl_ctl & I965_PGETBL_SIZE_MASK) {
-+		case I965_PGETBL_SIZE_128KB:
-+			size = KB(128);
-+			break;
-+		case I965_PGETBL_SIZE_256KB:
-+			size = KB(256);
-+			break;
-+		case I965_PGETBL_SIZE_512KB:
-+			size = KB(512);
-+			break;
-+		case I965_PGETBL_SIZE_1MB:
-+			size = KB(1024);
-+			break;
-+		case I965_PGETBL_SIZE_2MB:
-+			size = KB(2048);
-+			break;
-+		case I965_PGETBL_SIZE_1_5MB:
-+			size = KB(1024 + 512);
-+			break;
-+		default:
-+			dev_info(&intel_private.pcidev->dev,
-+				 "unknown page table size, assuming 512KB\n");
-+			size = KB(512);
-+		}
-+
-+		return size/4;
-+	} else if (INTEL_GTT_GEN == 6) {
-+		u16 snb_gmch_ctl;
-+
-+		pci_read_config_word(intel_private.pcidev, SNB_GMCH_CTRL, &snb_gmch_ctl);
-+		switch (snb_gmch_ctl & SNB_GTT_SIZE_MASK) {
-+		default:
-+		case SNB_GTT_SIZE_0M:
-+			printk(KERN_ERR "Bad GTT size mask: 0x%04x.\n", snb_gmch_ctl);
-+			size = MB(0);
-+			break;
-+		case SNB_GTT_SIZE_1M:
-+			size = MB(1);
-+			break;
-+		case SNB_GTT_SIZE_2M:
-+			size = MB(2);
-+			break;
-+		}
-+		return size/4;
-+	} else {
-+		/* On previous hardware, the GTT size was just what was
-+		 * required to map the aperture.
-+		 */
-+		return intel_private.base.gtt_mappable_entries;
-+	}
-+}
-+
-+static unsigned int intel_gtt_mappable_entries(void)
-+{
-+	unsigned int aperture_size;
-+
-+	if (INTEL_GTT_GEN == 2) {
-+		u16 gmch_ctrl;
-+
-+		pci_read_config_word(intel_private.bridge_dev,
-+				     I830_GMCH_CTRL, &gmch_ctrl);
-+
-+		if ((gmch_ctrl & I830_GMCH_MEM_MASK) == I830_GMCH_MEM_64M)
-+			aperture_size = MB(64);
-+		else
-+			aperture_size = MB(128);
-+	} else {
-+		/* 9xx supports large sizes, just look at the length */
-+		aperture_size = pci_resource_len(intel_private.pcidev, 2);
-+	}
-+
-+	return aperture_size >> PAGE_SHIFT;
-+}
-+
-+static void intel_gtt_teardown_scratch_page(void)
-+{
-+	set_pages_wb(intel_private.scratch_page, 1);
-+	pci_unmap_page(intel_private.pcidev, intel_private.scratch_page_dma,
-+		       PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
-+	put_page(intel_private.scratch_page);
-+	__free_page(intel_private.scratch_page);
-+}
-+
-+static void intel_gtt_cleanup(void)
-+{
-+	intel_private.driver->cleanup();
-+
-+	iounmap(intel_private.gtt);
-+	iounmap(intel_private.registers);
-+	
-+	intel_gtt_teardown_scratch_page();
-+}
-+
-+static int intel_gtt_init(void)
-+{
-+	u32 gtt_map_size;
-+	int ret;
-+
-+	ret = intel_private.driver->setup();
-+	if (ret != 0)
-+		return ret;
-+
-+	intel_private.base.gtt_mappable_entries = intel_gtt_mappable_entries();
-+	intel_private.base.gtt_total_entries = intel_gtt_total_entries();
-+
-+	dev_info(&intel_private.bridge_dev->dev,
-+			"detected gtt size: %dK total, %dK mappable\n",
-+			intel_private.base.gtt_total_entries * 4,
-+			intel_private.base.gtt_mappable_entries * 4);
-+
-+	gtt_map_size = intel_private.base.gtt_total_entries * 4;
-+
-+	intel_private.gtt = ioremap(intel_private.gtt_bus_addr,
-+				    gtt_map_size);
-+	if (!intel_private.gtt) {
-+		intel_private.driver->cleanup();
-+		iounmap(intel_private.registers);
-+		return -ENOMEM;
-+	}
-+
-+	global_cache_flush();   /* FIXME: ? */
-+
-+	/* we have to call this as early as possible after the MMIO base address is known */
-+	intel_private.base.gtt_stolen_entries = intel_gtt_stolen_entries();
-+	if (intel_private.base.gtt_stolen_entries == 0) {
-+		intel_private.driver->cleanup();
-+		iounmap(intel_private.registers);
-+		iounmap(intel_private.gtt);
-+		return -ENOMEM;
-+	}
-+
-+	ret = intel_gtt_setup_scratch_page();
-+	if (ret != 0) {
-+		intel_gtt_cleanup();
-+		return ret;
-+	}
-+
-+	return 0;
-+}
-+
-+static int intel_fake_agp_fetch_size(void)
-+{
-+	int num_sizes = ARRAY_SIZE(intel_fake_agp_sizes);
-+	unsigned int aper_size;
-+	int i;
-+
-+	aper_size = (intel_private.base.gtt_mappable_entries << PAGE_SHIFT)
-+		    / MB(1);
-+
-+	for (i = 0; i < num_sizes; i++) {
-+		if (aper_size == intel_fake_agp_sizes[i].size) {
-+			agp_bridge->current_size =
-+				(void *) (intel_fake_agp_sizes + i);
-+			return aper_size;
-+		}
- 	}
- 
--	intel_private.gtt_entries = gtt_entries;
-+	return 0;
- }
- 
--static void intel_i830_fini_flush(void)
-+static void i830_cleanup(void)
- {
- 	kunmap(intel_private.i8xx_page);
- 	intel_private.i8xx_flush_page = NULL;
--	unmap_page_from_agp(intel_private.i8xx_page);
- 
- 	__free_page(intel_private.i8xx_page);
- 	intel_private.i8xx_page = NULL;
-@@ -780,13 +825,13 @@ static void intel_i830_setup_flush(void)
- 	if (intel_private.i8xx_page)
- 		return;
- 
--	intel_private.i8xx_page = alloc_page(GFP_KERNEL | __GFP_ZERO | GFP_DMA32);
-+	intel_private.i8xx_page = alloc_page(GFP_KERNEL);
- 	if (!intel_private.i8xx_page)
- 		return;
- 
- 	intel_private.i8xx_flush_page = kmap(intel_private.i8xx_page);
- 	if (!intel_private.i8xx_flush_page)
--		intel_i830_fini_flush();
-+		i830_cleanup();
- }
- 
- /* The chipset_flush interface needs to get data that has already been
-@@ -799,7 +844,7 @@ static void intel_i830_setup_flush(void)
-  * that buffer out, we just fill 1KB and clflush it out, on the assumption
-  * that it'll push whatever was in there out.  It appears to work.
-  */
--static void intel_i830_chipset_flush(struct agp_bridge_data *bridge)
-+static void i830_chipset_flush(void)
- {
- 	unsigned int *pg = intel_private.i8xx_flush_page;
- 
-@@ -811,169 +856,184 @@ static void intel_i830_chipset_flush(struct agp_bridge_data *bridge)
- 		printk(KERN_ERR "Timed out waiting for cache flush.\n");
- }
- 
--/* The intel i830 automatically initializes the agp aperture during POST.
-- * Use the memory already set aside for in the GTT.
-- */
--static int intel_i830_create_gatt_table(struct agp_bridge_data *bridge)
-+static void i830_write_entry(dma_addr_t addr, unsigned int entry,
-+			     unsigned int flags)
- {
--	int page_order;
--	struct aper_size_info_fixed *size;
--	int num_entries;
--	u32 temp;
-+	u32 pte_flags = I810_PTE_VALID;
-+	
-+	switch (flags) {
-+	case AGP_DCACHE_MEMORY:
-+		pte_flags |= I810_PTE_LOCAL;
-+		break;
-+	case AGP_USER_CACHED_MEMORY:
-+		pte_flags |= I830_PTE_SYSTEM_CACHED;
-+		break;
-+	}
- 
--	size = agp_bridge->current_size;
--	page_order = size->page_order;
--	num_entries = size->num_entries;
--	agp_bridge->gatt_table_real = NULL;
-+	writel(addr | pte_flags, intel_private.gtt + entry);
-+}
- 
--	pci_read_config_dword(intel_private.pcidev, I810_MMADDR, &temp);
--	temp &= 0xfff80000;
-+static void intel_enable_gtt(void)
-+{
-+	u32 gma_addr;
-+	u16 gmch_ctrl;
- 
--	intel_private.registers = ioremap(temp, 128 * 4096);
--	if (!intel_private.registers)
--		return -ENOMEM;
-+	if (INTEL_GTT_GEN == 2)
-+		pci_read_config_dword(intel_private.pcidev, I810_GMADDR,
-+				      &gma_addr);
-+	else
-+		pci_read_config_dword(intel_private.pcidev, I915_GMADDR,
-+				      &gma_addr);
- 
--	temp = readl(intel_private.registers+I810_PGETBL_CTL) & 0xfffff000;
--	global_cache_flush();	/* FIXME: ?? */
-+	intel_private.gma_bus_addr = (gma_addr & PCI_BASE_ADDRESS_MEM_MASK);
- 
--	/* we have to call this as early as possible after the MMIO base address is known */
--	intel_i830_init_gtt_entries();
--	if (intel_private.gtt_entries == 0) {
--		iounmap(intel_private.registers);
-+	pci_read_config_word(intel_private.bridge_dev, I830_GMCH_CTRL, &gmch_ctrl);
-+	gmch_ctrl |= I830_GMCH_ENABLED;
-+	pci_write_config_word(intel_private.bridge_dev, I830_GMCH_CTRL, gmch_ctrl);
-+
-+	writel(intel_private.pte_bus_addr|I810_PGETBL_ENABLED,
-+	       intel_private.registers+I810_PGETBL_CTL);
-+	readl(intel_private.registers+I810_PGETBL_CTL);	/* PCI Posting. */
-+}
-+
-+static int i830_setup(void)
-+{
-+	u32 reg_addr;
-+
-+	pci_read_config_dword(intel_private.pcidev, I810_MMADDR, &reg_addr);
-+	reg_addr &= 0xfff80000;
-+
-+	intel_private.registers = ioremap(reg_addr, KB(64));
-+	if (!intel_private.registers)
- 		return -ENOMEM;
--	}
- 
--	agp_bridge->gatt_table = NULL;
-+	intel_private.gtt_bus_addr = reg_addr + I810_PTE_BASE;
-+	intel_private.pte_bus_addr =
-+		readl(intel_private.registers+I810_PGETBL_CTL) & 0xfffff000;
- 
--	agp_bridge->gatt_bus_addr = temp;
-+	intel_i830_setup_flush();
- 
- 	return 0;
- }
- 
--/* Return the gatt table to a sane state. Use the top of stolen
-- * memory for the GTT.
-- */
--static int intel_i830_free_gatt_table(struct agp_bridge_data *bridge)
-+static int intel_fake_agp_create_gatt_table(struct agp_bridge_data *bridge)
- {
-+	agp_bridge->gatt_table_real = NULL;
-+	agp_bridge->gatt_table = NULL;
-+	agp_bridge->gatt_bus_addr = 0;
-+
- 	return 0;
- }
- 
--static int intel_i830_fetch_size(void)
-+static int intel_fake_agp_free_gatt_table(struct agp_bridge_data *bridge)
- {
--	u16 gmch_ctrl;
--	struct aper_size_info_fixed *values;
-+	return 0;
-+}
- 
--	values = A_SIZE_FIX(agp_bridge->driver->aperture_sizes);
-+static int intel_fake_agp_configure(void)
-+{
-+	int i;
- 
--	if (agp_bridge->dev->device != PCI_DEVICE_ID_INTEL_82830_HB &&
--	    agp_bridge->dev->device != PCI_DEVICE_ID_INTEL_82845G_HB) {
--		/* 855GM/852GM/865G has 128MB aperture size */
--		agp_bridge->current_size = (void *) values;
--		agp_bridge->aperture_size_idx = 0;
--		return values[0].size;
--	}
-+	intel_enable_gtt();
- 
--	pci_read_config_word(agp_bridge->dev, I830_GMCH_CTRL, &gmch_ctrl);
-+	agp_bridge->gart_bus_addr = intel_private.gma_bus_addr;
- 
--	if ((gmch_ctrl & I830_GMCH_MEM_MASK) == I830_GMCH_MEM_128M) {
--		agp_bridge->current_size = (void *) values;
--		agp_bridge->aperture_size_idx = 0;
--		return values[0].size;
--	} else {
--		agp_bridge->current_size = (void *) (values + 1);
--		agp_bridge->aperture_size_idx = 1;
--		return values[1].size;
-+	for (i = intel_private.base.gtt_stolen_entries;
-+			i < intel_private.base.gtt_total_entries; i++) {
-+		intel_private.driver->write_entry(intel_private.scratch_page_dma,
-+						  i, 0);
- 	}
-+	readl(intel_private.gtt+i-1);	/* PCI Posting. */
-+
-+	global_cache_flush();
- 
- 	return 0;
- }
- 
--static int intel_i830_configure(void)
-+static bool i830_check_flags(unsigned int flags)
- {
--	struct aper_size_info_fixed *current_size;
--	u32 temp;
--	u16 gmch_ctrl;
--	int i;
-+	switch (flags) {
-+	case 0:
-+	case AGP_PHYS_MEMORY:
-+	case AGP_USER_CACHED_MEMORY:
-+	case AGP_USER_MEMORY:
-+		return true;
-+	}
- 
--	current_size = A_SIZE_FIX(agp_bridge->current_size);
-+	return false;
-+}
- 
--	pci_read_config_dword(intel_private.pcidev, I810_GMADDR, &temp);
--	agp_bridge->gart_bus_addr = (temp & PCI_BASE_ADDRESS_MEM_MASK);
-+static void intel_gtt_insert_sg_entries(struct scatterlist *sg_list,
-+					unsigned int sg_len,
-+					unsigned int pg_start,
-+					unsigned int flags)
-+{
-+	struct scatterlist *sg;
-+	unsigned int len, m;
-+	int i, j;
- 
--	pci_read_config_word(agp_bridge->dev, I830_GMCH_CTRL, &gmch_ctrl);
--	gmch_ctrl |= I830_GMCH_ENABLED;
--	pci_write_config_word(agp_bridge->dev, I830_GMCH_CTRL, gmch_ctrl);
--
--	writel(agp_bridge->gatt_bus_addr|I810_PGETBL_ENABLED, intel_private.registers+I810_PGETBL_CTL);
--	readl(intel_private.registers+I810_PGETBL_CTL);	/* PCI Posting. */
-+	j = pg_start;
- 
--	if (agp_bridge->driver->needs_scratch_page) {
--		for (i = intel_private.gtt_entries; i < current_size->num_entries; i++) {
--			writel(agp_bridge->scratch_page, intel_private.registers+I810_PTE_BASE+(i*4));
-+	/* sg may merge pages, but we have to separate
-+	 * per-page addr for GTT */
-+	for_each_sg(sg_list, sg, sg_len, i) {
-+		len = sg_dma_len(sg) >> PAGE_SHIFT;
-+		for (m = 0; m < len; m++) {
-+			dma_addr_t addr = sg_dma_address(sg) + (m << PAGE_SHIFT);
-+			intel_private.driver->write_entry(addr,
-+							  j, flags);
-+			j++;
- 		}
--		readl(intel_private.registers+I810_PTE_BASE+((i-1)*4));	/* PCI Posting. */
- 	}
--
--	global_cache_flush();
--
--	intel_i830_setup_flush();
--	return 0;
--}
--
--static void intel_i830_cleanup(void)
--{
--	iounmap(intel_private.registers);
-+	readl(intel_private.gtt+j-1);
- }
- 
--static int intel_i830_insert_entries(struct agp_memory *mem, off_t pg_start,
--				     int type)
-+static int intel_fake_agp_insert_entries(struct agp_memory *mem,
-+					 off_t pg_start, int type)
- {
--	int i, j, num_entries;
--	void *temp;
-+	int i, j;
- 	int ret = -EINVAL;
--	int mask_type;
- 
- 	if (mem->page_count == 0)
- 		goto out;
- 
--	temp = agp_bridge->current_size;
--	num_entries = A_SIZE_FIX(temp)->num_entries;
--
--	if (pg_start < intel_private.gtt_entries) {
-+	if (pg_start < intel_private.base.gtt_stolen_entries) {
- 		dev_printk(KERN_DEBUG, &intel_private.pcidev->dev,
--			   "pg_start == 0x%.8lx, intel_private.gtt_entries == 0x%.8x\n",
--			   pg_start, intel_private.gtt_entries);
-+			   "pg_start == 0x%.8lx, gtt_stolen_entries == 0x%.8x\n",
-+			   pg_start, intel_private.base.gtt_stolen_entries);
- 
- 		dev_info(&intel_private.pcidev->dev,
- 			 "trying to insert into local/stolen memory\n");
- 		goto out_err;
- 	}
- 
--	if ((pg_start + mem->page_count) > num_entries)
-+	if ((pg_start + mem->page_count) > intel_private.base.gtt_total_entries)
- 		goto out_err;
- 
--	/* The i830 can't check the GTT for entries since its read only,
--	 * depend on the caller to make the correct offset decisions.
--	 */
--
- 	if (type != mem->type)
- 		goto out_err;
- 
--	mask_type = agp_bridge->driver->agp_type_to_mask_type(agp_bridge, type);
--
--	if (mask_type != 0 && mask_type != AGP_PHYS_MEMORY &&
--	    mask_type != INTEL_AGP_CACHED_MEMORY)
-+	if (!intel_private.driver->check_flags(type))
- 		goto out_err;
- 
- 	if (!mem->is_flushed)
- 		global_cache_flush();
- 
--	for (i = 0, j = pg_start; i < mem->page_count; i++, j++) {
--		writel(agp_bridge->driver->mask_memory(agp_bridge,
--				page_to_phys(mem->pages[i]), mask_type),
--		       intel_private.registers+I810_PTE_BASE+(j*4));
-+	if (USE_PCI_DMA_API && INTEL_GTT_GEN > 2) {
-+		ret = intel_agp_map_memory(mem);
-+		if (ret != 0)
-+			return ret;
-+
-+		intel_gtt_insert_sg_entries(mem->sg_list, mem->num_sg,
-+					    pg_start, type);
-+	} else {
-+		for (i = 0, j = pg_start; i < mem->page_count; i++, j++) {
-+			dma_addr_t addr = page_to_phys(mem->pages[i]);
-+			intel_private.driver->write_entry(addr,
-+							  j, type);
-+		}
-+		readl(intel_private.gtt+j-1);
- 	}
--	readl(intel_private.registers+I810_PTE_BASE+((j-1)*4));
- 
- out:
- 	ret = 0;
-@@ -982,29 +1042,39 @@ out_err:
- 	return ret;
- }
- 
--static int intel_i830_remove_entries(struct agp_memory *mem, off_t pg_start,
--				     int type)
-+static int intel_fake_agp_remove_entries(struct agp_memory *mem,
-+					 off_t pg_start, int type)
- {
- 	int i;
- 
- 	if (mem->page_count == 0)
- 		return 0;
- 
--	if (pg_start < intel_private.gtt_entries) {
-+	if (pg_start < intel_private.base.gtt_stolen_entries) {
- 		dev_info(&intel_private.pcidev->dev,
- 			 "trying to disable local/stolen memory\n");
- 		return -EINVAL;
- 	}
- 
-+	if (USE_PCI_DMA_API && INTEL_GTT_GEN > 2)
-+		intel_agp_unmap_memory(mem);
-+
- 	for (i = pg_start; i < (mem->page_count + pg_start); i++) {
--		writel(agp_bridge->scratch_page, intel_private.registers+I810_PTE_BASE+(i*4));
-+		intel_private.driver->write_entry(intel_private.scratch_page_dma,
-+						  i, 0);
- 	}
--	readl(intel_private.registers+I810_PTE_BASE+((i-1)*4));
-+	readl(intel_private.gtt+i-1);
- 
- 	return 0;
- }
- 
--static struct agp_memory *intel_i830_alloc_by_type(size_t pg_count, int type)
-+static void intel_fake_agp_chipset_flush(struct agp_bridge_data *bridge)
-+{
-+	intel_private.driver->chipset_flush();
-+}
-+
-+static struct agp_memory *intel_fake_agp_alloc_by_type(size_t pg_count,
-+						       int type)
- {
- 	if (type == AGP_PHYS_MEMORY)
- 		return alloc_agpphysmem_i8xx(pg_count, type);
-@@ -1015,9 +1085,9 @@ static struct agp_memory *intel_i830_alloc_by_type(size_t pg_count, int type)
- static int intel_alloc_chipset_flush_resource(void)
- {
- 	int ret;
--	ret = pci_bus_alloc_resource(agp_bridge->dev->bus, &intel_private.ifp_resource, PAGE_SIZE,
-+	ret = pci_bus_alloc_resource(intel_private.bridge_dev->bus, &intel_private.ifp_resource, PAGE_SIZE,
- 				     PAGE_SIZE, PCIBIOS_MIN_MEM, 0,
--				     pcibios_align_resource, agp_bridge->dev);
-+				     pcibios_align_resource, intel_private.bridge_dev);
- 
- 	return ret;
- }
-@@ -1027,11 +1097,11 @@ static void intel_i915_setup_chipset_flush(void)
- 	int ret;
- 	u32 temp;
- 
--	pci_read_config_dword(agp_bridge->dev, I915_IFPADDR, &temp);
-+	pci_read_config_dword(intel_private.bridge_dev, I915_IFPADDR, &temp);
- 	if (!(temp & 0x1)) {
- 		intel_alloc_chipset_flush_resource();
- 		intel_private.resource_valid = 1;
--		pci_write_config_dword(agp_bridge->dev, I915_IFPADDR, (intel_private.ifp_resource.start & 0xffffffff) | 0x1);
-+		pci_write_config_dword(intel_private.bridge_dev, I915_IFPADDR, (intel_private.ifp_resource.start & 0xffffffff) | 0x1);
- 	} else {
- 		temp &= ~1;
- 
-@@ -1050,17 +1120,17 @@ static void intel_i965_g33_setup_chipset_flush(void)
- 	u32 temp_hi, temp_lo;
- 	int ret;
- 
--	pci_read_config_dword(agp_bridge->dev, I965_IFPADDR + 4, &temp_hi);
--	pci_read_config_dword(agp_bridge->dev, I965_IFPADDR, &temp_lo);
-+	pci_read_config_dword(intel_private.bridge_dev, I965_IFPADDR + 4, &temp_hi);
-+	pci_read_config_dword(intel_private.bridge_dev, I965_IFPADDR, &temp_lo);
- 
- 	if (!(temp_lo & 0x1)) {
- 
- 		intel_alloc_chipset_flush_resource();
- 
- 		intel_private.resource_valid = 1;
--		pci_write_config_dword(agp_bridge->dev, I965_IFPADDR + 4,
-+		pci_write_config_dword(intel_private.bridge_dev, I965_IFPADDR + 4,
- 			upper_32_bits(intel_private.ifp_resource.start));
--		pci_write_config_dword(agp_bridge->dev, I965_IFPADDR, (intel_private.ifp_resource.start & 0xffffffff) | 0x1);
-+		pci_write_config_dword(intel_private.bridge_dev, I965_IFPADDR, (intel_private.ifp_resource.start & 0xffffffff) | 0x1);
- 	} else {
- 		u64 l64;
- 
-@@ -1083,7 +1153,7 @@ static void intel_i9xx_setup_flush(void)
- 	if (intel_private.ifp_resource.start)
- 		return;
- 
--	if (IS_SNB)
-+	if (INTEL_GTT_GEN == 6)
- 		return;
- 
- 	/* setup a resource for this object */
-@@ -1091,7 +1161,7 @@ static void intel_i9xx_setup_flush(void)
- 	intel_private.ifp_resource.flags = IORESOURCE_MEM;
- 
- 	/* Setup chipset flush for 915 */
--	if (IS_I965 || IS_G33 || IS_G4X) {
-+	if (IS_G33 || INTEL_GTT_GEN >= 4) {
- 		intel_i965_g33_setup_chipset_flush();
- 	} else {
- 		intel_i915_setup_chipset_flush();
-@@ -1104,41 +1174,7 @@ static void intel_i9xx_setup_flush(void)
- 			"can't ioremap flush page - no chipset flushing\n");
- }
- 
--static int intel_i9xx_configure(void)
--{
--	struct aper_size_info_fixed *current_size;
--	u32 temp;
--	u16 gmch_ctrl;
--	int i;
--
--	current_size = A_SIZE_FIX(agp_bridge->current_size);
--
--	pci_read_config_dword(intel_private.pcidev, I915_GMADDR, &temp);
--
--	agp_bridge->gart_bus_addr = (temp & PCI_BASE_ADDRESS_MEM_MASK);
--
--	pci_read_config_word(agp_bridge->dev, I830_GMCH_CTRL, &gmch_ctrl);
--	gmch_ctrl |= I830_GMCH_ENABLED;
--	pci_write_config_word(agp_bridge->dev, I830_GMCH_CTRL, gmch_ctrl);
--
--	writel(agp_bridge->gatt_bus_addr|I810_PGETBL_ENABLED, intel_private.registers+I810_PGETBL_CTL);
--	readl(intel_private.registers+I810_PGETBL_CTL);	/* PCI Posting. */
--
--	if (agp_bridge->driver->needs_scratch_page) {
--		for (i = intel_private.gtt_entries; i < intel_private.gtt_total_size; i++) {
--			writel(agp_bridge->scratch_page, intel_private.gtt+i);
--		}
--		readl(intel_private.gtt+i-1);	/* PCI Posting. */
--	}
--
--	global_cache_flush();
--
--	intel_i9xx_setup_flush();
--
--	return 0;
--}
--
--static void intel_i915_cleanup(void)
-+static void i9xx_cleanup(void)
- {
- 	if (intel_private.i9xx_flush_page)
- 		iounmap(intel_private.i9xx_flush_page);
-@@ -1146,320 +1182,93 @@ static void intel_i915_cleanup(void)
- 		release_resource(&intel_private.ifp_resource);
- 	intel_private.ifp_resource.start = 0;
- 	intel_private.resource_valid = 0;
--	iounmap(intel_private.gtt);
--	iounmap(intel_private.registers);
- }
- 
--static void intel_i915_chipset_flush(struct agp_bridge_data *bridge)
-+static void i9xx_chipset_flush(void)
- {
- 	if (intel_private.i9xx_flush_page)
- 		writel(1, intel_private.i9xx_flush_page);
- }
- 
--static int intel_i915_insert_entries(struct agp_memory *mem, off_t pg_start,
--				     int type)
-+static void i965_write_entry(dma_addr_t addr, unsigned int entry,
-+			     unsigned int flags)
- {
--	int num_entries;
--	void *temp;
--	int ret = -EINVAL;
--	int mask_type;
--
--	if (mem->page_count == 0)
--		goto out;
--
--	temp = agp_bridge->current_size;
--	num_entries = A_SIZE_FIX(temp)->num_entries;
--
--	if (pg_start < intel_private.gtt_entries) {
--		dev_printk(KERN_DEBUG, &intel_private.pcidev->dev,
--			   "pg_start == 0x%.8lx, intel_private.gtt_entries == 0x%.8x\n",
--			   pg_start, intel_private.gtt_entries);
--
--		dev_info(&intel_private.pcidev->dev,
--			 "trying to insert into local/stolen memory\n");
--		goto out_err;
--	}
--
--	if ((pg_start + mem->page_count) > num_entries)
--		goto out_err;
--
--	/* The i915 can't check the GTT for entries since it's read only;
--	 * depend on the caller to make the correct offset decisions.
--	 */
--
--	if (type != mem->type)
--		goto out_err;
--
--	mask_type = agp_bridge->driver->agp_type_to_mask_type(agp_bridge, type);
--
--	if (!IS_SNB && mask_type != 0 && mask_type != AGP_PHYS_MEMORY &&
--	    mask_type != INTEL_AGP_CACHED_MEMORY)
--		goto out_err;
--
--	if (!mem->is_flushed)
--		global_cache_flush();
--
--	intel_agp_insert_sg_entries(mem, pg_start, mask_type);
--
-- out:
--	ret = 0;
-- out_err:
--	mem->is_flushed = true;
--	return ret;
-+	/* Shift high bits down */
-+	addr |= (addr >> 28) & 0xf0;
-+	writel(addr | I810_PTE_VALID, intel_private.gtt + entry);
- }
- 
--static int intel_i915_remove_entries(struct agp_memory *mem, off_t pg_start,
--				     int type)
-+static bool gen6_check_flags(unsigned int flags)
- {
--	int i;
--
--	if (mem->page_count == 0)
--		return 0;
--
--	if (pg_start < intel_private.gtt_entries) {
--		dev_info(&intel_private.pcidev->dev,
--			 "trying to disable local/stolen memory\n");
--		return -EINVAL;
--	}
--
--	for (i = pg_start; i < (mem->page_count + pg_start); i++)
--		writel(agp_bridge->scratch_page, intel_private.gtt+i);
--
--	readl(intel_private.gtt+i-1);
--
--	return 0;
-+	return true;
- }
- 
--/* Return the aperture size by just checking the resource length.  The effect
-- * described in the spec of the MSAC registers is just changing of the
-- * resource size.
-- */
--static int intel_i9xx_fetch_size(void)
-+static void gen6_write_entry(dma_addr_t addr, unsigned int entry,
-+			     unsigned int flags)
- {
--	int num_sizes = ARRAY_SIZE(intel_i830_sizes);
--	int aper_size; /* size in megabytes */
--	int i;
--
--	aper_size = pci_resource_len(intel_private.pcidev, 2) / MB(1);
--
--	for (i = 0; i < num_sizes; i++) {
--		if (aper_size == intel_i830_sizes[i].size) {
--			agp_bridge->current_size = intel_i830_sizes + i;
--			return aper_size;
--		}
-+	unsigned int type_mask = flags & ~AGP_USER_CACHED_MEMORY_GFDT;
-+	unsigned int gfdt = flags & AGP_USER_CACHED_MEMORY_GFDT;
-+	u32 pte_flags;
-+
-+	if (type_mask == AGP_USER_MEMORY)
-+		pte_flags = GEN6_PTE_UNCACHED | I810_PTE_VALID;
-+	else if (type_mask == AGP_USER_CACHED_MEMORY_LLC_MLC) {
-+		pte_flags = GEN6_PTE_LLC_MLC | I810_PTE_VALID;
-+		if (gfdt)
-+			pte_flags |= GEN6_PTE_GFDT;
-+	} else { /* set 'normal'/'cached' to LLC by default */
-+		pte_flags = GEN6_PTE_LLC | I810_PTE_VALID;
-+		if (gfdt)
-+			pte_flags |= GEN6_PTE_GFDT;
- 	}
- 
--	return 0;
-+	/* gen6 has bit11-4 for physical addr bit39-32 */
-+	addr |= (addr >> 28) & 0xff0;
-+	writel(addr | pte_flags, intel_private.gtt + entry);
- }
- 
--static int intel_i915_get_gtt_size(void)
-+static void gen6_cleanup(void)
- {
--	int size;
--
--	if (IS_G33) {
--		u16 gmch_ctrl;
--
--		/* G33's GTT size defined in gmch_ctrl */
--		pci_read_config_word(agp_bridge->dev, I830_GMCH_CTRL, &gmch_ctrl);
--		switch (gmch_ctrl & I830_GMCH_GMS_MASK) {
--		case I830_GMCH_GMS_STOLEN_512:
--			size = 512;
--			break;
--		case I830_GMCH_GMS_STOLEN_1024:
--			size = 1024;
--			break;
--		case I830_GMCH_GMS_STOLEN_8192:
--			size = 8*1024;
--			break;
--		default:
--			dev_info(&agp_bridge->dev->dev,
--				 "unknown page table size 0x%x, assuming 512KB\n",
--				(gmch_ctrl & I830_GMCH_GMS_MASK));
--			size = 512;
--		}
--	} else {
--		/* On previous hardware, the GTT size was just what was
--		 * required to map the aperture.
--		 */
--		size = agp_bridge->driver->fetch_size();
--	}
--
--	return KB(size);
- }
- 
--/* The intel i915 automatically initializes the agp aperture during POST.
-- * Use the memory already set aside for in the GTT.
-- */
--static int intel_i915_create_gatt_table(struct agp_bridge_data *bridge)
-+static int i9xx_setup(void)
- {
--	int page_order;
--	struct aper_size_info_fixed *size;
--	int num_entries;
--	u32 temp, temp2;
--	int gtt_map_size;
--
--	size = agp_bridge->current_size;
--	page_order = size->page_order;
--	num_entries = size->num_entries;
--	agp_bridge->gatt_table_real = NULL;
--
--	pci_read_config_dword(intel_private.pcidev, I915_MMADDR, &temp);
--	pci_read_config_dword(intel_private.pcidev, I915_PTEADDR, &temp2);
--
--	gtt_map_size = intel_i915_get_gtt_size();
-+	u32 reg_addr;
- 
--	intel_private.gtt = ioremap(temp2, gtt_map_size);
--	if (!intel_private.gtt)
--		return -ENOMEM;
--
--	intel_private.gtt_total_size = gtt_map_size / 4;
--
--	temp &= 0xfff80000;
--
--	intel_private.registers = ioremap(temp, 128 * 4096);
--	if (!intel_private.registers) {
--		iounmap(intel_private.gtt);
--		return -ENOMEM;
--	}
-+	pci_read_config_dword(intel_private.pcidev, I915_MMADDR, &reg_addr);
- 
--	temp = readl(intel_private.registers+I810_PGETBL_CTL) & 0xfffff000;
--	global_cache_flush();	/* FIXME: ? */
-+	reg_addr &= 0xfff80000;
- 
--	/* we have to call this as early as possible after the MMIO base address is known */
--	intel_i830_init_gtt_entries();
--	if (intel_private.gtt_entries == 0) {
--		iounmap(intel_private.gtt);
--		iounmap(intel_private.registers);
-+	intel_private.registers = ioremap(reg_addr, 128 * 4096);
-+	if (!intel_private.registers)
- 		return -ENOMEM;
--	}
- 
--	agp_bridge->gatt_table = NULL;
--
--	agp_bridge->gatt_bus_addr = temp;
-+	if (INTEL_GTT_GEN == 3) {
-+		u32 gtt_addr;
- 
--	return 0;
--}
--
--/*
-- * The i965 supports 36-bit physical addresses, but to keep
-- * the format of the GTT the same, the bits that don't fit
-- * in a 32-bit word are shifted down to bits 4..7.
-- *
-- * Gcc is smart enough to notice that "(addr >> 28) & 0xf0"
-- * is always zero on 32-bit architectures, so no need to make
-- * this conditional.
-- */
--static unsigned long intel_i965_mask_memory(struct agp_bridge_data *bridge,
--					    dma_addr_t addr, int type)
--{
--	/* Shift high bits down */
--	addr |= (addr >> 28) & 0xf0;
--
--	/* Type checking must be done elsewhere */
--	return addr | bridge->driver->masks[type].mask;
--}
--
--static unsigned long intel_gen6_mask_memory(struct agp_bridge_data *bridge,
--					    dma_addr_t addr, int type)
--{
--	/* gen6 has bit11-4 for physical addr bit39-32 */
--	addr |= (addr >> 28) & 0xff0;
--
--	/* Type checking must be done elsewhere */
--	return addr | bridge->driver->masks[type].mask;
--}
--
--static void intel_i965_get_gtt_range(int *gtt_offset, int *gtt_size)
--{
--	u16 snb_gmch_ctl;
--
--	switch (agp_bridge->dev->device) {
--	case PCI_DEVICE_ID_INTEL_GM45_HB:
--	case PCI_DEVICE_ID_INTEL_EAGLELAKE_HB:
--	case PCI_DEVICE_ID_INTEL_Q45_HB:
--	case PCI_DEVICE_ID_INTEL_G45_HB:
--	case PCI_DEVICE_ID_INTEL_G41_HB:
--	case PCI_DEVICE_ID_INTEL_B43_HB:
--	case PCI_DEVICE_ID_INTEL_IRONLAKE_D_HB:
--	case PCI_DEVICE_ID_INTEL_IRONLAKE_M_HB:
--	case PCI_DEVICE_ID_INTEL_IRONLAKE_MA_HB:
--	case PCI_DEVICE_ID_INTEL_IRONLAKE_MC2_HB:
--		*gtt_offset = *gtt_size = MB(2);
--		break;
--	case PCI_DEVICE_ID_INTEL_SANDYBRIDGE_HB:
--	case PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_HB:
--	case PCI_DEVICE_ID_INTEL_SANDYBRIDGE_S_HB:
--		*gtt_offset = MB(2);
-+		pci_read_config_dword(intel_private.pcidev,
-+				      I915_PTEADDR, &gtt_addr);
-+		intel_private.gtt_bus_addr = gtt_addr;
-+	} else {
-+		u32 gtt_offset;
- 
--		pci_read_config_word(intel_private.pcidev, SNB_GMCH_CTRL, &snb_gmch_ctl);
--		switch (snb_gmch_ctl & SNB_GTT_SIZE_MASK) {
--		default:
--		case SNB_GTT_SIZE_0M:
--			printk(KERN_ERR "Bad GTT size mask: 0x%04x.\n", snb_gmch_ctl);
--			*gtt_size = MB(0);
--			break;
--		case SNB_GTT_SIZE_1M:
--			*gtt_size = MB(1);
-+		switch (INTEL_GTT_GEN) {
-+		case 5:
-+		case 6:
-+			gtt_offset = MB(2);
- 			break;
--		case SNB_GTT_SIZE_2M:
--			*gtt_size = MB(2);
-+		case 4:
-+		default:
-+			gtt_offset = KB(512);
- 			break;
- 		}
--		break;
--	default:
--		*gtt_offset = *gtt_size = KB(512);
--	}
--}
--
--/* The intel i965 automatically initializes the agp aperture during POST.
-- * Use the memory already set aside for in the GTT.
-- */
--static int intel_i965_create_gatt_table(struct agp_bridge_data *bridge)
--{
--	int page_order;
--	struct aper_size_info_fixed *size;
--	int num_entries;
--	u32 temp;
--	int gtt_offset, gtt_size;
--
--	size = agp_bridge->current_size;
--	page_order = size->page_order;
--	num_entries = size->num_entries;
--	agp_bridge->gatt_table_real = NULL;
--
--	pci_read_config_dword(intel_private.pcidev, I915_MMADDR, &temp);
--
--	temp &= 0xfff00000;
--
--	intel_i965_get_gtt_range(&gtt_offset, &gtt_size);
--
--	intel_private.gtt = ioremap((temp + gtt_offset) , gtt_size);
--
--	if (!intel_private.gtt)
--		return -ENOMEM;
--
--	intel_private.gtt_total_size = gtt_size / 4;
--
--	intel_private.registers = ioremap(temp, 128 * 4096);
--	if (!intel_private.registers) {
--		iounmap(intel_private.gtt);
--		return -ENOMEM;
--	}
--
--	temp = readl(intel_private.registers+I810_PGETBL_CTL) & 0xfffff000;
--	global_cache_flush();   /* FIXME: ? */
--
--	/* we have to call this as early as possible after the MMIO base address is known */
--	intel_i830_init_gtt_entries();
--	if (intel_private.gtt_entries == 0) {
--		iounmap(intel_private.gtt);
--		iounmap(intel_private.registers);
--		return -ENOMEM;
-+		intel_private.gtt_bus_addr = reg_addr + gtt_offset;
- 	}
- 
--	agp_bridge->gatt_table = NULL;
-+	intel_private.pte_bus_addr =
-+		readl(intel_private.registers+I810_PGETBL_CTL) & 0xfffff000;
- 
--	agp_bridge->gatt_bus_addr = temp;
-+	intel_i9xx_setup_flush();
- 
- 	return 0;
- }
-@@ -1475,7 +1284,7 @@ static const struct agp_bridge_driver intel_810_driver = {
- 	.cleanup		= intel_i810_cleanup,
- 	.mask_memory		= intel_i810_mask_memory,
- 	.masks			= intel_i810_masks,
--	.agp_enable		= intel_i810_agp_enable,
-+	.agp_enable		= intel_fake_agp_enable,
- 	.cache_flush		= global_cache_flush,
- 	.create_gatt_table	= agp_generic_create_gatt_table,
- 	.free_gatt_table	= agp_generic_free_gatt_table,
-@@ -1490,161 +1299,282 @@ static const struct agp_bridge_driver intel_810_driver = {
- 	.agp_type_to_mask_type  = agp_generic_type_to_mask_type,
- };
- 
--static const struct agp_bridge_driver intel_830_driver = {
-+static const struct agp_bridge_driver intel_fake_agp_driver = {
- 	.owner			= THIS_MODULE,
--	.aperture_sizes		= intel_i830_sizes,
- 	.size_type		= FIXED_APER_SIZE,
--	.num_aperture_sizes	= 4,
--	.needs_scratch_page	= true,
--	.configure		= intel_i830_configure,
--	.fetch_size		= intel_i830_fetch_size,
--	.cleanup		= intel_i830_cleanup,
--	.mask_memory		= intel_i810_mask_memory,
--	.masks			= intel_i810_masks,
--	.agp_enable		= intel_i810_agp_enable,
-+	.aperture_sizes		= intel_fake_agp_sizes,
-+	.num_aperture_sizes	= ARRAY_SIZE(intel_fake_agp_sizes),
-+	.configure		= intel_fake_agp_configure,
-+	.fetch_size		= intel_fake_agp_fetch_size,
-+	.cleanup		= intel_gtt_cleanup,
-+	.agp_enable		= intel_fake_agp_enable,
- 	.cache_flush		= global_cache_flush,
--	.create_gatt_table	= intel_i830_create_gatt_table,
--	.free_gatt_table	= intel_i830_free_gatt_table,
--	.insert_memory		= intel_i830_insert_entries,
--	.remove_memory		= intel_i830_remove_entries,
--	.alloc_by_type		= intel_i830_alloc_by_type,
-+	.create_gatt_table	= intel_fake_agp_create_gatt_table,
-+	.free_gatt_table	= intel_fake_agp_free_gatt_table,
-+	.insert_memory		= intel_fake_agp_insert_entries,
-+	.remove_memory		= intel_fake_agp_remove_entries,
-+	.alloc_by_type		= intel_fake_agp_alloc_by_type,
- 	.free_by_type		= intel_i810_free_by_type,
- 	.agp_alloc_page		= agp_generic_alloc_page,
- 	.agp_alloc_pages        = agp_generic_alloc_pages,
- 	.agp_destroy_page	= agp_generic_destroy_page,
- 	.agp_destroy_pages      = agp_generic_destroy_pages,
--	.agp_type_to_mask_type  = intel_i830_type_to_mask_type,
--	.chipset_flush		= intel_i830_chipset_flush,
-+	.chipset_flush		= intel_fake_agp_chipset_flush,
- };
- 
--static const struct agp_bridge_driver intel_915_driver = {
--	.owner			= THIS_MODULE,
--	.aperture_sizes		= intel_i830_sizes,
--	.size_type		= FIXED_APER_SIZE,
--	.num_aperture_sizes	= 4,
--	.needs_scratch_page	= true,
--	.configure		= intel_i9xx_configure,
--	.fetch_size		= intel_i9xx_fetch_size,
--	.cleanup		= intel_i915_cleanup,
--	.mask_memory		= intel_i810_mask_memory,
--	.masks			= intel_i810_masks,
--	.agp_enable		= intel_i810_agp_enable,
--	.cache_flush		= global_cache_flush,
--	.create_gatt_table	= intel_i915_create_gatt_table,
--	.free_gatt_table	= intel_i830_free_gatt_table,
--	.insert_memory		= intel_i915_insert_entries,
--	.remove_memory		= intel_i915_remove_entries,
--	.alloc_by_type		= intel_i830_alloc_by_type,
--	.free_by_type		= intel_i810_free_by_type,
--	.agp_alloc_page		= agp_generic_alloc_page,
--	.agp_alloc_pages        = agp_generic_alloc_pages,
--	.agp_destroy_page	= agp_generic_destroy_page,
--	.agp_destroy_pages      = agp_generic_destroy_pages,
--	.agp_type_to_mask_type  = intel_i830_type_to_mask_type,
--	.chipset_flush		= intel_i915_chipset_flush,
--#ifdef USE_PCI_DMA_API
--	.agp_map_page		= intel_agp_map_page,
--	.agp_unmap_page		= intel_agp_unmap_page,
--	.agp_map_memory		= intel_agp_map_memory,
--	.agp_unmap_memory	= intel_agp_unmap_memory,
--#endif
-+static const struct intel_gtt_driver i81x_gtt_driver = {
-+	.gen = 1,
-+	.dma_mask_size = 32,
- };
--
--static const struct agp_bridge_driver intel_i965_driver = {
--	.owner			= THIS_MODULE,
--	.aperture_sizes		= intel_i830_sizes,
--	.size_type		= FIXED_APER_SIZE,
--	.num_aperture_sizes	= 4,
--	.needs_scratch_page	= true,
--	.configure		= intel_i9xx_configure,
--	.fetch_size		= intel_i9xx_fetch_size,
--	.cleanup		= intel_i915_cleanup,
--	.mask_memory		= intel_i965_mask_memory,
--	.masks			= intel_i810_masks,
--	.agp_enable		= intel_i810_agp_enable,
--	.cache_flush		= global_cache_flush,
--	.create_gatt_table	= intel_i965_create_gatt_table,
--	.free_gatt_table	= intel_i830_free_gatt_table,
--	.insert_memory		= intel_i915_insert_entries,
--	.remove_memory		= intel_i915_remove_entries,
--	.alloc_by_type		= intel_i830_alloc_by_type,
--	.free_by_type		= intel_i810_free_by_type,
--	.agp_alloc_page		= agp_generic_alloc_page,
--	.agp_alloc_pages        = agp_generic_alloc_pages,
--	.agp_destroy_page	= agp_generic_destroy_page,
--	.agp_destroy_pages      = agp_generic_destroy_pages,
--	.agp_type_to_mask_type	= intel_i830_type_to_mask_type,
--	.chipset_flush		= intel_i915_chipset_flush,
--#ifdef USE_PCI_DMA_API
--	.agp_map_page		= intel_agp_map_page,
--	.agp_unmap_page		= intel_agp_unmap_page,
--	.agp_map_memory		= intel_agp_map_memory,
--	.agp_unmap_memory	= intel_agp_unmap_memory,
--#endif
-+static const struct intel_gtt_driver i8xx_gtt_driver = {
-+	.gen = 2,
-+	.setup = i830_setup,
-+	.cleanup = i830_cleanup,
-+	.write_entry = i830_write_entry,
-+	.dma_mask_size = 32,
-+	.check_flags = i830_check_flags,
-+	.chipset_flush = i830_chipset_flush,
- };
--
--static const struct agp_bridge_driver intel_gen6_driver = {
--	.owner			= THIS_MODULE,
--	.aperture_sizes		= intel_i830_sizes,
--	.size_type		= FIXED_APER_SIZE,
--	.num_aperture_sizes	= 4,
--	.needs_scratch_page	= true,
--	.configure		= intel_i9xx_configure,
--	.fetch_size		= intel_i9xx_fetch_size,
--	.cleanup		= intel_i915_cleanup,
--	.mask_memory		= intel_gen6_mask_memory,
--	.masks			= intel_gen6_masks,
--	.agp_enable		= intel_i810_agp_enable,
--	.cache_flush		= global_cache_flush,
--	.create_gatt_table	= intel_i965_create_gatt_table,
--	.free_gatt_table	= intel_i830_free_gatt_table,
--	.insert_memory		= intel_i915_insert_entries,
--	.remove_memory		= intel_i915_remove_entries,
--	.alloc_by_type		= intel_i830_alloc_by_type,
--	.free_by_type		= intel_i810_free_by_type,
--	.agp_alloc_page		= agp_generic_alloc_page,
--	.agp_alloc_pages        = agp_generic_alloc_pages,
--	.agp_destroy_page	= agp_generic_destroy_page,
--	.agp_destroy_pages      = agp_generic_destroy_pages,
--	.agp_type_to_mask_type	= intel_gen6_type_to_mask_type,
--	.chipset_flush		= intel_i915_chipset_flush,
--#ifdef USE_PCI_DMA_API
--	.agp_map_page		= intel_agp_map_page,
--	.agp_unmap_page		= intel_agp_unmap_page,
--	.agp_map_memory		= intel_agp_map_memory,
--	.agp_unmap_memory	= intel_agp_unmap_memory,
--#endif
-+static const struct intel_gtt_driver i915_gtt_driver = {
-+	.gen = 3,
-+	.setup = i9xx_setup,
-+	.cleanup = i9xx_cleanup,
-+	/* i945 is the last gpu to need phys mem (for overlay and cursors). */
-+	.write_entry = i830_write_entry,
-+	.dma_mask_size = 32,
-+	.check_flags = i830_check_flags,
-+	.chipset_flush = i9xx_chipset_flush,
-+};
-+static const struct intel_gtt_driver g33_gtt_driver = {
-+	.gen = 3,
-+	.is_g33 = 1,
-+	.setup = i9xx_setup,
-+	.cleanup = i9xx_cleanup,
-+	.write_entry = i965_write_entry,
-+	.dma_mask_size = 36,
-+	.check_flags = i830_check_flags,
-+	.chipset_flush = i9xx_chipset_flush,
-+};
-+static const struct intel_gtt_driver pineview_gtt_driver = {
-+	.gen = 3,
-+	.is_pineview = 1, .is_g33 = 1,
-+	.setup = i9xx_setup,
-+	.cleanup = i9xx_cleanup,
-+	.write_entry = i965_write_entry,
-+	.dma_mask_size = 36,
-+	.check_flags = i830_check_flags,
-+	.chipset_flush = i9xx_chipset_flush,
-+};
-+static const struct intel_gtt_driver i965_gtt_driver = {
-+	.gen = 4,
-+	.setup = i9xx_setup,
-+	.cleanup = i9xx_cleanup,
-+	.write_entry = i965_write_entry,
-+	.dma_mask_size = 36,
-+	.check_flags = i830_check_flags,
-+	.chipset_flush = i9xx_chipset_flush,
-+};
-+static const struct intel_gtt_driver g4x_gtt_driver = {
-+	.gen = 5,
-+	.setup = i9xx_setup,
-+	.cleanup = i9xx_cleanup,
-+	.write_entry = i965_write_entry,
-+	.dma_mask_size = 36,
-+	.check_flags = i830_check_flags,
-+	.chipset_flush = i9xx_chipset_flush,
-+};
-+static const struct intel_gtt_driver ironlake_gtt_driver = {
-+	.gen = 5,
-+	.is_ironlake = 1,
-+	.setup = i9xx_setup,
-+	.cleanup = i9xx_cleanup,
-+	.write_entry = i965_write_entry,
-+	.dma_mask_size = 36,
-+	.check_flags = i830_check_flags,
-+	.chipset_flush = i9xx_chipset_flush,
-+};
-+static const struct intel_gtt_driver sandybridge_gtt_driver = {
-+	.gen = 6,
-+	.setup = i9xx_setup,
-+	.cleanup = gen6_cleanup,
-+	.write_entry = gen6_write_entry,
-+	.dma_mask_size = 40,
-+	.check_flags = gen6_check_flags,
-+	.chipset_flush = i9xx_chipset_flush,
- };
- 
--static const struct agp_bridge_driver intel_g33_driver = {
--	.owner			= THIS_MODULE,
--	.aperture_sizes		= intel_i830_sizes,
--	.size_type		= FIXED_APER_SIZE,
--	.num_aperture_sizes	= 4,
--	.needs_scratch_page	= true,
--	.configure		= intel_i9xx_configure,
--	.fetch_size		= intel_i9xx_fetch_size,
--	.cleanup		= intel_i915_cleanup,
--	.mask_memory		= intel_i965_mask_memory,
--	.masks			= intel_i810_masks,
--	.agp_enable		= intel_i810_agp_enable,
--	.cache_flush		= global_cache_flush,
--	.create_gatt_table	= intel_i915_create_gatt_table,
--	.free_gatt_table	= intel_i830_free_gatt_table,
--	.insert_memory		= intel_i915_insert_entries,
--	.remove_memory		= intel_i915_remove_entries,
--	.alloc_by_type		= intel_i830_alloc_by_type,
--	.free_by_type		= intel_i810_free_by_type,
--	.agp_alloc_page		= agp_generic_alloc_page,
--	.agp_alloc_pages        = agp_generic_alloc_pages,
--	.agp_destroy_page	= agp_generic_destroy_page,
--	.agp_destroy_pages      = agp_generic_destroy_pages,
--	.agp_type_to_mask_type	= intel_i830_type_to_mask_type,
--	.chipset_flush		= intel_i915_chipset_flush,
--#ifdef USE_PCI_DMA_API
--	.agp_map_page		= intel_agp_map_page,
--	.agp_unmap_page		= intel_agp_unmap_page,
--	.agp_map_memory		= intel_agp_map_memory,
--	.agp_unmap_memory	= intel_agp_unmap_memory,
--#endif
-+/* Table to describe Intel GMCH and AGP/PCIE GART drivers.  At least one of
-+ * driver and gmch_driver must be non-null, and find_gmch will determine
-+ * which one should be used if a gmch_chip_id is present.
-+ */
-+static const struct intel_gtt_driver_description {
-+	unsigned int gmch_chip_id;
-+	char *name;
-+	const struct agp_bridge_driver *gmch_driver;
-+	const struct intel_gtt_driver *gtt_driver;
-+} intel_gtt_chipsets[] = {
-+	{ PCI_DEVICE_ID_INTEL_82810_IG1, "i810", &intel_810_driver,
-+		&i81x_gtt_driver},
-+	{ PCI_DEVICE_ID_INTEL_82810_IG3, "i810", &intel_810_driver,
-+		&i81x_gtt_driver},
-+	{ PCI_DEVICE_ID_INTEL_82810E_IG, "i810", &intel_810_driver,
-+		&i81x_gtt_driver},
-+	{ PCI_DEVICE_ID_INTEL_82815_CGC, "i815", &intel_810_driver,
-+		&i81x_gtt_driver},
-+	{ PCI_DEVICE_ID_INTEL_82830_CGC, "830M",
-+		&intel_fake_agp_driver, &i8xx_gtt_driver},
-+	{ PCI_DEVICE_ID_INTEL_82845G_IG, "830M",
-+		&intel_fake_agp_driver, &i8xx_gtt_driver},
-+	{ PCI_DEVICE_ID_INTEL_82854_IG, "854",
-+		&intel_fake_agp_driver, &i8xx_gtt_driver},
-+	{ PCI_DEVICE_ID_INTEL_82855GM_IG, "855GM",
-+		&intel_fake_agp_driver, &i8xx_gtt_driver},
-+	{ PCI_DEVICE_ID_INTEL_82865_IG, "865",
-+		&intel_fake_agp_driver, &i8xx_gtt_driver},
-+	{ PCI_DEVICE_ID_INTEL_E7221_IG, "E7221 (i915)",
-+		&intel_fake_agp_driver, &i915_gtt_driver },
-+	{ PCI_DEVICE_ID_INTEL_82915G_IG, "915G",
-+		&intel_fake_agp_driver, &i915_gtt_driver },
-+	{ PCI_DEVICE_ID_INTEL_82915GM_IG, "915GM",
-+		&intel_fake_agp_driver, &i915_gtt_driver },
-+	{ PCI_DEVICE_ID_INTEL_82945G_IG, "945G",
-+		&intel_fake_agp_driver, &i915_gtt_driver },
-+	{ PCI_DEVICE_ID_INTEL_82945GM_IG, "945GM",
-+		&intel_fake_agp_driver, &i915_gtt_driver },
-+	{ PCI_DEVICE_ID_INTEL_82945GME_IG, "945GME",
-+		&intel_fake_agp_driver, &i915_gtt_driver },
-+	{ PCI_DEVICE_ID_INTEL_82946GZ_IG, "946GZ",
-+		&intel_fake_agp_driver, &i965_gtt_driver },
-+	{ PCI_DEVICE_ID_INTEL_82G35_IG, "G35",
-+		&intel_fake_agp_driver, &i965_gtt_driver },
-+	{ PCI_DEVICE_ID_INTEL_82965Q_IG, "965Q",
-+		&intel_fake_agp_driver, &i965_gtt_driver },
-+	{ PCI_DEVICE_ID_INTEL_82965G_IG, "965G",
-+		&intel_fake_agp_driver, &i965_gtt_driver },
-+	{ PCI_DEVICE_ID_INTEL_82965GM_IG, "965GM",
-+		&intel_fake_agp_driver, &i965_gtt_driver },
-+	{ PCI_DEVICE_ID_INTEL_82965GME_IG, "965GME/GLE",
-+		&intel_fake_agp_driver, &i965_gtt_driver },
-+	{ PCI_DEVICE_ID_INTEL_G33_IG, "G33",
-+		&intel_fake_agp_driver, &g33_gtt_driver },
-+	{ PCI_DEVICE_ID_INTEL_Q35_IG, "Q35",
-+		&intel_fake_agp_driver, &g33_gtt_driver },
-+	{ PCI_DEVICE_ID_INTEL_Q33_IG, "Q33",
-+		&intel_fake_agp_driver, &g33_gtt_driver },
-+	{ PCI_DEVICE_ID_INTEL_PINEVIEW_M_IG, "GMA3150",
-+		&intel_fake_agp_driver, &pineview_gtt_driver },
-+	{ PCI_DEVICE_ID_INTEL_PINEVIEW_IG, "GMA3150",
-+		&intel_fake_agp_driver, &pineview_gtt_driver },
-+	{ PCI_DEVICE_ID_INTEL_GM45_IG, "GM45",
-+		&intel_fake_agp_driver, &g4x_gtt_driver },
-+	{ PCI_DEVICE_ID_INTEL_EAGLELAKE_IG, "Eaglelake",
-+		&intel_fake_agp_driver, &g4x_gtt_driver },
-+	{ PCI_DEVICE_ID_INTEL_Q45_IG, "Q45/Q43",
-+		&intel_fake_agp_driver, &g4x_gtt_driver },
-+	{ PCI_DEVICE_ID_INTEL_G45_IG, "G45/G43",
-+		&intel_fake_agp_driver, &g4x_gtt_driver },
-+	{ PCI_DEVICE_ID_INTEL_B43_IG, "B43",
-+		&intel_fake_agp_driver, &g4x_gtt_driver },
-+	{ PCI_DEVICE_ID_INTEL_B43_1_IG, "B43",
-+		&intel_fake_agp_driver, &g4x_gtt_driver },
-+	{ PCI_DEVICE_ID_INTEL_G41_IG, "G41",
-+		&intel_fake_agp_driver, &g4x_gtt_driver },
-+	{ PCI_DEVICE_ID_INTEL_IRONLAKE_D_IG,
-+	    "HD Graphics", &intel_fake_agp_driver, &ironlake_gtt_driver },
-+	{ PCI_DEVICE_ID_INTEL_IRONLAKE_M_IG,
-+	    "HD Graphics", &intel_fake_agp_driver, &ironlake_gtt_driver },
-+	{ PCI_DEVICE_ID_INTEL_SANDYBRIDGE_GT1_IG,
-+	    "Sandybridge", &intel_fake_agp_driver, &sandybridge_gtt_driver },
-+	{ PCI_DEVICE_ID_INTEL_SANDYBRIDGE_GT2_IG,
-+	    "Sandybridge", &intel_fake_agp_driver, &sandybridge_gtt_driver },
-+	{ PCI_DEVICE_ID_INTEL_SANDYBRIDGE_GT2_PLUS_IG,
-+	    "Sandybridge", &intel_fake_agp_driver, &sandybridge_gtt_driver },
-+	{ PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_GT1_IG,
-+	    "Sandybridge", &intel_fake_agp_driver, &sandybridge_gtt_driver },
-+	{ PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_GT2_IG,
-+	    "Sandybridge", &intel_fake_agp_driver, &sandybridge_gtt_driver },
-+	{ PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_GT2_PLUS_IG,
-+	    "Sandybridge", &intel_fake_agp_driver, &sandybridge_gtt_driver },
-+	{ PCI_DEVICE_ID_INTEL_SANDYBRIDGE_S_IG,
-+	    "Sandybridge", &intel_fake_agp_driver, &sandybridge_gtt_driver },
-+	{ 0, NULL, NULL }
- };
-+
-+static int find_gmch(u16 device)
-+{
-+	struct pci_dev *gmch_device;
-+
-+	gmch_device = pci_get_device(PCI_VENDOR_ID_INTEL, device, NULL);
-+	if (gmch_device && PCI_FUNC(gmch_device->devfn) != 0) {
-+		gmch_device = pci_get_device(PCI_VENDOR_ID_INTEL,
-+					     device, gmch_device);
-+	}
-+
-+	if (!gmch_device)
-+		return 0;
-+
-+	intel_private.pcidev = gmch_device;
-+	return 1;
-+}
-+
-+int intel_gmch_probe(struct pci_dev *pdev,
-+				      struct agp_bridge_data *bridge)
-+{
-+	int i, mask;
-+	bridge->driver = NULL;
-+
-+	for (i = 0; intel_gtt_chipsets[i].name != NULL; i++) {
-+		if (find_gmch(intel_gtt_chipsets[i].gmch_chip_id)) {
-+			bridge->driver =
-+				intel_gtt_chipsets[i].gmch_driver;
-+			intel_private.driver =
-+				intel_gtt_chipsets[i].gtt_driver;
-+			break;
-+		}
-+	}
-+
-+	if (!bridge->driver)
-+		return 0;
-+
-+	bridge->dev_private_data = &intel_private;
-+	bridge->dev = pdev;
-+
-+	intel_private.bridge_dev = pci_dev_get(pdev);
-+
-+	dev_info(&pdev->dev, "Intel %s Chipset\n", intel_gtt_chipsets[i].name);
-+
-+	mask = intel_private.driver->dma_mask_size;
-+	if (pci_set_dma_mask(intel_private.pcidev, DMA_BIT_MASK(mask)))
-+		dev_err(&intel_private.pcidev->dev,
-+			"set gfx device dma mask %d-bit failed!\n", mask);
-+	else
-+		pci_set_consistent_dma_mask(intel_private.pcidev,
-+					    DMA_BIT_MASK(mask));
-+
-+	if (bridge->driver == &intel_810_driver)
-+		return 1;
-+
-+	if (intel_gtt_init() != 0)
-+		return 0;
-+
-+	return 1;
-+}
-+EXPORT_SYMBOL(intel_gmch_probe);
-+
-+struct intel_gtt *intel_gtt_get(void)
-+{
-+	return &intel_private.base;
-+}
-+EXPORT_SYMBOL(intel_gtt_get);
-+
-+void intel_gmch_remove(struct pci_dev *pdev)
-+{
-+	if (intel_private.pcidev)
-+		pci_dev_put(intel_private.pcidev);
-+	if (intel_private.bridge_dev)
-+		pci_dev_put(intel_private.bridge_dev);
-+}
-+EXPORT_SYMBOL(intel_gmch_remove);
-+
-+MODULE_AUTHOR("Dave Jones <davej@redhat.com>");
-+MODULE_LICENSE("GPL and additional rights");
-diff --git a/drivers/gpu/drm/drm_edid.c b/drivers/gpu/drm/drm_edid.c
-index 96e9631..7f356af 100644
---- a/drivers/gpu/drm/drm_edid.c
-+++ b/drivers/gpu/drm/drm_edid.c
-@@ -1268,34 +1268,51 @@ add_detailed_modes(struct drm_connector *connector, struct edid *edid,
- }
- 
- #define HDMI_IDENTIFIER 0x000C03
-+#define AUDIO_BLOCK	0x01
- #define VENDOR_BLOCK    0x03
-+#define EDID_BASIC_AUDIO	(1 << 6)
-+
- /**
-- * drm_detect_hdmi_monitor - detect whether monitor is hdmi.
-- * @edid: monitor EDID information
-- *
-- * Parse the CEA extension according to CEA-861-B.
-- * Return true if HDMI, false if not or unknown.
-+ * Search EDID for CEA extension block.
-  */
--bool drm_detect_hdmi_monitor(struct edid *edid)
-+static u8 *drm_find_cea_extension(struct edid *edid)
- {
--	char *edid_ext = NULL;
--	int i, hdmi_id;
--	int start_offset, end_offset;
--	bool is_hdmi = false;
-+	u8 *edid_ext = NULL;
-+	int i;
- 
- 	/* No EDID or EDID extensions */
- 	if (edid == NULL || edid->extensions == 0)
--		goto end;
-+		return NULL;
- 
- 	/* Find CEA extension */
- 	for (i = 0; i < edid->extensions; i++) {
--		edid_ext = (char *)edid + EDID_LENGTH * (i + 1);
--		/* This block is CEA extension */
--		if (edid_ext[0] == 0x02)
-+		edid_ext = (u8 *)edid + EDID_LENGTH * (i + 1);
-+		if (edid_ext[0] == CEA_EXT)
- 			break;
- 	}
- 
- 	if (i == edid->extensions)
-+		return NULL;
-+
-+	return edid_ext;
-+}
-+
-+/**
-+ * drm_detect_hdmi_monitor - detect whether monitor is hdmi.
-+ * @edid: monitor EDID information
-+ *
-+ * Parse the CEA extension according to CEA-861-B.
-+ * Return true if HDMI, false if not or unknown.
-+ */
-+bool drm_detect_hdmi_monitor(struct edid *edid)
-+{
-+	u8 *edid_ext;
-+	int i, hdmi_id;
-+	int start_offset, end_offset;
-+	bool is_hdmi = false;
-+
-+	edid_ext = drm_find_cea_extension(edid);
-+	if (!edid_ext)
- 		goto end;
- 
- 	/* Data block offset in CEA extension block */
-@@ -1326,6 +1343,53 @@ end:
- EXPORT_SYMBOL(drm_detect_hdmi_monitor);
- 
- /**
-+ * drm_detect_monitor_audio - check monitor audio capability
-+ *
-+ * Monitor should have CEA extension block.
-+ * If monitor has 'basic audio', but no CEA audio blocks, it's 'basic
-+ * audio' only. If there is any audio extension block and supported
-+ * audio format, assume at least 'basic audio' support, even if 'basic
-+ * audio' is not defined in EDID.
-+ *
-+ */
-+bool drm_detect_monitor_audio(struct edid *edid)
-+{
-+	u8 *edid_ext;
-+	int i, j;
-+	bool has_audio = false;
-+	int start_offset, end_offset;
-+
-+	edid_ext = drm_find_cea_extension(edid);
-+	if (!edid_ext)
-+		goto end;
-+
-+	has_audio = ((edid_ext[3] & EDID_BASIC_AUDIO) != 0);
-+
-+	if (has_audio) {
-+		DRM_DEBUG_KMS("Monitor has basic audio support\n");
-+		goto end;
-+	}
-+
-+	/* Data block offset in CEA extension block */
-+	start_offset = 4;
-+	end_offset = edid_ext[2];
-+
-+	for (i = start_offset; i < end_offset;
-+			i += ((edid_ext[i] & 0x1f) + 1)) {
-+		if ((edid_ext[i] >> 5) == AUDIO_BLOCK) {
-+			has_audio = true;
-+			for (j = 1; j < (edid_ext[i] & 0x1f); j += 3)
-+				DRM_DEBUG_KMS("CEA audio format %d\n",
-+					      (edid_ext[i + j] >> 3) & 0xf);
-+			goto end;
-+		}
-+	}
-+end:
-+	return has_audio;
-+}
-+EXPORT_SYMBOL(drm_detect_monitor_audio);
-+
-+/**
-  * drm_add_edid_modes - add modes from EDID data, if available
-  * @connector: connector we're probing
-  * @edid: edid data
-diff --git a/drivers/gpu/drm/i915/Makefile b/drivers/gpu/drm/i915/Makefile
-index 5c8e534..fdc833d 100644
---- a/drivers/gpu/drm/i915/Makefile
-+++ b/drivers/gpu/drm/i915/Makefile
-@@ -26,15 +26,17 @@ i915-y := i915_drv.o i915_dma.o i915_irq.o i915_mem.o \
- 	  intel_dvo.o \
- 	  intel_ringbuffer.o \
- 	  intel_overlay.o \
-+	  intel_opregion.o \
- 	  dvo_ch7xxx.o \
- 	  dvo_ch7017.o \
- 	  dvo_ivch.o \
- 	  dvo_tfp410.o \
- 	  dvo_sil164.o
- 
--i915-$(CONFIG_ACPI)	+= i915_opregion.o
- i915-$(CONFIG_COMPAT)   += i915_ioc32.o
- 
-+i915-$(CONFIG_ACPI)	+= intel_acpi.o
-+
- obj-$(CONFIG_DRM_I915)  += i915.o
- 
- CFLAGS_i915_trace_points.o := -I$(src)
-diff --git a/drivers/gpu/drm/i915/dvo_ch7017.c b/drivers/gpu/drm/i915/dvo_ch7017.c
-index 14d5980..af70337 100644
---- a/drivers/gpu/drm/i915/dvo_ch7017.c
-+++ b/drivers/gpu/drm/i915/dvo_ch7017.c
-@@ -165,67 +165,44 @@ struct ch7017_priv {
- static void ch7017_dump_regs(struct intel_dvo_device *dvo);
- static void ch7017_dpms(struct intel_dvo_device *dvo, int mode);
- 
--static bool ch7017_read(struct intel_dvo_device *dvo, int addr, uint8_t *val)
-+static bool ch7017_read(struct intel_dvo_device *dvo, u8 addr, u8 *val)
- {
--	struct i2c_adapter *adapter = dvo->i2c_bus;
--	struct intel_i2c_chan *i2cbus = container_of(adapter, struct intel_i2c_chan, adapter);
--	u8 out_buf[2];
--	u8 in_buf[2];
--
- 	struct i2c_msg msgs[] = {
- 		{
- 			.addr = dvo->slave_addr,
- 			.flags = 0,
- 			.len = 1,
--			.buf = out_buf,
-+			.buf = &addr,
- 		},
- 		{
- 			.addr = dvo->slave_addr,
- 			.flags = I2C_M_RD,
- 			.len = 1,
--			.buf = in_buf,
-+			.buf = val,
- 		}
- 	};
--
--	out_buf[0] = addr;
--	out_buf[1] = 0;
--
--	if (i2c_transfer(&i2cbus->adapter, msgs, 2) == 2) {
--		*val= in_buf[0];
--		return true;
--	};
--
--	return false;
-+	return i2c_transfer(dvo->i2c_bus, msgs, 2) == 2;
- }
- 
--static bool ch7017_write(struct intel_dvo_device *dvo, int addr, uint8_t val)
-+static bool ch7017_write(struct intel_dvo_device *dvo, u8 addr, u8 val)
- {
--	struct i2c_adapter *adapter = dvo->i2c_bus;
--	struct intel_i2c_chan *i2cbus = container_of(adapter, struct intel_i2c_chan, adapter);
--	uint8_t out_buf[2];
-+	uint8_t buf[2] = { addr, val };
- 	struct i2c_msg msg = {
- 		.addr = dvo->slave_addr,
- 		.flags = 0,
- 		.len = 2,
--		.buf = out_buf,
-+		.buf = buf,
- 	};
--
--	out_buf[0] = addr;
--	out_buf[1] = val;
--
--	if (i2c_transfer(&i2cbus->adapter, &msg, 1) == 1)
--		return true;
--
--	return false;
-+	return i2c_transfer(dvo->i2c_bus, &msg, 1) == 1;
- }
- 
- /** Probes for a CH7017 on the given bus and slave address. */
- static bool ch7017_init(struct intel_dvo_device *dvo,
- 			struct i2c_adapter *adapter)
- {
--	struct intel_i2c_chan *i2cbus = container_of(adapter, struct intel_i2c_chan, adapter);
- 	struct ch7017_priv *priv;
--	uint8_t val;
-+	const char *str;
-+	u8 val;
- 
- 	priv = kzalloc(sizeof(struct ch7017_priv), GFP_KERNEL);
- 	if (priv == NULL)
-@@ -237,16 +214,27 @@ static bool ch7017_init(struct intel_dvo_device *dvo,
- 	if (!ch7017_read(dvo, CH7017_DEVICE_ID, &val))
- 		goto fail;
- 
--	if (val != CH7017_DEVICE_ID_VALUE &&
--	    val != CH7018_DEVICE_ID_VALUE &&
--	    val != CH7019_DEVICE_ID_VALUE) {
-+	switch (val) {
-+	case CH7017_DEVICE_ID_VALUE:
-+		str = "ch7017";
-+		break;
-+	case CH7018_DEVICE_ID_VALUE:
-+		str = "ch7018";
-+		break;
-+	case CH7019_DEVICE_ID_VALUE:
-+		str = "ch7019";
-+		break;
-+	default:
- 		DRM_DEBUG_KMS("ch701x not detected, got %d: from %s "
--				"Slave %d.\n",
--			  val, i2cbus->adapter.name,dvo->slave_addr);
-+			      "slave %d.\n",
-+			      val, adapter->name,dvo->slave_addr);
- 		goto fail;
- 	}
- 
-+	DRM_DEBUG_KMS("%s detected on %s, addr %d\n",
-+		      str, adapter->name, dvo->slave_addr);
- 	return true;
-+
- fail:
- 	kfree(priv);
- 	return false;
-@@ -368,7 +356,7 @@ static void ch7017_dpms(struct intel_dvo_device *dvo, int mode)
- 	}
- 
- 	/* XXX: Should actually wait for update power status somehow */
--	udelay(20000);
-+	msleep(20);
- }
- 
- static void ch7017_dump_regs(struct intel_dvo_device *dvo)
-diff --git a/drivers/gpu/drm/i915/dvo_ch7xxx.c b/drivers/gpu/drm/i915/dvo_ch7xxx.c
-index 6f1944b..7eaa94e 100644
---- a/drivers/gpu/drm/i915/dvo_ch7xxx.c
-+++ b/drivers/gpu/drm/i915/dvo_ch7xxx.c
-@@ -113,7 +113,6 @@ static bool ch7xxx_readb(struct intel_dvo_device *dvo, int addr, uint8_t *ch)
- {
- 	struct ch7xxx_priv *ch7xxx= dvo->dev_priv;
- 	struct i2c_adapter *adapter = dvo->i2c_bus;
--	struct intel_i2c_chan *i2cbus = container_of(adapter, struct intel_i2c_chan, adapter);
- 	u8 out_buf[2];
- 	u8 in_buf[2];
- 
-@@ -135,14 +134,14 @@ static bool ch7xxx_readb(struct intel_dvo_device *dvo, int addr, uint8_t *ch)
- 	out_buf[0] = addr;
- 	out_buf[1] = 0;
- 
--	if (i2c_transfer(&i2cbus->adapter, msgs, 2) == 2) {
-+	if (i2c_transfer(adapter, msgs, 2) == 2) {
- 		*ch = in_buf[0];
- 		return true;
- 	};
- 
- 	if (!ch7xxx->quiet) {
- 		DRM_DEBUG_KMS("Unable to read register 0x%02x from %s:%02x.\n",
--			  addr, i2cbus->adapter.name, dvo->slave_addr);
-+			  addr, adapter->name, dvo->slave_addr);
- 	}
- 	return false;
- }
-@@ -152,7 +151,6 @@ static bool ch7xxx_writeb(struct intel_dvo_device *dvo, int addr, uint8_t ch)
- {
- 	struct ch7xxx_priv *ch7xxx = dvo->dev_priv;
- 	struct i2c_adapter *adapter = dvo->i2c_bus;
--	struct intel_i2c_chan *i2cbus = container_of(adapter, struct intel_i2c_chan, adapter);
- 	uint8_t out_buf[2];
- 	struct i2c_msg msg = {
- 		.addr = dvo->slave_addr,
-@@ -164,12 +162,12 @@ static bool ch7xxx_writeb(struct intel_dvo_device *dvo, int addr, uint8_t ch)
- 	out_buf[0] = addr;
- 	out_buf[1] = ch;
- 
--	if (i2c_transfer(&i2cbus->adapter, &msg, 1) == 1)
-+	if (i2c_transfer(adapter, &msg, 1) == 1)
- 		return true;
- 
- 	if (!ch7xxx->quiet) {
- 		DRM_DEBUG_KMS("Unable to write register 0x%02x to %s:%d.\n",
--			  addr, i2cbus->adapter.name, dvo->slave_addr);
-+			  addr, adapter->name, dvo->slave_addr);
- 	}
- 
- 	return false;
-diff --git a/drivers/gpu/drm/i915/dvo_ivch.c b/drivers/gpu/drm/i915/dvo_ivch.c
-index a2ec3f4..a12ed94 100644
---- a/drivers/gpu/drm/i915/dvo_ivch.c
-+++ b/drivers/gpu/drm/i915/dvo_ivch.c
-@@ -167,7 +167,6 @@ static bool ivch_read(struct intel_dvo_device *dvo, int addr, uint16_t *data)
- {
- 	struct ivch_priv *priv = dvo->dev_priv;
- 	struct i2c_adapter *adapter = dvo->i2c_bus;
--	struct intel_i2c_chan *i2cbus = container_of(adapter, struct intel_i2c_chan, adapter);
- 	u8 out_buf[1];
- 	u8 in_buf[2];
- 
-@@ -193,7 +192,7 @@ static bool ivch_read(struct intel_dvo_device *dvo, int addr, uint16_t *data)
- 
- 	out_buf[0] = addr;
- 
--	if (i2c_transfer(&i2cbus->adapter, msgs, 3) == 3) {
-+	if (i2c_transfer(adapter, msgs, 3) == 3) {
- 		*data = (in_buf[1] << 8) | in_buf[0];
- 		return true;
- 	};
-@@ -201,7 +200,7 @@ static bool ivch_read(struct intel_dvo_device *dvo, int addr, uint16_t *data)
- 	if (!priv->quiet) {
- 		DRM_DEBUG_KMS("Unable to read register 0x%02x from "
- 				"%s:%02x.\n",
--			  addr, i2cbus->adapter.name, dvo->slave_addr);
-+			  addr, adapter->name, dvo->slave_addr);
- 	}
- 	return false;
- }
-@@ -211,7 +210,6 @@ static bool ivch_write(struct intel_dvo_device *dvo, int addr, uint16_t data)
- {
- 	struct ivch_priv *priv = dvo->dev_priv;
- 	struct i2c_adapter *adapter = dvo->i2c_bus;
--	struct intel_i2c_chan *i2cbus = container_of(adapter, struct intel_i2c_chan, adapter);
- 	u8 out_buf[3];
- 	struct i2c_msg msg = {
- 		.addr = dvo->slave_addr,
-@@ -224,12 +222,12 @@ static bool ivch_write(struct intel_dvo_device *dvo, int addr, uint16_t data)
- 	out_buf[1] = data & 0xff;
- 	out_buf[2] = data >> 8;
- 
--	if (i2c_transfer(&i2cbus->adapter, &msg, 1) == 1)
-+	if (i2c_transfer(adapter, &msg, 1) == 1)
- 		return true;
- 
- 	if (!priv->quiet) {
- 		DRM_DEBUG_KMS("Unable to write register 0x%02x to %s:%d.\n",
--			  addr, i2cbus->adapter.name, dvo->slave_addr);
-+			  addr, adapter->name, dvo->slave_addr);
- 	}
- 
- 	return false;
-diff --git a/drivers/gpu/drm/i915/dvo_sil164.c b/drivers/gpu/drm/i915/dvo_sil164.c
-index 9b8e676..e4b4091 100644
---- a/drivers/gpu/drm/i915/dvo_sil164.c
-+++ b/drivers/gpu/drm/i915/dvo_sil164.c
-@@ -69,7 +69,6 @@ static bool sil164_readb(struct intel_dvo_device *dvo, int addr, uint8_t *ch)
- {
- 	struct sil164_priv *sil = dvo->dev_priv;
- 	struct i2c_adapter *adapter = dvo->i2c_bus;
--	struct intel_i2c_chan *i2cbus = container_of(adapter, struct intel_i2c_chan, adapter);
- 	u8 out_buf[2];
- 	u8 in_buf[2];
- 
-@@ -91,14 +90,14 @@ static bool sil164_readb(struct intel_dvo_device *dvo, int addr, uint8_t *ch)
- 	out_buf[0] = addr;
- 	out_buf[1] = 0;
- 
--	if (i2c_transfer(&i2cbus->adapter, msgs, 2) == 2) {
-+	if (i2c_transfer(adapter, msgs, 2) == 2) {
- 		*ch = in_buf[0];
- 		return true;
- 	};
- 
- 	if (!sil->quiet) {
- 		DRM_DEBUG_KMS("Unable to read register 0x%02x from %s:%02x.\n",
--			  addr, i2cbus->adapter.name, dvo->slave_addr);
-+			  addr, adapter->name, dvo->slave_addr);
- 	}
- 	return false;
- }
-@@ -107,7 +106,6 @@ static bool sil164_writeb(struct intel_dvo_device *dvo, int addr, uint8_t ch)
- {
- 	struct sil164_priv *sil= dvo->dev_priv;
- 	struct i2c_adapter *adapter = dvo->i2c_bus;
--	struct intel_i2c_chan *i2cbus = container_of(adapter, struct intel_i2c_chan, adapter);
- 	uint8_t out_buf[2];
- 	struct i2c_msg msg = {
- 		.addr = dvo->slave_addr,
-@@ -119,12 +117,12 @@ static bool sil164_writeb(struct intel_dvo_device *dvo, int addr, uint8_t ch)
- 	out_buf[0] = addr;
- 	out_buf[1] = ch;
- 
--	if (i2c_transfer(&i2cbus->adapter, &msg, 1) == 1)
-+	if (i2c_transfer(adapter, &msg, 1) == 1)
- 		return true;
- 
- 	if (!sil->quiet) {
- 		DRM_DEBUG_KMS("Unable to write register 0x%02x to %s:%d.\n",
--			  addr, i2cbus->adapter.name, dvo->slave_addr);
-+			  addr, adapter->name, dvo->slave_addr);
- 	}
- 
- 	return false;
-diff --git a/drivers/gpu/drm/i915/dvo_tfp410.c b/drivers/gpu/drm/i915/dvo_tfp410.c
-index 56f6642..8ab2855 100644
---- a/drivers/gpu/drm/i915/dvo_tfp410.c
-+++ b/drivers/gpu/drm/i915/dvo_tfp410.c
-@@ -94,7 +94,6 @@ static bool tfp410_readb(struct intel_dvo_device *dvo, int addr, uint8_t *ch)
- {
- 	struct tfp410_priv *tfp = dvo->dev_priv;
- 	struct i2c_adapter *adapter = dvo->i2c_bus;
--	struct intel_i2c_chan *i2cbus = container_of(adapter, struct intel_i2c_chan, adapter);
- 	u8 out_buf[2];
- 	u8 in_buf[2];
- 
-@@ -116,14 +115,14 @@ static bool tfp410_readb(struct intel_dvo_device *dvo, int addr, uint8_t *ch)
- 	out_buf[0] = addr;
- 	out_buf[1] = 0;
- 
--	if (i2c_transfer(&i2cbus->adapter, msgs, 2) == 2) {
-+	if (i2c_transfer(adapter, msgs, 2) == 2) {
- 		*ch = in_buf[0];
- 		return true;
- 	};
- 
- 	if (!tfp->quiet) {
- 		DRM_DEBUG_KMS("Unable to read register 0x%02x from %s:%02x.\n",
--			  addr, i2cbus->adapter.name, dvo->slave_addr);
-+			  addr, adapter->name, dvo->slave_addr);
- 	}
- 	return false;
- }
-@@ -132,7 +131,6 @@ static bool tfp410_writeb(struct intel_dvo_device *dvo, int addr, uint8_t ch)
- {
- 	struct tfp410_priv *tfp = dvo->dev_priv;
- 	struct i2c_adapter *adapter = dvo->i2c_bus;
--	struct intel_i2c_chan *i2cbus = container_of(adapter, struct intel_i2c_chan, adapter);
- 	uint8_t out_buf[2];
- 	struct i2c_msg msg = {
- 		.addr = dvo->slave_addr,
-@@ -144,12 +142,12 @@ static bool tfp410_writeb(struct intel_dvo_device *dvo, int addr, uint8_t ch)
- 	out_buf[0] = addr;
- 	out_buf[1] = ch;
- 
--	if (i2c_transfer(&i2cbus->adapter, &msg, 1) == 1)
-+	if (i2c_transfer(adapter, &msg, 1) == 1)
- 		return true;
- 
- 	if (!tfp->quiet) {
- 		DRM_DEBUG_KMS("Unable to write register 0x%02x to %s:%d.\n",
--			  addr, i2cbus->adapter.name, dvo->slave_addr);
-+			  addr, adapter->name, dvo->slave_addr);
- 	}
- 
- 	return false;
-diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
-index 5e43d70..1f4f3ce 100644
---- a/drivers/gpu/drm/i915/i915_debugfs.c
-+++ b/drivers/gpu/drm/i915/i915_debugfs.c
-@@ -40,9 +40,51 @@
- 
- #if defined(CONFIG_DEBUG_FS)
- 
--#define ACTIVE_LIST	1
--#define FLUSHING_LIST	2
--#define INACTIVE_LIST	3
-+enum {
-+	ACTIVE_LIST,
-+	FLUSHING_LIST,
-+	INACTIVE_LIST,
-+	PINNED_LIST,
-+	DEFERRED_FREE_LIST,
-+};
-+
-+static const char *yesno(int v)
-+{
-+	return v ? "yes" : "no";
-+}
-+
-+static int i915_capabilities(struct seq_file *m, void *data)
-+{
-+	struct drm_info_node *node = (struct drm_info_node *) m->private;
-+	struct drm_device *dev = node->minor->dev;
-+	const struct intel_device_info *info = INTEL_INFO(dev);
-+
-+	seq_printf(m, "gen: %d\n", info->gen);
-+#define B(x) seq_printf(m, #x ": %s\n", yesno(info->x))
-+	B(is_mobile);
-+	B(is_i85x);
-+	B(is_i915g);
-+	B(is_i945gm);
-+	B(is_g33);
-+	B(need_gfx_hws);
-+	B(is_g4x);
-+	B(is_pineview);
-+	B(is_broadwater);
-+	B(is_crestline);
-+	B(has_fbc);
-+	B(has_rc6);
-+	B(has_pipe_cxsr);
-+	B(has_hotplug);
-+	B(cursor_needs_physical);
-+	B(has_overlay);
-+	B(overlay_needs_physical);
-+	B(supports_tv);
-+	B(has_bsd_ring);
-+	B(has_blt_ring);
-+#undef B
-+
-+	return 0;
-+}
- 
- static const char *get_pin_flag(struct drm_i915_gem_object *obj_priv)
- {
-@@ -64,6 +106,29 @@ static const char *get_tiling_flag(struct drm_i915_gem_object *obj_priv)
-     }
- }
- 
-+static void
-+describe_obj(struct seq_file *m, struct drm_i915_gem_object *obj)
-+{
-+	seq_printf(m, "%p: %s%s %8zd %08x %08x %d%s%s",
-+		   &obj->base,
-+		   get_pin_flag(obj),
-+		   get_tiling_flag(obj),
-+		   obj->base.size,
-+		   obj->base.read_domains,
-+		   obj->base.write_domain,
-+		   obj->last_rendering_seqno,
-+		   obj->dirty ? " dirty" : "",
-+		   obj->madv == I915_MADV_DONTNEED ? " purgeable" : "");
-+	if (obj->base.name)
-+		seq_printf(m, " (name: %d)", obj->base.name);
-+	if (obj->fence_reg != I915_FENCE_REG_NONE)
-+		seq_printf(m, " (fence: %d)", obj->fence_reg);
-+	if (obj->gtt_space != NULL)
-+		seq_printf(m, " (gtt_offset: %08x)", obj->gtt_offset);
-+	if (obj->ring != NULL)
-+		seq_printf(m, " (%s)", obj->ring->name);
-+}
-+
- static int i915_gem_object_list_info(struct seq_file *m, void *data)
- {
- 	struct drm_info_node *node = (struct drm_info_node *) m->private;
-@@ -72,56 +137,80 @@ static int i915_gem_object_list_info(struct seq_file *m, void *data)
- 	struct drm_device *dev = node->minor->dev;
- 	drm_i915_private_t *dev_priv = dev->dev_private;
- 	struct drm_i915_gem_object *obj_priv;
--	spinlock_t *lock = NULL;
-+	size_t total_obj_size, total_gtt_size;
-+	int count, ret;
-+
-+	ret = mutex_lock_interruptible(&dev->struct_mutex);
-+	if (ret)
-+		return ret;
- 
- 	switch (list) {
- 	case ACTIVE_LIST:
- 		seq_printf(m, "Active:\n");
--		lock = &dev_priv->mm.active_list_lock;
--		head = &dev_priv->render_ring.active_list;
-+		head = &dev_priv->mm.active_list;
- 		break;
- 	case INACTIVE_LIST:
- 		seq_printf(m, "Inactive:\n");
- 		head = &dev_priv->mm.inactive_list;
- 		break;
-+	case PINNED_LIST:
-+		seq_printf(m, "Pinned:\n");
-+		head = &dev_priv->mm.pinned_list;
-+		break;
- 	case FLUSHING_LIST:
- 		seq_printf(m, "Flushing:\n");
- 		head = &dev_priv->mm.flushing_list;
- 		break;
-+	case DEFERRED_FREE_LIST:
-+		seq_printf(m, "Deferred free:\n");
-+		head = &dev_priv->mm.deferred_free_list;
-+		break;
- 	default:
--		DRM_INFO("Ooops, unexpected list\n");
--		return 0;
-+		mutex_unlock(&dev->struct_mutex);
-+		return -EINVAL;
- 	}
- 
--	if (lock)
--		spin_lock(lock);
--	list_for_each_entry(obj_priv, head, list)
--	{
--		seq_printf(m, "    %p: %s %8zd %08x %08x %d%s%s",
--			   &obj_priv->base,
--			   get_pin_flag(obj_priv),
--			   obj_priv->base.size,
--			   obj_priv->base.read_domains,
--			   obj_priv->base.write_domain,
--			   obj_priv->last_rendering_seqno,
--			   obj_priv->dirty ? " dirty" : "",
--			   obj_priv->madv == I915_MADV_DONTNEED ? " purgeable" : "");
--
--		if (obj_priv->base.name)
--			seq_printf(m, " (name: %d)", obj_priv->base.name);
--		if (obj_priv->fence_reg != I915_FENCE_REG_NONE)
--			seq_printf(m, " (fence: %d)", obj_priv->fence_reg);
--		if (obj_priv->gtt_space != NULL)
--			seq_printf(m, " (gtt_offset: %08x)", obj_priv->gtt_offset);
--
-+	total_obj_size = total_gtt_size = count = 0;
-+	list_for_each_entry(obj_priv, head, mm_list) {
-+		seq_printf(m, "   ");
-+		describe_obj(m, obj_priv);
- 		seq_printf(m, "\n");
-+		total_obj_size += obj_priv->base.size;
-+		total_gtt_size += obj_priv->gtt_space->size;
-+		count++;
- 	}
-+	mutex_unlock(&dev->struct_mutex);
- 
--	if (lock)
--	    spin_unlock(lock);
-+	seq_printf(m, "Total %d objects, %zu bytes, %zu GTT size\n",
-+		   count, total_obj_size, total_gtt_size);
- 	return 0;
- }
- 
-+static int i915_gem_object_info(struct seq_file *m, void* data)
-+{
-+	struct drm_info_node *node = (struct drm_info_node *) m->private;
-+	struct drm_device *dev = node->minor->dev;
-+	struct drm_i915_private *dev_priv = dev->dev_private;
-+	int ret;
-+
-+	ret = mutex_lock_interruptible(&dev->struct_mutex);
-+	if (ret)
-+		return ret;
-+
-+	seq_printf(m, "%u objects\n", dev_priv->mm.object_count);
-+	seq_printf(m, "%zu object bytes\n", dev_priv->mm.object_memory);
-+	seq_printf(m, "%u pinned\n", dev_priv->mm.pin_count);
-+	seq_printf(m, "%zu pin bytes\n", dev_priv->mm.pin_memory);
-+	seq_printf(m, "%u objects in gtt\n", dev_priv->mm.gtt_count);
-+	seq_printf(m, "%zu gtt bytes\n", dev_priv->mm.gtt_memory);
-+	seq_printf(m, "%zu gtt total\n", dev_priv->mm.gtt_total);
-+
-+	mutex_unlock(&dev->struct_mutex);
-+
-+	return 0;
-+}
-+
-+
- static int i915_gem_pageflip_info(struct seq_file *m, void *data)
- {
- 	struct drm_info_node *node = (struct drm_info_node *) m->private;
-@@ -176,6 +265,11 @@ static int i915_gem_request_info(struct seq_file *m, void *data)
- 	struct drm_device *dev = node->minor->dev;
- 	drm_i915_private_t *dev_priv = dev->dev_private;
- 	struct drm_i915_gem_request *gem_request;
-+	int ret;
-+
-+	ret = mutex_lock_interruptible(&dev->struct_mutex);
-+	if (ret)
-+		return ret;
- 
- 	seq_printf(m, "Request:\n");
- 	list_for_each_entry(gem_request, &dev_priv->render_ring.request_list,
-@@ -184,6 +278,8 @@ static int i915_gem_request_info(struct seq_file *m, void *data)
- 			   gem_request->seqno,
- 			   (int) (jiffies - gem_request->emitted_jiffies));
- 	}
-+	mutex_unlock(&dev->struct_mutex);
-+
- 	return 0;
- }
- 
-@@ -192,16 +288,24 @@ static int i915_gem_seqno_info(struct seq_file *m, void *data)
- 	struct drm_info_node *node = (struct drm_info_node *) m->private;
- 	struct drm_device *dev = node->minor->dev;
- 	drm_i915_private_t *dev_priv = dev->dev_private;
-+	int ret;
-+
-+	ret = mutex_lock_interruptible(&dev->struct_mutex);
-+	if (ret)
-+		return ret;
- 
- 	if (dev_priv->render_ring.status_page.page_addr != NULL) {
- 		seq_printf(m, "Current sequence: %d\n",
--			   i915_get_gem_seqno(dev,  &dev_priv->render_ring));
-+			   dev_priv->render_ring.get_seqno(dev, &dev_priv->render_ring));
- 	} else {
- 		seq_printf(m, "Current sequence: hws uninitialized\n");
- 	}
- 	seq_printf(m, "Waiter sequence:  %d\n",
- 			dev_priv->mm.waiting_gem_seqno);
- 	seq_printf(m, "IRQ sequence:     %d\n", dev_priv->mm.irq_gem_seqno);
-+
-+	mutex_unlock(&dev->struct_mutex);
-+
- 	return 0;
- }
- 
-@@ -211,6 +315,11 @@ static int i915_interrupt_info(struct seq_file *m, void *data)
- 	struct drm_info_node *node = (struct drm_info_node *) m->private;
- 	struct drm_device *dev = node->minor->dev;
- 	drm_i915_private_t *dev_priv = dev->dev_private;
-+	int ret;
-+
-+	ret = mutex_lock_interruptible(&dev->struct_mutex);
-+	if (ret)
-+		return ret;
- 
- 	if (!HAS_PCH_SPLIT(dev)) {
- 		seq_printf(m, "Interrupt enable:    %08x\n",
-@@ -247,7 +356,7 @@ static int i915_interrupt_info(struct seq_file *m, void *data)
- 		   atomic_read(&dev_priv->irq_received));
- 	if (dev_priv->render_ring.status_page.page_addr != NULL) {
- 		seq_printf(m, "Current sequence:    %d\n",
--			   i915_get_gem_seqno(dev,  &dev_priv->render_ring));
-+			   dev_priv->render_ring.get_seqno(dev, &dev_priv->render_ring));
- 	} else {
- 		seq_printf(m, "Current sequence:    hws uninitialized\n");
- 	}
-@@ -255,6 +364,8 @@ static int i915_interrupt_info(struct seq_file *m, void *data)
- 		   dev_priv->mm.waiting_gem_seqno);
- 	seq_printf(m, "IRQ sequence:        %d\n",
- 		   dev_priv->mm.irq_gem_seqno);
-+	mutex_unlock(&dev->struct_mutex);
-+
- 	return 0;
- }
- 
-@@ -263,7 +374,11 @@ static int i915_gem_fence_regs_info(struct seq_file *m, void *data)
- 	struct drm_info_node *node = (struct drm_info_node *) m->private;
- 	struct drm_device *dev = node->minor->dev;
- 	drm_i915_private_t *dev_priv = dev->dev_private;
--	int i;
-+	int i, ret;
-+
-+	ret = mutex_lock_interruptible(&dev->struct_mutex);
-+	if (ret)
-+		return ret;
- 
- 	seq_printf(m, "Reserved fences = %d\n", dev_priv->fence_reg_start);
- 	seq_printf(m, "Total fences = %d\n", dev_priv->num_fence_regs);
-@@ -289,6 +404,7 @@ static int i915_gem_fence_regs_info(struct seq_file *m, void *data)
- 			seq_printf(m, "\n");
- 		}
- 	}
-+	mutex_unlock(&dev->struct_mutex);
- 
- 	return 0;
- }
-@@ -313,16 +429,19 @@ static int i915_hws_info(struct seq_file *m, void *data)
- 	return 0;
- }
- 
--static void i915_dump_pages(struct seq_file *m, struct page **pages, int page_count)
-+static void i915_dump_object(struct seq_file *m,
-+			     struct io_mapping *mapping,
-+			     struct drm_i915_gem_object *obj_priv)
- {
--	int page, i;
--	uint32_t *mem;
-+	int page, page_count, i;
- 
-+	page_count = obj_priv->base.size / PAGE_SIZE;
- 	for (page = 0; page < page_count; page++) {
--		mem = kmap_atomic(pages[page], KM_USER0);
-+		u32 *mem = io_mapping_map_wc(mapping,
-+					     obj_priv->gtt_offset + page * PAGE_SIZE);
- 		for (i = 0; i < PAGE_SIZE; i += 4)
- 			seq_printf(m, "%08x :  %08x\n", i, mem[i / 4]);
--		kunmap_atomic(mem, KM_USER0);
-+		io_mapping_unmap(mem);
- 	}
- }
- 
-@@ -335,27 +454,20 @@ static int i915_batchbuffer_info(struct seq_file *m, void *data)
- 	struct drm_i915_gem_object *obj_priv;
- 	int ret;
- 
--	spin_lock(&dev_priv->mm.active_list_lock);
-+	ret = mutex_lock_interruptible(&dev->struct_mutex);
-+	if (ret)
-+		return ret;
- 
--	list_for_each_entry(obj_priv, &dev_priv->render_ring.active_list,
--			list) {
-+	list_for_each_entry(obj_priv, &dev_priv->mm.active_list, mm_list) {
- 		obj = &obj_priv->base;
- 		if (obj->read_domains & I915_GEM_DOMAIN_COMMAND) {
--		    ret = i915_gem_object_get_pages(obj, 0);
--		    if (ret) {
--			    DRM_ERROR("Failed to get pages: %d\n", ret);
--			    spin_unlock(&dev_priv->mm.active_list_lock);
--			    return ret;
--		    }
--
--		    seq_printf(m, "--- gtt_offset = 0x%08x\n", obj_priv->gtt_offset);
--		    i915_dump_pages(m, obj_priv->pages, obj->size / PAGE_SIZE);
--
--		    i915_gem_object_put_pages(obj);
-+		    seq_printf(m, "--- gtt_offset = 0x%08x\n",
-+			       obj_priv->gtt_offset);
-+		    i915_dump_object(m, dev_priv->mm.gtt_mapping, obj_priv);
- 		}
- 	}
- 
--	spin_unlock(&dev_priv->mm.active_list_lock);
-+	mutex_unlock(&dev->struct_mutex);
- 
- 	return 0;
- }
-@@ -365,20 +477,24 @@ static int i915_ringbuffer_data(struct seq_file *m, void *data)
- 	struct drm_info_node *node = (struct drm_info_node *) m->private;
- 	struct drm_device *dev = node->minor->dev;
- 	drm_i915_private_t *dev_priv = dev->dev_private;
--	u8 *virt;
--	uint32_t *ptr, off;
-+	int ret;
-+
-+	ret = mutex_lock_interruptible(&dev->struct_mutex);
-+	if (ret)
-+		return ret;
- 
- 	if (!dev_priv->render_ring.gem_object) {
- 		seq_printf(m, "No ringbuffer setup\n");
--		return 0;
--	}
--
--	virt = dev_priv->render_ring.virtual_start;
-+	} else {
-+		u8 *virt = dev_priv->render_ring.virtual_start;
-+		uint32_t off;
- 
--	for (off = 0; off < dev_priv->render_ring.size; off += 4) {
--		ptr = (uint32_t *)(virt + off);
--		seq_printf(m, "%08x :  %08x\n", off, *ptr);
-+		for (off = 0; off < dev_priv->render_ring.size; off += 4) {
-+			uint32_t *ptr = (uint32_t *)(virt + off);
-+			seq_printf(m, "%08x :  %08x\n", off, *ptr);
-+		}
- 	}
-+	mutex_unlock(&dev->struct_mutex);
- 
- 	return 0;
- }
-@@ -396,7 +512,7 @@ static int i915_ringbuffer_info(struct seq_file *m, void *data)
- 	seq_printf(m, "RingHead :  %08x\n", head);
- 	seq_printf(m, "RingTail :  %08x\n", tail);
- 	seq_printf(m, "RingSize :  %08lx\n", dev_priv->render_ring.size);
--	seq_printf(m, "Acthd :     %08x\n", I915_READ(IS_I965G(dev) ? ACTHD_I965 : ACTHD));
-+	seq_printf(m, "Acthd :     %08x\n", I915_READ(INTEL_INFO(dev)->gen >= 4 ? ACTHD_I965 : ACTHD));
- 
- 	return 0;
- }
-@@ -458,7 +574,7 @@ static int i915_error_state(struct seq_file *m, void *unused)
- 	seq_printf(m, "  IPEHR: 0x%08x\n", error->ipehr);
- 	seq_printf(m, "  INSTDONE: 0x%08x\n", error->instdone);
- 	seq_printf(m, "  ACTHD: 0x%08x\n", error->acthd);
--	if (IS_I965G(dev)) {
-+	if (INTEL_INFO(dev)->gen >= 4) {
- 		seq_printf(m, "  INSTPS: 0x%08x\n", error->instps);
- 		seq_printf(m, "  INSTDONE1: 0x%08x\n", error->instdone1);
- 	}
-@@ -642,6 +758,9 @@ static int i915_fbc_status(struct seq_file *m, void *unused)
- 	} else {
- 		seq_printf(m, "FBC disabled: ");
- 		switch (dev_priv->no_fbc_reason) {
-+		case FBC_NO_OUTPUT:
-+			seq_printf(m, "no outputs");
-+			break;
- 		case FBC_STOLEN_TOO_SMALL:
- 			seq_printf(m, "not enough stolen memory");
- 			break;
-@@ -675,15 +794,17 @@ static int i915_sr_status(struct seq_file *m, void *unused)
- 	drm_i915_private_t *dev_priv = dev->dev_private;
- 	bool sr_enabled = false;
- 
--	if (IS_I965GM(dev) || IS_I945G(dev) || IS_I945GM(dev))
-+	if (IS_GEN5(dev))
-+		sr_enabled = I915_READ(WM1_LP_ILK) & WM1_LP_SR_EN;
-+	else if (IS_CRESTLINE(dev) || IS_I945G(dev) || IS_I945GM(dev))
- 		sr_enabled = I915_READ(FW_BLC_SELF) & FW_BLC_SELF_EN;
- 	else if (IS_I915GM(dev))
- 		sr_enabled = I915_READ(INSTPM) & INSTPM_SELF_EN;
- 	else if (IS_PINEVIEW(dev))
- 		sr_enabled = I915_READ(DSPFW3) & PINEVIEW_SELF_REFRESH_EN;
- 
--	seq_printf(m, "self-refresh: %s\n", sr_enabled ? "enabled" :
--		   "disabled");
-+	seq_printf(m, "self-refresh: %s\n",
-+		   sr_enabled ? "enabled" : "disabled");
- 
- 	return 0;
- }
-@@ -694,10 +815,16 @@ static int i915_emon_status(struct seq_file *m, void *unused)
- 	struct drm_device *dev = node->minor->dev;
- 	drm_i915_private_t *dev_priv = dev->dev_private;
- 	unsigned long temp, chipset, gfx;
-+	int ret;
-+
-+	ret = mutex_lock_interruptible(&dev->struct_mutex);
-+	if (ret)
-+		return ret;
- 
- 	temp = i915_mch_val(dev_priv);
- 	chipset = i915_chipset_val(dev_priv);
- 	gfx = i915_gfx_val(dev_priv);
-+	mutex_unlock(&dev->struct_mutex);
- 
- 	seq_printf(m, "GMCH temp: %ld\n", temp);
- 	seq_printf(m, "Chipset power: %ld\n", chipset);
-@@ -718,6 +845,68 @@ static int i915_gfxec(struct seq_file *m, void *unused)
- 	return 0;
- }
- 
-+static int i915_opregion(struct seq_file *m, void *unused)
-+{
-+	struct drm_info_node *node = (struct drm_info_node *) m->private;
-+	struct drm_device *dev = node->minor->dev;
-+	drm_i915_private_t *dev_priv = dev->dev_private;
-+	struct intel_opregion *opregion = &dev_priv->opregion;
-+	int ret;
-+
-+	ret = mutex_lock_interruptible(&dev->struct_mutex);
-+	if (ret)
-+		return ret;
-+
-+	if (opregion->header)
-+		seq_write(m, opregion->header, OPREGION_SIZE);
-+
-+	mutex_unlock(&dev->struct_mutex);
-+
-+	return 0;
-+}
-+
-+static int i915_gem_framebuffer_info(struct seq_file *m, void *data)
-+{
-+	struct drm_info_node *node = (struct drm_info_node *) m->private;
-+	struct drm_device *dev = node->minor->dev;
-+	drm_i915_private_t *dev_priv = dev->dev_private;
-+	struct intel_fbdev *ifbdev;
-+	struct intel_framebuffer *fb;
-+	int ret;
-+
-+	ret = mutex_lock_interruptible(&dev->mode_config.mutex);
-+	if (ret)
-+		return ret;
-+
-+	ifbdev = dev_priv->fbdev;
-+	fb = to_intel_framebuffer(ifbdev->helper.fb);
-+
-+	seq_printf(m, "fbcon size: %d x %d, depth %d, %d bpp, obj ",
-+		   fb->base.width,
-+		   fb->base.height,
-+		   fb->base.depth,
-+		   fb->base.bits_per_pixel);
-+	describe_obj(m, to_intel_bo(fb->obj));
-+	seq_printf(m, "\n");
-+
-+	list_for_each_entry(fb, &dev->mode_config.fb_list, base.head) {
-+		if (&fb->base == ifbdev->helper.fb)
-+			continue;
-+
-+		seq_printf(m, "user size: %d x %d, depth %d, %d bpp, obj ",
-+			   fb->base.width,
-+			   fb->base.height,
-+			   fb->base.depth,
-+			   fb->base.bits_per_pixel);
-+		describe_obj(m, to_intel_bo(fb->obj));
-+		seq_printf(m, "\n");
-+	}
-+
-+	mutex_unlock(&dev->mode_config.mutex);
-+
-+	return 0;
-+}
-+
- static int
- i915_wedged_open(struct inode *inode,
- 		 struct file *filp)
-@@ -741,6 +930,9 @@ i915_wedged_read(struct file *filp,
- 		       "wedged :  %d\n",
- 		       atomic_read(&dev_priv->mm.wedged));
- 
-+	if (len > sizeof (buf))
-+		len = sizeof (buf);
-+
- 	return simple_read_from_buffer(ubuf, max, ppos, buf, len);
- }
- 
-@@ -770,7 +962,7 @@ i915_wedged_write(struct file *filp,
- 
- 	atomic_set(&dev_priv->mm.wedged, val);
- 	if (val) {
--		DRM_WAKEUP(&dev_priv->irq_queue);
-+		wake_up_all(&dev_priv->irq_queue);
- 		queue_work(dev_priv->wq, &dev_priv->error_work);
- 	}
- 
-@@ -782,6 +974,7 @@ static const struct file_operations i915_wedged_fops = {
- 	.open = i915_wedged_open,
- 	.read = i915_wedged_read,
- 	.write = i915_wedged_write,
-+	.llseek = default_llseek,
- };
- 
- /* As the drm_debugfs_init() routines are called before dev->dev_private is
-@@ -823,9 +1016,13 @@ static int i915_wedged_create(struct dentry *root, struct drm_minor *minor)
- }
- 
- static struct drm_info_list i915_debugfs_list[] = {
-+	{"i915_capabilities", i915_capabilities, 0, 0},
-+	{"i915_gem_objects", i915_gem_object_info, 0},
- 	{"i915_gem_active", i915_gem_object_list_info, 0, (void *) ACTIVE_LIST},
- 	{"i915_gem_flushing", i915_gem_object_list_info, 0, (void *) FLUSHING_LIST},
- 	{"i915_gem_inactive", i915_gem_object_list_info, 0, (void *) INACTIVE_LIST},
-+	{"i915_gem_pinned", i915_gem_object_list_info, 0, (void *) PINNED_LIST},
-+	{"i915_gem_deferred_free", i915_gem_object_list_info, 0, (void *) DEFERRED_FREE_LIST},
- 	{"i915_gem_pageflip", i915_gem_pageflip_info, 0},
- 	{"i915_gem_request", i915_gem_request_info, 0},
- 	{"i915_gem_seqno", i915_gem_seqno_info, 0},
-@@ -845,6 +1042,8 @@ static struct drm_info_list i915_debugfs_list[] = {
- 	{"i915_gfxec", i915_gfxec, 0},
- 	{"i915_fbc_status", i915_fbc_status, 0},
- 	{"i915_sr_status", i915_sr_status, 0},
-+	{"i915_opregion", i915_opregion, 0},
-+	{"i915_gem_framebuffer", i915_gem_framebuffer_info, 0},
- };
- #define I915_DEBUGFS_ENTRIES ARRAY_SIZE(i915_debugfs_list)
- 
-diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c
-index 2dd2c93..7a26f4dd 100644
---- a/drivers/gpu/drm/i915/i915_dma.c
-+++ b/drivers/gpu/drm/i915/i915_dma.c
-@@ -40,8 +40,7 @@
- #include <linux/pnp.h>
- #include <linux/vga_switcheroo.h>
- #include <linux/slab.h>
--
--extern int intel_max_stolen; /* from AGP driver */
-+#include <acpi/video.h>
- 
- /**
-  * Sets up the hardware status page for devices that need a physical address
-@@ -64,7 +63,7 @@ static int i915_init_phys_hws(struct drm_device *dev)
- 
- 	memset(dev_priv->render_ring.status_page.page_addr, 0, PAGE_SIZE);
- 
--	if (IS_I965G(dev))
-+	if (INTEL_INFO(dev)->gen >= 4)
- 		dev_priv->dma_status_page |= (dev_priv->dma_status_page >> 28) &
- 					     0xf0;
- 
-@@ -133,8 +132,8 @@ static int i915_dma_cleanup(struct drm_device * dev)
- 
- 	mutex_lock(&dev->struct_mutex);
- 	intel_cleanup_ring_buffer(dev, &dev_priv->render_ring);
--	if (HAS_BSD(dev))
--		intel_cleanup_ring_buffer(dev, &dev_priv->bsd_ring);
-+	intel_cleanup_ring_buffer(dev, &dev_priv->bsd_ring);
-+	intel_cleanup_ring_buffer(dev, &dev_priv->blt_ring);
- 	mutex_unlock(&dev->struct_mutex);
- 
- 	/* Clear the HWS virtual address at teardown */
-@@ -222,7 +221,7 @@ static int i915_dma_resume(struct drm_device * dev)
- 	DRM_DEBUG_DRIVER("hw status page @ %p\n",
- 				ring->status_page.page_addr);
- 	if (ring->status_page.gfx_addr != 0)
--		ring->setup_status_page(dev, ring);
-+		intel_ring_setup_status_page(dev, ring);
- 	else
- 		I915_WRITE(HWS_PGA, dev_priv->dma_status_page);
- 
-@@ -377,7 +376,7 @@ i915_emit_box(struct drm_device *dev,
- 		return -EINVAL;
- 	}
- 
--	if (IS_I965G(dev)) {
-+	if (INTEL_INFO(dev)->gen >= 4) {
- 		BEGIN_LP_RING(4);
- 		OUT_RING(GFX_OP_DRAWRECT_INFO_I965);
- 		OUT_RING((box.x1 & 0xffff) | (box.y1 << 16));
-@@ -481,7 +480,7 @@ static int i915_dispatch_batchbuffer(struct drm_device * dev,
- 
- 		if (!IS_I830(dev) && !IS_845G(dev)) {
- 			BEGIN_LP_RING(2);
--			if (IS_I965G(dev)) {
-+			if (INTEL_INFO(dev)->gen >= 4) {
- 				OUT_RING(MI_BATCH_BUFFER_START | (2 << 6) | MI_BATCH_NON_SECURE_I965);
- 				OUT_RING(batch->start);
- 			} else {
-@@ -500,7 +499,7 @@ static int i915_dispatch_batchbuffer(struct drm_device * dev,
- 	}
- 
- 
--	if (IS_G4X(dev) || IS_IRONLAKE(dev)) {
-+	if (IS_G4X(dev) || IS_GEN5(dev)) {
- 		BEGIN_LP_RING(2);
- 		OUT_RING(MI_FLUSH | MI_NO_WRITE_FLUSH | MI_INVALIDATE_ISP);
- 		OUT_RING(MI_NOOP);
-@@ -765,6 +764,9 @@ static int i915_getparam(struct drm_device *dev, void *data,
- 	case I915_PARAM_HAS_BSD:
- 		value = HAS_BSD(dev);
- 		break;
-+	case I915_PARAM_HAS_BLT:
-+		value = HAS_BLT(dev);
-+		break;
- 	default:
- 		DRM_DEBUG_DRIVER("Unknown parameter %d\n",
- 				 param->param);
-@@ -888,12 +890,12 @@ static int
- intel_alloc_mchbar_resource(struct drm_device *dev)
- {
- 	drm_i915_private_t *dev_priv = dev->dev_private;
--	int reg = IS_I965G(dev) ? MCHBAR_I965 : MCHBAR_I915;
-+	int reg = INTEL_INFO(dev)->gen >= 4 ? MCHBAR_I965 : MCHBAR_I915;
- 	u32 temp_lo, temp_hi = 0;
- 	u64 mchbar_addr;
- 	int ret;
- 
--	if (IS_I965G(dev))
-+	if (INTEL_INFO(dev)->gen >= 4)
- 		pci_read_config_dword(dev_priv->bridge_dev, reg + 4, &temp_hi);
- 	pci_read_config_dword(dev_priv->bridge_dev, reg, &temp_lo);
- 	mchbar_addr = ((u64)temp_hi << 32) | temp_lo;
-@@ -920,7 +922,7 @@ intel_alloc_mchbar_resource(struct drm_device *dev)
- 		return ret;
- 	}
- 
--	if (IS_I965G(dev))
-+	if (INTEL_INFO(dev)->gen >= 4)
- 		pci_write_config_dword(dev_priv->bridge_dev, reg + 4,
- 				       upper_32_bits(dev_priv->mch_res.start));
- 
-@@ -934,7 +936,7 @@ static void
- intel_setup_mchbar(struct drm_device *dev)
- {
- 	drm_i915_private_t *dev_priv = dev->dev_private;
--	int mchbar_reg = IS_I965G(dev) ? MCHBAR_I965 : MCHBAR_I915;
-+	int mchbar_reg = INTEL_INFO(dev)->gen >= 4 ? MCHBAR_I965 : MCHBAR_I915;
- 	u32 temp;
- 	bool enabled;
- 
-@@ -971,7 +973,7 @@ static void
- intel_teardown_mchbar(struct drm_device *dev)
- {
- 	drm_i915_private_t *dev_priv = dev->dev_private;
--	int mchbar_reg = IS_I965G(dev) ? MCHBAR_I965 : MCHBAR_I915;
-+	int mchbar_reg = INTEL_INFO(dev)->gen >= 4 ? MCHBAR_I965 : MCHBAR_I915;
- 	u32 temp;
- 
- 	if (dev_priv->mchbar_need_disable) {
-@@ -990,174 +992,6 @@ intel_teardown_mchbar(struct drm_device *dev)
- 		release_resource(&dev_priv->mch_res);
- }
- 
--/**
-- * i915_probe_agp - get AGP bootup configuration
-- * @pdev: PCI device
-- * @aperture_size: returns AGP aperture configured size
-- * @preallocated_size: returns size of BIOS preallocated AGP space
-- *
-- * Since Intel integrated graphics are UMA, the BIOS has to set aside
-- * some RAM for the framebuffer at early boot.  This code figures out
-- * how much was set aside so we can use it for our own purposes.
-- */
--static int i915_probe_agp(struct drm_device *dev, uint32_t *aperture_size,
--			  uint32_t *preallocated_size,
--			  uint32_t *start)
--{
--	struct drm_i915_private *dev_priv = dev->dev_private;
--	u16 tmp = 0;
--	unsigned long overhead;
--	unsigned long stolen;
--
--	/* Get the fb aperture size and "stolen" memory amount. */
--	pci_read_config_word(dev_priv->bridge_dev, INTEL_GMCH_CTRL, &tmp);
--
--	*aperture_size = 1024 * 1024;
--	*preallocated_size = 1024 * 1024;
--
--	switch (dev->pdev->device) {
--	case PCI_DEVICE_ID_INTEL_82830_CGC:
--	case PCI_DEVICE_ID_INTEL_82845G_IG:
--	case PCI_DEVICE_ID_INTEL_82855GM_IG:
--	case PCI_DEVICE_ID_INTEL_82865_IG:
--		if ((tmp & INTEL_GMCH_MEM_MASK) == INTEL_GMCH_MEM_64M)
--			*aperture_size *= 64;
--		else
--			*aperture_size *= 128;
--		break;
--	default:
--		/* 9xx supports large sizes, just look at the length */
--		*aperture_size = pci_resource_len(dev->pdev, 2);
--		break;
--	}
--
--	/*
--	 * Some of the preallocated space is taken by the GTT
--	 * and popup.  GTT is 1K per MB of aperture size, and popup is 4K.
--	 */
--	if (IS_G4X(dev) || IS_PINEVIEW(dev) || IS_IRONLAKE(dev) || IS_GEN6(dev))
--		overhead = 4096;
--	else
--		overhead = (*aperture_size / 1024) + 4096;
--
--	if (IS_GEN6(dev)) {
--		/* SNB has memory control reg at 0x50.w */
--		pci_read_config_word(dev->pdev, SNB_GMCH_CTRL, &tmp);
--
--		switch (tmp & SNB_GMCH_GMS_STOLEN_MASK) {
--		case INTEL_855_GMCH_GMS_DISABLED:
--			DRM_ERROR("video memory is disabled\n");
--			return -1;
--		case SNB_GMCH_GMS_STOLEN_32M:
--			stolen = 32 * 1024 * 1024;
--			break;
--		case SNB_GMCH_GMS_STOLEN_64M:
--			stolen = 64 * 1024 * 1024;
--			break;
--		case SNB_GMCH_GMS_STOLEN_96M:
--			stolen = 96 * 1024 * 1024;
--			break;
--		case SNB_GMCH_GMS_STOLEN_128M:
--			stolen = 128 * 1024 * 1024;
--			break;
--		case SNB_GMCH_GMS_STOLEN_160M:
--			stolen = 160 * 1024 * 1024;
--			break;
--		case SNB_GMCH_GMS_STOLEN_192M:
--			stolen = 192 * 1024 * 1024;
--			break;
--		case SNB_GMCH_GMS_STOLEN_224M:
--			stolen = 224 * 1024 * 1024;
--			break;
--		case SNB_GMCH_GMS_STOLEN_256M:
--			stolen = 256 * 1024 * 1024;
--			break;
--		case SNB_GMCH_GMS_STOLEN_288M:
--			stolen = 288 * 1024 * 1024;
--			break;
--		case SNB_GMCH_GMS_STOLEN_320M:
--			stolen = 320 * 1024 * 1024;
--			break;
--		case SNB_GMCH_GMS_STOLEN_352M:
--			stolen = 352 * 1024 * 1024;
--			break;
--		case SNB_GMCH_GMS_STOLEN_384M:
--			stolen = 384 * 1024 * 1024;
--			break;
--		case SNB_GMCH_GMS_STOLEN_416M:
--			stolen = 416 * 1024 * 1024;
--			break;
--		case SNB_GMCH_GMS_STOLEN_448M:
--			stolen = 448 * 1024 * 1024;
--			break;
--		case SNB_GMCH_GMS_STOLEN_480M:
--			stolen = 480 * 1024 * 1024;
--			break;
--		case SNB_GMCH_GMS_STOLEN_512M:
--			stolen = 512 * 1024 * 1024;
--			break;
--		default:
--			DRM_ERROR("unexpected GMCH_GMS value: 0x%02x\n",
--				  tmp & SNB_GMCH_GMS_STOLEN_MASK);
--			return -1;
--		}
--	} else {
--		switch (tmp & INTEL_GMCH_GMS_MASK) {
--		case INTEL_855_GMCH_GMS_DISABLED:
--			DRM_ERROR("video memory is disabled\n");
--			return -1;
--		case INTEL_855_GMCH_GMS_STOLEN_1M:
--			stolen = 1 * 1024 * 1024;
--			break;
--		case INTEL_855_GMCH_GMS_STOLEN_4M:
--			stolen = 4 * 1024 * 1024;
--			break;
--		case INTEL_855_GMCH_GMS_STOLEN_8M:
--			stolen = 8 * 1024 * 1024;
--			break;
--		case INTEL_855_GMCH_GMS_STOLEN_16M:
--			stolen = 16 * 1024 * 1024;
--			break;
--		case INTEL_855_GMCH_GMS_STOLEN_32M:
--			stolen = 32 * 1024 * 1024;
--			break;
--		case INTEL_915G_GMCH_GMS_STOLEN_48M:
--			stolen = 48 * 1024 * 1024;
--			break;
--		case INTEL_915G_GMCH_GMS_STOLEN_64M:
--			stolen = 64 * 1024 * 1024;
--			break;
--		case INTEL_GMCH_GMS_STOLEN_128M:
--			stolen = 128 * 1024 * 1024;
--			break;
--		case INTEL_GMCH_GMS_STOLEN_256M:
--			stolen = 256 * 1024 * 1024;
--			break;
--		case INTEL_GMCH_GMS_STOLEN_96M:
--			stolen = 96 * 1024 * 1024;
--			break;
--		case INTEL_GMCH_GMS_STOLEN_160M:
--			stolen = 160 * 1024 * 1024;
--			break;
--		case INTEL_GMCH_GMS_STOLEN_224M:
--			stolen = 224 * 1024 * 1024;
--			break;
--		case INTEL_GMCH_GMS_STOLEN_352M:
--			stolen = 352 * 1024 * 1024;
--			break;
--		default:
--			DRM_ERROR("unexpected GMCH_GMS value: 0x%02x\n",
--				  tmp & INTEL_GMCH_GMS_MASK);
--			return -1;
--		}
--	}
--
--	*preallocated_size = stolen - overhead;
--	*start = overhead;
--
--	return 0;
--}
--
- #define PTE_ADDRESS_MASK		0xfffff000
- #define PTE_ADDRESS_MASK_HIGH		0x000000f0 /* i915+ */
- #define PTE_MAPPING_TYPE_UNCACHED	(0 << 1)
-@@ -1181,11 +1015,11 @@ static unsigned long i915_gtt_to_phys(struct drm_device *dev,
- {
- 	unsigned long *gtt;
- 	unsigned long entry, phys;
--	int gtt_bar = IS_I9XX(dev) ? 0 : 1;
-+	int gtt_bar = IS_GEN2(dev) ? 1 : 0;
- 	int gtt_offset, gtt_size;
- 
--	if (IS_I965G(dev)) {
--		if (IS_G4X(dev) || IS_IRONLAKE(dev) || IS_GEN6(dev)) {
-+	if (INTEL_INFO(dev)->gen >= 4) {
-+		if (IS_G4X(dev) || INTEL_INFO(dev)->gen > 4) {
- 			gtt_offset = 2*1024*1024;
- 			gtt_size = 2*1024*1024;
- 		} else {
-@@ -1210,10 +1044,8 @@ static unsigned long i915_gtt_to_phys(struct drm_device *dev,
- 	DRM_DEBUG_DRIVER("GTT addr: 0x%08lx, PTE: 0x%08lx\n", gtt_addr, entry);
- 
- 	/* Mask out these reserved bits on this hardware. */
--	if (!IS_I9XX(dev) || IS_I915G(dev) || IS_I915GM(dev) ||
--	    IS_I945G(dev) || IS_I945GM(dev)) {
-+	if (INTEL_INFO(dev)->gen < 4 && !IS_G33(dev))
- 		entry &= ~PTE_ADDRESS_MASK_HIGH;
--	}
- 
- 	/* If it's not a mapping type we know, then bail. */
- 	if ((entry & PTE_MAPPING_TYPE_MASK) != PTE_MAPPING_TYPE_UNCACHED &&
-@@ -1252,7 +1084,7 @@ static void i915_setup_compression(struct drm_device *dev, int size)
- 	unsigned long ll_base = 0;
- 
- 	/* Leave 1M for line length buffer & misc. */
--	compressed_fb = drm_mm_search_free(&dev_priv->vram, size, 4096, 0);
-+	compressed_fb = drm_mm_search_free(&dev_priv->mm.vram, size, 4096, 0);
- 	if (!compressed_fb) {
- 		dev_priv->no_fbc_reason = FBC_STOLEN_TOO_SMALL;
- 		i915_warn_stolen(dev);
-@@ -1273,7 +1105,7 @@ static void i915_setup_compression(struct drm_device *dev, int size)
- 	}
- 
- 	if (!(IS_GM45(dev) || IS_IRONLAKE_M(dev))) {
--		compressed_llb = drm_mm_search_free(&dev_priv->vram, 4096,
-+		compressed_llb = drm_mm_search_free(&dev_priv->mm.vram, 4096,
- 						    4096, 0);
- 		if (!compressed_llb) {
- 			i915_warn_stolen(dev);
-@@ -1343,10 +1175,8 @@ static void i915_switcheroo_set_state(struct pci_dev *pdev, enum vga_switcheroo_
- 		/* i915 resume handler doesn't set to D0 */
- 		pci_set_power_state(dev->pdev, PCI_D0);
- 		i915_resume(dev);
--		drm_kms_helper_poll_enable(dev);
- 	} else {
- 		printk(KERN_ERR "i915: switched off\n");
--		drm_kms_helper_poll_disable(dev);
- 		i915_suspend(dev, pmm);
- 	}
- }
-@@ -1363,23 +1193,14 @@ static bool i915_switcheroo_can_switch(struct pci_dev *pdev)
- }
- 
- static int i915_load_modeset_init(struct drm_device *dev,
--				  unsigned long prealloc_start,
- 				  unsigned long prealloc_size,
- 				  unsigned long agp_size)
- {
- 	struct drm_i915_private *dev_priv = dev->dev_private;
--	int fb_bar = IS_I9XX(dev) ? 2 : 0;
- 	int ret = 0;
- 
--	dev->mode_config.fb_base = pci_resource_start(dev->pdev, fb_bar) &
--		0xff000000;
--
--	/* Basic memrange allocator for stolen space (aka vram) */
--	drm_mm_init(&dev_priv->vram, 0, prealloc_size);
--	DRM_INFO("set up %ldM of stolen space\n", prealloc_size / (1024*1024));
--
--	/* We're off and running w/KMS */
--	dev_priv->mm.suspended = 0;
-+	/* Basic memrange allocator for stolen space (aka mm.vram) */
-+	drm_mm_init(&dev_priv->mm.vram, 0, prealloc_size);
- 
- 	/* Let GEM Manage from end of prealloc space to end of aperture.
- 	 *
-@@ -1414,7 +1235,7 @@ static int i915_load_modeset_init(struct drm_device *dev,
- 	 */
- 	dev_priv->allow_batchbuffer = 1;
- 
--	ret = intel_init_bios(dev);
-+	ret = intel_parse_bios(dev);
- 	if (ret)
- 		DRM_INFO("failed to find VBIOS tables\n");
- 
-@@ -1423,6 +1244,8 @@ static int i915_load_modeset_init(struct drm_device *dev,
- 	if (ret)
- 		goto cleanup_ringbuffer;
- 
-+	intel_register_dsm_handler();
-+
- 	ret = vga_switcheroo_register_client(dev->pdev,
- 					     i915_switcheroo_set_state,
- 					     i915_switcheroo_can_switch);
-@@ -1443,17 +1266,15 @@ static int i915_load_modeset_init(struct drm_device *dev,
- 	/* FIXME: do pre/post-mode set stuff in core KMS code */
- 	dev->vblank_disable_allowed = 1;
- 
--	/*
--	 * Initialize the hardware status page IRQ location.
--	 */
--
--	I915_WRITE(INSTPM, (1 << 5) | (1 << 21));
--
- 	ret = intel_fbdev_init(dev);
- 	if (ret)
- 		goto cleanup_irq;
- 
- 	drm_kms_helper_poll_init(dev);
-+
-+	/* We're off and running w/KMS */
-+	dev_priv->mm.suspended = 0;
-+
- 	return 0;
- 
- cleanup_irq:
-@@ -1907,7 +1728,7 @@ static struct drm_i915_private *i915_mch_dev;
-  *   - dev_priv->fmax
-  *   - dev_priv->gpu_busy
-  */
--DEFINE_SPINLOCK(mchdev_lock);
-+static DEFINE_SPINLOCK(mchdev_lock);
- 
- /**
-  * i915_read_mch_val - return value for IPS use
-@@ -2062,7 +1883,7 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
- 	struct drm_i915_private *dev_priv;
- 	resource_size_t base, size;
- 	int ret = 0, mmio_bar;
--	uint32_t agp_size, prealloc_size, prealloc_start;
-+	uint32_t agp_size, prealloc_size;
- 	/* i915 has 4 more counters */
- 	dev->counters += 4;
- 	dev->types[6] = _DRM_STAT_IRQ;
-@@ -2079,7 +1900,7 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
- 	dev_priv->info = (struct intel_device_info *) flags;
- 
- 	/* Add register map (needed for suspend/resume) */
--	mmio_bar = IS_I9XX(dev) ? 0 : 1;
-+	mmio_bar = IS_GEN2(dev) ? 1 : 0;
- 	base = pci_resource_start(dev->pdev, mmio_bar);
- 	size = pci_resource_len(dev->pdev, mmio_bar);
- 
-@@ -2121,17 +1942,32 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
- 			 "performance may suffer.\n");
- 	}
- 
--	ret = i915_probe_agp(dev, &agp_size, &prealloc_size, &prealloc_start);
--	if (ret)
-+	dev_priv->mm.gtt = intel_gtt_get();
-+	if (!dev_priv->mm.gtt) {
-+		DRM_ERROR("Failed to initialize GTT\n");
-+		ret = -ENODEV;
- 		goto out_iomapfree;
--
--	if (prealloc_size > intel_max_stolen) {
--		DRM_INFO("detected %dM stolen memory, trimming to %dM\n",
--			 prealloc_size >> 20, intel_max_stolen >> 20);
--		prealloc_size = intel_max_stolen;
- 	}
- 
--	dev_priv->wq = create_singlethread_workqueue("i915");
-+	prealloc_size = dev_priv->mm.gtt->gtt_stolen_entries << PAGE_SHIFT;
-+	agp_size = dev_priv->mm.gtt->gtt_mappable_entries << PAGE_SHIFT;
-+
-+	/* The i915 workqueue is primarily used for batched retirement of
-+	 * requests (and thus managing bo) once the task has been completed
-+	 * by the GPU. i915_gem_retire_requests() is called directly when we
-+	 * need high-priority retirement, such as waiting for an explicit
-+	 * bo.
-+	 *
-+	 * It is also used for periodic low-priority events, such as
-+	 * idle-timers and hangcheck.
-+	 *
-+	 * All tasks on the workqueue are expected to acquire the dev mutex
-+	 * so there is no point in running more than one instance of the
-+	 * workqueue at any time: max_active = 1 and NON_REENTRANT.
-+	 */
-+	dev_priv->wq = alloc_workqueue("i915",
-+				       WQ_UNBOUND | WQ_NON_REENTRANT,
-+				       1);
- 	if (dev_priv->wq == NULL) {
- 		DRM_ERROR("Failed to create our workqueue.\n");
- 		ret = -ENOMEM;
-@@ -2159,13 +1995,18 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
- 
- 	dev->driver->get_vblank_counter = i915_get_vblank_counter;
- 	dev->max_vblank_count = 0xffffff; /* only 24 bits of frame count */
--	if (IS_G4X(dev) || IS_IRONLAKE(dev) || IS_GEN6(dev)) {
-+	if (IS_G4X(dev) || IS_GEN5(dev) || IS_GEN6(dev)) {
- 		dev->max_vblank_count = 0xffffffff; /* full 32 bit counter */
- 		dev->driver->get_vblank_counter = gm45_get_vblank_counter;
- 	}
- 
- 	/* Try to make sure MCHBAR is enabled before poking at it */
- 	intel_setup_mchbar(dev);
-+	intel_setup_gmbus(dev);
-+	intel_opregion_setup(dev);
-+
-+	/* Make sure the bios did its job and set up vital registers */
-+	intel_setup_bios(dev);
- 
- 	i915_gem_load(dev);
- 
-@@ -2178,7 +2019,7 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
- 
- 	if (IS_PINEVIEW(dev))
- 		i915_pineview_get_mem_freq(dev);
--	else if (IS_IRONLAKE(dev))
-+	else if (IS_GEN5(dev))
- 		i915_ironlake_get_mem_freq(dev);
- 
- 	/* On the 945G/GM, the chipset reports the MSI capability on the
-@@ -2212,8 +2053,7 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
- 	intel_detect_pch(dev);
- 
- 	if (drm_core_check_feature(dev, DRIVER_MODESET)) {
--		ret = i915_load_modeset_init(dev, prealloc_start,
--					     prealloc_size, agp_size);
-+		ret = i915_load_modeset_init(dev, prealloc_size, agp_size);
- 		if (ret < 0) {
- 			DRM_ERROR("failed to init modeset\n");
- 			goto out_workqueue_free;
-@@ -2221,7 +2061,8 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
- 	}
- 
- 	/* Must be done after probing outputs */
--	intel_opregion_init(dev, 0);
-+	intel_opregion_init(dev);
-+	acpi_video_register();
- 
- 	setup_timer(&dev_priv->hangcheck_timer, i915_hangcheck_elapsed,
- 		    (unsigned long) dev);
-@@ -2231,9 +2072,6 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
- 	dev_priv->mchdev_lock = &mchdev_lock;
- 	spin_unlock(&mchdev_lock);
- 
--	/* XXX Prevent module unload due to memory corruption bugs. */
--	__module_get(THIS_MODULE);
--
- 	return 0;
- 
- out_workqueue_free:
-@@ -2252,15 +2090,20 @@ free_priv:
- int i915_driver_unload(struct drm_device *dev)
- {
- 	struct drm_i915_private *dev_priv = dev->dev_private;
--
--	i915_destroy_error_state(dev);
-+	int ret;
- 
- 	spin_lock(&mchdev_lock);
- 	i915_mch_dev = NULL;
- 	spin_unlock(&mchdev_lock);
- 
--	destroy_workqueue(dev_priv->wq);
--	del_timer_sync(&dev_priv->hangcheck_timer);
-+	mutex_lock(&dev->struct_mutex);
-+	ret = i915_gpu_idle(dev);
-+	if (ret)
-+		DRM_ERROR("failed to idle hardware: %d\n", ret);
-+	mutex_unlock(&dev->struct_mutex);
-+
-+	/* Cancel the retire work handler, which should be idle now. */
-+	cancel_delayed_work_sync(&dev_priv->mm.retire_work);
- 
- 	io_mapping_free(dev_priv->mm.gtt_mapping);
- 	if (dev_priv->mm.gtt_mtrr >= 0) {
-@@ -2269,7 +2112,10 @@ int i915_driver_unload(struct drm_device *dev)
- 		dev_priv->mm.gtt_mtrr = -1;
- 	}
- 
-+	acpi_video_unregister();
-+
- 	if (drm_core_check_feature(dev, DRIVER_MODESET)) {
-+		intel_fbdev_fini(dev);
- 		intel_modeset_cleanup(dev);
- 
- 		/*
-@@ -2281,20 +2127,25 @@ int i915_driver_unload(struct drm_device *dev)
- 			dev_priv->child_dev = NULL;
- 			dev_priv->child_dev_num = 0;
- 		}
--		drm_irq_uninstall(dev);
-+
- 		vga_switcheroo_unregister_client(dev->pdev);
- 		vga_client_register(dev->pdev, NULL, NULL, NULL);
- 	}
- 
-+	/* Free error state after interrupts are fully disabled. */
-+	del_timer_sync(&dev_priv->hangcheck_timer);
-+	cancel_work_sync(&dev_priv->error_work);
-+	i915_destroy_error_state(dev);
-+
- 	if (dev->pdev->msi_enabled)
- 		pci_disable_msi(dev->pdev);
- 
--	if (dev_priv->regs != NULL)
--		iounmap(dev_priv->regs);
--
--	intel_opregion_free(dev, 0);
-+	intel_opregion_fini(dev);
- 
- 	if (drm_core_check_feature(dev, DRIVER_MODESET)) {
-+		/* Flush any outstanding unpin_work. */
-+		flush_workqueue(dev_priv->wq);
-+
- 		i915_gem_free_all_phys_object(dev);
- 
- 		mutex_lock(&dev->struct_mutex);
-@@ -2302,34 +2153,41 @@ int i915_driver_unload(struct drm_device *dev)
- 		mutex_unlock(&dev->struct_mutex);
- 		if (I915_HAS_FBC(dev) && i915_powersave)
- 			i915_cleanup_compression(dev);
--		drm_mm_takedown(&dev_priv->vram);
--		i915_gem_lastclose(dev);
-+		drm_mm_takedown(&dev_priv->mm.vram);
- 
- 		intel_cleanup_overlay(dev);
-+
-+		if (!I915_NEED_GFX_HWS(dev))
-+			i915_free_hws(dev);
- 	}
- 
-+	if (dev_priv->regs != NULL)
-+		iounmap(dev_priv->regs);
-+
-+	intel_teardown_gmbus(dev);
- 	intel_teardown_mchbar(dev);
- 
-+	destroy_workqueue(dev_priv->wq);
-+
- 	pci_dev_put(dev_priv->bridge_dev);
- 	kfree(dev->dev_private);
- 
- 	return 0;
- }
- 
--int i915_driver_open(struct drm_device *dev, struct drm_file *file_priv)
-+int i915_driver_open(struct drm_device *dev, struct drm_file *file)
- {
--	struct drm_i915_file_private *i915_file_priv;
-+	struct drm_i915_file_private *file_priv;
- 
- 	DRM_DEBUG_DRIVER("\n");
--	i915_file_priv = (struct drm_i915_file_private *)
--	    kmalloc(sizeof(*i915_file_priv), GFP_KERNEL);
--
--	if (!i915_file_priv)
-+	file_priv = kmalloc(sizeof(*file_priv), GFP_KERNEL);
-+	if (!file_priv)
- 		return -ENOMEM;
- 
--	file_priv->driver_priv = i915_file_priv;
-+	file->driver_priv = file_priv;
- 
--	INIT_LIST_HEAD(&i915_file_priv->mm.request_list);
-+	spin_lock_init(&file_priv->mm.lock);
-+	INIT_LIST_HEAD(&file_priv->mm.request_list);
- 
- 	return 0;
- }
-@@ -2372,11 +2230,11 @@ void i915_driver_preclose(struct drm_device * dev, struct drm_file *file_priv)
- 		i915_mem_release(dev, file_priv, dev_priv->agp_heap);
- }
- 
--void i915_driver_postclose(struct drm_device *dev, struct drm_file *file_priv)
-+void i915_driver_postclose(struct drm_device *dev, struct drm_file *file)
- {
--	struct drm_i915_file_private *i915_file_priv = file_priv->driver_priv;
-+	struct drm_i915_file_private *file_priv = file->driver_priv;
- 
--	kfree(i915_file_priv);
-+	kfree(file_priv);
- }
- 
- struct drm_ioctl_desc i915_ioctls[] = {
-diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
-index 6dbe14c..027cbfc 100644
---- a/drivers/gpu/drm/i915/i915_drv.c
-+++ b/drivers/gpu/drm/i915/i915_drv.c
-@@ -32,6 +32,7 @@
- #include "drm.h"
- #include "i915_drm.h"
- #include "i915_drv.h"
-+#include "intel_drv.h"
- 
- #include <linux/console.h>
- #include "drm_crtc_helper.h"
-@@ -43,7 +44,7 @@ unsigned int i915_fbpercrtc = 0;
- module_param_named(fbpercrtc, i915_fbpercrtc, int, 0400);
- 
- unsigned int i915_powersave = 1;
--module_param_named(powersave, i915_powersave, int, 0400);
-+module_param_named(powersave, i915_powersave, int, 0600);
- 
- unsigned int i915_lvds_downclock = 0;
- module_param_named(lvds_downclock, i915_lvds_downclock, int, 0400);
-@@ -61,86 +62,110 @@ extern int intel_agp_enabled;
- 	.driver_data = (unsigned long) info }
- 
- static const struct intel_device_info intel_i830_info = {
--	.gen = 2, .is_i8xx = 1, .is_mobile = 1, .cursor_needs_physical = 1,
-+	.gen = 2, .is_mobile = 1, .cursor_needs_physical = 1,
-+	.has_overlay = 1, .overlay_needs_physical = 1,
- };
- 
- static const struct intel_device_info intel_845g_info = {
--	.gen = 2, .is_i8xx = 1,
-+	.gen = 2,
-+	.has_overlay = 1, .overlay_needs_physical = 1,
- };
- 
- static const struct intel_device_info intel_i85x_info = {
--	.gen = 2, .is_i8xx = 1, .is_i85x = 1, .is_mobile = 1,
-+	.gen = 2, .is_i85x = 1, .is_mobile = 1,
- 	.cursor_needs_physical = 1,
-+	.has_overlay = 1, .overlay_needs_physical = 1,
- };
- 
- static const struct intel_device_info intel_i865g_info = {
--	.gen = 2, .is_i8xx = 1,
-+	.gen = 2,
-+	.has_overlay = 1, .overlay_needs_physical = 1,
- };
- 
- static const struct intel_device_info intel_i915g_info = {
--	.gen = 3, .is_i915g = 1, .is_i9xx = 1, .cursor_needs_physical = 1,
-+	.gen = 3, .is_i915g = 1, .cursor_needs_physical = 1,
-+	.has_overlay = 1, .overlay_needs_physical = 1,
- };
- static const struct intel_device_info intel_i915gm_info = {
--	.gen = 3, .is_i9xx = 1,  .is_mobile = 1,
-+	.gen = 3, .is_mobile = 1,
- 	.cursor_needs_physical = 1,
-+	.has_overlay = 1, .overlay_needs_physical = 1,
-+	.supports_tv = 1,
- };
- static const struct intel_device_info intel_i945g_info = {
--	.gen = 3, .is_i9xx = 1, .has_hotplug = 1, .cursor_needs_physical = 1,
-+	.gen = 3, .has_hotplug = 1, .cursor_needs_physical = 1,
-+	.has_overlay = 1, .overlay_needs_physical = 1,
- };
- static const struct intel_device_info intel_i945gm_info = {
--	.gen = 3, .is_i945gm = 1, .is_i9xx = 1, .is_mobile = 1,
-+	.gen = 3, .is_i945gm = 1, .is_mobile = 1,
- 	.has_hotplug = 1, .cursor_needs_physical = 1,
-+	.has_overlay = 1, .overlay_needs_physical = 1,
-+	.supports_tv = 1,
- };
- 
- static const struct intel_device_info intel_i965g_info = {
--	.gen = 4, .is_broadwater = 1, .is_i965g = 1, .is_i9xx = 1,
-+	.gen = 4, .is_broadwater = 1,
- 	.has_hotplug = 1,
-+	.has_overlay = 1,
- };
- 
- static const struct intel_device_info intel_i965gm_info = {
--	.gen = 4, .is_crestline = 1, .is_i965g = 1, .is_i965gm = 1, .is_i9xx = 1,
-+	.gen = 4, .is_crestline = 1,
- 	.is_mobile = 1, .has_fbc = 1, .has_rc6 = 1, .has_hotplug = 1,
-+	.has_overlay = 1,
-+	.supports_tv = 1,
- };
- 
- static const struct intel_device_info intel_g33_info = {
--	.gen = 3, .is_g33 = 1, .is_i9xx = 1,
-+	.gen = 3, .is_g33 = 1,
- 	.need_gfx_hws = 1, .has_hotplug = 1,
-+	.has_overlay = 1,
- };
- 
- static const struct intel_device_info intel_g45_info = {
--	.gen = 4, .is_i965g = 1, .is_g4x = 1, .is_i9xx = 1, .need_gfx_hws = 1,
-+	.gen = 4, .is_g4x = 1, .need_gfx_hws = 1,
- 	.has_pipe_cxsr = 1, .has_hotplug = 1,
-+	.has_bsd_ring = 1,
- };
- 
- static const struct intel_device_info intel_gm45_info = {
--	.gen = 4, .is_i965g = 1, .is_g4x = 1, .is_i9xx = 1,
-+	.gen = 4, .is_g4x = 1,
- 	.is_mobile = 1, .need_gfx_hws = 1, .has_fbc = 1, .has_rc6 = 1,
- 	.has_pipe_cxsr = 1, .has_hotplug = 1,
-+	.supports_tv = 1,
-+	.has_bsd_ring = 1,
- };
- 
- static const struct intel_device_info intel_pineview_info = {
--	.gen = 3, .is_g33 = 1, .is_pineview = 1, .is_mobile = 1, .is_i9xx = 1,
-+	.gen = 3, .is_g33 = 1, .is_pineview = 1, .is_mobile = 1,
- 	.need_gfx_hws = 1, .has_hotplug = 1,
-+	.has_overlay = 1,
- };
- 
- static const struct intel_device_info intel_ironlake_d_info = {
--	.gen = 5, .is_ironlake = 1, .is_i965g = 1, .is_i9xx = 1,
-+	.gen = 5,
- 	.need_gfx_hws = 1, .has_pipe_cxsr = 1, .has_hotplug = 1,
-+	.has_bsd_ring = 1,
- };
- 
- static const struct intel_device_info intel_ironlake_m_info = {
--	.gen = 5, .is_ironlake = 1, .is_mobile = 1, .is_i965g = 1, .is_i9xx = 1,
-+	.gen = 5, .is_mobile = 1,
- 	.need_gfx_hws = 1, .has_fbc = 1, .has_rc6 = 1, .has_hotplug = 1,
-+	.has_bsd_ring = 1,
- };
- 
- static const struct intel_device_info intel_sandybridge_d_info = {
--	.gen = 6, .is_i965g = 1, .is_i9xx = 1,
-+	.gen = 6,
- 	.need_gfx_hws = 1, .has_hotplug = 1,
-+	.has_bsd_ring = 1,
-+	.has_blt_ring = 1,
- };
- 
- static const struct intel_device_info intel_sandybridge_m_info = {
--	.gen = 6, .is_i965g = 1, .is_mobile = 1, .is_i9xx = 1,
-+	.gen = 6, .is_mobile = 1,
- 	.need_gfx_hws = 1, .has_hotplug = 1,
-+	.has_bsd_ring = 1,
-+	.has_blt_ring = 1,
- };
- 
- static const struct pci_device_id pciidlist[] = {		/* aka */
-@@ -237,7 +262,7 @@ static int i915_drm_freeze(struct drm_device *dev)
- 
- 	i915_save_state(dev);
- 
--	intel_opregion_free(dev, 1);
-+	intel_opregion_fini(dev);
- 
- 	/* Modeset on resume, not lid events */
- 	dev_priv->modeset_on_lid = 0;
-@@ -258,6 +283,8 @@ int i915_suspend(struct drm_device *dev, pm_message_t state)
- 	if (state.event == PM_EVENT_PRETHAW)
- 		return 0;
- 
-+	drm_kms_helper_poll_disable(dev);
-+
- 	error = i915_drm_freeze(dev);
- 	if (error)
- 		return error;
-@@ -277,8 +304,7 @@ static int i915_drm_thaw(struct drm_device *dev)
- 	int error = 0;
- 
- 	i915_restore_state(dev);
--
--	intel_opregion_init(dev, 1);
-+	intel_opregion_setup(dev);
- 
- 	/* KMS EnterVT equivalent */
- 	if (drm_core_check_feature(dev, DRIVER_MODESET)) {
-@@ -294,6 +320,8 @@ static int i915_drm_thaw(struct drm_device *dev)
- 		drm_helper_resume_force_mode(dev);
- 	}
- 
-+	intel_opregion_init(dev);
-+
- 	dev_priv->modeset_on_lid = 0;
- 
- 	return error;
-@@ -301,12 +329,79 @@ static int i915_drm_thaw(struct drm_device *dev)
- 
- int i915_resume(struct drm_device *dev)
- {
-+	int ret;
-+
- 	if (pci_enable_device(dev->pdev))
- 		return -EIO;
- 
- 	pci_set_master(dev->pdev);
- 
--	return i915_drm_thaw(dev);
-+	ret = i915_drm_thaw(dev);
-+	if (ret)
-+		return ret;
-+
-+	drm_kms_helper_poll_enable(dev);
-+	return 0;
-+}
-+
-+static int i8xx_do_reset(struct drm_device *dev, u8 flags)
-+{
-+	struct drm_i915_private *dev_priv = dev->dev_private;
-+
-+	if (IS_I85X(dev))
-+		return -ENODEV;
-+
-+	I915_WRITE(D_STATE, I915_READ(D_STATE) | DSTATE_GFX_RESET_I830);
-+	POSTING_READ(D_STATE);
-+
-+	if (IS_I830(dev) || IS_845G(dev)) {
-+		I915_WRITE(DEBUG_RESET_I830,
-+			   DEBUG_RESET_DISPLAY |
-+			   DEBUG_RESET_RENDER |
-+			   DEBUG_RESET_FULL);
-+		POSTING_READ(DEBUG_RESET_I830);
-+		msleep(1);
-+
-+		I915_WRITE(DEBUG_RESET_I830, 0);
-+		POSTING_READ(DEBUG_RESET_I830);
-+	}
-+
-+	msleep(1);
-+
-+	I915_WRITE(D_STATE, I915_READ(D_STATE) & ~DSTATE_GFX_RESET_I830);
-+	POSTING_READ(D_STATE);
-+
-+	return 0;
-+}
-+
-+static int i965_reset_complete(struct drm_device *dev)
-+{
-+	u8 gdrst;
-+	pci_read_config_byte(dev->pdev, I965_GDRST, &gdrst);
-+	return gdrst & 0x1;
-+}
-+
-+static int i965_do_reset(struct drm_device *dev, u8 flags)
-+{
-+	u8 gdrst;
-+
-+	/*
-+	 * Set the domains we want to reset (GRDOM/bits 2 and 3) as
-+	 * well as the reset bit (GR/bit 0).  Setting the GR bit
-+	 * triggers the reset; when done, the hardware will clear it.
-+	 */
-+	pci_read_config_byte(dev->pdev, I965_GDRST, &gdrst);
-+	pci_write_config_byte(dev->pdev, I965_GDRST, gdrst | flags | 0x1);
-+
-+	return wait_for(i965_reset_complete(dev), 500);
-+}
-+
-+static int ironlake_do_reset(struct drm_device *dev, u8 flags)
-+{
-+	struct drm_i915_private *dev_priv = dev->dev_private;
-+	u32 gdrst = I915_READ(MCHBAR_MIRROR_BASE + ILK_GDSR);
-+	I915_WRITE(MCHBAR_MIRROR_BASE + ILK_GDSR, gdrst | flags | 0x1);
-+	return wait_for(I915_READ(MCHBAR_MIRROR_BASE + ILK_GDSR) & 0x1, 500);
- }
- 
- /**
-@@ -325,54 +420,39 @@ int i915_resume(struct drm_device *dev)
-  *   - re-init interrupt state
-  *   - re-init display
-  */
--int i965_reset(struct drm_device *dev, u8 flags)
-+int i915_reset(struct drm_device *dev, u8 flags)
- {
- 	drm_i915_private_t *dev_priv = dev->dev_private;
--	unsigned long timeout;
--	u8 gdrst;
- 	/*
- 	 * We really should only reset the display subsystem if we actually
- 	 * need to
- 	 */
- 	bool need_display = true;
-+	int ret;
- 
- 	mutex_lock(&dev->struct_mutex);
- 
--	/*
--	 * Clear request list
--	 */
--	i915_gem_retire_requests(dev);
--
--	if (need_display)
--		i915_save_display(dev);
--
--	if (IS_I965G(dev) || IS_G4X(dev)) {
--		/*
--		 * Set the domains we want to reset, then the reset bit (bit 0).
--		 * Clear the reset bit after a while and wait for hardware status
--		 * bit (bit 1) to be set
--		 */
--		pci_read_config_byte(dev->pdev, GDRST, &gdrst);
--		pci_write_config_byte(dev->pdev, GDRST, gdrst | flags | ((flags == GDRST_FULL) ? 0x1 : 0x0));
--		udelay(50);
--		pci_write_config_byte(dev->pdev, GDRST, gdrst & 0xfe);
--
--		/* ...we don't want to loop forever though, 500ms should be plenty */
--	       timeout = jiffies + msecs_to_jiffies(500);
--		do {
--			udelay(100);
--			pci_read_config_byte(dev->pdev, GDRST, &gdrst);
--		} while ((gdrst & 0x1) && time_after(timeout, jiffies));
--
--		if (gdrst & 0x1) {
--			WARN(true, "i915: Failed to reset chip\n");
--			mutex_unlock(&dev->struct_mutex);
--			return -EIO;
--		}
--	} else {
--		DRM_ERROR("Error occurred. Don't know how to reset this chip.\n");
-+	i915_gem_reset(dev);
-+
-+	ret = -ENODEV;
-+	if (get_seconds() - dev_priv->last_gpu_reset < 5) {
-+		DRM_ERROR("GPU hanging too fast, declaring wedged!\n");
-+	} else switch (INTEL_INFO(dev)->gen) {
-+	case 5:
-+		ret = ironlake_do_reset(dev, flags);
-+		break;
-+	case 4:
-+		ret = i965_do_reset(dev, flags);
-+		break;
-+	case 2:
-+		ret = i8xx_do_reset(dev, flags);
-+		break;
-+	}
-+	dev_priv->last_gpu_reset = get_seconds();
-+	if (ret) {
-+		DRM_ERROR("Failed to reset chip.\n");
- 		mutex_unlock(&dev->struct_mutex);
--		return -ENODEV;
-+		return ret;
- 	}
- 
- 	/* Ok, now get things going again... */
-@@ -400,13 +480,19 @@ int i965_reset(struct drm_device *dev, u8 flags)
- 		mutex_lock(&dev->struct_mutex);
- 	}
- 
-+	mutex_unlock(&dev->struct_mutex);
-+
- 	/*
--	 * Display needs restore too...
-+	 * Perform a full modeset as on later generations, e.g. Ironlake, we may
-+	 * need to retrain the display link and cannot just restore the register
-+	 * values.
- 	 */
--	if (need_display)
--		i915_restore_display(dev);
-+	if (need_display) {
-+		mutex_lock(&dev->mode_config.mutex);
-+		drm_helper_resume_force_mode(dev);
-+		mutex_unlock(&dev->mode_config.mutex);
-+	}
- 
--	mutex_unlock(&dev->struct_mutex);
- 	return 0;
- }
- 
-@@ -422,6 +508,8 @@ i915_pci_remove(struct pci_dev *pdev)
- {
- 	struct drm_device *dev = pci_get_drvdata(pdev);
- 
-+	pci_disable_device(pdev); /* core did previous enable */
-+
- 	drm_put_dev(dev);
- }
- 
-@@ -524,8 +612,6 @@ static struct drm_driver driver = {
- 	.irq_uninstall = i915_driver_irq_uninstall,
- 	.irq_handler = i915_driver_irq_handler,
- 	.reclaim_buffers = drm_core_reclaim_buffers,
--	.get_map_ofs = drm_core_get_map_ofs,
--	.get_reg_ofs = drm_core_get_reg_ofs,
- 	.master_create = i915_master_create,
- 	.master_destroy = i915_master_destroy,
- #if defined(CONFIG_DEBUG_FS)
-@@ -548,6 +634,7 @@ static struct drm_driver driver = {
- #ifdef CONFIG_COMPAT
- 		 .compat_ioctl = i915_compat_ioctl,
- #endif
-+		 .llseek = noop_llseek,
- 	},
- 
- 	.pci_driver = {
-diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
-index af4a263..90414ae 100644
---- a/drivers/gpu/drm/i915/i915_drv.h
-+++ b/drivers/gpu/drm/i915/i915_drv.h
-@@ -34,6 +34,8 @@
- #include "intel_bios.h"
- #include "intel_ringbuffer.h"
- #include <linux/io-mapping.h>
-+#include <linux/i2c.h>
-+#include <drm/intel-gtt.h>
- 
- /* General customization:
-  */
-@@ -73,11 +75,9 @@ enum plane {
- #define DRIVER_PATCHLEVEL	0
- 
- #define WATCH_COHERENCY	0
--#define WATCH_BUF	0
- #define WATCH_EXEC	0
--#define WATCH_LRU	0
- #define WATCH_RELOC	0
--#define WATCH_INACTIVE	0
-+#define WATCH_LISTS	0
- #define WATCH_PWRITE	0
- 
- #define I915_GEM_PHYS_CURSOR_0 1
-@@ -110,8 +110,9 @@ struct intel_opregion {
- 	struct opregion_acpi *acpi;
- 	struct opregion_swsci *swsci;
- 	struct opregion_asle *asle;
--	int enabled;
-+	void *vbt;
- };
-+#define OPREGION_SIZE            (8*1024)
- 
- struct intel_overlay;
- struct intel_overlay_error_state;
-@@ -125,13 +126,16 @@ struct drm_i915_master_private {
- struct drm_i915_fence_reg {
- 	struct drm_gem_object *obj;
- 	struct list_head lru_list;
-+	bool gpu;
- };
- 
- struct sdvo_device_mapping {
-+	u8 initialized;
- 	u8 dvo_port;
- 	u8 slave_addr;
- 	u8 dvo_wiring;
--	u8 initialized;
-+	u8 i2c_pin;
-+	u8 i2c_speed;
- 	u8 ddc_pin;
- };
- 
-@@ -193,28 +197,29 @@ struct drm_i915_display_funcs {
- struct intel_device_info {
- 	u8 gen;
- 	u8 is_mobile : 1;
--	u8 is_i8xx : 1;
- 	u8 is_i85x : 1;
- 	u8 is_i915g : 1;
--	u8 is_i9xx : 1;
- 	u8 is_i945gm : 1;
--	u8 is_i965g : 1;
--	u8 is_i965gm : 1;
- 	u8 is_g33 : 1;
- 	u8 need_gfx_hws : 1;
- 	u8 is_g4x : 1;
- 	u8 is_pineview : 1;
- 	u8 is_broadwater : 1;
- 	u8 is_crestline : 1;
--	u8 is_ironlake : 1;
- 	u8 has_fbc : 1;
- 	u8 has_rc6 : 1;
- 	u8 has_pipe_cxsr : 1;
- 	u8 has_hotplug : 1;
- 	u8 cursor_needs_physical : 1;
-+	u8 has_overlay : 1;
-+	u8 overlay_needs_physical : 1;
-+	u8 supports_tv : 1;
-+	u8 has_bsd_ring : 1;
-+	u8 has_blt_ring : 1;
- };
- 
- enum no_fbc_reason {
-+	FBC_NO_OUTPUT, /* no outputs enabled to compress */
- 	FBC_STOLEN_TOO_SMALL, /* not enough space to hold compressed buffers */
- 	FBC_UNSUPPORTED_MODE, /* interlace or doublescanned mode */
- 	FBC_MODE_TOO_LARGE, /* mode too large for compression */
-@@ -241,9 +246,16 @@ typedef struct drm_i915_private {
- 
- 	void __iomem *regs;
- 
-+	struct intel_gmbus {
-+		struct i2c_adapter adapter;
-+		struct i2c_adapter *force_bit;
-+		u32 reg0;
-+	} *gmbus;
-+
- 	struct pci_dev *bridge_dev;
- 	struct intel_ring_buffer render_ring;
- 	struct intel_ring_buffer bsd_ring;
-+	struct intel_ring_buffer blt_ring;
- 	uint32_t next_seqno;
- 
- 	drm_dma_handle_t *status_page_dmah;
-@@ -263,6 +275,9 @@ typedef struct drm_i915_private {
- 	int front_offset;
- 	int current_page;
- 	int page_flipping;
-+#define I915_DEBUG_READ (1<<0)
-+#define I915_DEBUG_WRITE (1<<1)
-+	unsigned long debug_flags;
- 
- 	wait_queue_head_t irq_queue;
- 	atomic_t irq_received;
-@@ -289,24 +304,21 @@ typedef struct drm_i915_private {
- 	unsigned int sr01, adpa, ppcr, dvob, dvoc, lvds;
- 	int vblank_pipe;
- 	int num_pipe;
--	u32 flush_rings;
--#define FLUSH_RENDER_RING	0x1
--#define FLUSH_BSD_RING		0x2
- 
- 	/* For hangcheck timer */
--#define DRM_I915_HANGCHECK_PERIOD 75 /* in jiffies */
-+#define DRM_I915_HANGCHECK_PERIOD 250 /* in ms */
- 	struct timer_list hangcheck_timer;
- 	int hangcheck_count;
- 	uint32_t last_acthd;
- 	uint32_t last_instdone;
- 	uint32_t last_instdone1;
- 
--	struct drm_mm vram;
--
- 	unsigned long cfb_size;
- 	unsigned long cfb_pitch;
-+	unsigned long cfb_offset;
- 	int cfb_fence;
- 	int cfb_plane;
-+	int cfb_y;
- 
- 	int irq_enabled;
- 
-@@ -316,8 +328,7 @@ typedef struct drm_i915_private {
- 	struct intel_overlay *overlay;
- 
- 	/* LVDS info */
--	int backlight_duty_cycle;  /* restore backlight to this value */
--	bool panel_wants_dither;
-+	int backlight_level;  /* restore backlight to this value */
- 	struct drm_display_mode *panel_fixed_mode;
- 	struct drm_display_mode *lfp_lvds_vbt_mode; /* if any */
- 	struct drm_display_mode *sdvo_lvds_vbt_mode; /* if any */
-@@ -328,13 +339,23 @@ typedef struct drm_i915_private {
- 	unsigned int lvds_vbt:1;
- 	unsigned int int_crt_support:1;
- 	unsigned int lvds_use_ssc:1;
--	unsigned int edp_support:1;
- 	int lvds_ssc_freq;
--	int edp_bpp;
-+	struct {
-+		int rate;
-+		int lanes;
-+		int preemphasis;
-+		int vswing;
-+
-+		bool initialized;
-+		bool support;
-+		int bpp;
-+		struct edp_power_seq pps;
-+	} edp;
-+	bool no_aux_handshake;
- 
- 	struct notifier_block lid_notifier;
- 
--	int crt_ddc_bus; /* 0 = unknown, else GPIO to use for CRT DDC */
-+	int crt_ddc_pin;
- 	struct drm_i915_fence_reg fence_regs[16]; /* assume 965 */
- 	int fence_reg_start; /* 4 if userland hasn't ioctl'd us yet */
- 	int num_fence_regs; /* 8 on pre-965, 16 otherwise */
-@@ -344,6 +365,7 @@ typedef struct drm_i915_private {
- 	spinlock_t error_lock;
- 	struct drm_i915_error_state *first_error;
- 	struct work_struct error_work;
-+	struct completion error_completion;
- 	struct workqueue_struct *wq;
- 
- 	/* Display functions */
-@@ -507,6 +529,11 @@ typedef struct drm_i915_private {
- 	u32 saveMCHBAR_RENDER_STANDBY;
- 
- 	struct {
-+		/** Bridge to intel-gtt-ko */
-+		struct intel_gtt *gtt;
-+		/** Memory allocator for GTT stolen memory */
-+		struct drm_mm vram;
-+		/** Memory allocator for GTT */
- 		struct drm_mm gtt_space;
- 
- 		struct io_mapping *gtt_mapping;
-@@ -521,7 +548,16 @@ typedef struct drm_i915_private {
- 		 */
- 		struct list_head shrink_list;
- 
--		spinlock_t active_list_lock;
-+		/**
-+		 * List of objects currently involved in rendering.
-+		 *
-+		 * Includes buffers having the contents of their GPU caches
-+		 * flushed, not necessarily primitives.  last_rendering_seqno
-+		 * represents when the rendering involved will be completed.
-+		 *
-+		 * A reference is held on the buffer while on this list.
-+		 */
-+		struct list_head active_list;
- 
- 		/**
- 		 * List of objects which are not in the ringbuffer but which
-@@ -535,15 +571,6 @@ typedef struct drm_i915_private {
- 		struct list_head flushing_list;
- 
- 		/**
--		 * List of objects currently pending a GPU write flush.
--		 *
--		 * All elements on this list will belong to either the
--		 * active_list or flushing_list, last_rendering_seqno can
--		 * be used to differentiate between the two elements.
--		 */
--		struct list_head gpu_write_list;
--
--		/**
- 		 * LRU list of objects which are not in the ringbuffer and
- 		 * are ready to unbind, but are still in the GTT.
- 		 *
-@@ -555,6 +582,12 @@ typedef struct drm_i915_private {
- 		 */
- 		struct list_head inactive_list;
- 
-+		/**
-+		 * LRU list of objects which are not in the ringbuffer but
-+		 * are still pinned in the GTT.
-+		 */
-+		struct list_head pinned_list;
-+
- 		/** LRU list of objects with fence regs on them. */
- 		struct list_head fence_list;
- 
-@@ -611,6 +644,17 @@ typedef struct drm_i915_private {
- 
- 		/* storage for physical objects */
- 		struct drm_i915_gem_phys_object *phys_objs[I915_MAX_PHYS_OBJECT];
-+
-+		uint32_t flush_rings;
-+
-+		/* accounting, useful for userland debugging */
-+		size_t object_memory;
-+		size_t pin_memory;
-+		size_t gtt_memory;
-+		size_t gtt_total;
-+		u32 object_count;
-+		u32 pin_count;
-+		u32 gtt_count;
- 	} mm;
- 	struct sdvo_device_mapping sdvo_mappings[2];
- 	/* indicate whether the LVDS_BORDER should be enabled or not */
-@@ -626,8 +670,6 @@ typedef struct drm_i915_private {
- 	/* Reclocking support */
- 	bool render_reclock_avail;
- 	bool lvds_downclock_avail;
--	/* indicate whether the LVDS EDID is OK */
--	bool lvds_edid_good;
- 	/* indicates the reduced downclock for LVDS*/
- 	int lvds_downclock;
- 	struct work_struct idle_work;
-@@ -661,6 +703,8 @@ typedef struct drm_i915_private {
- 	struct drm_mm_node *compressed_fb;
- 	struct drm_mm_node *compressed_llb;
- 
-+	unsigned long last_gpu_reset;
-+
- 	/* list of fbdev register on this device */
- 	struct intel_fbdev *fbdev;
- } drm_i915_private_t;
-@@ -673,7 +717,8 @@ struct drm_i915_gem_object {
- 	struct drm_mm_node *gtt_space;
- 
- 	/** This object's place on the active/flushing/inactive lists */
--	struct list_head list;
-+	struct list_head ring_list;
-+	struct list_head mm_list;
- 	/** This object's place on GPU write list */
- 	struct list_head gpu_write_list;
- 	/** This object's place on eviction list */
-@@ -816,12 +861,14 @@ struct drm_i915_gem_request {
- 	/** global list entry for this request */
- 	struct list_head list;
- 
-+	struct drm_i915_file_private *file_priv;
- 	/** file_priv list entry for this request */
- 	struct list_head client_list;
- };
- 
- struct drm_i915_file_private {
- 	struct {
-+		struct spinlock lock;
- 		struct list_head request_list;
- 	} mm;
- };
-@@ -862,7 +909,7 @@ extern long i915_compat_ioctl(struct file *filp, unsigned int cmd,
- extern int i915_emit_box(struct drm_device *dev,
- 			 struct drm_clip_rect *boxes,
- 			 int i, int DR1, int DR4);
--extern int i965_reset(struct drm_device *dev, u8 flags);
-+extern int i915_reset(struct drm_device *dev, u8 flags);
- extern unsigned long i915_chipset_val(struct drm_i915_private *dev_priv);
- extern unsigned long i915_mch_val(struct drm_i915_private *dev_priv);
- extern unsigned long i915_gfx_val(struct drm_i915_private *dev_priv);
-@@ -871,7 +918,6 @@ extern void i915_update_gfx_val(struct drm_i915_private *dev_priv);
- 
- /* i915_irq.c */
- void i915_hangcheck_elapsed(unsigned long data);
--void i915_destroy_error_state(struct drm_device *dev);
- extern int i915_irq_emit(struct drm_device *dev, void *data,
- 			 struct drm_file *file_priv);
- extern int i915_irq_wait(struct drm_device *dev, void *data,
-@@ -908,6 +954,12 @@ i915_disable_pipestat(drm_i915_private_t *dev_priv, int pipe, u32 mask);
- 
- void intel_enable_asle (struct drm_device *dev);
- 
-+#ifdef CONFIG_DEBUG_FS
-+extern void i915_destroy_error_state(struct drm_device *dev);
-+#else
-+#define i915_destroy_error_state(x)
-+#endif
-+
- 
- /* i915_mem.c */
- extern int i915_mem_alloc(struct drm_device *dev, void *data,
-@@ -922,6 +974,7 @@ extern void i915_mem_takedown(struct mem_block **heap);
- extern void i915_mem_release(struct drm_device * dev,
- 			     struct drm_file *file_priv, struct mem_block *heap);
- /* i915_gem.c */
-+int i915_gem_check_is_wedged(struct drm_device *dev);
- int i915_gem_init_ioctl(struct drm_device *dev, void *data,
- 			struct drm_file *file_priv);
- int i915_gem_create_ioctl(struct drm_device *dev, void *data,
-@@ -972,13 +1025,22 @@ void i915_gem_object_unpin(struct drm_gem_object *obj);
- int i915_gem_object_unbind(struct drm_gem_object *obj);
- void i915_gem_release_mmap(struct drm_gem_object *obj);
- void i915_gem_lastclose(struct drm_device *dev);
--uint32_t i915_get_gem_seqno(struct drm_device *dev,
--		struct intel_ring_buffer *ring);
--bool i915_seqno_passed(uint32_t seq1, uint32_t seq2);
--int i915_gem_object_get_fence_reg(struct drm_gem_object *obj);
--int i915_gem_object_put_fence_reg(struct drm_gem_object *obj);
-+
-+/**
-+ * Returns true if seq1 is later than seq2.
-+ */
-+static inline bool
-+i915_seqno_passed(uint32_t seq1, uint32_t seq2)
-+{
-+	return (int32_t)(seq1 - seq2) >= 0;
-+}
-+
-+int i915_gem_object_get_fence_reg(struct drm_gem_object *obj,
-+				  bool interruptible);
-+int i915_gem_object_put_fence_reg(struct drm_gem_object *obj,
-+				  bool interruptible);
- void i915_gem_retire_requests(struct drm_device *dev);
--void i915_gem_retire_work_handler(struct work_struct *work);
-+void i915_gem_reset(struct drm_device *dev);
- void i915_gem_clflush_object(struct drm_gem_object *obj);
- int i915_gem_object_set_domain(struct drm_gem_object *obj,
- 			       uint32_t read_domains,
-@@ -990,16 +1052,18 @@ int i915_gem_do_init(struct drm_device *dev, unsigned long start,
- int i915_gpu_idle(struct drm_device *dev);
- int i915_gem_idle(struct drm_device *dev);
- uint32_t i915_add_request(struct drm_device *dev,
--		struct drm_file *file_priv,
--		uint32_t flush_domains,
--		struct intel_ring_buffer *ring);
-+			  struct drm_file *file_priv,
-+			  struct drm_i915_gem_request *request,
-+			  struct intel_ring_buffer *ring);
- int i915_do_wait_request(struct drm_device *dev,
--		uint32_t seqno, int interruptible,
--		struct intel_ring_buffer *ring);
-+			 uint32_t seqno,
-+			 bool interruptible,
-+			 struct intel_ring_buffer *ring);
- int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf);
- int i915_gem_object_set_to_gtt_domain(struct drm_gem_object *obj,
- 				      int write);
--int i915_gem_object_set_to_display_plane(struct drm_gem_object *obj);
-+int i915_gem_object_set_to_display_plane(struct drm_gem_object *obj,
-+					 bool pipelined);
- int i915_gem_attach_phys_object(struct drm_device *dev,
- 				struct drm_gem_object *obj,
- 				int id,
-@@ -1007,10 +1071,7 @@ int i915_gem_attach_phys_object(struct drm_device *dev,
- void i915_gem_detach_phys_object(struct drm_device *dev,
- 				 struct drm_gem_object *obj);
- void i915_gem_free_all_phys_object(struct drm_device *dev);
--int i915_gem_object_get_pages(struct drm_gem_object *obj, gfp_t gfpmask);
--void i915_gem_object_put_pages(struct drm_gem_object *obj);
- void i915_gem_release(struct drm_device * dev, struct drm_file *file_priv);
--int i915_gem_object_flush_write_domain(struct drm_gem_object *obj);
- 
- void i915_gem_shrinker_init(void);
- void i915_gem_shrinker_exit(void);
-@@ -1032,15 +1093,14 @@ bool i915_gem_object_fence_offset_ok(struct drm_gem_object *obj,
- /* i915_gem_debug.c */
- void i915_gem_dump_object(struct drm_gem_object *obj, int len,
- 			  const char *where, uint32_t mark);
--#if WATCH_INACTIVE
--void i915_verify_inactive(struct drm_device *dev, char *file, int line);
-+#if WATCH_LISTS
-+int i915_verify_lists(struct drm_device *dev);
- #else
--#define i915_verify_inactive(dev, file, line)
-+#define i915_verify_lists(dev) 0
- #endif
- void i915_gem_object_check_coherency(struct drm_gem_object *obj, int handle);
- void i915_gem_dump_object(struct drm_gem_object *obj, int len,
- 			  const char *where, uint32_t mark);
--void i915_dump_lru(struct drm_device *dev, const char *where);
- 
- /* i915_debugfs.c */
- int i915_debugfs_init(struct drm_minor *minor);
-@@ -1054,21 +1114,42 @@ extern int i915_restore_state(struct drm_device *dev);
- extern int i915_save_state(struct drm_device *dev);
- extern int i915_restore_state(struct drm_device *dev);
- 
-+/* intel_i2c.c */
-+extern int intel_setup_gmbus(struct drm_device *dev);
-+extern void intel_teardown_gmbus(struct drm_device *dev);
-+extern void intel_gmbus_set_speed(struct i2c_adapter *adapter, int speed);
-+extern void intel_gmbus_force_bit(struct i2c_adapter *adapter, bool force_bit);
-+extern inline bool intel_gmbus_is_forced_bit(struct i2c_adapter *adapter)
-+{
-+	return container_of(adapter, struct intel_gmbus, adapter)->force_bit;
-+}
-+extern void intel_i2c_reset(struct drm_device *dev);
-+
-+/* intel_opregion.c */
-+extern int intel_opregion_setup(struct drm_device *dev);
- #ifdef CONFIG_ACPI
--/* i915_opregion.c */
--extern int intel_opregion_init(struct drm_device *dev, int resume);
--extern void intel_opregion_free(struct drm_device *dev, int suspend);
--extern void opregion_asle_intr(struct drm_device *dev);
--extern void ironlake_opregion_gse_intr(struct drm_device *dev);
--extern void opregion_enable_asle(struct drm_device *dev);
-+extern void intel_opregion_init(struct drm_device *dev);
-+extern void intel_opregion_fini(struct drm_device *dev);
-+extern void intel_opregion_asle_intr(struct drm_device *dev);
-+extern void intel_opregion_gse_intr(struct drm_device *dev);
-+extern void intel_opregion_enable_asle(struct drm_device *dev);
- #else
--static inline int intel_opregion_init(struct drm_device *dev, int resume) { return 0; }
--static inline void intel_opregion_free(struct drm_device *dev, int suspend) { return; }
--static inline void opregion_asle_intr(struct drm_device *dev) { return; }
--static inline void ironlake_opregion_gse_intr(struct drm_device *dev) { return; }
--static inline void opregion_enable_asle(struct drm_device *dev) { return; }
-+static inline void intel_opregion_init(struct drm_device *dev) { return; }
-+static inline void intel_opregion_fini(struct drm_device *dev) { return; }
-+static inline void intel_opregion_asle_intr(struct drm_device *dev) { return; }
-+static inline void intel_opregion_gse_intr(struct drm_device *dev) { return; }
-+static inline void intel_opregion_enable_asle(struct drm_device *dev) { return; }
- #endif
- 
-+/* intel_acpi.c */
-+#ifdef CONFIG_ACPI
-+extern void intel_register_dsm_handler(void);
-+extern void intel_unregister_dsm_handler(void);
-+#else
-+static inline void intel_register_dsm_handler(void) { return; }
-+static inline void intel_unregister_dsm_handler(void) { return; }
-+#endif /* CONFIG_ACPI */
-+
- /* modesetting */
- extern void intel_modeset_init(struct drm_device *dev);
- extern void intel_modeset_cleanup(struct drm_device *dev);
-@@ -1084,8 +1165,10 @@ extern void intel_detect_pch (struct drm_device *dev);
- extern int intel_trans_dp_port_sel (struct drm_crtc *crtc);
- 
- /* overlay */
-+#ifdef CONFIG_DEBUG_FS
- extern struct intel_overlay_error_state *intel_overlay_capture_error_state(struct drm_device *dev);
- extern void intel_overlay_print_error_state(struct seq_file *m, struct intel_overlay_error_state *error);
-+#endif
- 
- /**
-  * Lock test for when it's just for synchronization of ring access.
-@@ -1099,8 +1182,26 @@ extern void intel_overlay_print_error_state(struct seq_file *m, struct intel_ove
- 		LOCK_TEST_WITH_RETURN(dev, file_priv);			\
- } while (0)
- 
--#define I915_READ(reg)          readl(dev_priv->regs + (reg))
--#define I915_WRITE(reg, val)     writel(val, dev_priv->regs + (reg))
-+static inline u32 i915_read(struct drm_i915_private *dev_priv, u32 reg)
-+{
-+	u32 val;
-+
-+	val = readl(dev_priv->regs + reg);
-+	if (dev_priv->debug_flags & I915_DEBUG_READ)
-+		printk(KERN_ERR "read 0x%08x from 0x%08x\n", val, reg);
-+	return val;
-+}
-+
-+static inline void i915_write(struct drm_i915_private *dev_priv, u32 reg,
-+			      u32 val)
-+{
-+	writel(val, dev_priv->regs + reg);
-+	if (dev_priv->debug_flags & I915_DEBUG_WRITE)
-+		printk(KERN_ERR "wrote 0x%08x to 0x%08x\n", val, reg);
-+}
-+
-+#define I915_READ(reg)          i915_read(dev_priv, (reg))
-+#define I915_WRITE(reg, val)    i915_write(dev_priv, (reg), (val))
- #define I915_READ16(reg)	readw(dev_priv->regs + (reg))
- #define I915_WRITE16(reg, val)	writel(val, dev_priv->regs + (reg))
- #define I915_READ8(reg)		readb(dev_priv->regs + (reg))
-@@ -1110,6 +1211,11 @@ extern void intel_overlay_print_error_state(struct seq_file *m, struct intel_ove
- #define POSTING_READ(reg)	(void)I915_READ(reg)
- #define POSTING_READ16(reg)	(void)I915_READ16(reg)
- 
-+#define I915_DEBUG_ENABLE_IO() (dev_priv->debug_flags |= I915_DEBUG_READ | \
-+				I915_DEBUG_WRITE)
-+#define I915_DEBUG_DISABLE_IO() (dev_priv->debug_flags &= ~(I915_DEBUG_READ | \
-+							    I915_DEBUG_WRITE))
-+
- #define I915_VERBOSE 0
- 
- #define BEGIN_LP_RING(n)  do { \
-@@ -1166,8 +1272,6 @@ extern void intel_overlay_print_error_state(struct seq_file *m, struct intel_ove
- #define IS_I915GM(dev)		((dev)->pci_device == 0x2592)
- #define IS_I945G(dev)		((dev)->pci_device == 0x2772)
- #define IS_I945GM(dev)		(INTEL_INFO(dev)->is_i945gm)
--#define IS_I965G(dev)		(INTEL_INFO(dev)->is_i965g)
--#define IS_I965GM(dev)		(INTEL_INFO(dev)->is_i965gm)
- #define IS_BROADWATER(dev)	(INTEL_INFO(dev)->is_broadwater)
- #define IS_CRESTLINE(dev)	(INTEL_INFO(dev)->is_crestline)
- #define IS_GM45(dev)		((dev)->pci_device == 0x2A42)
-@@ -1178,8 +1282,6 @@ extern void intel_overlay_print_error_state(struct seq_file *m, struct intel_ove
- #define IS_G33(dev)		(INTEL_INFO(dev)->is_g33)
- #define IS_IRONLAKE_D(dev)	((dev)->pci_device == 0x0042)
- #define IS_IRONLAKE_M(dev)	((dev)->pci_device == 0x0046)
--#define IS_IRONLAKE(dev)	(INTEL_INFO(dev)->is_ironlake)
--#define IS_I9XX(dev)		(INTEL_INFO(dev)->is_i9xx)
- #define IS_MOBILE(dev)		(INTEL_INFO(dev)->is_mobile)
- 
- #define IS_GEN2(dev)	(INTEL_INFO(dev)->gen == 2)
-@@ -1188,36 +1290,38 @@ extern void intel_overlay_print_error_state(struct seq_file *m, struct intel_ove
- #define IS_GEN5(dev)	(INTEL_INFO(dev)->gen == 5)
- #define IS_GEN6(dev)	(INTEL_INFO(dev)->gen == 6)
- 
--#define HAS_BSD(dev)            (IS_IRONLAKE(dev) || IS_G4X(dev))
-+#define HAS_BSD(dev)            (INTEL_INFO(dev)->has_bsd_ring)
-+#define HAS_BLT(dev)            (INTEL_INFO(dev)->has_blt_ring)
- #define I915_NEED_GFX_HWS(dev)	(INTEL_INFO(dev)->need_gfx_hws)
- 
-+#define HAS_OVERLAY(dev) 		(INTEL_INFO(dev)->has_overlay)
-+#define OVERLAY_NEEDS_PHYSICAL(dev)	(INTEL_INFO(dev)->overlay_needs_physical)
-+
- /* With the 945 and later, Y tiling got adjusted so that it was 32 128-byte
-  * rows, which changed the alignment requirements and fence programming.
-  */
--#define HAS_128_BYTE_Y_TILING(dev) (IS_I9XX(dev) && !(IS_I915G(dev) || \
-+#define HAS_128_BYTE_Y_TILING(dev) (!IS_GEN2(dev) && !(IS_I915G(dev) || \
- 						      IS_I915GM(dev)))
--#define SUPPORTS_DIGITAL_OUTPUTS(dev)	(IS_I9XX(dev) && !IS_PINEVIEW(dev))
--#define SUPPORTS_INTEGRATED_HDMI(dev)	(IS_G4X(dev) || IS_IRONLAKE(dev))
--#define SUPPORTS_INTEGRATED_DP(dev)	(IS_G4X(dev) || IS_IRONLAKE(dev))
-+#define SUPPORTS_DIGITAL_OUTPUTS(dev)	(!IS_GEN2(dev) && !IS_PINEVIEW(dev))
-+#define SUPPORTS_INTEGRATED_HDMI(dev)	(IS_G4X(dev) || IS_GEN5(dev))
-+#define SUPPORTS_INTEGRATED_DP(dev)	(IS_G4X(dev) || IS_GEN5(dev))
- #define SUPPORTS_EDP(dev)		(IS_IRONLAKE_M(dev))
--#define SUPPORTS_TV(dev)		(IS_I9XX(dev) && IS_MOBILE(dev) && \
--					!IS_IRONLAKE(dev) && !IS_PINEVIEW(dev) && \
--					!IS_GEN6(dev))
-+#define SUPPORTS_TV(dev)		(INTEL_INFO(dev)->supports_tv)
- #define I915_HAS_HOTPLUG(dev)		 (INTEL_INFO(dev)->has_hotplug)
- /* dsparb controlled by hw only */
- #define DSPARB_HWCONTROL(dev) (IS_G4X(dev) || IS_IRONLAKE(dev))
- 
--#define HAS_FW_BLC(dev) (IS_I9XX(dev) || IS_G4X(dev) || IS_IRONLAKE(dev))
-+#define HAS_FW_BLC(dev) (INTEL_INFO(dev)->gen > 2)
- #define HAS_PIPE_CXSR(dev) (INTEL_INFO(dev)->has_pipe_cxsr)
- #define I915_HAS_FBC(dev) (INTEL_INFO(dev)->has_fbc)
- #define I915_HAS_RC6(dev) (INTEL_INFO(dev)->has_rc6)
- 
--#define HAS_PCH_SPLIT(dev) (IS_IRONLAKE(dev) ||	\
--			    IS_GEN6(dev))
--#define HAS_PIPE_CONTROL(dev) (IS_IRONLAKE(dev) || IS_GEN6(dev))
-+#define HAS_PCH_SPLIT(dev) (IS_GEN5(dev) || IS_GEN6(dev))
-+#define HAS_PIPE_CONTROL(dev) (IS_GEN5(dev) || IS_GEN6(dev))
- 
- #define INTEL_PCH_TYPE(dev) (((struct drm_i915_private *)(dev)->dev_private)->pch_type)
- #define HAS_PCH_CPT(dev) (INTEL_PCH_TYPE(dev) == PCH_CPT)
-+#define HAS_PCH_IBX(dev) (INTEL_PCH_TYPE(dev) == PCH_IBX)
- 
- #define PRIMARY_RINGBUFFER_SIZE         (128*1024)
- 
-diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
-index 90b1d67..6da2c6d 100644
---- a/drivers/gpu/drm/i915/i915_gem.c
-+++ b/drivers/gpu/drm/i915/i915_gem.c
-@@ -37,7 +37,9 @@
- #include <linux/intel-gtt.h>
- 
- static uint32_t i915_gem_get_gtt_alignment(struct drm_gem_object *obj);
--static int i915_gem_object_flush_gpu_write_domain(struct drm_gem_object *obj);
-+
-+static int i915_gem_object_flush_gpu_write_domain(struct drm_gem_object *obj,
-+						  bool pipelined);
- static void i915_gem_object_flush_gtt_write_domain(struct drm_gem_object *obj);
- static void i915_gem_object_flush_cpu_write_domain(struct drm_gem_object *obj);
- static int i915_gem_object_set_to_cpu_domain(struct drm_gem_object *obj,
-@@ -46,7 +48,8 @@ static int i915_gem_object_set_cpu_read_domain_range(struct drm_gem_object *obj,
- 						     uint64_t offset,
- 						     uint64_t size);
- static void i915_gem_object_set_to_full_cpu_read_domain(struct drm_gem_object *obj);
--static int i915_gem_object_wait_rendering(struct drm_gem_object *obj);
-+static int i915_gem_object_wait_rendering(struct drm_gem_object *obj,
-+					  bool interruptible);
- static int i915_gem_object_bind_to_gtt(struct drm_gem_object *obj,
- 					   unsigned alignment);
- static void i915_gem_clear_fence_reg(struct drm_gem_object *obj);
-@@ -55,9 +58,111 @@ static int i915_gem_phys_pwrite(struct drm_device *dev, struct drm_gem_object *o
- 				struct drm_file *file_priv);
- static void i915_gem_free_object_tail(struct drm_gem_object *obj);
- 
-+static int
-+i915_gem_object_get_pages(struct drm_gem_object *obj,
-+			  gfp_t gfpmask);
-+
-+static void
-+i915_gem_object_put_pages(struct drm_gem_object *obj);
-+
- static LIST_HEAD(shrink_list);
- static DEFINE_SPINLOCK(shrink_list_lock);
- 
-+/* some bookkeeping */
-+static void i915_gem_info_add_obj(struct drm_i915_private *dev_priv,
-+				  size_t size)
-+{
-+	dev_priv->mm.object_count++;
-+	dev_priv->mm.object_memory += size;
-+}
-+
-+static void i915_gem_info_remove_obj(struct drm_i915_private *dev_priv,
-+				     size_t size)
-+{
-+	dev_priv->mm.object_count--;
-+	dev_priv->mm.object_memory -= size;
-+}
-+
-+static void i915_gem_info_add_gtt(struct drm_i915_private *dev_priv,
-+				  size_t size)
-+{
-+	dev_priv->mm.gtt_count++;
-+	dev_priv->mm.gtt_memory += size;
-+}
-+
-+static void i915_gem_info_remove_gtt(struct drm_i915_private *dev_priv,
-+				     size_t size)
-+{
-+	dev_priv->mm.gtt_count--;
-+	dev_priv->mm.gtt_memory -= size;
-+}
-+
-+static void i915_gem_info_add_pin(struct drm_i915_private *dev_priv,
-+				  size_t size)
-+{
-+	dev_priv->mm.pin_count++;
-+	dev_priv->mm.pin_memory += size;
-+}
-+
-+static void i915_gem_info_remove_pin(struct drm_i915_private *dev_priv,
-+				     size_t size)
-+{
-+	dev_priv->mm.pin_count--;
-+	dev_priv->mm.pin_memory -= size;
-+}
-+
-+int
-+i915_gem_check_is_wedged(struct drm_device *dev)
-+{
-+	struct drm_i915_private *dev_priv = dev->dev_private;
-+	struct completion *x = &dev_priv->error_completion;
-+	unsigned long flags;
-+	int ret;
-+
-+	if (!atomic_read(&dev_priv->mm.wedged))
-+		return 0;
-+
-+	ret = wait_for_completion_interruptible(x);
-+	if (ret)
-+		return ret;
-+
-+	/* Success, we reset the GPU! */
-+	if (!atomic_read(&dev_priv->mm.wedged))
-+		return 0;
-+
-+	/* GPU is hung, bump the completion count to account for
-+	 * the token we just consumed so that we never hit zero and
-+	 * end up waiting upon a subsequent completion event that
-+	 * will never happen.
-+	 */
-+	spin_lock_irqsave(&x->wait.lock, flags);
-+	x->done++;
-+	spin_unlock_irqrestore(&x->wait.lock, flags);
-+	return -EIO;
-+}
-+
-+static int i915_mutex_lock_interruptible(struct drm_device *dev)
-+{
-+	struct drm_i915_private *dev_priv = dev->dev_private;
-+	int ret;
-+
-+	ret = i915_gem_check_is_wedged(dev);
-+	if (ret)
-+		return ret;
-+
-+	ret = mutex_lock_interruptible(&dev->struct_mutex);
-+	if (ret)
-+		return ret;
-+
-+	if (atomic_read(&dev_priv->mm.wedged)) {
-+		mutex_unlock(&dev->struct_mutex);
-+		return -EAGAIN;
-+	}
-+
-+	WARN_ON(i915_verify_lists(dev));
-+	return 0;
-+}
-+
- static inline bool
- i915_gem_object_is_inactive(struct drm_i915_gem_object *obj_priv)
- {
-@@ -66,7 +171,8 @@ i915_gem_object_is_inactive(struct drm_i915_gem_object *obj_priv)
- 		obj_priv->pin_count == 0;
- }
- 
--int i915_gem_do_init(struct drm_device *dev, unsigned long start,
-+int i915_gem_do_init(struct drm_device *dev,
-+		     unsigned long start,
- 		     unsigned long end)
- {
- 	drm_i915_private_t *dev_priv = dev->dev_private;
-@@ -80,7 +186,7 @@ int i915_gem_do_init(struct drm_device *dev, unsigned long start,
- 	drm_mm_init(&dev_priv->mm.gtt_space, start,
- 		    end - start);
- 
--	dev->gtt_total = (uint32_t) (end - start);
-+	dev_priv->mm.gtt_total = end - start;
- 
- 	return 0;
- }
-@@ -103,14 +209,16 @@ int
- i915_gem_get_aperture_ioctl(struct drm_device *dev, void *data,
- 			    struct drm_file *file_priv)
- {
-+	struct drm_i915_private *dev_priv = dev->dev_private;
- 	struct drm_i915_gem_get_aperture *args = data;
- 
- 	if (!(dev->driver->driver_features & DRIVER_GEM))
- 		return -ENODEV;
- 
--	args->aper_size = dev->gtt_total;
--	args->aper_available_size = (args->aper_size -
--				     atomic_read(&dev->pin_memory));
-+	mutex_lock(&dev->struct_mutex);
-+	args->aper_size = dev_priv->mm.gtt_total;
-+	args->aper_available_size = args->aper_size - dev_priv->mm.pin_memory;
-+	mutex_unlock(&dev->struct_mutex);
- 
- 	return 0;
- }
-@@ -136,12 +244,17 @@ i915_gem_create_ioctl(struct drm_device *dev, void *data,
- 		return -ENOMEM;
- 
- 	ret = drm_gem_handle_create(file_priv, obj, &handle);
--	/* drop reference from allocate - handle holds it now */
--	drm_gem_object_unreference_unlocked(obj);
- 	if (ret) {
-+		drm_gem_object_release(obj);
-+		i915_gem_info_remove_obj(dev->dev_private, obj->size);
-+		kfree(obj);
- 		return ret;
- 	}
- 
-+	/* drop reference from allocate - handle holds it now */
-+	drm_gem_object_unreference(obj);
-+	trace_i915_gem_object_create(obj);
-+
- 	args->handle = handle;
- 	return 0;
- }
-@@ -152,19 +265,14 @@ fast_shmem_read(struct page **pages,
- 		char __user *data,
- 		int length)
- {
--	char __iomem *vaddr;
--	int unwritten;
-+	char *vaddr;
-+	int ret;
- 
- 	vaddr = kmap_atomic(pages[page_base >> PAGE_SHIFT], KM_USER0);
--	if (vaddr == NULL)
--		return -ENOMEM;
--	unwritten = __copy_to_user_inatomic(data, vaddr + page_offset, length);
-+	ret = __copy_to_user_inatomic(data, vaddr + page_offset, length);
- 	kunmap_atomic(vaddr, KM_USER0);
- 
--	if (unwritten)
--		return -EFAULT;
--
--	return 0;
-+	return ret;
- }
- 
- static int i915_gem_object_needs_bit17_swizzle(struct drm_gem_object *obj)
-@@ -258,22 +366,10 @@ i915_gem_shmem_pread_fast(struct drm_device *dev, struct drm_gem_object *obj,
- 	loff_t offset, page_base;
- 	char __user *user_data;
- 	int page_offset, page_length;
--	int ret;
- 
- 	user_data = (char __user *) (uintptr_t) args->data_ptr;
- 	remain = args->size;
- 
--	mutex_lock(&dev->struct_mutex);
--
--	ret = i915_gem_object_get_pages(obj, 0);
--	if (ret != 0)
--		goto fail_unlock;
--
--	ret = i915_gem_object_set_cpu_read_domain_range(obj, args->offset,
--							args->size);
--	if (ret != 0)
--		goto fail_put_pages;
--
- 	obj_priv = to_intel_bo(obj);
- 	offset = args->offset;
- 
-@@ -290,23 +386,17 @@ i915_gem_shmem_pread_fast(struct drm_device *dev, struct drm_gem_object *obj,
- 		if ((page_offset + remain) > PAGE_SIZE)
- 			page_length = PAGE_SIZE - page_offset;
- 
--		ret = fast_shmem_read(obj_priv->pages,
--				      page_base, page_offset,
--				      user_data, page_length);
--		if (ret)
--			goto fail_put_pages;
-+		if (fast_shmem_read(obj_priv->pages,
-+				    page_base, page_offset,
-+				    user_data, page_length))
-+			return -EFAULT;
- 
- 		remain -= page_length;
- 		user_data += page_length;
- 		offset += page_length;
- 	}
- 
--fail_put_pages:
--	i915_gem_object_put_pages(obj);
--fail_unlock:
--	mutex_unlock(&dev->struct_mutex);
--
--	return ret;
-+	return 0;
- }
- 
- static int
-@@ -367,31 +457,28 @@ i915_gem_shmem_pread_slow(struct drm_device *dev, struct drm_gem_object *obj,
- 	last_data_page = (data_ptr + args->size - 1) / PAGE_SIZE;
- 	num_pages = last_data_page - first_data_page + 1;
- 
--	user_pages = drm_calloc_large(num_pages, sizeof(struct page *));
-+	user_pages = drm_malloc_ab(num_pages, sizeof(struct page *));
- 	if (user_pages == NULL)
- 		return -ENOMEM;
- 
-+	mutex_unlock(&dev->struct_mutex);
- 	down_read(&mm->mmap_sem);
- 	pinned_pages = get_user_pages(current, mm, (uintptr_t)args->data_ptr,
- 				      num_pages, 1, 0, user_pages, NULL);
- 	up_read(&mm->mmap_sem);
-+	mutex_lock(&dev->struct_mutex);
- 	if (pinned_pages < num_pages) {
- 		ret = -EFAULT;
--		goto fail_put_user_pages;
-+		goto out;
- 	}
- 
--	do_bit17_swizzling = i915_gem_object_needs_bit17_swizzle(obj);
--
--	mutex_lock(&dev->struct_mutex);
--
--	ret = i915_gem_object_get_pages_or_evict(obj);
-+	ret = i915_gem_object_set_cpu_read_domain_range(obj,
-+							args->offset,
-+							args->size);
- 	if (ret)
--		goto fail_unlock;
-+		goto out;
- 
--	ret = i915_gem_object_set_cpu_read_domain_range(obj, args->offset,
--							args->size);
--	if (ret != 0)
--		goto fail_put_pages;
-+	do_bit17_swizzling = i915_gem_object_needs_bit17_swizzle(obj);
- 
- 	obj_priv = to_intel_bo(obj);
- 	offset = args->offset;
-@@ -436,11 +523,7 @@ i915_gem_shmem_pread_slow(struct drm_device *dev, struct drm_gem_object *obj,
- 		offset += page_length;
- 	}
- 
--fail_put_pages:
--	i915_gem_object_put_pages(obj);
--fail_unlock:
--	mutex_unlock(&dev->struct_mutex);
--fail_put_user_pages:
-+out:
- 	for (i = 0; i < pinned_pages; i++) {
- 		SetPageDirty(user_pages[i]);
- 		page_cache_release(user_pages[i]);
-@@ -462,37 +545,64 @@ i915_gem_pread_ioctl(struct drm_device *dev, void *data,
- 	struct drm_i915_gem_pread *args = data;
- 	struct drm_gem_object *obj;
- 	struct drm_i915_gem_object *obj_priv;
--	int ret;
-+	int ret = 0;
-+
-+	ret = i915_mutex_lock_interruptible(dev);
-+	if (ret)
-+		return ret;
- 
- 	obj = drm_gem_object_lookup(dev, file_priv, args->handle);
--	if (obj == NULL)
--		return -ENOENT;
-+	if (obj == NULL) {
-+		ret = -ENOENT;
-+		goto unlock;
-+	}
- 	obj_priv = to_intel_bo(obj);
- 
- 	/* Bounds check source.  */
- 	if (args->offset > obj->size || args->size > obj->size - args->offset) {
- 		ret = -EINVAL;
--		goto err;
-+		goto out;
- 	}
- 
-+	if (args->size == 0)
-+		goto out;
-+
- 	if (!access_ok(VERIFY_WRITE,
- 		       (char __user *)(uintptr_t)args->data_ptr,
- 		       args->size)) {
- 		ret = -EFAULT;
--		goto err;
-+		goto out;
- 	}
- 
--	if (i915_gem_object_needs_bit17_swizzle(obj)) {
--		ret = i915_gem_shmem_pread_slow(dev, obj, args, file_priv);
--	} else {
--		ret = i915_gem_shmem_pread_fast(dev, obj, args, file_priv);
--		if (ret != 0)
--			ret = i915_gem_shmem_pread_slow(dev, obj, args,
--							file_priv);
-+	ret = fault_in_pages_writeable((char __user *)(uintptr_t)args->data_ptr,
-+				       args->size);
-+	if (ret) {
-+		ret = -EFAULT;
-+		goto out;
- 	}
- 
--err:
--	drm_gem_object_unreference_unlocked(obj);
-+	ret = i915_gem_object_get_pages_or_evict(obj);
-+	if (ret)
-+		goto out;
-+
-+	ret = i915_gem_object_set_cpu_read_domain_range(obj,
-+							args->offset,
-+							args->size);
-+	if (ret)
-+		goto out_put;
-+
-+	ret = -EFAULT;
-+	if (!i915_gem_object_needs_bit17_swizzle(obj))
-+		ret = i915_gem_shmem_pread_fast(dev, obj, args, file_priv);
-+	if (ret == -EFAULT)
-+		ret = i915_gem_shmem_pread_slow(dev, obj, args, file_priv);
-+
-+out_put:
-+	i915_gem_object_put_pages(obj);
-+out:
-+	drm_gem_object_unreference(obj);
-+unlock:
-+	mutex_unlock(&dev->struct_mutex);
- 	return ret;
- }
- 
-@@ -513,9 +623,7 @@ fast_user_write(struct io_mapping *mapping,
- 	unwritten = __copy_from_user_inatomic_nocache(vaddr_atomic + page_offset,
- 						      user_data, length);
- 	io_mapping_unmap_atomic(vaddr_atomic, KM_USER0);
--	if (unwritten)
--		return -EFAULT;
--	return 0;
-+	return unwritten;
- }
- 
- /* Here's the write path which can sleep for
-@@ -548,18 +656,14 @@ fast_shmem_write(struct page **pages,
- 		 char __user *data,
- 		 int length)
- {
--	char __iomem *vaddr;
--	unsigned long unwritten;
-+	char *vaddr;
-+	int ret;
- 
- 	vaddr = kmap_atomic(pages[page_base >> PAGE_SHIFT], KM_USER0);
--	if (vaddr == NULL)
--		return -ENOMEM;
--	unwritten = __copy_from_user_inatomic(vaddr + page_offset, data, length);
-+	ret = __copy_from_user_inatomic(vaddr + page_offset, data, length);
- 	kunmap_atomic(vaddr, KM_USER0);
- 
--	if (unwritten)
--		return -EFAULT;
--	return 0;
-+	return ret;
- }
- 
- /**
-@@ -577,22 +681,10 @@ i915_gem_gtt_pwrite_fast(struct drm_device *dev, struct drm_gem_object *obj,
- 	loff_t offset, page_base;
- 	char __user *user_data;
- 	int page_offset, page_length;
--	int ret;
- 
- 	user_data = (char __user *) (uintptr_t) args->data_ptr;
- 	remain = args->size;
- 
--
--	mutex_lock(&dev->struct_mutex);
--	ret = i915_gem_object_pin(obj, 0);
--	if (ret) {
--		mutex_unlock(&dev->struct_mutex);
--		return ret;
--	}
--	ret = i915_gem_object_set_to_gtt_domain(obj, 1);
--	if (ret)
--		goto fail;
--
- 	obj_priv = to_intel_bo(obj);
- 	offset = obj_priv->gtt_offset + args->offset;
- 
-@@ -609,26 +701,21 @@ i915_gem_gtt_pwrite_fast(struct drm_device *dev, struct drm_gem_object *obj,
- 		if ((page_offset + remain) > PAGE_SIZE)
- 			page_length = PAGE_SIZE - page_offset;
- 
--		ret = fast_user_write (dev_priv->mm.gtt_mapping, page_base,
--				       page_offset, user_data, page_length);
--
- 		/* If we get a fault while copying data, then (presumably) our
- 		 * source page isn't available.  Return the error and we'll
- 		 * retry in the slow path.
- 		 */
--		if (ret)
--			goto fail;
-+		if (fast_user_write(dev_priv->mm.gtt_mapping, page_base,
-+				    page_offset, user_data, page_length))
-+
-+			return -EFAULT;
- 
- 		remain -= page_length;
- 		user_data += page_length;
- 		offset += page_length;
- 	}
- 
--fail:
--	i915_gem_object_unpin(obj);
--	mutex_unlock(&dev->struct_mutex);
--
--	return ret;
-+	return 0;
- }
- 
- /**
-@@ -665,27 +752,24 @@ i915_gem_gtt_pwrite_slow(struct drm_device *dev, struct drm_gem_object *obj,
- 	last_data_page = (data_ptr + args->size - 1) / PAGE_SIZE;
- 	num_pages = last_data_page - first_data_page + 1;
- 
--	user_pages = drm_calloc_large(num_pages, sizeof(struct page *));
-+	user_pages = drm_malloc_ab(num_pages, sizeof(struct page *));
- 	if (user_pages == NULL)
- 		return -ENOMEM;
- 
-+	mutex_unlock(&dev->struct_mutex);
- 	down_read(&mm->mmap_sem);
- 	pinned_pages = get_user_pages(current, mm, (uintptr_t)args->data_ptr,
- 				      num_pages, 0, 0, user_pages, NULL);
- 	up_read(&mm->mmap_sem);
-+	mutex_lock(&dev->struct_mutex);
- 	if (pinned_pages < num_pages) {
- 		ret = -EFAULT;
- 		goto out_unpin_pages;
- 	}
- 
--	mutex_lock(&dev->struct_mutex);
--	ret = i915_gem_object_pin(obj, 0);
--	if (ret)
--		goto out_unlock;
--
- 	ret = i915_gem_object_set_to_gtt_domain(obj, 1);
- 	if (ret)
--		goto out_unpin_object;
-+		goto out_unpin_pages;
- 
- 	obj_priv = to_intel_bo(obj);
- 	offset = obj_priv->gtt_offset + args->offset;
-@@ -721,10 +805,6 @@ i915_gem_gtt_pwrite_slow(struct drm_device *dev, struct drm_gem_object *obj,
- 		data_ptr += page_length;
- 	}
- 
--out_unpin_object:
--	i915_gem_object_unpin(obj);
--out_unlock:
--	mutex_unlock(&dev->struct_mutex);
- out_unpin_pages:
- 	for (i = 0; i < pinned_pages; i++)
- 		page_cache_release(user_pages[i]);
-@@ -747,21 +827,10 @@ i915_gem_shmem_pwrite_fast(struct drm_device *dev, struct drm_gem_object *obj,
- 	loff_t offset, page_base;
- 	char __user *user_data;
- 	int page_offset, page_length;
--	int ret;
- 
- 	user_data = (char __user *) (uintptr_t) args->data_ptr;
- 	remain = args->size;
- 
--	mutex_lock(&dev->struct_mutex);
--
--	ret = i915_gem_object_get_pages(obj, 0);
--	if (ret != 0)
--		goto fail_unlock;
--
--	ret = i915_gem_object_set_to_cpu_domain(obj, 1);
--	if (ret != 0)
--		goto fail_put_pages;
--
- 	obj_priv = to_intel_bo(obj);
- 	offset = args->offset;
- 	obj_priv->dirty = 1;
-@@ -779,23 +848,17 @@ i915_gem_shmem_pwrite_fast(struct drm_device *dev, struct drm_gem_object *obj,
- 		if ((page_offset + remain) > PAGE_SIZE)
- 			page_length = PAGE_SIZE - page_offset;
- 
--		ret = fast_shmem_write(obj_priv->pages,
-+		if (fast_shmem_write(obj_priv->pages,
- 				       page_base, page_offset,
--				       user_data, page_length);
--		if (ret)
--			goto fail_put_pages;
-+				       user_data, page_length))
-+			return -EFAULT;
- 
- 		remain -= page_length;
- 		user_data += page_length;
- 		offset += page_length;
- 	}
- 
--fail_put_pages:
--	i915_gem_object_put_pages(obj);
--fail_unlock:
--	mutex_unlock(&dev->struct_mutex);
--
--	return ret;
-+	return 0;
- }
- 
- /**
-@@ -833,30 +896,26 @@ i915_gem_shmem_pwrite_slow(struct drm_device *dev, struct drm_gem_object *obj,
- 	last_data_page = (data_ptr + args->size - 1) / PAGE_SIZE;
- 	num_pages = last_data_page - first_data_page + 1;
- 
--	user_pages = drm_calloc_large(num_pages, sizeof(struct page *));
-+	user_pages = drm_malloc_ab(num_pages, sizeof(struct page *));
- 	if (user_pages == NULL)
- 		return -ENOMEM;
- 
-+	mutex_unlock(&dev->struct_mutex);
- 	down_read(&mm->mmap_sem);
- 	pinned_pages = get_user_pages(current, mm, (uintptr_t)args->data_ptr,
- 				      num_pages, 0, 0, user_pages, NULL);
- 	up_read(&mm->mmap_sem);
-+	mutex_lock(&dev->struct_mutex);
- 	if (pinned_pages < num_pages) {
- 		ret = -EFAULT;
--		goto fail_put_user_pages;
-+		goto out;
- 	}
- 
--	do_bit17_swizzling = i915_gem_object_needs_bit17_swizzle(obj);
--
--	mutex_lock(&dev->struct_mutex);
--
--	ret = i915_gem_object_get_pages_or_evict(obj);
-+	ret = i915_gem_object_set_to_cpu_domain(obj, 1);
- 	if (ret)
--		goto fail_unlock;
-+		goto out;
- 
--	ret = i915_gem_object_set_to_cpu_domain(obj, 1);
--	if (ret != 0)
--		goto fail_put_pages;
-+	do_bit17_swizzling = i915_gem_object_needs_bit17_swizzle(obj);
- 
- 	obj_priv = to_intel_bo(obj);
- 	offset = args->offset;
-@@ -902,11 +961,7 @@ i915_gem_shmem_pwrite_slow(struct drm_device *dev, struct drm_gem_object *obj,
- 		offset += page_length;
- 	}
- 
--fail_put_pages:
--	i915_gem_object_put_pages(obj);
--fail_unlock:
--	mutex_unlock(&dev->struct_mutex);
--fail_put_user_pages:
-+out:
- 	for (i = 0; i < pinned_pages; i++)
- 		page_cache_release(user_pages[i]);
- 	drm_free_large(user_pages);
-@@ -921,29 +976,46 @@ fail_put_user_pages:
-  */
- int
- i915_gem_pwrite_ioctl(struct drm_device *dev, void *data,
--		      struct drm_file *file_priv)
-+		      struct drm_file *file)
- {
- 	struct drm_i915_gem_pwrite *args = data;
- 	struct drm_gem_object *obj;
- 	struct drm_i915_gem_object *obj_priv;
- 	int ret = 0;
- 
--	obj = drm_gem_object_lookup(dev, file_priv, args->handle);
--	if (obj == NULL)
--		return -ENOENT;
-+	ret = i915_mutex_lock_interruptible(dev);
-+	if (ret)
-+		return ret;
-+
-+	obj = drm_gem_object_lookup(dev, file, args->handle);
-+	if (obj == NULL) {
-+		ret = -ENOENT;
-+		goto unlock;
-+	}
- 	obj_priv = to_intel_bo(obj);
- 
-+
- 	/* Bounds check destination. */
- 	if (args->offset > obj->size || args->size > obj->size - args->offset) {
- 		ret = -EINVAL;
--		goto err;
-+		goto out;
- 	}
- 
-+	if (args->size == 0)
-+		goto out;
-+
- 	if (!access_ok(VERIFY_READ,
- 		       (char __user *)(uintptr_t)args->data_ptr,
- 		       args->size)) {
- 		ret = -EFAULT;
--		goto err;
-+		goto out;
-+	}
-+
-+	ret = fault_in_pages_readable((char __user *)(uintptr_t)args->data_ptr,
-+				      args->size);
-+	if (ret) {
-+		ret = -EFAULT;
-+		goto out;
- 	}
- 
- 	/* We can only do the GTT pwrite on untiled buffers, as otherwise
-@@ -953,32 +1025,47 @@ i915_gem_pwrite_ioctl(struct drm_device *dev, void *data,
- 	 * perspective, requiring manual detiling by the client.
- 	 */
- 	if (obj_priv->phys_obj)
--		ret = i915_gem_phys_pwrite(dev, obj, args, file_priv);
-+		ret = i915_gem_phys_pwrite(dev, obj, args, file);
- 	else if (obj_priv->tiling_mode == I915_TILING_NONE &&
--		 dev->gtt_total != 0 &&
-+		 obj_priv->gtt_space &&
- 		 obj->write_domain != I915_GEM_DOMAIN_CPU) {
--		ret = i915_gem_gtt_pwrite_fast(dev, obj, args, file_priv);
--		if (ret == -EFAULT) {
--			ret = i915_gem_gtt_pwrite_slow(dev, obj, args,
--						       file_priv);
--		}
--	} else if (i915_gem_object_needs_bit17_swizzle(obj)) {
--		ret = i915_gem_shmem_pwrite_slow(dev, obj, args, file_priv);
-+		ret = i915_gem_object_pin(obj, 0);
-+		if (ret)
-+			goto out;
-+
-+		ret = i915_gem_object_set_to_gtt_domain(obj, 1);
-+		if (ret)
-+			goto out_unpin;
-+
-+		ret = i915_gem_gtt_pwrite_fast(dev, obj, args, file);
-+		if (ret == -EFAULT)
-+			ret = i915_gem_gtt_pwrite_slow(dev, obj, args, file);
-+
-+out_unpin:
-+		i915_gem_object_unpin(obj);
- 	} else {
--		ret = i915_gem_shmem_pwrite_fast(dev, obj, args, file_priv);
--		if (ret == -EFAULT) {
--			ret = i915_gem_shmem_pwrite_slow(dev, obj, args,
--							 file_priv);
--		}
--	}
-+		ret = i915_gem_object_get_pages_or_evict(obj);
-+		if (ret)
-+			goto out;
- 
--#if WATCH_PWRITE
--	if (ret)
--		DRM_INFO("pwrite failed %d\n", ret);
--#endif
-+		ret = i915_gem_object_set_to_cpu_domain(obj, 1);
-+		if (ret)
-+			goto out_put;
- 
--err:
--	drm_gem_object_unreference_unlocked(obj);
-+		ret = -EFAULT;
-+		if (!i915_gem_object_needs_bit17_swizzle(obj))
-+			ret = i915_gem_shmem_pwrite_fast(dev, obj, args, file);
-+		if (ret == -EFAULT)
-+			ret = i915_gem_shmem_pwrite_slow(dev, obj, args, file);
-+
-+out_put:
-+		i915_gem_object_put_pages(obj);
-+	}
-+
-+out:
-+	drm_gem_object_unreference(obj);
-+unlock:
-+	mutex_unlock(&dev->struct_mutex);
- 	return ret;
- }
- 
-@@ -1014,19 +1101,19 @@ i915_gem_set_domain_ioctl(struct drm_device *dev, void *data,
- 	if (write_domain != 0 && read_domains != write_domain)
- 		return -EINVAL;
- 
-+	ret = i915_mutex_lock_interruptible(dev);
-+	if (ret)
-+		return ret;
-+
- 	obj = drm_gem_object_lookup(dev, file_priv, args->handle);
--	if (obj == NULL)
--		return -ENOENT;
-+	if (obj == NULL) {
-+		ret = -ENOENT;
-+		goto unlock;
-+	}
- 	obj_priv = to_intel_bo(obj);
- 
--	mutex_lock(&dev->struct_mutex);
--
- 	intel_mark_busy(dev, obj);
- 
--#if WATCH_BUF
--	DRM_INFO("set_domain_ioctl %p(%zd), %08x %08x\n",
--		 obj, obj->size, read_domains, write_domain);
--#endif
- 	if (read_domains & I915_GEM_DOMAIN_GTT) {
- 		ret = i915_gem_object_set_to_gtt_domain(obj, write_domain != 0);
- 
-@@ -1050,12 +1137,12 @@ i915_gem_set_domain_ioctl(struct drm_device *dev, void *data,
- 		ret = i915_gem_object_set_to_cpu_domain(obj, write_domain != 0);
- 	}
- 
--	
- 	/* Maintain LRU order of "inactive" objects */
- 	if (ret == 0 && i915_gem_object_is_inactive(obj_priv))
--		list_move_tail(&obj_priv->list, &dev_priv->mm.inactive_list);
-+		list_move_tail(&obj_priv->mm_list, &dev_priv->mm.inactive_list);
- 
- 	drm_gem_object_unreference(obj);
-+unlock:
- 	mutex_unlock(&dev->struct_mutex);
- 	return ret;
- }
-@@ -1069,30 +1156,27 @@ i915_gem_sw_finish_ioctl(struct drm_device *dev, void *data,
- {
- 	struct drm_i915_gem_sw_finish *args = data;
- 	struct drm_gem_object *obj;
--	struct drm_i915_gem_object *obj_priv;
- 	int ret = 0;
- 
- 	if (!(dev->driver->driver_features & DRIVER_GEM))
- 		return -ENODEV;
- 
--	mutex_lock(&dev->struct_mutex);
-+	ret = i915_mutex_lock_interruptible(dev);
-+	if (ret)
-+		return ret;
-+
- 	obj = drm_gem_object_lookup(dev, file_priv, args->handle);
- 	if (obj == NULL) {
--		mutex_unlock(&dev->struct_mutex);
--		return -ENOENT;
-+		ret = -ENOENT;
-+		goto unlock;
- 	}
- 
--#if WATCH_BUF
--	DRM_INFO("%s: sw_finish %d (%p %zd)\n",
--		 __func__, args->handle, obj, obj->size);
--#endif
--	obj_priv = to_intel_bo(obj);
--
- 	/* Pinned buffers may be scanout, so flush the cache */
--	if (obj_priv->pin_count)
-+	if (to_intel_bo(obj)->pin_count)
- 		i915_gem_object_flush_cpu_write_domain(obj);
- 
- 	drm_gem_object_unreference(obj);
-+unlock:
- 	mutex_unlock(&dev->struct_mutex);
- 	return ret;
- }
-@@ -1181,13 +1265,13 @@ int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
- 
- 	/* Need a new fence register? */
- 	if (obj_priv->tiling_mode != I915_TILING_NONE) {
--		ret = i915_gem_object_get_fence_reg(obj);
-+		ret = i915_gem_object_get_fence_reg(obj, true);
- 		if (ret)
- 			goto unlock;
- 	}
- 
- 	if (i915_gem_object_is_inactive(obj_priv))
--		list_move_tail(&obj_priv->list, &dev_priv->mm.inactive_list);
-+		list_move_tail(&obj_priv->mm_list, &dev_priv->mm.inactive_list);
- 
- 	pfn = ((dev->agp->base + obj_priv->gtt_offset) >> PAGE_SHIFT) +
- 		page_offset;
-@@ -1246,7 +1330,7 @@ i915_gem_create_mmap_offset(struct drm_gem_object *obj)
- 						    obj->size / PAGE_SIZE, 0, 0);
- 	if (!list->file_offset_node) {
- 		DRM_ERROR("failed to allocate offset for bo %d\n", obj->name);
--		ret = -ENOMEM;
-+		ret = -ENOSPC;
- 		goto out_free_list;
- 	}
- 
-@@ -1258,9 +1342,9 @@ i915_gem_create_mmap_offset(struct drm_gem_object *obj)
- 	}
- 
- 	list->hash.key = list->file_offset_node->start;
--	if (drm_ht_insert_item(&mm->offset_hash, &list->hash)) {
-+	ret = drm_ht_insert_item(&mm->offset_hash, &list->hash);
-+	if (ret) {
- 		DRM_ERROR("failed to add to map hash\n");
--		ret = -ENOMEM;
- 		goto out_free_mm;
- 	}
- 
-@@ -1345,14 +1429,14 @@ i915_gem_get_gtt_alignment(struct drm_gem_object *obj)
- 	 * Minimum alignment is 4k (GTT page size), but might be greater
- 	 * if a fence register is needed for the object.
- 	 */
--	if (IS_I965G(dev) || obj_priv->tiling_mode == I915_TILING_NONE)
-+	if (INTEL_INFO(dev)->gen >= 4 || obj_priv->tiling_mode == I915_TILING_NONE)
- 		return 4096;
- 
- 	/*
- 	 * Previous chips need to be aligned to the size of the smallest
- 	 * fence register that can contain the object.
- 	 */
--	if (IS_I9XX(dev))
-+	if (INTEL_INFO(dev)->gen == 3)
- 		start = 1024*1024;
- 	else
- 		start = 512*1024;
-@@ -1390,29 +1474,27 @@ i915_gem_mmap_gtt_ioctl(struct drm_device *dev, void *data,
- 	if (!(dev->driver->driver_features & DRIVER_GEM))
- 		return -ENODEV;
- 
--	obj = drm_gem_object_lookup(dev, file_priv, args->handle);
--	if (obj == NULL)
--		return -ENOENT;
--
--	mutex_lock(&dev->struct_mutex);
-+	ret = i915_mutex_lock_interruptible(dev);
-+	if (ret)
-+		return ret;
- 
-+	obj = drm_gem_object_lookup(dev, file_priv, args->handle);
-+	if (obj == NULL) {
-+		ret = -ENOENT;
-+		goto unlock;
-+	}
- 	obj_priv = to_intel_bo(obj);
- 
- 	if (obj_priv->madv != I915_MADV_WILLNEED) {
- 		DRM_ERROR("Attempting to mmap a purgeable buffer\n");
--		drm_gem_object_unreference(obj);
--		mutex_unlock(&dev->struct_mutex);
--		return -EINVAL;
-+		ret = -EINVAL;
-+		goto out;
- 	}
- 
--
- 	if (!obj_priv->mmap_offset) {
- 		ret = i915_gem_create_mmap_offset(obj);
--		if (ret) {
--			drm_gem_object_unreference(obj);
--			mutex_unlock(&dev->struct_mutex);
--			return ret;
--		}
-+		if (ret)
-+			goto out;
- 	}
- 
- 	args->offset = obj_priv->mmap_offset;
-@@ -1423,20 +1505,18 @@ i915_gem_mmap_gtt_ioctl(struct drm_device *dev, void *data,
- 	 */
- 	if (!obj_priv->agp_mem) {
- 		ret = i915_gem_object_bind_to_gtt(obj, 0);
--		if (ret) {
--			drm_gem_object_unreference(obj);
--			mutex_unlock(&dev->struct_mutex);
--			return ret;
--		}
-+		if (ret)
-+			goto out;
- 	}
- 
-+out:
- 	drm_gem_object_unreference(obj);
-+unlock:
- 	mutex_unlock(&dev->struct_mutex);
--
--	return 0;
-+	return ret;
- }
- 
--void
-+static void
- i915_gem_object_put_pages(struct drm_gem_object *obj)
- {
- 	struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
-@@ -1470,13 +1550,25 @@ i915_gem_object_put_pages(struct drm_gem_object *obj)
- 	obj_priv->pages = NULL;
- }
- 
-+static uint32_t
-+i915_gem_next_request_seqno(struct drm_device *dev,
-+			    struct intel_ring_buffer *ring)
-+{
-+	drm_i915_private_t *dev_priv = dev->dev_private;
-+
-+	ring->outstanding_lazy_request = true;
-+	return dev_priv->next_seqno;
-+}
-+
- static void
--i915_gem_object_move_to_active(struct drm_gem_object *obj, uint32_t seqno,
-+i915_gem_object_move_to_active(struct drm_gem_object *obj,
- 			       struct intel_ring_buffer *ring)
- {
- 	struct drm_device *dev = obj->dev;
--	drm_i915_private_t *dev_priv = dev->dev_private;
-+	struct drm_i915_private *dev_priv = dev->dev_private;
- 	struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
-+	uint32_t seqno = i915_gem_next_request_seqno(dev, ring);
-+
- 	BUG_ON(ring == NULL);
- 	obj_priv->ring = ring;
- 
-@@ -1485,10 +1577,10 @@ i915_gem_object_move_to_active(struct drm_gem_object *obj, uint32_t seqno,
- 		drm_gem_object_reference(obj);
- 		obj_priv->active = 1;
- 	}
-+
- 	/* Move from whatever list we were on to the tail of execution. */
--	spin_lock(&dev_priv->mm.active_list_lock);
--	list_move_tail(&obj_priv->list, &ring->active_list);
--	spin_unlock(&dev_priv->mm.active_list_lock);
-+	list_move_tail(&obj_priv->mm_list, &dev_priv->mm.active_list);
-+	list_move_tail(&obj_priv->ring_list, &ring->active_list);
- 	obj_priv->last_rendering_seqno = seqno;
- }
- 
-@@ -1500,7 +1592,8 @@ i915_gem_object_move_to_flushing(struct drm_gem_object *obj)
- 	struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
- 
- 	BUG_ON(!obj_priv->active);
--	list_move_tail(&obj_priv->list, &dev_priv->mm.flushing_list);
-+	list_move_tail(&obj_priv->mm_list, &dev_priv->mm.flushing_list);
-+	list_del_init(&obj_priv->ring_list);
- 	obj_priv->last_rendering_seqno = 0;
- }
- 
-@@ -1538,11 +1631,11 @@ i915_gem_object_move_to_inactive(struct drm_gem_object *obj)
- 	drm_i915_private_t *dev_priv = dev->dev_private;
- 	struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
- 
--	i915_verify_inactive(dev, __FILE__, __LINE__);
- 	if (obj_priv->pin_count != 0)
--		list_del_init(&obj_priv->list);
-+		list_move_tail(&obj_priv->mm_list, &dev_priv->mm.pinned_list);
- 	else
--		list_move_tail(&obj_priv->list, &dev_priv->mm.inactive_list);
-+		list_move_tail(&obj_priv->mm_list, &dev_priv->mm.inactive_list);
-+	list_del_init(&obj_priv->ring_list);
- 
- 	BUG_ON(!list_empty(&obj_priv->gpu_write_list));
- 
-@@ -1552,30 +1645,28 @@ i915_gem_object_move_to_inactive(struct drm_gem_object *obj)
- 		obj_priv->active = 0;
- 		drm_gem_object_unreference(obj);
- 	}
--	i915_verify_inactive(dev, __FILE__, __LINE__);
-+	WARN_ON(i915_verify_lists(dev));
- }
- 
- static void
- i915_gem_process_flushing_list(struct drm_device *dev,
--			       uint32_t flush_domains, uint32_t seqno,
-+			       uint32_t flush_domains,
- 			       struct intel_ring_buffer *ring)
- {
- 	drm_i915_private_t *dev_priv = dev->dev_private;
- 	struct drm_i915_gem_object *obj_priv, *next;
- 
- 	list_for_each_entry_safe(obj_priv, next,
--				 &dev_priv->mm.gpu_write_list,
-+				 &ring->gpu_write_list,
- 				 gpu_write_list) {
- 		struct drm_gem_object *obj = &obj_priv->base;
- 
--		if ((obj->write_domain & flush_domains) ==
--		    obj->write_domain &&
--		    obj_priv->ring->ring_flag == ring->ring_flag) {
-+		if (obj->write_domain & flush_domains) {
- 			uint32_t old_write_domain = obj->write_domain;
- 
- 			obj->write_domain = 0;
- 			list_del_init(&obj_priv->gpu_write_list);
--			i915_gem_object_move_to_active(obj, seqno, ring);
-+			i915_gem_object_move_to_active(obj, ring);
- 
- 			/* update the fence lru list */
- 			if (obj_priv->fence_reg != I915_FENCE_REG_NONE) {
-@@ -1593,23 +1684,27 @@ i915_gem_process_flushing_list(struct drm_device *dev,
- }
- 
- uint32_t
--i915_add_request(struct drm_device *dev, struct drm_file *file_priv,
--		 uint32_t flush_domains, struct intel_ring_buffer *ring)
-+i915_add_request(struct drm_device *dev,
-+		 struct drm_file *file,
-+		 struct drm_i915_gem_request *request,
-+		 struct intel_ring_buffer *ring)
- {
- 	drm_i915_private_t *dev_priv = dev->dev_private;
--	struct drm_i915_file_private *i915_file_priv = NULL;
--	struct drm_i915_gem_request *request;
-+	struct drm_i915_file_private *file_priv = NULL;
- 	uint32_t seqno;
- 	int was_empty;
- 
--	if (file_priv != NULL)
--		i915_file_priv = file_priv->driver_priv;
-+	if (file != NULL)
-+		file_priv = file->driver_priv;
- 
--	request = kzalloc(sizeof(*request), GFP_KERNEL);
--	if (request == NULL)
--		return 0;
-+	if (request == NULL) {
-+		request = kzalloc(sizeof(*request), GFP_KERNEL);
-+		if (request == NULL)
-+			return 0;
-+	}
- 
--	seqno = ring->add_request(dev, ring, file_priv, flush_domains);
-+	seqno = ring->add_request(dev, ring, 0);
-+	ring->outstanding_lazy_request = false;
- 
- 	request->seqno = seqno;
- 	request->ring = ring;
-@@ -1617,23 +1712,20 @@ i915_add_request(struct drm_device *dev, struct drm_file *file_priv,
- 	was_empty = list_empty(&ring->request_list);
- 	list_add_tail(&request->list, &ring->request_list);
- 
--	if (i915_file_priv) {
-+	if (file_priv) {
-+		spin_lock(&file_priv->mm.lock);
-+		request->file_priv = file_priv;
- 		list_add_tail(&request->client_list,
--			      &i915_file_priv->mm.request_list);
--	} else {
--		INIT_LIST_HEAD(&request->client_list);
-+			      &file_priv->mm.request_list);
-+		spin_unlock(&file_priv->mm.lock);
- 	}
- 
--	/* Associate any objects on the flushing list matching the write
--	 * domain we're flushing with our flush.
--	 */
--	if (flush_domains != 0) 
--		i915_gem_process_flushing_list(dev, flush_domains, seqno, ring);
--
- 	if (!dev_priv->mm.suspended) {
--		mod_timer(&dev_priv->hangcheck_timer, jiffies + DRM_I915_HANGCHECK_PERIOD);
-+		mod_timer(&dev_priv->hangcheck_timer,
-+			  jiffies + msecs_to_jiffies(DRM_I915_HANGCHECK_PERIOD));
- 		if (was_empty)
--			queue_delayed_work(dev_priv->wq, &dev_priv->mm.retire_work, HZ);
-+			queue_delayed_work(dev_priv->wq,
-+					   &dev_priv->mm.retire_work, HZ);
- 	}
- 	return seqno;
- }
-@@ -1644,91 +1736,105 @@ i915_add_request(struct drm_device *dev, struct drm_file *file_priv,
-  * Ensures that all commands in the ring are finished
-  * before signalling the CPU
-  */
--static uint32_t
-+static void
- i915_retire_commands(struct drm_device *dev, struct intel_ring_buffer *ring)
- {
- 	uint32_t flush_domains = 0;
- 
- 	/* The sampler always gets flushed on i965 (sigh) */
--	if (IS_I965G(dev))
-+	if (INTEL_INFO(dev)->gen >= 4)
- 		flush_domains |= I915_GEM_DOMAIN_SAMPLER;
- 
- 	ring->flush(dev, ring,
- 			I915_GEM_DOMAIN_COMMAND, flush_domains);
--	return flush_domains;
- }
- 
--/**
-- * Moves buffers associated only with the given active seqno from the active
-- * to inactive list, potentially freeing them.
-- */
--static void
--i915_gem_retire_request(struct drm_device *dev,
--			struct drm_i915_gem_request *request)
-+static inline void
-+i915_gem_request_remove_from_client(struct drm_i915_gem_request *request)
- {
--	drm_i915_private_t *dev_priv = dev->dev_private;
-+	struct drm_i915_file_private *file_priv = request->file_priv;
- 
--	trace_i915_gem_request_retire(dev, request->seqno);
-+	if (!file_priv)
-+		return;
- 
--	/* Move any buffers on the active list that are no longer referenced
--	 * by the ringbuffer to the flushing/inactive lists as appropriate.
--	 */
--	spin_lock(&dev_priv->mm.active_list_lock);
--	while (!list_empty(&request->ring->active_list)) {
--		struct drm_gem_object *obj;
--		struct drm_i915_gem_object *obj_priv;
-+	spin_lock(&file_priv->mm.lock);
-+	list_del(&request->client_list);
-+	request->file_priv = NULL;
-+	spin_unlock(&file_priv->mm.lock);
-+}
- 
--		obj_priv = list_first_entry(&request->ring->active_list,
--					    struct drm_i915_gem_object,
--					    list);
--		obj = &obj_priv->base;
-+static void i915_gem_reset_ring_lists(struct drm_i915_private *dev_priv,
-+				      struct intel_ring_buffer *ring)
-+{
-+	while (!list_empty(&ring->request_list)) {
-+		struct drm_i915_gem_request *request;
- 
--		/* If the seqno being retired doesn't match the oldest in the
--		 * list, then the oldest in the list must still be newer than
--		 * this seqno.
--		 */
--		if (obj_priv->last_rendering_seqno != request->seqno)
--			goto out;
-+		request = list_first_entry(&ring->request_list,
-+					   struct drm_i915_gem_request,
-+					   list);
- 
--#if WATCH_LRU
--		DRM_INFO("%s: retire %d moves to inactive list %p\n",
--			 __func__, request->seqno, obj);
--#endif
-+		list_del(&request->list);
-+		i915_gem_request_remove_from_client(request);
-+		kfree(request);
-+	}
- 
--		if (obj->write_domain != 0)
--			i915_gem_object_move_to_flushing(obj);
--		else {
--			/* Take a reference on the object so it won't be
--			 * freed while the spinlock is held.  The list
--			 * protection for this spinlock is safe when breaking
--			 * the lock like this since the next thing we do
--			 * is just get the head of the list again.
--			 */
--			drm_gem_object_reference(obj);
--			i915_gem_object_move_to_inactive(obj);
--			spin_unlock(&dev_priv->mm.active_list_lock);
--			drm_gem_object_unreference(obj);
--			spin_lock(&dev_priv->mm.active_list_lock);
--		}
-+	while (!list_empty(&ring->active_list)) {
-+		struct drm_i915_gem_object *obj_priv;
-+
-+		obj_priv = list_first_entry(&ring->active_list,
-+					    struct drm_i915_gem_object,
-+					    ring_list);
-+
-+		obj_priv->base.write_domain = 0;
-+		list_del_init(&obj_priv->gpu_write_list);
-+		i915_gem_object_move_to_inactive(&obj_priv->base);
- 	}
--out:
--	spin_unlock(&dev_priv->mm.active_list_lock);
- }
- 
--/**
-- * Returns true if seq1 is later than seq2.
-- */
--bool
--i915_seqno_passed(uint32_t seq1, uint32_t seq2)
-+void i915_gem_reset(struct drm_device *dev)
- {
--	return (int32_t)(seq1 - seq2) >= 0;
--}
-+	struct drm_i915_private *dev_priv = dev->dev_private;
-+	struct drm_i915_gem_object *obj_priv;
-+	int i;
- 
--uint32_t
--i915_get_gem_seqno(struct drm_device *dev,
--		   struct intel_ring_buffer *ring)
--{
--	return ring->get_gem_seqno(dev, ring);
-+	i915_gem_reset_ring_lists(dev_priv, &dev_priv->render_ring);
-+	i915_gem_reset_ring_lists(dev_priv, &dev_priv->bsd_ring);
-+	i915_gem_reset_ring_lists(dev_priv, &dev_priv->blt_ring);
-+
-+	/* Remove anything from the flushing lists. The GPU cache is likely
-+	 * to be lost on reset along with the data, so simply move the
-+	 * lost bo to the inactive list.
-+	 */
-+	while (!list_empty(&dev_priv->mm.flushing_list)) {
-+		obj_priv = list_first_entry(&dev_priv->mm.flushing_list,
-+					    struct drm_i915_gem_object,
-+					    mm_list);
-+
-+		obj_priv->base.write_domain = 0;
-+		list_del_init(&obj_priv->gpu_write_list);
-+		i915_gem_object_move_to_inactive(&obj_priv->base);
-+	}
-+
-+	/* Move everything out of the GPU domains to ensure we do any
-+	 * necessary invalidation upon reuse.
-+	 */
-+	list_for_each_entry(obj_priv,
-+			    &dev_priv->mm.inactive_list,
-+			    mm_list)
-+	{
-+		obj_priv->base.read_domains &= ~I915_GEM_GPU_DOMAINS;
-+	}
-+
-+	/* The fence registers are invalidated so clear them out */
-+	for (i = 0; i < 16; i++) {
-+		struct drm_i915_fence_reg *reg;
-+
-+		reg = &dev_priv->fence_regs[i];
-+		if (!reg->obj)
-+			continue;
-+
-+		i915_gem_clear_fence_reg(reg->obj);
-+	}
- }
- 
- /**
-@@ -1741,38 +1847,58 @@ i915_gem_retire_requests_ring(struct drm_device *dev,
- 	drm_i915_private_t *dev_priv = dev->dev_private;
- 	uint32_t seqno;
- 
--	if (!ring->status_page.page_addr
--			|| list_empty(&ring->request_list))
-+	if (!ring->status_page.page_addr ||
-+	    list_empty(&ring->request_list))
- 		return;
- 
--	seqno = i915_get_gem_seqno(dev, ring);
-+	WARN_ON(i915_verify_lists(dev));
- 
-+	seqno = ring->get_seqno(dev, ring);
- 	while (!list_empty(&ring->request_list)) {
- 		struct drm_i915_gem_request *request;
--		uint32_t retiring_seqno;
- 
- 		request = list_first_entry(&ring->request_list,
- 					   struct drm_i915_gem_request,
- 					   list);
--		retiring_seqno = request->seqno;
- 
--		if (i915_seqno_passed(seqno, retiring_seqno) ||
--		    atomic_read(&dev_priv->mm.wedged)) {
--			i915_gem_retire_request(dev, request);
-+		if (!i915_seqno_passed(seqno, request->seqno))
-+			break;
-+
-+		trace_i915_gem_request_retire(dev, request->seqno);
-+
-+		list_del(&request->list);
-+		i915_gem_request_remove_from_client(request);
-+		kfree(request);
-+	}
-+
-+	/* Move any buffers on the active list that are no longer referenced
-+	 * by the ringbuffer to the flushing/inactive lists as appropriate.
-+	 */
-+	while (!list_empty(&ring->active_list)) {
-+		struct drm_gem_object *obj;
-+		struct drm_i915_gem_object *obj_priv;
-+
-+		obj_priv = list_first_entry(&ring->active_list,
-+					    struct drm_i915_gem_object,
-+					    ring_list);
- 
--			list_del(&request->list);
--			list_del(&request->client_list);
--			kfree(request);
--		} else
-+		if (!i915_seqno_passed(seqno, obj_priv->last_rendering_seqno))
- 			break;
-+
-+		obj = &obj_priv->base;
-+		if (obj->write_domain != 0)
-+			i915_gem_object_move_to_flushing(obj);
-+		else
-+			i915_gem_object_move_to_inactive(obj);
- 	}
- 
- 	if (unlikely (dev_priv->trace_irq_seqno &&
- 		      i915_seqno_passed(dev_priv->trace_irq_seqno, seqno))) {
--
- 		ring->user_irq_put(dev, ring);
- 		dev_priv->trace_irq_seqno = 0;
- 	}
-+
-+	WARN_ON(i915_verify_lists(dev));
- }
- 
- void
-@@ -1790,16 +1916,16 @@ i915_gem_retire_requests(struct drm_device *dev)
- 	     */
- 	    list_for_each_entry_safe(obj_priv, tmp,
- 				     &dev_priv->mm.deferred_free_list,
--				     list)
-+				     mm_list)
- 		    i915_gem_free_object_tail(&obj_priv->base);
- 	}
- 
- 	i915_gem_retire_requests_ring(dev, &dev_priv->render_ring);
--	if (HAS_BSD(dev))
--		i915_gem_retire_requests_ring(dev, &dev_priv->bsd_ring);
-+	i915_gem_retire_requests_ring(dev, &dev_priv->bsd_ring);
-+	i915_gem_retire_requests_ring(dev, &dev_priv->blt_ring);
- }
- 
--void
-+static void
- i915_gem_retire_work_handler(struct work_struct *work)
- {
- 	drm_i915_private_t *dev_priv;
-@@ -1809,20 +1935,25 @@ i915_gem_retire_work_handler(struct work_struct *work)
- 				mm.retire_work.work);
- 	dev = dev_priv->dev;
- 
--	mutex_lock(&dev->struct_mutex);
-+	/* Come back later if the device is busy... */
-+	if (!mutex_trylock(&dev->struct_mutex)) {
-+		queue_delayed_work(dev_priv->wq, &dev_priv->mm.retire_work, HZ);
-+		return;
-+	}
-+
- 	i915_gem_retire_requests(dev);
- 
- 	if (!dev_priv->mm.suspended &&
- 		(!list_empty(&dev_priv->render_ring.request_list) ||
--			(HAS_BSD(dev) &&
--			 !list_empty(&dev_priv->bsd_ring.request_list))))
-+		 !list_empty(&dev_priv->bsd_ring.request_list) ||
-+		 !list_empty(&dev_priv->blt_ring.request_list)))
- 		queue_delayed_work(dev_priv->wq, &dev_priv->mm.retire_work, HZ);
- 	mutex_unlock(&dev->struct_mutex);
- }
- 
- int
- i915_do_wait_request(struct drm_device *dev, uint32_t seqno,
--		int interruptible, struct intel_ring_buffer *ring)
-+		     bool interruptible, struct intel_ring_buffer *ring)
- {
- 	drm_i915_private_t *dev_priv = dev->dev_private;
- 	u32 ier;
-@@ -1831,9 +1962,16 @@ i915_do_wait_request(struct drm_device *dev, uint32_t seqno,
- 	BUG_ON(seqno == 0);
- 
- 	if (atomic_read(&dev_priv->mm.wedged))
--		return -EIO;
-+		return -EAGAIN;
- 
--	if (!i915_seqno_passed(ring->get_gem_seqno(dev, ring), seqno)) {
-+	if (ring->outstanding_lazy_request) {
-+		seqno = i915_add_request(dev, NULL, NULL, ring);
-+		if (seqno == 0)
-+			return -ENOMEM;
-+	}
-+	BUG_ON(seqno == dev_priv->next_seqno);
-+
-+	if (!i915_seqno_passed(ring->get_seqno(dev, ring), seqno)) {
- 		if (HAS_PCH_SPLIT(dev))
- 			ier = I915_READ(DEIER) | I915_READ(GTIER);
- 		else
-@@ -1852,12 +1990,12 @@ i915_do_wait_request(struct drm_device *dev, uint32_t seqno,
- 		if (interruptible)
- 			ret = wait_event_interruptible(ring->irq_queue,
- 				i915_seqno_passed(
--					ring->get_gem_seqno(dev, ring), seqno)
-+					ring->get_seqno(dev, ring), seqno)
- 				|| atomic_read(&dev_priv->mm.wedged));
- 		else
- 			wait_event(ring->irq_queue,
- 				i915_seqno_passed(
--					ring->get_gem_seqno(dev, ring), seqno)
-+					ring->get_seqno(dev, ring), seqno)
- 				|| atomic_read(&dev_priv->mm.wedged));
- 
- 		ring->user_irq_put(dev, ring);
-@@ -1866,11 +2004,12 @@ i915_do_wait_request(struct drm_device *dev, uint32_t seqno,
- 		trace_i915_gem_request_wait_end(dev, seqno);
- 	}
- 	if (atomic_read(&dev_priv->mm.wedged))
--		ret = -EIO;
-+		ret = -EAGAIN;
- 
- 	if (ret && ret != -ERESTARTSYS)
--		DRM_ERROR("%s returns %d (awaiting %d at %d)\n",
--			  __func__, ret, seqno, ring->get_gem_seqno(dev, ring));
-+		DRM_ERROR("%s returns %d (awaiting %d at %d, next %d)\n",
-+			  __func__, ret, seqno, ring->get_seqno(dev, ring),
-+			  dev_priv->next_seqno);
- 
- 	/* Directly dispatch request retiring.  While we have the work queue
- 	 * to handle this, the waiter on a request often wants an associated
-@@ -1889,27 +2028,48 @@ i915_do_wait_request(struct drm_device *dev, uint32_t seqno,
-  */
- static int
- i915_wait_request(struct drm_device *dev, uint32_t seqno,
--		struct intel_ring_buffer *ring)
-+		  struct intel_ring_buffer *ring)
- {
- 	return i915_do_wait_request(dev, seqno, 1, ring);
- }
- 
- static void
-+i915_gem_flush_ring(struct drm_device *dev,
-+		    struct drm_file *file_priv,
-+		    struct intel_ring_buffer *ring,
-+		    uint32_t invalidate_domains,
-+		    uint32_t flush_domains)
-+{
-+	ring->flush(dev, ring, invalidate_domains, flush_domains);
-+	i915_gem_process_flushing_list(dev, flush_domains, ring);
-+}
-+
-+static void
- i915_gem_flush(struct drm_device *dev,
-+	       struct drm_file *file_priv,
- 	       uint32_t invalidate_domains,
--	       uint32_t flush_domains)
-+	       uint32_t flush_domains,
-+	       uint32_t flush_rings)
- {
- 	drm_i915_private_t *dev_priv = dev->dev_private;
-+
- 	if (flush_domains & I915_GEM_DOMAIN_CPU)
- 		drm_agp_chipset_flush(dev);
--	dev_priv->render_ring.flush(dev, &dev_priv->render_ring,
--			invalidate_domains,
--			flush_domains);
--
--	if (HAS_BSD(dev))
--		dev_priv->bsd_ring.flush(dev, &dev_priv->bsd_ring,
--				invalidate_domains,
--				flush_domains);
-+
-+	if ((flush_domains | invalidate_domains) & I915_GEM_GPU_DOMAINS) {
-+		if (flush_rings & RING_RENDER)
-+			i915_gem_flush_ring(dev, file_priv,
-+					    &dev_priv->render_ring,
-+					    invalidate_domains, flush_domains);
-+		if (flush_rings & RING_BSD)
-+			i915_gem_flush_ring(dev, file_priv,
-+					    &dev_priv->bsd_ring,
-+					    invalidate_domains, flush_domains);
-+		if (flush_rings & RING_BLT)
-+			i915_gem_flush_ring(dev, file_priv,
-+					    &dev_priv->blt_ring,
-+					    invalidate_domains, flush_domains);
-+	}
- }
- 
- /**
-@@ -1917,7 +2077,8 @@ i915_gem_flush(struct drm_device *dev,
-  * safe to unbind from the GTT or access from the CPU.
-  */
- static int
--i915_gem_object_wait_rendering(struct drm_gem_object *obj)
-+i915_gem_object_wait_rendering(struct drm_gem_object *obj,
-+			       bool interruptible)
- {
- 	struct drm_device *dev = obj->dev;
- 	struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
-@@ -1932,13 +2093,11 @@ i915_gem_object_wait_rendering(struct drm_gem_object *obj)
- 	 * it.
- 	 */
- 	if (obj_priv->active) {
--#if WATCH_BUF
--		DRM_INFO("%s: object %p wait for seqno %08x\n",
--			  __func__, obj, obj_priv->last_rendering_seqno);
--#endif
--		ret = i915_wait_request(dev,
--				obj_priv->last_rendering_seqno, obj_priv->ring);
--		if (ret != 0)
-+		ret = i915_do_wait_request(dev,
-+					   obj_priv->last_rendering_seqno,
-+					   interruptible,
-+					   obj_priv->ring);
-+		if (ret)
- 			return ret;
- 	}
- 
-@@ -1952,14 +2111,10 @@ int
- i915_gem_object_unbind(struct drm_gem_object *obj)
- {
- 	struct drm_device *dev = obj->dev;
--	drm_i915_private_t *dev_priv = dev->dev_private;
-+	struct drm_i915_private *dev_priv = dev->dev_private;
- 	struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
- 	int ret = 0;
- 
--#if WATCH_BUF
--	DRM_INFO("%s:%d %p\n", __func__, __LINE__, obj);
--	DRM_INFO("gtt_space %p\n", obj_priv->gtt_space);
--#endif
- 	if (obj_priv->gtt_space == NULL)
- 		return 0;
- 
-@@ -1984,33 +2139,27 @@ i915_gem_object_unbind(struct drm_gem_object *obj)
- 	 * should be safe and we need to cleanup or else we might
- 	 * cause memory corruption through use-after-free.
- 	 */
-+	if (ret) {
-+		i915_gem_clflush_object(obj);
-+		obj->read_domains = obj->write_domain = I915_GEM_DOMAIN_CPU;
-+	}
- 
- 	/* release the fence reg _after_ flushing */
- 	if (obj_priv->fence_reg != I915_FENCE_REG_NONE)
- 		i915_gem_clear_fence_reg(obj);
- 
--	if (obj_priv->agp_mem != NULL) {
--		drm_unbind_agp(obj_priv->agp_mem);
--		drm_free_agp(obj_priv->agp_mem, obj->size / PAGE_SIZE);
--		obj_priv->agp_mem = NULL;
--	}
-+	drm_unbind_agp(obj_priv->agp_mem);
-+	drm_free_agp(obj_priv->agp_mem, obj->size / PAGE_SIZE);
- 
- 	i915_gem_object_put_pages(obj);
- 	BUG_ON(obj_priv->pages_refcount);
- 
--	if (obj_priv->gtt_space) {
--		atomic_dec(&dev->gtt_count);
--		atomic_sub(obj->size, &dev->gtt_memory);
--
--		drm_mm_put_block(obj_priv->gtt_space);
--		obj_priv->gtt_space = NULL;
--	}
-+	i915_gem_info_remove_gtt(dev_priv, obj->size);
-+	list_del_init(&obj_priv->mm_list);
- 
--	/* Remove ourselves from the LRU list if present. */
--	spin_lock(&dev_priv->mm.active_list_lock);
--	if (!list_empty(&obj_priv->list))
--		list_del_init(&obj_priv->list);
--	spin_unlock(&dev_priv->mm.active_list_lock);
-+	drm_mm_put_block(obj_priv->gtt_space);
-+	obj_priv->gtt_space = NULL;
-+	obj_priv->gtt_offset = 0;
- 
- 	if (i915_gem_object_is_purgeable(obj_priv))
- 		i915_gem_object_truncate(obj);
-@@ -2020,48 +2169,48 @@ i915_gem_object_unbind(struct drm_gem_object *obj)
- 	return ret;
- }
- 
-+static int i915_ring_idle(struct drm_device *dev,
-+			  struct intel_ring_buffer *ring)
-+{
-+	if (list_empty(&ring->gpu_write_list) && list_empty(&ring->active_list))
-+		return 0;
-+
-+	i915_gem_flush_ring(dev, NULL, ring,
-+			    I915_GEM_GPU_DOMAINS, I915_GEM_GPU_DOMAINS);
-+	return i915_wait_request(dev,
-+				 i915_gem_next_request_seqno(dev, ring),
-+				 ring);
-+}
-+
- int
- i915_gpu_idle(struct drm_device *dev)
- {
- 	drm_i915_private_t *dev_priv = dev->dev_private;
- 	bool lists_empty;
--	uint32_t seqno1, seqno2;
- 	int ret;
- 
--	spin_lock(&dev_priv->mm.active_list_lock);
- 	lists_empty = (list_empty(&dev_priv->mm.flushing_list) &&
--		       list_empty(&dev_priv->render_ring.active_list) &&
--		       (!HAS_BSD(dev) ||
--			list_empty(&dev_priv->bsd_ring.active_list)));
--	spin_unlock(&dev_priv->mm.active_list_lock);
--
-+		       list_empty(&dev_priv->mm.active_list));
- 	if (lists_empty)
- 		return 0;
- 
- 	/* Flush everything onto the inactive list. */
--	i915_gem_flush(dev, I915_GEM_GPU_DOMAINS, I915_GEM_GPU_DOMAINS);
--	seqno1 = i915_add_request(dev, NULL, I915_GEM_GPU_DOMAINS,
--			&dev_priv->render_ring);
--	if (seqno1 == 0)
--		return -ENOMEM;
--	ret = i915_wait_request(dev, seqno1, &dev_priv->render_ring);
--
--	if (HAS_BSD(dev)) {
--		seqno2 = i915_add_request(dev, NULL, I915_GEM_GPU_DOMAINS,
--				&dev_priv->bsd_ring);
--		if (seqno2 == 0)
--			return -ENOMEM;
-+	ret = i915_ring_idle(dev, &dev_priv->render_ring);
-+	if (ret)
-+		return ret;
- 
--		ret = i915_wait_request(dev, seqno2, &dev_priv->bsd_ring);
--		if (ret)
--			return ret;
--	}
-+	ret = i915_ring_idle(dev, &dev_priv->bsd_ring);
-+	if (ret)
-+		return ret;
- 
-+	ret = i915_ring_idle(dev, &dev_priv->blt_ring);
-+	if (ret)
-+		return ret;
- 
--	return ret;
-+	return 0;
- }
- 
--int
-+static int
- i915_gem_object_get_pages(struct drm_gem_object *obj,
- 			  gfp_t gfpmask)
- {
-@@ -2241,7 +2390,8 @@ static void i830_write_fence_reg(struct drm_i915_fence_reg *reg)
- 	I915_WRITE(FENCE_REG_830_0 + (regnum * 4), val);
- }
- 
--static int i915_find_fence_reg(struct drm_device *dev)
-+static int i915_find_fence_reg(struct drm_device *dev,
-+			       bool interruptible)
- {
- 	struct drm_i915_fence_reg *reg = NULL;
- 	struct drm_i915_gem_object *obj_priv = NULL;
-@@ -2286,7 +2436,7 @@ static int i915_find_fence_reg(struct drm_device *dev)
- 	 * private reference to obj like the other callers of put_fence_reg
- 	 * (set_tiling ioctl) do. */
- 	drm_gem_object_reference(obj);
--	ret = i915_gem_object_put_fence_reg(obj);
-+	ret = i915_gem_object_put_fence_reg(obj, interruptible);
- 	drm_gem_object_unreference(obj);
- 	if (ret != 0)
- 		return ret;
-@@ -2308,7 +2458,8 @@ static int i915_find_fence_reg(struct drm_device *dev)
-  * and tiling format.
-  */
- int
--i915_gem_object_get_fence_reg(struct drm_gem_object *obj)
-+i915_gem_object_get_fence_reg(struct drm_gem_object *obj,
-+			      bool interruptible)
- {
- 	struct drm_device *dev = obj->dev;
- 	struct drm_i915_private *dev_priv = dev->dev_private;
-@@ -2343,7 +2494,7 @@ i915_gem_object_get_fence_reg(struct drm_gem_object *obj)
- 		break;
- 	}
- 
--	ret = i915_find_fence_reg(dev);
-+	ret = i915_find_fence_reg(dev, interruptible);
- 	if (ret < 0)
- 		return ret;
- 
-@@ -2421,15 +2572,19 @@ i915_gem_clear_fence_reg(struct drm_gem_object *obj)
-  * i915_gem_object_put_fence_reg - waits on outstanding fenced access
-  * to the buffer to finish, and then resets the fence register.
-  * @obj: tiled object holding a fence register.
-+ * @bool: whether the wait upon the fence is interruptible
-  *
-  * Zeroes out the fence register itself and clears out the associated
-  * data structures in dev_priv and obj_priv.
-  */
- int
--i915_gem_object_put_fence_reg(struct drm_gem_object *obj)
-+i915_gem_object_put_fence_reg(struct drm_gem_object *obj,
-+			      bool interruptible)
- {
- 	struct drm_device *dev = obj->dev;
-+	struct drm_i915_private *dev_priv = dev->dev_private;
- 	struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
-+	struct drm_i915_fence_reg *reg;
- 
- 	if (obj_priv->fence_reg == I915_FENCE_REG_NONE)
- 		return 0;
-@@ -2444,20 +2599,23 @@ i915_gem_object_put_fence_reg(struct drm_gem_object *obj)
- 	 * therefore we must wait for any outstanding access to complete
- 	 * before clearing the fence.
- 	 */
--	if (!IS_I965G(dev)) {
-+	reg = &dev_priv->fence_regs[obj_priv->fence_reg];
-+	if (reg->gpu) {
- 		int ret;
- 
--		ret = i915_gem_object_flush_gpu_write_domain(obj);
--		if (ret != 0)
-+		ret = i915_gem_object_flush_gpu_write_domain(obj, true);
-+		if (ret)
- 			return ret;
- 
--		ret = i915_gem_object_wait_rendering(obj);
--		if (ret != 0)
-+		ret = i915_gem_object_wait_rendering(obj, interruptible);
-+		if (ret)
- 			return ret;
-+
-+		reg->gpu = false;
- 	}
- 
- 	i915_gem_object_flush_gtt_write_domain(obj);
--	i915_gem_clear_fence_reg (obj);
-+	i915_gem_clear_fence_reg(obj);
- 
- 	return 0;
- }
-@@ -2490,7 +2648,7 @@ i915_gem_object_bind_to_gtt(struct drm_gem_object *obj, unsigned alignment)
- 	/* If the object is bigger than the entire aperture, reject it early
- 	 * before evicting everything in a vain attempt to find space.
- 	 */
--	if (obj->size > dev->gtt_total) {
-+	if (obj->size > dev_priv->mm.gtt_total) {
- 		DRM_ERROR("Attempting to bind an object larger than the aperture\n");
- 		return -E2BIG;
- 	}
-@@ -2498,19 +2656,13 @@ i915_gem_object_bind_to_gtt(struct drm_gem_object *obj, unsigned alignment)
-  search_free:
- 	free_space = drm_mm_search_free(&dev_priv->mm.gtt_space,
- 					obj->size, alignment, 0);
--	if (free_space != NULL) {
-+	if (free_space != NULL)
- 		obj_priv->gtt_space = drm_mm_get_block(free_space, obj->size,
- 						       alignment);
--		if (obj_priv->gtt_space != NULL)
--			obj_priv->gtt_offset = obj_priv->gtt_space->start;
--	}
- 	if (obj_priv->gtt_space == NULL) {
- 		/* If the gtt is empty and we're still having trouble
- 		 * fitting our object in, we're out of memory.
- 		 */
--#if WATCH_LRU
--		DRM_INFO("%s: GTT full, evicting something\n", __func__);
--#endif
- 		ret = i915_gem_evict_something(dev, obj->size, alignment);
- 		if (ret)
- 			return ret;
-@@ -2518,10 +2670,6 @@ i915_gem_object_bind_to_gtt(struct drm_gem_object *obj, unsigned alignment)
- 		goto search_free;
- 	}
- 
--#if WATCH_BUF
--	DRM_INFO("Binding object of size %zd at 0x%08x\n",
--		 obj->size, obj_priv->gtt_offset);
--#endif
- 	ret = i915_gem_object_get_pages(obj, gfpmask);
- 	if (ret) {
- 		drm_mm_put_block(obj_priv->gtt_space);
-@@ -2553,7 +2701,7 @@ i915_gem_object_bind_to_gtt(struct drm_gem_object *obj, unsigned alignment)
- 	obj_priv->agp_mem = drm_agp_bind_pages(dev,
- 					       obj_priv->pages,
- 					       obj->size >> PAGE_SHIFT,
--					       obj_priv->gtt_offset,
-+					       obj_priv->gtt_space->start,
- 					       obj_priv->agp_type);
- 	if (obj_priv->agp_mem == NULL) {
- 		i915_gem_object_put_pages(obj);
-@@ -2566,11 +2714,10 @@ i915_gem_object_bind_to_gtt(struct drm_gem_object *obj, unsigned alignment)
- 
- 		goto search_free;
- 	}
--	atomic_inc(&dev->gtt_count);
--	atomic_add(obj->size, &dev->gtt_memory);
- 
- 	/* keep track of bounds object by adding it to the inactive list */
--	list_add_tail(&obj_priv->list, &dev_priv->mm.inactive_list);
-+	list_add_tail(&obj_priv->mm_list, &dev_priv->mm.inactive_list);
-+	i915_gem_info_add_gtt(dev_priv, obj->size);
- 
- 	/* Assert that the object is not currently in any GPU domain. As it
- 	 * wasn't in the GTT, there shouldn't be any way it could have been in
-@@ -2579,6 +2726,7 @@ i915_gem_object_bind_to_gtt(struct drm_gem_object *obj, unsigned alignment)
- 	BUG_ON(obj->read_domains & I915_GEM_GPU_DOMAINS);
- 	BUG_ON(obj->write_domain & I915_GEM_GPU_DOMAINS);
- 
-+	obj_priv->gtt_offset = obj_priv->gtt_space->start;
- 	trace_i915_gem_object_bind(obj, obj_priv->gtt_offset);
- 
- 	return 0;
-@@ -2603,25 +2751,30 @@ i915_gem_clflush_object(struct drm_gem_object *obj)
- 
- /** Flushes any GPU write domain for the object if it's dirty. */
- static int
--i915_gem_object_flush_gpu_write_domain(struct drm_gem_object *obj)
-+i915_gem_object_flush_gpu_write_domain(struct drm_gem_object *obj,
-+				       bool pipelined)
- {
- 	struct drm_device *dev = obj->dev;
- 	uint32_t old_write_domain;
--	struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
- 
- 	if ((obj->write_domain & I915_GEM_GPU_DOMAINS) == 0)
- 		return 0;
- 
- 	/* Queue the GPU write cache flushing we need. */
- 	old_write_domain = obj->write_domain;
--	i915_gem_flush(dev, 0, obj->write_domain);
--	if (i915_add_request(dev, NULL, obj->write_domain, obj_priv->ring) == 0)
--		return -ENOMEM;
-+	i915_gem_flush_ring(dev, NULL,
-+			    to_intel_bo(obj)->ring,
-+			    0, obj->write_domain);
-+	BUG_ON(obj->write_domain);
- 
- 	trace_i915_gem_object_change_domain(obj,
- 					    obj->read_domains,
- 					    old_write_domain);
--	return 0;
-+
-+	if (pipelined)
-+		return 0;
-+
-+	return i915_gem_object_wait_rendering(obj, true);
- }
- 
- /** Flushes the GTT write domain for the object if it's dirty. */
-@@ -2665,26 +2818,6 @@ i915_gem_object_flush_cpu_write_domain(struct drm_gem_object *obj)
- 					    old_write_domain);
- }
- 
--int
--i915_gem_object_flush_write_domain(struct drm_gem_object *obj)
--{
--	int ret = 0;
--
--	switch (obj->write_domain) {
--	case I915_GEM_DOMAIN_GTT:
--		i915_gem_object_flush_gtt_write_domain(obj);
--		break;
--	case I915_GEM_DOMAIN_CPU:
--		i915_gem_object_flush_cpu_write_domain(obj);
--		break;
--	default:
--		ret = i915_gem_object_flush_gpu_write_domain(obj);
--		break;
--	}
--
--	return ret;
--}
--
- /**
-  * Moves a single object to the GTT read, and possibly write domain.
-  *
-@@ -2702,32 +2835,28 @@ i915_gem_object_set_to_gtt_domain(struct drm_gem_object *obj, int write)
- 	if (obj_priv->gtt_space == NULL)
- 		return -EINVAL;
- 
--	ret = i915_gem_object_flush_gpu_write_domain(obj);
-+	ret = i915_gem_object_flush_gpu_write_domain(obj, false);
- 	if (ret != 0)
- 		return ret;
- 
--	/* Wait on any GPU rendering and flushing to occur. */
--	ret = i915_gem_object_wait_rendering(obj);
--	if (ret != 0)
--		return ret;
-+	i915_gem_object_flush_cpu_write_domain(obj);
-+
-+	if (write) {
-+		ret = i915_gem_object_wait_rendering(obj, true);
-+		if (ret)
-+			return ret;
-+	}
- 
- 	old_write_domain = obj->write_domain;
- 	old_read_domains = obj->read_domains;
- 
--	/* If we're writing through the GTT domain, then CPU and GPU caches
--	 * will need to be invalidated at next use.
--	 */
--	if (write)
--		obj->read_domains &= I915_GEM_DOMAIN_GTT;
--
--	i915_gem_object_flush_cpu_write_domain(obj);
--
- 	/* It should now be out of any other write domains, and we can update
- 	 * the domain values for our changes.
- 	 */
- 	BUG_ON((obj->write_domain & ~I915_GEM_DOMAIN_GTT) != 0);
- 	obj->read_domains |= I915_GEM_DOMAIN_GTT;
- 	if (write) {
-+		obj->read_domains = I915_GEM_DOMAIN_GTT;
- 		obj->write_domain = I915_GEM_DOMAIN_GTT;
- 		obj_priv->dirty = 1;
- 	}
-@@ -2744,51 +2873,36 @@ i915_gem_object_set_to_gtt_domain(struct drm_gem_object *obj, int write)
-  * wait, as in modesetting process we're not supposed to be interrupted.
-  */
- int
--i915_gem_object_set_to_display_plane(struct drm_gem_object *obj)
-+i915_gem_object_set_to_display_plane(struct drm_gem_object *obj,
-+				     bool pipelined)
- {
--	struct drm_device *dev = obj->dev;
- 	struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
--	uint32_t old_write_domain, old_read_domains;
-+	uint32_t old_read_domains;
- 	int ret;
- 
- 	/* Not valid to be called on unbound objects. */
- 	if (obj_priv->gtt_space == NULL)
- 		return -EINVAL;
- 
--	ret = i915_gem_object_flush_gpu_write_domain(obj);
-+	ret = i915_gem_object_flush_gpu_write_domain(obj, true);
- 	if (ret)
- 		return ret;
- 
--	/* Wait on any GPU rendering and flushing to occur. */
--	if (obj_priv->active) {
--#if WATCH_BUF
--		DRM_INFO("%s: object %p wait for seqno %08x\n",
--			  __func__, obj, obj_priv->last_rendering_seqno);
--#endif
--		ret = i915_do_wait_request(dev,
--				obj_priv->last_rendering_seqno,
--				0,
--				obj_priv->ring);
--		if (ret != 0)
-+	/* Currently, we are always called from an non-interruptible context. */
-+	if (!pipelined) {
-+		ret = i915_gem_object_wait_rendering(obj, false);
-+		if (ret)
- 			return ret;
- 	}
- 
- 	i915_gem_object_flush_cpu_write_domain(obj);
- 
--	old_write_domain = obj->write_domain;
- 	old_read_domains = obj->read_domains;
--
--	/* It should now be out of any other write domains, and we can update
--	 * the domain values for our changes.
--	 */
--	BUG_ON((obj->write_domain & ~I915_GEM_DOMAIN_GTT) != 0);
--	obj->read_domains = I915_GEM_DOMAIN_GTT;
--	obj->write_domain = I915_GEM_DOMAIN_GTT;
--	obj_priv->dirty = 1;
-+	obj->read_domains |= I915_GEM_DOMAIN_GTT;
- 
- 	trace_i915_gem_object_change_domain(obj,
- 					    old_read_domains,
--					    old_write_domain);
-+					    obj->write_domain);
- 
- 	return 0;
- }
-@@ -2805,12 +2919,7 @@ i915_gem_object_set_to_cpu_domain(struct drm_gem_object *obj, int write)
- 	uint32_t old_write_domain, old_read_domains;
- 	int ret;
- 
--	ret = i915_gem_object_flush_gpu_write_domain(obj);
--	if (ret)
--		return ret;
--
--	/* Wait on any GPU rendering and flushing to occur. */
--	ret = i915_gem_object_wait_rendering(obj);
-+	ret = i915_gem_object_flush_gpu_write_domain(obj, false);
- 	if (ret != 0)
- 		return ret;
- 
-@@ -2821,6 +2930,12 @@ i915_gem_object_set_to_cpu_domain(struct drm_gem_object *obj, int write)
- 	 */
- 	i915_gem_object_set_to_full_cpu_read_domain(obj);
- 
-+	if (write) {
-+		ret = i915_gem_object_wait_rendering(obj, true);
-+		if (ret)
-+			return ret;
-+	}
-+
- 	old_write_domain = obj->write_domain;
- 	old_read_domains = obj->read_domains;
- 
-@@ -2840,7 +2955,7 @@ i915_gem_object_set_to_cpu_domain(struct drm_gem_object *obj, int write)
- 	 * need to be invalidated at next use.
- 	 */
- 	if (write) {
--		obj->read_domains &= I915_GEM_DOMAIN_CPU;
-+		obj->read_domains = I915_GEM_DOMAIN_CPU;
- 		obj->write_domain = I915_GEM_DOMAIN_CPU;
- 	}
- 
-@@ -2963,26 +3078,18 @@ i915_gem_object_set_to_cpu_domain(struct drm_gem_object *obj, int write)
-  *		drm_agp_chipset_flush
-  */
- static void
--i915_gem_object_set_to_gpu_domain(struct drm_gem_object *obj)
-+i915_gem_object_set_to_gpu_domain(struct drm_gem_object *obj,
-+				  struct intel_ring_buffer *ring)
- {
- 	struct drm_device		*dev = obj->dev;
--	drm_i915_private_t		*dev_priv = dev->dev_private;
-+	struct drm_i915_private		*dev_priv = dev->dev_private;
- 	struct drm_i915_gem_object	*obj_priv = to_intel_bo(obj);
- 	uint32_t			invalidate_domains = 0;
- 	uint32_t			flush_domains = 0;
- 	uint32_t			old_read_domains;
- 
--	BUG_ON(obj->pending_read_domains & I915_GEM_DOMAIN_CPU);
--	BUG_ON(obj->pending_write_domain == I915_GEM_DOMAIN_CPU);
--
- 	intel_mark_busy(dev, obj);
- 
--#if WATCH_BUF
--	DRM_INFO("%s: object %p read %08x -> %08x write %08x -> %08x\n",
--		 __func__, obj,
--		 obj->read_domains, obj->pending_read_domains,
--		 obj->write_domain, obj->pending_write_domain);
--#endif
- 	/*
- 	 * If the object isn't moving to a new write domain,
- 	 * let the object stay in multiple read domains
-@@ -2999,7 +3106,8 @@ i915_gem_object_set_to_gpu_domain(struct drm_gem_object *obj)
- 	 * write domain
- 	 */
- 	if (obj->write_domain &&
--	    obj->write_domain != obj->pending_read_domains) {
-+	    (obj->write_domain != obj->pending_read_domains ||
-+	     obj_priv->ring != ring)) {
- 		flush_domains |= obj->write_domain;
- 		invalidate_domains |=
- 			obj->pending_read_domains & ~obj->write_domain;
-@@ -3009,13 +3117,8 @@ i915_gem_object_set_to_gpu_domain(struct drm_gem_object *obj)
- 	 * stale data. That is, any new read domains.
- 	 */
- 	invalidate_domains |= obj->pending_read_domains & ~obj->read_domains;
--	if ((flush_domains | invalidate_domains) & I915_GEM_DOMAIN_CPU) {
--#if WATCH_BUF
--		DRM_INFO("%s: CPU domain flush %08x invalidate %08x\n",
--			 __func__, flush_domains, invalidate_domains);
--#endif
-+	if ((flush_domains | invalidate_domains) & I915_GEM_DOMAIN_CPU)
- 		i915_gem_clflush_object(obj);
--	}
- 
- 	old_read_domains = obj->read_domains;
- 
-@@ -3029,21 +3132,12 @@ i915_gem_object_set_to_gpu_domain(struct drm_gem_object *obj)
- 		obj->pending_write_domain = obj->write_domain;
- 	obj->read_domains = obj->pending_read_domains;
- 
--	if (flush_domains & I915_GEM_GPU_DOMAINS) {
--		if (obj_priv->ring == &dev_priv->render_ring)
--			dev_priv->flush_rings |= FLUSH_RENDER_RING;
--		else if (obj_priv->ring == &dev_priv->bsd_ring)
--			dev_priv->flush_rings |= FLUSH_BSD_RING;
--	}
--
- 	dev->invalidate_domains |= invalidate_domains;
- 	dev->flush_domains |= flush_domains;
--#if WATCH_BUF
--	DRM_INFO("%s: read %08x write %08x invalidate %08x flush %08x\n",
--		 __func__,
--		 obj->read_domains, obj->write_domain,
--		 dev->invalidate_domains, dev->flush_domains);
--#endif
-+	if (flush_domains & I915_GEM_GPU_DOMAINS)
-+		dev_priv->mm.flush_rings |= obj_priv->ring->id;
-+	if (invalidate_domains & I915_GEM_GPU_DOMAINS)
-+		dev_priv->mm.flush_rings |= ring->id;
- 
- 	trace_i915_gem_object_change_domain(obj,
- 					    old_read_domains,
-@@ -3106,12 +3200,7 @@ i915_gem_object_set_cpu_read_domain_range(struct drm_gem_object *obj,
- 	if (offset == 0 && size == obj->size)
- 		return i915_gem_object_set_to_cpu_domain(obj, 0);
- 
--	ret = i915_gem_object_flush_gpu_write_domain(obj);
--	if (ret)
--		return ret;
--
--	/* Wait on any GPU rendering and flushing to occur. */
--	ret = i915_gem_object_wait_rendering(obj);
-+	ret = i915_gem_object_flush_gpu_write_domain(obj, false);
- 	if (ret != 0)
- 		return ret;
- 	i915_gem_object_flush_gtt_write_domain(obj);
-@@ -3164,66 +3253,42 @@ i915_gem_object_set_cpu_read_domain_range(struct drm_gem_object *obj,
-  * Pin an object to the GTT and evaluate the relocations landing in it.
-  */
- static int
--i915_gem_object_pin_and_relocate(struct drm_gem_object *obj,
--				 struct drm_file *file_priv,
--				 struct drm_i915_gem_exec_object2 *entry,
--				 struct drm_i915_gem_relocation_entry *relocs)
-+i915_gem_execbuffer_relocate(struct drm_i915_gem_object *obj,
-+			     struct drm_file *file_priv,
-+			     struct drm_i915_gem_exec_object2 *entry)
- {
--	struct drm_device *dev = obj->dev;
-+	struct drm_device *dev = obj->base.dev;
- 	drm_i915_private_t *dev_priv = dev->dev_private;
--	struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
--	int i, ret;
--	void __iomem *reloc_page;
--	bool need_fence;
--
--	need_fence = entry->flags & EXEC_OBJECT_NEEDS_FENCE &&
--	             obj_priv->tiling_mode != I915_TILING_NONE;
--
--	/* Check fence reg constraints and rebind if necessary */
--	if (need_fence &&
--	    !i915_gem_object_fence_offset_ok(obj,
--					     obj_priv->tiling_mode)) {
--		ret = i915_gem_object_unbind(obj);
--		if (ret)
--			return ret;
--	}
-+	struct drm_i915_gem_relocation_entry __user *user_relocs;
-+	struct drm_gem_object *target_obj = NULL;
-+	uint32_t target_handle = 0;
-+	int i, ret = 0;
- 
--	/* Choose the GTT offset for our buffer and put it there. */
--	ret = i915_gem_object_pin(obj, (uint32_t) entry->alignment);
--	if (ret)
--		return ret;
-+	user_relocs = (void __user *)(uintptr_t)entry->relocs_ptr;
-+	for (i = 0; i < entry->relocation_count; i++) {
-+		struct drm_i915_gem_relocation_entry reloc;
-+		uint32_t target_offset;
- 
--	/*
--	 * Pre-965 chips need a fence register set up in order to
--	 * properly handle blits to/from tiled surfaces.
--	 */
--	if (need_fence) {
--		ret = i915_gem_object_get_fence_reg(obj);
--		if (ret != 0) {
--			i915_gem_object_unpin(obj);
--			return ret;
-+		if (__copy_from_user_inatomic(&reloc,
-+					      user_relocs+i,
-+					      sizeof(reloc))) {
-+			ret = -EFAULT;
-+			break;
- 		}
--	}
- 
--	entry->offset = obj_priv->gtt_offset;
-+		if (reloc.target_handle != target_handle) {
-+			drm_gem_object_unreference(target_obj);
- 
--	/* Apply the relocations, using the GTT aperture to avoid cache
--	 * flushing requirements.
--	 */
--	for (i = 0; i < entry->relocation_count; i++) {
--		struct drm_i915_gem_relocation_entry *reloc= &relocs[i];
--		struct drm_gem_object *target_obj;
--		struct drm_i915_gem_object *target_obj_priv;
--		uint32_t reloc_val, reloc_offset;
--		uint32_t __iomem *reloc_entry;
--
--		target_obj = drm_gem_object_lookup(obj->dev, file_priv,
--						   reloc->target_handle);
--		if (target_obj == NULL) {
--			i915_gem_object_unpin(obj);
--			return -ENOENT;
-+			target_obj = drm_gem_object_lookup(dev, file_priv,
-+							   reloc.target_handle);
-+			if (target_obj == NULL) {
-+				ret = -ENOENT;
-+				break;
-+			}
-+
-+			target_handle = reloc.target_handle;
- 		}
--		target_obj_priv = to_intel_bo(target_obj);
-+		target_offset = to_intel_bo(target_obj)->gtt_offset;
- 
- #if WATCH_RELOC
- 		DRM_INFO("%s: obj %p offset %08x target %d "
-@@ -3231,268 +3296,313 @@ i915_gem_object_pin_and_relocate(struct drm_gem_object *obj,
- 			 "presumed %08x delta %08x\n",
- 			 __func__,
- 			 obj,
--			 (int) reloc->offset,
--			 (int) reloc->target_handle,
--			 (int) reloc->read_domains,
--			 (int) reloc->write_domain,
--			 (int) target_obj_priv->gtt_offset,
--			 (int) reloc->presumed_offset,
--			 reloc->delta);
-+			 (int) reloc.offset,
-+			 (int) reloc.target_handle,
-+			 (int) reloc.read_domains,
-+			 (int) reloc.write_domain,
-+			 (int) target_offset,
-+			 (int) reloc.presumed_offset,
-+			 reloc.delta);
- #endif
- 
- 		/* The target buffer should have appeared before us in the
- 		 * exec_object list, so it should have a GTT space bound by now.
- 		 */
--		if (target_obj_priv->gtt_space == NULL) {
-+		if (target_offset == 0) {
- 			DRM_ERROR("No GTT space found for object %d\n",
--				  reloc->target_handle);
--			drm_gem_object_unreference(target_obj);
--			i915_gem_object_unpin(obj);
--			return -EINVAL;
-+				  reloc.target_handle);
-+			ret = -EINVAL;
-+			break;
- 		}
- 
- 		/* Validate that the target is in a valid r/w GPU domain */
--		if (reloc->write_domain & (reloc->write_domain - 1)) {
-+		if (reloc.write_domain & (reloc.write_domain - 1)) {
- 			DRM_ERROR("reloc with multiple write domains: "
- 				  "obj %p target %d offset %d "
- 				  "read %08x write %08x",
--				  obj, reloc->target_handle,
--				  (int) reloc->offset,
--				  reloc->read_domains,
--				  reloc->write_domain);
--			drm_gem_object_unreference(target_obj);
--			i915_gem_object_unpin(obj);
--			return -EINVAL;
-+				  obj, reloc.target_handle,
-+				  (int) reloc.offset,
-+				  reloc.read_domains,
-+				  reloc.write_domain);
-+			ret = -EINVAL;
-+			break;
- 		}
--		if (reloc->write_domain & I915_GEM_DOMAIN_CPU ||
--		    reloc->read_domains & I915_GEM_DOMAIN_CPU) {
-+		if (reloc.write_domain & I915_GEM_DOMAIN_CPU ||
-+		    reloc.read_domains & I915_GEM_DOMAIN_CPU) {
- 			DRM_ERROR("reloc with read/write CPU domains: "
- 				  "obj %p target %d offset %d "
- 				  "read %08x write %08x",
--				  obj, reloc->target_handle,
--				  (int) reloc->offset,
--				  reloc->read_domains,
--				  reloc->write_domain);
--			drm_gem_object_unreference(target_obj);
--			i915_gem_object_unpin(obj);
--			return -EINVAL;
-+				  obj, reloc.target_handle,
-+				  (int) reloc.offset,
-+				  reloc.read_domains,
-+				  reloc.write_domain);
-+			ret = -EINVAL;
-+			break;
- 		}
--		if (reloc->write_domain && target_obj->pending_write_domain &&
--		    reloc->write_domain != target_obj->pending_write_domain) {
-+		if (reloc.write_domain && target_obj->pending_write_domain &&
-+		    reloc.write_domain != target_obj->pending_write_domain) {
- 			DRM_ERROR("Write domain conflict: "
- 				  "obj %p target %d offset %d "
- 				  "new %08x old %08x\n",
--				  obj, reloc->target_handle,
--				  (int) reloc->offset,
--				  reloc->write_domain,
-+				  obj, reloc.target_handle,
-+				  (int) reloc.offset,
-+				  reloc.write_domain,
- 				  target_obj->pending_write_domain);
--			drm_gem_object_unreference(target_obj);
--			i915_gem_object_unpin(obj);
--			return -EINVAL;
-+			ret = -EINVAL;
-+			break;
- 		}
- 
--		target_obj->pending_read_domains |= reloc->read_domains;
--		target_obj->pending_write_domain |= reloc->write_domain;
-+		target_obj->pending_read_domains |= reloc.read_domains;
-+		target_obj->pending_write_domain |= reloc.write_domain;
- 
- 		/* If the relocation already has the right value in it, no
- 		 * more work needs to be done.
- 		 */
--		if (target_obj_priv->gtt_offset == reloc->presumed_offset) {
--			drm_gem_object_unreference(target_obj);
-+		if (target_offset == reloc.presumed_offset)
- 			continue;
--		}
- 
- 		/* Check that the relocation address is valid... */
--		if (reloc->offset > obj->size - 4) {
-+		if (reloc.offset > obj->base.size - 4) {
- 			DRM_ERROR("Relocation beyond object bounds: "
- 				  "obj %p target %d offset %d size %d.\n",
--				  obj, reloc->target_handle,
--				  (int) reloc->offset, (int) obj->size);
--			drm_gem_object_unreference(target_obj);
--			i915_gem_object_unpin(obj);
--			return -EINVAL;
-+				  obj, reloc.target_handle,
-+				  (int) reloc.offset, (int) obj->base.size);
-+			ret = -EINVAL;
-+			break;
- 		}
--		if (reloc->offset & 3) {
-+		if (reloc.offset & 3) {
- 			DRM_ERROR("Relocation not 4-byte aligned: "
- 				  "obj %p target %d offset %d.\n",
--				  obj, reloc->target_handle,
--				  (int) reloc->offset);
--			drm_gem_object_unreference(target_obj);
--			i915_gem_object_unpin(obj);
--			return -EINVAL;
-+				  obj, reloc.target_handle,
-+				  (int) reloc.offset);
-+			ret = -EINVAL;
-+			break;
- 		}
- 
- 		/* and points to somewhere within the target object. */
--		if (reloc->delta >= target_obj->size) {
-+		if (reloc.delta >= target_obj->size) {
- 			DRM_ERROR("Relocation beyond target object bounds: "
- 				  "obj %p target %d delta %d size %d.\n",
--				  obj, reloc->target_handle,
--				  (int) reloc->delta, (int) target_obj->size);
--			drm_gem_object_unreference(target_obj);
--			i915_gem_object_unpin(obj);
--			return -EINVAL;
-+				  obj, reloc.target_handle,
-+				  (int) reloc.delta, (int) target_obj->size);
-+			ret = -EINVAL;
-+			break;
- 		}
- 
--		ret = i915_gem_object_set_to_gtt_domain(obj, 1);
--		if (ret != 0) {
--			drm_gem_object_unreference(target_obj);
--			i915_gem_object_unpin(obj);
--			return -EINVAL;
--		}
-+		reloc.delta += target_offset;
-+		if (obj->base.write_domain == I915_GEM_DOMAIN_CPU) {
-+			uint32_t page_offset = reloc.offset & ~PAGE_MASK;
-+			char *vaddr;
- 
--		/* Map the page containing the relocation we're going to
--		 * perform.
--		 */
--		reloc_offset = obj_priv->gtt_offset + reloc->offset;
--		reloc_page = io_mapping_map_atomic_wc(dev_priv->mm.gtt_mapping,
--						      (reloc_offset &
--						       ~(PAGE_SIZE - 1)),
--						      KM_USER0);
--		reloc_entry = (uint32_t __iomem *)(reloc_page +
--						   (reloc_offset & (PAGE_SIZE - 1)));
--		reloc_val = target_obj_priv->gtt_offset + reloc->delta;
--
--#if WATCH_BUF
--		DRM_INFO("Applied relocation: %p@0x%08x %08x -> %08x\n",
--			  obj, (unsigned int) reloc->offset,
--			  readl(reloc_entry), reloc_val);
--#endif
--		writel(reloc_val, reloc_entry);
--		io_mapping_unmap_atomic(reloc_page, KM_USER0);
-+			vaddr = kmap_atomic(obj->pages[reloc.offset >> PAGE_SHIFT], KM_USER0);
-+			*(uint32_t *)(vaddr + page_offset) = reloc.delta;
-+			kunmap_atomic(vaddr, KM_USER0);
-+		} else {
-+			uint32_t __iomem *reloc_entry;
-+			void __iomem *reloc_page;
- 
--		/* The updated presumed offset for this entry will be
--		 * copied back out to the user.
--		 */
--		reloc->presumed_offset = target_obj_priv->gtt_offset;
-+			ret = i915_gem_object_set_to_gtt_domain(&obj->base, 1);
-+			if (ret)
-+				break;
-+
-+			/* Map the page containing the relocation we're going to perform.  */
-+			reloc.offset += obj->gtt_offset;
-+			reloc_page = io_mapping_map_atomic_wc(dev_priv->mm.gtt_mapping,
-+							      reloc.offset & PAGE_MASK,
-+							      KM_USER0);
-+			reloc_entry = (uint32_t __iomem *)
-+				(reloc_page + (reloc.offset & ~PAGE_MASK));
-+			iowrite32(reloc.delta, reloc_entry);
-+			io_mapping_unmap_atomic(reloc_page, KM_USER0);
-+		}
- 
--		drm_gem_object_unreference(target_obj);
-+		/* and update the user's relocation entry */
-+		reloc.presumed_offset = target_offset;
-+		if (__copy_to_user_inatomic(&user_relocs[i].presumed_offset,
-+					      &reloc.presumed_offset,
-+					      sizeof(reloc.presumed_offset))) {
-+		    ret = -EFAULT;
-+		    break;
-+		}
- 	}
- 
--#if WATCH_BUF
--	if (0)
--		i915_gem_dump_object(obj, 128, __func__, ~0);
--#endif
--	return 0;
-+	drm_gem_object_unreference(target_obj);
-+	return ret;
- }
- 
--/* Throttle our rendering by waiting until the ring has completed our requests
-- * emitted over 20 msec ago.
-- *
-- * Note that if we were to use the current jiffies each time around the loop,
-- * we wouldn't escape the function with any frames outstanding if the time to
-- * render a frame was over 20ms.
-- *
-- * This should get us reasonable parallelism between CPU and GPU but also
-- * relatively low latency when blocking on a particular request to finish.
-- */
- static int
--i915_gem_ring_throttle(struct drm_device *dev, struct drm_file *file_priv)
-+i915_gem_execbuffer_pin(struct drm_device *dev,
-+			struct drm_file *file,
-+			struct drm_gem_object **object_list,
-+			struct drm_i915_gem_exec_object2 *exec_list,
-+			int count)
- {
--	struct drm_i915_file_private *i915_file_priv = file_priv->driver_priv;
--	int ret = 0;
--	unsigned long recent_enough = jiffies - msecs_to_jiffies(20);
-+	struct drm_i915_private *dev_priv = dev->dev_private;
-+	int ret, i, retry;
- 
--	mutex_lock(&dev->struct_mutex);
--	while (!list_empty(&i915_file_priv->mm.request_list)) {
--		struct drm_i915_gem_request *request;
-+	/* attempt to pin all of the buffers into the GTT */
-+	for (retry = 0; retry < 2; retry++) {
-+		ret = 0;
-+		for (i = 0; i < count; i++) {
-+			struct drm_i915_gem_exec_object2 *entry = &exec_list[i];
-+			struct drm_i915_gem_object *obj= to_intel_bo(object_list[i]);
-+			bool need_fence =
-+				entry->flags & EXEC_OBJECT_NEEDS_FENCE &&
-+				obj->tiling_mode != I915_TILING_NONE;
-+
-+			/* Check fence reg constraints and rebind if necessary */
-+			if (need_fence &&
-+			    !i915_gem_object_fence_offset_ok(&obj->base,
-+							     obj->tiling_mode)) {
-+				ret = i915_gem_object_unbind(&obj->base);
-+				if (ret)
-+					break;
-+			}
- 
--		request = list_first_entry(&i915_file_priv->mm.request_list,
--					   struct drm_i915_gem_request,
--					   client_list);
-+			ret = i915_gem_object_pin(&obj->base, entry->alignment);
-+			if (ret)
-+				break;
- 
--		if (time_after_eq(request->emitted_jiffies, recent_enough))
--			break;
-+			/*
-+			 * Pre-965 chips need a fence register set up in order
-+			 * to properly handle blits to/from tiled surfaces.
-+			 */
-+			if (need_fence) {
-+				ret = i915_gem_object_get_fence_reg(&obj->base, true);
-+				if (ret) {
-+					i915_gem_object_unpin(&obj->base);
-+					break;
-+				}
-+
-+				dev_priv->fence_regs[obj->fence_reg].gpu = true;
-+			}
-+
-+			entry->offset = obj->gtt_offset;
-+		}
- 
--		ret = i915_wait_request(dev, request->seqno, request->ring);
--		if (ret != 0)
-+		while (i--)
-+			i915_gem_object_unpin(object_list[i]);
-+
-+		if (ret == 0)
- 			break;
-+
-+		if (ret != -ENOSPC || retry)
-+			return ret;
-+
-+		ret = i915_gem_evict_everything(dev);
-+		if (ret)
-+			return ret;
- 	}
--	mutex_unlock(&dev->struct_mutex);
- 
--	return ret;
-+	return 0;
- }
- 
- static int
--i915_gem_get_relocs_from_user(struct drm_i915_gem_exec_object2 *exec_list,
--			      uint32_t buffer_count,
--			      struct drm_i915_gem_relocation_entry **relocs)
-+i915_gem_execbuffer_move_to_gpu(struct drm_device *dev,
-+				struct drm_file *file,
-+				struct intel_ring_buffer *ring,
-+				struct drm_gem_object **objects,
-+				int count)
- {
--	uint32_t reloc_count = 0, reloc_index = 0, i;
--	int ret;
-+	struct drm_i915_private *dev_priv = dev->dev_private;
-+	int ret, i;
- 
--	*relocs = NULL;
--	for (i = 0; i < buffer_count; i++) {
--		if (reloc_count + exec_list[i].relocation_count < reloc_count)
--			return -EINVAL;
--		reloc_count += exec_list[i].relocation_count;
--	}
-+	/* Zero the global flush/invalidate flags. These
-+	 * will be modified as new domains are computed
-+	 * for each object
-+	 */
-+	dev->invalidate_domains = 0;
-+	dev->flush_domains = 0;
-+	dev_priv->mm.flush_rings = 0;
-+	for (i = 0; i < count; i++)
-+		i915_gem_object_set_to_gpu_domain(objects[i], ring);
- 
--	*relocs = drm_calloc_large(reloc_count, sizeof(**relocs));
--	if (*relocs == NULL) {
--		DRM_ERROR("failed to alloc relocs, count %d\n", reloc_count);
--		return -ENOMEM;
-+	if (dev->invalidate_domains | dev->flush_domains) {
-+#if WATCH_EXEC
-+		DRM_INFO("%s: invalidate_domains %08x flush_domains %08x\n",
-+			  __func__,
-+			 dev->invalidate_domains,
-+			 dev->flush_domains);
-+#endif
-+		i915_gem_flush(dev, file,
-+			       dev->invalidate_domains,
-+			       dev->flush_domains,
-+			       dev_priv->mm.flush_rings);
- 	}
- 
--	for (i = 0; i < buffer_count; i++) {
--		struct drm_i915_gem_relocation_entry __user *user_relocs;
--
--		user_relocs = (void __user *)(uintptr_t)exec_list[i].relocs_ptr;
--
--		ret = copy_from_user(&(*relocs)[reloc_index],
--				     user_relocs,
--				     exec_list[i].relocation_count *
--				     sizeof(**relocs));
--		if (ret != 0) {
--			drm_free_large(*relocs);
--			*relocs = NULL;
--			return -EFAULT;
-+	for (i = 0; i < count; i++) {
-+		struct drm_i915_gem_object *obj = to_intel_bo(objects[i]);
-+		/* XXX replace with semaphores */
-+		if (obj->ring && ring != obj->ring) {
-+			ret = i915_gem_object_wait_rendering(&obj->base, true);
-+			if (ret)
-+				return ret;
- 		}
--
--		reloc_index += exec_list[i].relocation_count;
- 	}
- 
- 	return 0;
- }
- 
-+/* Throttle our rendering by waiting until the ring has completed our requests
-+ * emitted over 20 msec ago.
-+ *
-+ * Note that if we were to use the current jiffies each time around the loop,
-+ * we wouldn't escape the function with any frames outstanding if the time to
-+ * render a frame was over 20ms.
-+ *
-+ * This should get us reasonable parallelism between CPU and GPU but also
-+ * relatively low latency when blocking on a particular request to finish.
-+ */
- static int
--i915_gem_put_relocs_to_user(struct drm_i915_gem_exec_object2 *exec_list,
--			    uint32_t buffer_count,
--			    struct drm_i915_gem_relocation_entry *relocs)
-+i915_gem_ring_throttle(struct drm_device *dev, struct drm_file *file)
- {
--	uint32_t reloc_count = 0, i;
--	int ret = 0;
--
--	if (relocs == NULL)
--	    return 0;
-+	struct drm_i915_private *dev_priv = dev->dev_private;
-+	struct drm_i915_file_private *file_priv = file->driver_priv;
-+	unsigned long recent_enough = jiffies - msecs_to_jiffies(20);
-+	struct drm_i915_gem_request *request;
-+	struct intel_ring_buffer *ring = NULL;
-+	u32 seqno = 0;
-+	int ret;
- 
--	for (i = 0; i < buffer_count; i++) {
--		struct drm_i915_gem_relocation_entry __user *user_relocs;
--		int unwritten;
-+	spin_lock(&file_priv->mm.lock);
-+	list_for_each_entry(request, &file_priv->mm.request_list, client_list) {
-+		if (time_after_eq(request->emitted_jiffies, recent_enough))
-+			break;
- 
--		user_relocs = (void __user *)(uintptr_t)exec_list[i].relocs_ptr;
-+		ring = request->ring;
-+		seqno = request->seqno;
-+	}
-+	spin_unlock(&file_priv->mm.lock);
- 
--		unwritten = copy_to_user(user_relocs,
--					 &relocs[reloc_count],
--					 exec_list[i].relocation_count *
--					 sizeof(*relocs));
-+	if (seqno == 0)
-+		return 0;
- 
--		if (unwritten) {
--			ret = -EFAULT;
--			goto err;
--		}
-+	ret = 0;
-+	if (!i915_seqno_passed(ring->get_seqno(dev, ring), seqno)) {
-+		/* And wait for the seqno passing without holding any locks and
-+		 * causing extra latency for others. This is safe as the irq
-+		 * generation is designed to be run atomically and so is
-+		 * lockless.
-+		 */
-+		ring->user_irq_get(dev, ring);
-+		ret = wait_event_interruptible(ring->irq_queue,
-+					       i915_seqno_passed(ring->get_seqno(dev, ring), seqno)
-+					       || atomic_read(&dev_priv->mm.wedged));
-+		ring->user_irq_put(dev, ring);
- 
--		reloc_count += exec_list[i].relocation_count;
-+		if (ret == 0 && atomic_read(&dev_priv->mm.wedged))
-+			ret = -EIO;
- 	}
- 
--err:
--	drm_free_large(relocs);
-+	if (ret == 0)
-+		queue_delayed_work(dev_priv->wq, &dev_priv->mm.retire_work, 0);
- 
- 	return ret;
- }
- 
- static int
--i915_gem_check_execbuffer (struct drm_i915_gem_execbuffer2 *exec,
--			   uint64_t exec_offset)
-+i915_gem_check_execbuffer(struct drm_i915_gem_execbuffer2 *exec,
-+			  uint64_t exec_offset)
- {
- 	uint32_t exec_start, exec_len;
- 
-@@ -3509,44 +3619,32 @@ i915_gem_check_execbuffer (struct drm_i915_gem_execbuffer2 *exec,
- }
- 
- static int
--i915_gem_wait_for_pending_flip(struct drm_device *dev,
--			       struct drm_gem_object **object_list,
--			       int count)
--{
--	drm_i915_private_t *dev_priv = dev->dev_private;
--	struct drm_i915_gem_object *obj_priv;
--	DEFINE_WAIT(wait);
--	int i, ret = 0;
-+validate_exec_list(struct drm_i915_gem_exec_object2 *exec,
-+		   int count)
-+{
-+	int i;
- 
--	for (;;) {
--		prepare_to_wait(&dev_priv->pending_flip_queue,
--				&wait, TASK_INTERRUPTIBLE);
--		for (i = 0; i < count; i++) {
--			obj_priv = to_intel_bo(object_list[i]);
--			if (atomic_read(&obj_priv->pending_flip) > 0)
--				break;
--		}
--		if (i == count)
--			break;
-+	for (i = 0; i < count; i++) {
-+		char __user *ptr = (char __user *)(uintptr_t)exec[i].relocs_ptr;
-+		size_t length = exec[i].relocation_count * sizeof(struct drm_i915_gem_relocation_entry);
- 
--		if (!signal_pending(current)) {
--			mutex_unlock(&dev->struct_mutex);
--			schedule();
--			mutex_lock(&dev->struct_mutex);
--			continue;
--		}
--		ret = -ERESTARTSYS;
--		break;
-+		if (!access_ok(VERIFY_READ, ptr, length))
-+			return -EFAULT;
-+
-+		/* we may also need to update the presumed offsets */
-+		if (!access_ok(VERIFY_WRITE, ptr, length))
-+			return -EFAULT;
-+
-+		if (fault_in_pages_readable(ptr, length))
-+			return -EFAULT;
- 	}
--	finish_wait(&dev_priv->pending_flip_queue, &wait);
- 
--	return ret;
-+	return 0;
- }
- 
--
--int
-+static int
- i915_gem_do_execbuffer(struct drm_device *dev, void *data,
--		       struct drm_file *file_priv,
-+		       struct drm_file *file,
- 		       struct drm_i915_gem_execbuffer2 *args,
- 		       struct drm_i915_gem_exec_object2 *exec_list)
- {
-@@ -3555,26 +3653,47 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
- 	struct drm_gem_object *batch_obj;
- 	struct drm_i915_gem_object *obj_priv;
- 	struct drm_clip_rect *cliprects = NULL;
--	struct drm_i915_gem_relocation_entry *relocs = NULL;
--	int ret = 0, ret2, i, pinned = 0;
-+	struct drm_i915_gem_request *request = NULL;
-+	int ret, i, flips;
- 	uint64_t exec_offset;
--	uint32_t seqno, flush_domains, reloc_index;
--	int pin_tries, flips;
- 
- 	struct intel_ring_buffer *ring = NULL;
- 
-+	ret = i915_gem_check_is_wedged(dev);
-+	if (ret)
-+		return ret;
-+
-+	ret = validate_exec_list(exec_list, args->buffer_count);
-+	if (ret)
-+		return ret;
-+
- #if WATCH_EXEC
- 	DRM_INFO("buffers_ptr %d buffer_count %d len %08x\n",
- 		  (int) args->buffers_ptr, args->buffer_count, args->batch_len);
- #endif
--	if (args->flags & I915_EXEC_BSD) {
-+	switch (args->flags & I915_EXEC_RING_MASK) {
-+	case I915_EXEC_DEFAULT:
-+	case I915_EXEC_RENDER:
-+		ring = &dev_priv->render_ring;
-+		break;
-+	case I915_EXEC_BSD:
- 		if (!HAS_BSD(dev)) {
--			DRM_ERROR("execbuf with wrong flag\n");
-+			DRM_ERROR("execbuf with invalid ring (BSD)\n");
- 			return -EINVAL;
- 		}
- 		ring = &dev_priv->bsd_ring;
--	} else {
--		ring = &dev_priv->render_ring;
-+		break;
-+	case I915_EXEC_BLT:
-+		if (!HAS_BLT(dev)) {
-+			DRM_ERROR("execbuf with invalid ring (BLT)\n");
-+			return -EINVAL;
-+		}
-+		ring = &dev_priv->blt_ring;
-+		break;
-+	default:
-+		DRM_ERROR("execbuf with unknown ring: %d\n",
-+			  (int)(args->flags & I915_EXEC_RING_MASK));
-+		return -EINVAL;
- 	}
- 
- 	if (args->buffer_count < 1) {
-@@ -3609,20 +3728,15 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
- 		}
- 	}
- 
--	ret = i915_gem_get_relocs_from_user(exec_list, args->buffer_count,
--					    &relocs);
--	if (ret != 0)
-+	request = kzalloc(sizeof(*request), GFP_KERNEL);
-+	if (request == NULL) {
-+		ret = -ENOMEM;
- 		goto pre_mutex_err;
-+	}
- 
--	mutex_lock(&dev->struct_mutex);
--
--	i915_verify_inactive(dev, __FILE__, __LINE__);
--
--	if (atomic_read(&dev_priv->mm.wedged)) {
--		mutex_unlock(&dev->struct_mutex);
--		ret = -EIO;
-+	ret = i915_mutex_lock_interruptible(dev);
-+	if (ret)
- 		goto pre_mutex_err;
--	}
- 
- 	if (dev_priv->mm.suspended) {
- 		mutex_unlock(&dev->struct_mutex);
-@@ -3631,9 +3745,8 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
- 	}
- 
- 	/* Look up object handles */
--	flips = 0;
- 	for (i = 0; i < args->buffer_count; i++) {
--		object_list[i] = drm_gem_object_lookup(dev, file_priv,
-+		object_list[i] = drm_gem_object_lookup(dev, file,
- 						       exec_list[i].handle);
- 		if (object_list[i] == NULL) {
- 			DRM_ERROR("Invalid object handle %d at index %d\n",
-@@ -3654,75 +3767,22 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
- 			goto err;
- 		}
- 		obj_priv->in_execbuffer = true;
--		flips += atomic_read(&obj_priv->pending_flip);
--	}
--
--	if (flips > 0) {
--		ret = i915_gem_wait_for_pending_flip(dev, object_list,
--						     args->buffer_count);
--		if (ret)
--			goto err;
- 	}
- 
--	/* Pin and relocate */
--	for (pin_tries = 0; ; pin_tries++) {
--		ret = 0;
--		reloc_index = 0;
--
--		for (i = 0; i < args->buffer_count; i++) {
--			object_list[i]->pending_read_domains = 0;
--			object_list[i]->pending_write_domain = 0;
--			ret = i915_gem_object_pin_and_relocate(object_list[i],
--							       file_priv,
--							       &exec_list[i],
--							       &relocs[reloc_index]);
--			if (ret)
--				break;
--			pinned = i + 1;
--			reloc_index += exec_list[i].relocation_count;
--		}
--		/* success */
--		if (ret == 0)
--			break;
--
--		/* error other than GTT full, or we've already tried again */
--		if (ret != -ENOSPC || pin_tries >= 1) {
--			if (ret != -ERESTARTSYS) {
--				unsigned long long total_size = 0;
--				int num_fences = 0;
--				for (i = 0; i < args->buffer_count; i++) {
--					obj_priv = to_intel_bo(object_list[i]);
--
--					total_size += object_list[i]->size;
--					num_fences +=
--						exec_list[i].flags & EXEC_OBJECT_NEEDS_FENCE &&
--						obj_priv->tiling_mode != I915_TILING_NONE;
--				}
--				DRM_ERROR("Failed to pin buffer %d of %d, total %llu bytes, %d fences: %d\n",
--					  pinned+1, args->buffer_count,
--					  total_size, num_fences,
--					  ret);
--				DRM_ERROR("%d objects [%d pinned], "
--					  "%d object bytes [%d pinned], "
--					  "%d/%d gtt bytes\n",
--					  atomic_read(&dev->object_count),
--					  atomic_read(&dev->pin_count),
--					  atomic_read(&dev->object_memory),
--					  atomic_read(&dev->pin_memory),
--					  atomic_read(&dev->gtt_memory),
--					  dev->gtt_total);
--			}
--			goto err;
--		}
--
--		/* unpin all of our buffers */
--		for (i = 0; i < pinned; i++)
--			i915_gem_object_unpin(object_list[i]);
--		pinned = 0;
-+	/* Move the objects en-masse into the GTT, evicting if necessary. */
-+	ret = i915_gem_execbuffer_pin(dev, file,
-+				      object_list, exec_list,
-+				      args->buffer_count);
-+	if (ret)
-+		goto err;
- 
--		/* evict everyone we can from the aperture */
--		ret = i915_gem_evict_everything(dev);
--		if (ret && ret != -ENOSPC)
-+	/* The objects are in their final locations, apply the relocations. */
-+	for (i = 0; i < args->buffer_count; i++) {
-+		struct drm_i915_gem_object *obj = to_intel_bo(object_list[i]);
-+		obj->base.pending_read_domains = 0;
-+		obj->base.pending_write_domain = 0;
-+		ret = i915_gem_execbuffer_relocate(obj, file, &exec_list[i]);
-+		if (ret)
- 			goto err;
- 	}
- 
-@@ -3735,72 +3795,28 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
- 	}
- 	batch_obj->pending_read_domains |= I915_GEM_DOMAIN_COMMAND;
- 
--	/* Sanity check the batch buffer, prior to moving objects */
--	exec_offset = exec_list[args->buffer_count - 1].offset;
--	ret = i915_gem_check_execbuffer (args, exec_offset);
-+	/* Sanity check the batch buffer */
-+	exec_offset = to_intel_bo(batch_obj)->gtt_offset;
-+	ret = i915_gem_check_execbuffer(args, exec_offset);
- 	if (ret != 0) {
- 		DRM_ERROR("execbuf with invalid offset/length\n");
- 		goto err;
- 	}
- 
--	i915_verify_inactive(dev, __FILE__, __LINE__);
--
--	/* Zero the global flush/invalidate flags. These
--	 * will be modified as new domains are computed
--	 * for each object
--	 */
--	dev->invalidate_domains = 0;
--	dev->flush_domains = 0;
--	dev_priv->flush_rings = 0;
--
--	for (i = 0; i < args->buffer_count; i++) {
--		struct drm_gem_object *obj = object_list[i];
--
--		/* Compute new gpu domains and update invalidate/flush */
--		i915_gem_object_set_to_gpu_domain(obj);
--	}
--
--	i915_verify_inactive(dev, __FILE__, __LINE__);
--
--	if (dev->invalidate_domains | dev->flush_domains) {
--#if WATCH_EXEC
--		DRM_INFO("%s: invalidate_domains %08x flush_domains %08x\n",
--			  __func__,
--			 dev->invalidate_domains,
--			 dev->flush_domains);
--#endif
--		i915_gem_flush(dev,
--			       dev->invalidate_domains,
--			       dev->flush_domains);
--		if (dev_priv->flush_rings & FLUSH_RENDER_RING)
--			(void)i915_add_request(dev, file_priv,
--					       dev->flush_domains,
--					       &dev_priv->render_ring);
--		if (dev_priv->flush_rings & FLUSH_BSD_RING)
--			(void)i915_add_request(dev, file_priv,
--					       dev->flush_domains,
--					       &dev_priv->bsd_ring);
--	}
-+	ret = i915_gem_execbuffer_move_to_gpu(dev, file, ring,
-+					      object_list, args->buffer_count);
-+	if (ret)
-+		goto err;
- 
- 	for (i = 0; i < args->buffer_count; i++) {
- 		struct drm_gem_object *obj = object_list[i];
--		struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
- 		uint32_t old_write_domain = obj->write_domain;
--
- 		obj->write_domain = obj->pending_write_domain;
--		if (obj->write_domain)
--			list_move_tail(&obj_priv->gpu_write_list,
--				       &dev_priv->mm.gpu_write_list);
--		else
--			list_del_init(&obj_priv->gpu_write_list);
--
- 		trace_i915_gem_object_change_domain(obj,
- 						    obj->read_domains,
- 						    old_write_domain);
- 	}
- 
--	i915_verify_inactive(dev, __FILE__, __LINE__);
--
- #if WATCH_COHERENCY
- 	for (i = 0; i < args->buffer_count; i++) {
- 		i915_gem_object_check_coherency(object_list[i],
-@@ -3815,9 +3831,38 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
- 			      ~0);
- #endif
- 
-+	/* Check for any pending flips. As we only maintain a flip queue depth
-+	 * of 1, we can simply insert a WAIT for the next display flip prior
-+	 * to executing the batch and avoid stalling the CPU.
-+	 */
-+	flips = 0;
-+	for (i = 0; i < args->buffer_count; i++) {
-+		if (object_list[i]->write_domain)
-+			flips |= atomic_read(&to_intel_bo(object_list[i])->pending_flip);
-+	}
-+	if (flips) {
-+		int plane, flip_mask;
-+
-+		for (plane = 0; flips >> plane; plane++) {
-+			if (((flips >> plane) & 1) == 0)
-+				continue;
-+
-+			if (plane)
-+				flip_mask = MI_WAIT_FOR_PLANE_B_FLIP;
-+			else
-+				flip_mask = MI_WAIT_FOR_PLANE_A_FLIP;
-+
-+			intel_ring_begin(dev, ring, 2);
-+			intel_ring_emit(dev, ring,
-+					MI_WAIT_FOR_EVENT | flip_mask);
-+			intel_ring_emit(dev, ring, MI_NOOP);
-+			intel_ring_advance(dev, ring);
-+		}
-+	}
-+
- 	/* Exec the batchbuffer */
- 	ret = ring->dispatch_gem_execbuffer(dev, ring, args,
--			cliprects, exec_offset);
-+					    cliprects, exec_offset);
- 	if (ret) {
- 		DRM_ERROR("dispatch failed %d\n", ret);
- 		goto err;
-@@ -3827,38 +3872,21 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
- 	 * Ensure that the commands in the batch buffer are
- 	 * finished before the interrupt fires
- 	 */
--	flush_domains = i915_retire_commands(dev, ring);
--
--	i915_verify_inactive(dev, __FILE__, __LINE__);
-+	i915_retire_commands(dev, ring);
- 
--	/*
--	 * Get a seqno representing the execution of the current buffer,
--	 * which we can wait on.  We would like to mitigate these interrupts,
--	 * likely by only creating seqnos occasionally (so that we have
--	 * *some* interrupts representing completion of buffers that we can
--	 * wait on when trying to clear up gtt space).
--	 */
--	seqno = i915_add_request(dev, file_priv, flush_domains, ring);
--	BUG_ON(seqno == 0);
- 	for (i = 0; i < args->buffer_count; i++) {
- 		struct drm_gem_object *obj = object_list[i];
--		obj_priv = to_intel_bo(obj);
- 
--		i915_gem_object_move_to_active(obj, seqno, ring);
--#if WATCH_LRU
--		DRM_INFO("%s: move to exec list %p\n", __func__, obj);
--#endif
-+		i915_gem_object_move_to_active(obj, ring);
-+		if (obj->write_domain)
-+			list_move_tail(&to_intel_bo(obj)->gpu_write_list,
-+				       &ring->gpu_write_list);
- 	}
--#if WATCH_LRU
--	i915_dump_lru(dev, __func__);
--#endif
- 
--	i915_verify_inactive(dev, __FILE__, __LINE__);
-+	i915_add_request(dev, file, request, ring);
-+	request = NULL;
- 
- err:
--	for (i = 0; i < pinned; i++)
--		i915_gem_object_unpin(object_list[i]);
--
- 	for (i = 0; i < args->buffer_count; i++) {
- 		if (object_list[i]) {
- 			obj_priv = to_intel_bo(object_list[i]);
-@@ -3870,22 +3898,9 @@ err:
- 	mutex_unlock(&dev->struct_mutex);
- 
- pre_mutex_err:
--	/* Copy the updated relocations out regardless of current error
--	 * state.  Failure to update the relocs would mean that the next
--	 * time userland calls execbuf, it would do so with presumed offset
--	 * state that didn't match the actual object state.
--	 */
--	ret2 = i915_gem_put_relocs_to_user(exec_list, args->buffer_count,
--					   relocs);
--	if (ret2 != 0) {
--		DRM_ERROR("Failed to copy relocations back out: %d\n", ret2);
--
--		if (ret == 0)
--			ret = ret2;
--	}
--
- 	drm_free_large(object_list);
- 	kfree(cliprects);
-+	kfree(request);
- 
- 	return ret;
- }
-@@ -3942,7 +3957,7 @@ i915_gem_execbuffer(struct drm_device *dev, void *data,
- 		exec2_list[i].relocs_ptr = exec_list[i].relocs_ptr;
- 		exec2_list[i].alignment = exec_list[i].alignment;
- 		exec2_list[i].offset = exec_list[i].offset;
--		if (!IS_I965G(dev))
-+		if (INTEL_INFO(dev)->gen < 4)
- 			exec2_list[i].flags = EXEC_OBJECT_NEEDS_FENCE;
- 		else
- 			exec2_list[i].flags = 0;
-@@ -4039,12 +4054,12 @@ int
- i915_gem_object_pin(struct drm_gem_object *obj, uint32_t alignment)
- {
- 	struct drm_device *dev = obj->dev;
-+	struct drm_i915_private *dev_priv = dev->dev_private;
- 	struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
- 	int ret;
- 
- 	BUG_ON(obj_priv->pin_count == DRM_I915_GEM_OBJECT_MAX_PIN_COUNT);
--
--	i915_verify_inactive(dev, __FILE__, __LINE__);
-+	WARN_ON(i915_verify_lists(dev));
- 
- 	if (obj_priv->gtt_space != NULL) {
- 		if (alignment == 0)
-@@ -4072,14 +4087,13 @@ i915_gem_object_pin(struct drm_gem_object *obj, uint32_t alignment)
- 	 * remove it from the inactive list
- 	 */
- 	if (obj_priv->pin_count == 1) {
--		atomic_inc(&dev->pin_count);
--		atomic_add(obj->size, &dev->pin_memory);
--		if (!obj_priv->active &&
--		    (obj->write_domain & I915_GEM_GPU_DOMAINS) == 0)
--			list_del_init(&obj_priv->list);
-+		i915_gem_info_add_pin(dev_priv, obj->size);
-+		if (!obj_priv->active)
-+			list_move_tail(&obj_priv->mm_list,
-+				       &dev_priv->mm.pinned_list);
- 	}
--	i915_verify_inactive(dev, __FILE__, __LINE__);
- 
-+	WARN_ON(i915_verify_lists(dev));
- 	return 0;
- }
- 
-@@ -4090,7 +4104,7 @@ i915_gem_object_unpin(struct drm_gem_object *obj)
- 	drm_i915_private_t *dev_priv = dev->dev_private;
- 	struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
- 
--	i915_verify_inactive(dev, __FILE__, __LINE__);
-+	WARN_ON(i915_verify_lists(dev));
- 	obj_priv->pin_count--;
- 	BUG_ON(obj_priv->pin_count < 0);
- 	BUG_ON(obj_priv->gtt_space == NULL);
-@@ -4100,14 +4114,12 @@ i915_gem_object_unpin(struct drm_gem_object *obj)
- 	 * the inactive list
- 	 */
- 	if (obj_priv->pin_count == 0) {
--		if (!obj_priv->active &&
--		    (obj->write_domain & I915_GEM_GPU_DOMAINS) == 0)
--			list_move_tail(&obj_priv->list,
-+		if (!obj_priv->active)
-+			list_move_tail(&obj_priv->mm_list,
- 				       &dev_priv->mm.inactive_list);
--		atomic_dec(&dev->pin_count);
--		atomic_sub(obj->size, &dev->pin_memory);
-+		i915_gem_info_remove_pin(dev_priv, obj->size);
- 	}
--	i915_verify_inactive(dev, __FILE__, __LINE__);
-+	WARN_ON(i915_verify_lists(dev));
- }
- 
- int
-@@ -4119,41 +4131,36 @@ i915_gem_pin_ioctl(struct drm_device *dev, void *data,
- 	struct drm_i915_gem_object *obj_priv;
- 	int ret;
- 
--	mutex_lock(&dev->struct_mutex);
-+	ret = i915_mutex_lock_interruptible(dev);
-+	if (ret)
-+		return ret;
- 
- 	obj = drm_gem_object_lookup(dev, file_priv, args->handle);
- 	if (obj == NULL) {
--		DRM_ERROR("Bad handle in i915_gem_pin_ioctl(): %d\n",
--			  args->handle);
--		mutex_unlock(&dev->struct_mutex);
--		return -ENOENT;
-+		ret = -ENOENT;
-+		goto unlock;
- 	}
- 	obj_priv = to_intel_bo(obj);
- 
- 	if (obj_priv->madv != I915_MADV_WILLNEED) {
- 		DRM_ERROR("Attempting to pin a purgeable buffer\n");
--		drm_gem_object_unreference(obj);
--		mutex_unlock(&dev->struct_mutex);
--		return -EINVAL;
-+		ret = -EINVAL;
-+		goto out;
- 	}
- 
- 	if (obj_priv->pin_filp != NULL && obj_priv->pin_filp != file_priv) {
- 		DRM_ERROR("Already pinned in i915_gem_pin_ioctl(): %d\n",
- 			  args->handle);
--		drm_gem_object_unreference(obj);
--		mutex_unlock(&dev->struct_mutex);
--		return -EINVAL;
-+		ret = -EINVAL;
-+		goto out;
- 	}
- 
- 	obj_priv->user_pin_count++;
- 	obj_priv->pin_filp = file_priv;
- 	if (obj_priv->user_pin_count == 1) {
- 		ret = i915_gem_object_pin(obj, args->alignment);
--		if (ret != 0) {
--			drm_gem_object_unreference(obj);
--			mutex_unlock(&dev->struct_mutex);
--			return ret;
--		}
-+		if (ret)
-+			goto out;
- 	}
- 
- 	/* XXX - flush the CPU caches for pinned objects
-@@ -4161,10 +4168,11 @@ i915_gem_pin_ioctl(struct drm_device *dev, void *data,
- 	 */
- 	i915_gem_object_flush_cpu_write_domain(obj);
- 	args->offset = obj_priv->gtt_offset;
-+out:
- 	drm_gem_object_unreference(obj);
-+unlock:
- 	mutex_unlock(&dev->struct_mutex);
--
--	return 0;
-+	return ret;
- }
- 
- int
-@@ -4174,24 +4182,24 @@ i915_gem_unpin_ioctl(struct drm_device *dev, void *data,
- 	struct drm_i915_gem_pin *args = data;
- 	struct drm_gem_object *obj;
- 	struct drm_i915_gem_object *obj_priv;
-+	int ret;
- 
--	mutex_lock(&dev->struct_mutex);
-+	ret = i915_mutex_lock_interruptible(dev);
-+	if (ret)
-+		return ret;
- 
- 	obj = drm_gem_object_lookup(dev, file_priv, args->handle);
- 	if (obj == NULL) {
--		DRM_ERROR("Bad handle in i915_gem_unpin_ioctl(): %d\n",
--			  args->handle);
--		mutex_unlock(&dev->struct_mutex);
--		return -ENOENT;
-+		ret = -ENOENT;
-+		goto unlock;
- 	}
--
- 	obj_priv = to_intel_bo(obj);
-+
- 	if (obj_priv->pin_filp != file_priv) {
- 		DRM_ERROR("Not pinned by caller in i915_gem_pin_ioctl(): %d\n",
- 			  args->handle);
--		drm_gem_object_unreference(obj);
--		mutex_unlock(&dev->struct_mutex);
--		return -EINVAL;
-+		ret = -EINVAL;
-+		goto out;
- 	}
- 	obj_priv->user_pin_count--;
- 	if (obj_priv->user_pin_count == 0) {
-@@ -4199,9 +4207,11 @@ i915_gem_unpin_ioctl(struct drm_device *dev, void *data,
- 		i915_gem_object_unpin(obj);
- 	}
- 
-+out:
- 	drm_gem_object_unreference(obj);
-+unlock:
- 	mutex_unlock(&dev->struct_mutex);
--	return 0;
-+	return ret;
- }
- 
- int
-@@ -4211,22 +4221,24 @@ i915_gem_busy_ioctl(struct drm_device *dev, void *data,
- 	struct drm_i915_gem_busy *args = data;
- 	struct drm_gem_object *obj;
- 	struct drm_i915_gem_object *obj_priv;
-+	int ret;
-+
-+	ret = i915_mutex_lock_interruptible(dev);
-+	if (ret)
-+		return ret;
- 
- 	obj = drm_gem_object_lookup(dev, file_priv, args->handle);
- 	if (obj == NULL) {
--		DRM_ERROR("Bad handle in i915_gem_busy_ioctl(): %d\n",
--			  args->handle);
--		return -ENOENT;
-+		ret = -ENOENT;
-+		goto unlock;
- 	}
--
--	mutex_lock(&dev->struct_mutex);
-+	obj_priv = to_intel_bo(obj);
- 
- 	/* Count all active objects as busy, even if they are currently not used
- 	 * by the gpu. Users of this interface expect objects to eventually
- 	 * become non-busy without any further actions, therefore emit any
- 	 * necessary flushes here.
- 	 */
--	obj_priv = to_intel_bo(obj);
- 	args->busy = obj_priv->active;
- 	if (args->busy) {
- 		/* Unconditionally flush objects, even when the gpu still uses this
-@@ -4234,10 +4246,10 @@ i915_gem_busy_ioctl(struct drm_device *dev, void *data,
- 		 * use this buffer rather sooner than later, so issuing the required
- 		 * flush earlier is beneficial.
- 		 */
--		if (obj->write_domain) {
--			i915_gem_flush(dev, 0, obj->write_domain);
--			(void)i915_add_request(dev, file_priv, obj->write_domain, obj_priv->ring);
--		}
-+		if (obj->write_domain & I915_GEM_GPU_DOMAINS)
-+			i915_gem_flush_ring(dev, file_priv,
-+					    obj_priv->ring,
-+					    0, obj->write_domain);
- 
- 		/* Update the active list for the hardware's current position.
- 		 * Otherwise this only updates on a delayed timer or when irqs
-@@ -4250,8 +4262,9 @@ i915_gem_busy_ioctl(struct drm_device *dev, void *data,
- 	}
- 
- 	drm_gem_object_unreference(obj);
-+unlock:
- 	mutex_unlock(&dev->struct_mutex);
--	return 0;
-+	return ret;
- }
- 
- int
-@@ -4268,6 +4281,7 @@ i915_gem_madvise_ioctl(struct drm_device *dev, void *data,
- 	struct drm_i915_gem_madvise *args = data;
- 	struct drm_gem_object *obj;
- 	struct drm_i915_gem_object *obj_priv;
-+	int ret;
- 
- 	switch (args->madv) {
- 	case I915_MADV_DONTNEED:
-@@ -4277,22 +4291,20 @@ i915_gem_madvise_ioctl(struct drm_device *dev, void *data,
- 	    return -EINVAL;
- 	}
- 
-+	ret = i915_mutex_lock_interruptible(dev);
-+	if (ret)
-+		return ret;
-+
- 	obj = drm_gem_object_lookup(dev, file_priv, args->handle);
- 	if (obj == NULL) {
--		DRM_ERROR("Bad handle in i915_gem_madvise_ioctl(): %d\n",
--			  args->handle);
--		return -ENOENT;
-+		ret = -ENOENT;
-+		goto unlock;
- 	}
--
--	mutex_lock(&dev->struct_mutex);
- 	obj_priv = to_intel_bo(obj);
- 
- 	if (obj_priv->pin_count) {
--		drm_gem_object_unreference(obj);
--		mutex_unlock(&dev->struct_mutex);
--
--		DRM_ERROR("Attempted i915_gem_madvise_ioctl() on a pinned object\n");
--		return -EINVAL;
-+		ret = -EINVAL;
-+		goto out;
- 	}
- 
- 	if (obj_priv->madv != __I915_MADV_PURGED)
-@@ -4305,15 +4317,17 @@ i915_gem_madvise_ioctl(struct drm_device *dev, void *data,
- 
- 	args->retained = obj_priv->madv != __I915_MADV_PURGED;
- 
-+out:
- 	drm_gem_object_unreference(obj);
-+unlock:
- 	mutex_unlock(&dev->struct_mutex);
--
--	return 0;
-+	return ret;
- }
- 
- struct drm_gem_object * i915_gem_alloc_object(struct drm_device *dev,
- 					      size_t size)
- {
-+	struct drm_i915_private *dev_priv = dev->dev_private;
- 	struct drm_i915_gem_object *obj;
- 
- 	obj = kzalloc(sizeof(*obj), GFP_KERNEL);
-@@ -4325,18 +4339,19 @@ struct drm_gem_object * i915_gem_alloc_object(struct drm_device *dev,
- 		return NULL;
- 	}
- 
-+	i915_gem_info_add_obj(dev_priv, size);
-+
- 	obj->base.write_domain = I915_GEM_DOMAIN_CPU;
- 	obj->base.read_domains = I915_GEM_DOMAIN_CPU;
- 
- 	obj->agp_type = AGP_USER_MEMORY;
- 	obj->base.driver_private = NULL;
- 	obj->fence_reg = I915_FENCE_REG_NONE;
--	INIT_LIST_HEAD(&obj->list);
-+	INIT_LIST_HEAD(&obj->mm_list);
-+	INIT_LIST_HEAD(&obj->ring_list);
- 	INIT_LIST_HEAD(&obj->gpu_write_list);
- 	obj->madv = I915_MADV_WILLNEED;
- 
--	trace_i915_gem_object_create(&obj->base);
--
- 	return &obj->base;
- }
- 
-@@ -4356,7 +4371,7 @@ static void i915_gem_free_object_tail(struct drm_gem_object *obj)
- 
- 	ret = i915_gem_object_unbind(obj);
- 	if (ret == -ERESTARTSYS) {
--		list_move(&obj_priv->list,
-+		list_move(&obj_priv->mm_list,
- 			  &dev_priv->mm.deferred_free_list);
- 		return;
- 	}
-@@ -4365,6 +4380,7 @@ static void i915_gem_free_object_tail(struct drm_gem_object *obj)
- 		i915_gem_free_mmap_offset(obj);
- 
- 	drm_gem_object_release(obj);
-+	i915_gem_info_remove_obj(dev_priv, obj->size);
- 
- 	kfree(obj_priv->page_cpu_valid);
- 	kfree(obj_priv->bit_17);
-@@ -4395,10 +4411,7 @@ i915_gem_idle(struct drm_device *dev)
- 
- 	mutex_lock(&dev->struct_mutex);
- 
--	if (dev_priv->mm.suspended ||
--			(dev_priv->render_ring.gem_object == NULL) ||
--			(HAS_BSD(dev) &&
--			 dev_priv->bsd_ring.gem_object == NULL)) {
-+	if (dev_priv->mm.suspended) {
- 		mutex_unlock(&dev->struct_mutex);
- 		return 0;
- 	}
-@@ -4423,7 +4436,7 @@ i915_gem_idle(struct drm_device *dev)
- 	 * And not confound mm.suspended!
- 	 */
- 	dev_priv->mm.suspended = 1;
--	del_timer(&dev_priv->hangcheck_timer);
-+	del_timer_sync(&dev_priv->hangcheck_timer);
- 
- 	i915_kernel_lost_context(dev);
- 	i915_gem_cleanup_ringbuffer(dev);
-@@ -4503,36 +4516,34 @@ i915_gem_init_ringbuffer(struct drm_device *dev)
- 	drm_i915_private_t *dev_priv = dev->dev_private;
- 	int ret;
- 
--	dev_priv->render_ring = render_ring;
--
--	if (!I915_NEED_GFX_HWS(dev)) {
--		dev_priv->render_ring.status_page.page_addr
--			= dev_priv->status_page_dmah->vaddr;
--		memset(dev_priv->render_ring.status_page.page_addr,
--				0, PAGE_SIZE);
--	}
--
- 	if (HAS_PIPE_CONTROL(dev)) {
- 		ret = i915_gem_init_pipe_control(dev);
- 		if (ret)
- 			return ret;
- 	}
- 
--	ret = intel_init_ring_buffer(dev, &dev_priv->render_ring);
-+	ret = intel_init_render_ring_buffer(dev);
- 	if (ret)
- 		goto cleanup_pipe_control;
- 
- 	if (HAS_BSD(dev)) {
--		dev_priv->bsd_ring = bsd_ring;
--		ret = intel_init_ring_buffer(dev, &dev_priv->bsd_ring);
-+		ret = intel_init_bsd_ring_buffer(dev);
- 		if (ret)
- 			goto cleanup_render_ring;
- 	}
- 
-+	if (HAS_BLT(dev)) {
-+		ret = intel_init_blt_ring_buffer(dev);
-+		if (ret)
-+			goto cleanup_bsd_ring;
-+	}
-+
- 	dev_priv->next_seqno = 1;
- 
- 	return 0;
- 
-+cleanup_bsd_ring:
-+	intel_cleanup_ring_buffer(dev, &dev_priv->bsd_ring);
- cleanup_render_ring:
- 	intel_cleanup_ring_buffer(dev, &dev_priv->render_ring);
- cleanup_pipe_control:
-@@ -4547,8 +4558,8 @@ i915_gem_cleanup_ringbuffer(struct drm_device *dev)
- 	drm_i915_private_t *dev_priv = dev->dev_private;
- 
- 	intel_cleanup_ring_buffer(dev, &dev_priv->render_ring);
--	if (HAS_BSD(dev))
--		intel_cleanup_ring_buffer(dev, &dev_priv->bsd_ring);
-+	intel_cleanup_ring_buffer(dev, &dev_priv->bsd_ring);
-+	intel_cleanup_ring_buffer(dev, &dev_priv->blt_ring);
- 	if (HAS_PIPE_CONTROL(dev))
- 		i915_gem_cleanup_pipe_control(dev);
- }
-@@ -4577,15 +4588,15 @@ i915_gem_entervt_ioctl(struct drm_device *dev, void *data,
- 		return ret;
- 	}
- 
--	spin_lock(&dev_priv->mm.active_list_lock);
-+	BUG_ON(!list_empty(&dev_priv->mm.active_list));
- 	BUG_ON(!list_empty(&dev_priv->render_ring.active_list));
--	BUG_ON(HAS_BSD(dev) && !list_empty(&dev_priv->bsd_ring.active_list));
--	spin_unlock(&dev_priv->mm.active_list_lock);
--
-+	BUG_ON(!list_empty(&dev_priv->bsd_ring.active_list));
-+	BUG_ON(!list_empty(&dev_priv->blt_ring.active_list));
- 	BUG_ON(!list_empty(&dev_priv->mm.flushing_list));
- 	BUG_ON(!list_empty(&dev_priv->mm.inactive_list));
- 	BUG_ON(!list_empty(&dev_priv->render_ring.request_list));
--	BUG_ON(HAS_BSD(dev) && !list_empty(&dev_priv->bsd_ring.request_list));
-+	BUG_ON(!list_empty(&dev_priv->bsd_ring.request_list));
-+	BUG_ON(!list_empty(&dev_priv->blt_ring.request_list));
- 	mutex_unlock(&dev->struct_mutex);
- 
- 	ret = drm_irq_install(dev);
-@@ -4627,28 +4638,34 @@ i915_gem_lastclose(struct drm_device *dev)
- 		DRM_ERROR("failed to idle hardware: %d\n", ret);
- }
- 
-+static void
-+init_ring_lists(struct intel_ring_buffer *ring)
-+{
-+	INIT_LIST_HEAD(&ring->active_list);
-+	INIT_LIST_HEAD(&ring->request_list);
-+	INIT_LIST_HEAD(&ring->gpu_write_list);
-+}
-+
- void
- i915_gem_load(struct drm_device *dev)
- {
- 	int i;
- 	drm_i915_private_t *dev_priv = dev->dev_private;
- 
--	spin_lock_init(&dev_priv->mm.active_list_lock);
-+	INIT_LIST_HEAD(&dev_priv->mm.active_list);
- 	INIT_LIST_HEAD(&dev_priv->mm.flushing_list);
--	INIT_LIST_HEAD(&dev_priv->mm.gpu_write_list);
- 	INIT_LIST_HEAD(&dev_priv->mm.inactive_list);
-+	INIT_LIST_HEAD(&dev_priv->mm.pinned_list);
- 	INIT_LIST_HEAD(&dev_priv->mm.fence_list);
- 	INIT_LIST_HEAD(&dev_priv->mm.deferred_free_list);
--	INIT_LIST_HEAD(&dev_priv->render_ring.active_list);
--	INIT_LIST_HEAD(&dev_priv->render_ring.request_list);
--	if (HAS_BSD(dev)) {
--		INIT_LIST_HEAD(&dev_priv->bsd_ring.active_list);
--		INIT_LIST_HEAD(&dev_priv->bsd_ring.request_list);
--	}
-+	init_ring_lists(&dev_priv->render_ring);
-+	init_ring_lists(&dev_priv->bsd_ring);
-+	init_ring_lists(&dev_priv->blt_ring);
- 	for (i = 0; i < 16; i++)
- 		INIT_LIST_HEAD(&dev_priv->fence_regs[i].lru_list);
- 	INIT_DELAYED_WORK(&dev_priv->mm.retire_work,
- 			  i915_gem_retire_work_handler);
-+	init_completion(&dev_priv->error_completion);
- 	spin_lock(&shrink_list_lock);
- 	list_add(&dev_priv->mm.shrink_list, &shrink_list);
- 	spin_unlock(&shrink_list_lock);
-@@ -4667,21 +4684,30 @@ i915_gem_load(struct drm_device *dev)
- 	if (!drm_core_check_feature(dev, DRIVER_MODESET))
- 		dev_priv->fence_reg_start = 3;
- 
--	if (IS_I965G(dev) || IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev))
-+	if (INTEL_INFO(dev)->gen >= 4 || IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev))
- 		dev_priv->num_fence_regs = 16;
- 	else
- 		dev_priv->num_fence_regs = 8;
- 
- 	/* Initialize fence registers to zero */
--	if (IS_I965G(dev)) {
-+	switch (INTEL_INFO(dev)->gen) {
-+	case 6:
-+		for (i = 0; i < 16; i++)
-+			I915_WRITE64(FENCE_REG_SANDYBRIDGE_0 + (i * 8), 0);
-+		break;
-+	case 5:
-+	case 4:
- 		for (i = 0; i < 16; i++)
- 			I915_WRITE64(FENCE_REG_965_0 + (i * 8), 0);
--	} else {
--		for (i = 0; i < 8; i++)
--			I915_WRITE(FENCE_REG_830_0 + (i * 4), 0);
-+		break;
-+	case 3:
- 		if (IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev))
- 			for (i = 0; i < 8; i++)
- 				I915_WRITE(FENCE_REG_945_8 + (i * 4), 0);
-+	case 2:
-+		for (i = 0; i < 8; i++)
-+			I915_WRITE(FENCE_REG_830_0 + (i * 4), 0);
-+		break;
- 	}
- 	i915_gem_detect_bit_6_swizzle(dev);
- 	init_waitqueue_head(&dev_priv->pending_flip_queue);
-@@ -4691,8 +4717,8 @@ i915_gem_load(struct drm_device *dev)
-  * Create a physically contiguous memory object for this object
-  * e.g. for cursor + overlay regs
-  */
--int i915_gem_init_phys_object(struct drm_device *dev,
--			      int id, int size, int align)
-+static int i915_gem_init_phys_object(struct drm_device *dev,
-+				     int id, int size, int align)
- {
- 	drm_i915_private_t *dev_priv = dev->dev_private;
- 	struct drm_i915_gem_phys_object *phys_obj;
-@@ -4724,7 +4750,7 @@ kfree_obj:
- 	return ret;
- }
- 
--void i915_gem_free_phys_object(struct drm_device *dev, int id)
-+static void i915_gem_free_phys_object(struct drm_device *dev, int id)
- {
- 	drm_i915_private_t *dev_priv = dev->dev_private;
- 	struct drm_i915_gem_phys_object *phys_obj;
-@@ -4853,34 +4879,48 @@ i915_gem_phys_pwrite(struct drm_device *dev, struct drm_gem_object *obj,
- 		     struct drm_file *file_priv)
- {
- 	struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
--	void *obj_addr;
--	int ret;
--	char __user *user_data;
-+	void *vaddr = obj_priv->phys_obj->handle->vaddr + args->offset;
-+	char __user *user_data = (char __user *) (uintptr_t) args->data_ptr;
- 
--	user_data = (char __user *) (uintptr_t) args->data_ptr;
--	obj_addr = obj_priv->phys_obj->handle->vaddr + args->offset;
-+	DRM_DEBUG_DRIVER("vaddr %p, %lld\n", vaddr, args->size);
- 
--	DRM_DEBUG_DRIVER("obj_addr %p, %lld\n", obj_addr, args->size);
--	ret = copy_from_user(obj_addr, user_data, args->size);
--	if (ret)
--		return -EFAULT;
-+	if (__copy_from_user_inatomic_nocache(vaddr, user_data, args->size)) {
-+		unsigned long unwritten;
-+
-+		/* The physical object once assigned is fixed for the lifetime
-+		 * of the obj, so we can safely drop the lock and continue
-+		 * to access vaddr.
-+		 */
-+		mutex_unlock(&dev->struct_mutex);
-+		unwritten = copy_from_user(vaddr, user_data, args->size);
-+		mutex_lock(&dev->struct_mutex);
-+		if (unwritten)
-+			return -EFAULT;
-+	}
- 
- 	drm_agp_chipset_flush(dev);
- 	return 0;
- }
- 
--void i915_gem_release(struct drm_device * dev, struct drm_file *file_priv)
-+void i915_gem_release(struct drm_device *dev, struct drm_file *file)
- {
--	struct drm_i915_file_private *i915_file_priv = file_priv->driver_priv;
-+	struct drm_i915_file_private *file_priv = file->driver_priv;
- 
- 	/* Clean up our request list when the client is going away, so that
- 	 * later retire_requests won't dereference our soon-to-be-gone
- 	 * file_priv.
- 	 */
--	mutex_lock(&dev->struct_mutex);
--	while (!list_empty(&i915_file_priv->mm.request_list))
--		list_del_init(i915_file_priv->mm.request_list.next);
--	mutex_unlock(&dev->struct_mutex);
-+	spin_lock(&file_priv->mm.lock);
-+	while (!list_empty(&file_priv->mm.request_list)) {
-+		struct drm_i915_gem_request *request;
-+
-+		request = list_first_entry(&file_priv->mm.request_list,
-+					   struct drm_i915_gem_request,
-+					   client_list);
-+		list_del(&request->client_list);
-+		request->file_priv = NULL;
-+	}
-+	spin_unlock(&file_priv->mm.lock);
- }
- 
- static int
-@@ -4889,12 +4929,8 @@ i915_gpu_is_active(struct drm_device *dev)
- 	drm_i915_private_t *dev_priv = dev->dev_private;
- 	int lists_empty;
- 
--	spin_lock(&dev_priv->mm.active_list_lock);
- 	lists_empty = list_empty(&dev_priv->mm.flushing_list) &&
--		      list_empty(&dev_priv->render_ring.active_list);
--	if (HAS_BSD(dev))
--		lists_empty &= list_empty(&dev_priv->bsd_ring.active_list);
--	spin_unlock(&dev_priv->mm.active_list_lock);
-+		      list_empty(&dev_priv->mm.active_list);
- 
- 	return !lists_empty;
- }
-@@ -4916,7 +4952,7 @@ i915_gem_shrink(struct shrinker *shrink, int nr_to_scan, gfp_t gfp_mask)
- 			if (mutex_trylock(&dev->struct_mutex)) {
- 				list_for_each_entry(obj_priv,
- 						    &dev_priv->mm.inactive_list,
--						    list)
-+						    mm_list)
- 					cnt++;
- 				mutex_unlock(&dev->struct_mutex);
- 			}
-@@ -4942,7 +4978,7 @@ rescan:
- 
- 		list_for_each_entry_safe(obj_priv, next_obj,
- 					 &dev_priv->mm.inactive_list,
--					 list) {
-+					 mm_list) {
- 			if (i915_gem_object_is_purgeable(obj_priv)) {
- 				i915_gem_object_unbind(&obj_priv->base);
- 				if (--nr_to_scan <= 0)
-@@ -4971,7 +5007,7 @@ rescan:
- 
- 		list_for_each_entry_safe(obj_priv, next_obj,
- 					 &dev_priv->mm.inactive_list,
--					 list) {
-+					 mm_list) {
- 			if (nr_to_scan > 0) {
- 				i915_gem_object_unbind(&obj_priv->base);
- 				nr_to_scan--;
-diff --git a/drivers/gpu/drm/i915/i915_gem_debug.c b/drivers/gpu/drm/i915/i915_gem_debug.c
-index 80f380b..48644b8 100644
---- a/drivers/gpu/drm/i915/i915_gem_debug.c
-+++ b/drivers/gpu/drm/i915/i915_gem_debug.c
-@@ -30,29 +30,112 @@
- #include "i915_drm.h"
- #include "i915_drv.h"
- 
--#if WATCH_INACTIVE
--void
--i915_verify_inactive(struct drm_device *dev, char *file, int line)
-+#if WATCH_LISTS
-+int
-+i915_verify_lists(struct drm_device *dev)
- {
-+	static int warned;
- 	drm_i915_private_t *dev_priv = dev->dev_private;
--	struct drm_gem_object *obj;
--	struct drm_i915_gem_object *obj_priv;
--
--	list_for_each_entry(obj_priv, &dev_priv->mm.inactive_list, list) {
--		obj = &obj_priv->base;
--		if (obj_priv->pin_count || obj_priv->active ||
--		    (obj->write_domain & ~(I915_GEM_DOMAIN_CPU |
--					   I915_GEM_DOMAIN_GTT)))
--			DRM_ERROR("inactive %p (p %d a %d w %x)  %s:%d\n",
-+	struct drm_i915_gem_object *obj;
-+	int err = 0;
-+
-+	if (warned)
-+		return 0;
-+
-+	list_for_each_entry(obj, &dev_priv->render_ring.active_list, list) {
-+		if (obj->base.dev != dev ||
-+		    !atomic_read(&obj->base.refcount.refcount)) {
-+			DRM_ERROR("freed render active %p\n", obj);
-+			err++;
-+			break;
-+		} else if (!obj->active ||
-+			   (obj->base.read_domains & I915_GEM_GPU_DOMAINS) == 0) {
-+			DRM_ERROR("invalid render active %p (a %d r %x)\n",
-+				  obj,
-+				  obj->active,
-+				  obj->base.read_domains);
-+			err++;
-+		} else if (obj->base.write_domain && list_empty(&obj->gpu_write_list)) {
-+			DRM_ERROR("invalid render active %p (w %x, gwl %d)\n",
-+				  obj,
-+				  obj->base.write_domain,
-+				  !list_empty(&obj->gpu_write_list));
-+			err++;
-+		}
-+	}
-+
-+	list_for_each_entry(obj, &dev_priv->mm.flushing_list, list) {
-+		if (obj->base.dev != dev ||
-+		    !atomic_read(&obj->base.refcount.refcount)) {
-+			DRM_ERROR("freed flushing %p\n", obj);
-+			err++;
-+			break;
-+		} else if (!obj->active ||
-+			   (obj->base.write_domain & I915_GEM_GPU_DOMAINS) == 0 ||
-+			   list_empty(&obj->gpu_write_list)){
-+			DRM_ERROR("invalid flushing %p (a %d w %x gwl %d)\n",
- 				  obj,
--				  obj_priv->pin_count, obj_priv->active,
--				  obj->write_domain, file, line);
-+				  obj->active,
-+				  obj->base.write_domain,
-+				  !list_empty(&obj->gpu_write_list));
-+			err++;
-+		}
-+	}
-+
-+	list_for_each_entry(obj, &dev_priv->mm.gpu_write_list, gpu_write_list) {
-+		if (obj->base.dev != dev ||
-+		    !atomic_read(&obj->base.refcount.refcount)) {
-+			DRM_ERROR("freed gpu write %p\n", obj);
-+			err++;
-+			break;
-+		} else if (!obj->active ||
-+			   (obj->base.write_domain & I915_GEM_GPU_DOMAINS) == 0) {
-+			DRM_ERROR("invalid gpu write %p (a %d w %x)\n",
-+				  obj,
-+				  obj->active,
-+				  obj->base.write_domain);
-+			err++;
-+		}
-+	}
-+
-+	list_for_each_entry(obj, &dev_priv->mm.inactive_list, list) {
-+		if (obj->base.dev != dev ||
-+		    !atomic_read(&obj->base.refcount.refcount)) {
-+			DRM_ERROR("freed inactive %p\n", obj);
-+			err++;
-+			break;
-+		} else if (obj->pin_count || obj->active ||
-+			   (obj->base.write_domain & I915_GEM_GPU_DOMAINS)) {
-+			DRM_ERROR("invalid inactive %p (p %d a %d w %x)\n",
-+				  obj,
-+				  obj->pin_count, obj->active,
-+				  obj->base.write_domain);
-+			err++;
-+		}
- 	}
-+
-+	list_for_each_entry(obj, &dev_priv->mm.pinned_list, list) {
-+		if (obj->base.dev != dev ||
-+		    !atomic_read(&obj->base.refcount.refcount)) {
-+			DRM_ERROR("freed pinned %p\n", obj);
-+			err++;
-+			break;
-+		} else if (!obj->pin_count || obj->active ||
-+			   (obj->base.write_domain & I915_GEM_GPU_DOMAINS)) {
-+			DRM_ERROR("invalid pinned %p (p %d a %d w %x)\n",
-+				  obj,
-+				  obj->pin_count, obj->active,
-+				  obj->base.write_domain);
-+			err++;
-+		}
-+	}
-+
-+	return warned = err;
- }
- #endif /* WATCH_INACTIVE */
- 
- 
--#if WATCH_BUF | WATCH_EXEC | WATCH_PWRITE
-+#if WATCH_EXEC | WATCH_PWRITE
- static void
- i915_gem_dump_page(struct page *page, uint32_t start, uint32_t end,
- 		   uint32_t bias, uint32_t mark)
-@@ -97,41 +180,6 @@ i915_gem_dump_object(struct drm_gem_object *obj, int len,
- }
- #endif
- 
--#if WATCH_LRU
--void
--i915_dump_lru(struct drm_device *dev, const char *where)
--{
--	drm_i915_private_t		*dev_priv = dev->dev_private;
--	struct drm_i915_gem_object	*obj_priv;
--
--	DRM_INFO("active list %s {\n", where);
--	spin_lock(&dev_priv->mm.active_list_lock);
--	list_for_each_entry(obj_priv, &dev_priv->mm.active_list,
--			    list)
--	{
--		DRM_INFO("    %p: %08x\n", obj_priv,
--			 obj_priv->last_rendering_seqno);
--	}
--	spin_unlock(&dev_priv->mm.active_list_lock);
--	DRM_INFO("}\n");
--	DRM_INFO("flushing list %s {\n", where);
--	list_for_each_entry(obj_priv, &dev_priv->mm.flushing_list,
--			    list)
--	{
--		DRM_INFO("    %p: %08x\n", obj_priv,
--			 obj_priv->last_rendering_seqno);
--	}
--	DRM_INFO("}\n");
--	DRM_INFO("inactive %s {\n", where);
--	list_for_each_entry(obj_priv, &dev_priv->mm.inactive_list, list) {
--		DRM_INFO("    %p: %08x\n", obj_priv,
--			 obj_priv->last_rendering_seqno);
--	}
--	DRM_INFO("}\n");
--}
--#endif
--
--
- #if WATCH_COHERENCY
- void
- i915_gem_object_check_coherency(struct drm_gem_object *obj, int handle)
-diff --git a/drivers/gpu/drm/i915/i915_gem_evict.c b/drivers/gpu/drm/i915/i915_gem_evict.c
-index 5c428fa..d8ae7d1 100644
---- a/drivers/gpu/drm/i915/i915_gem_evict.c
-+++ b/drivers/gpu/drm/i915/i915_gem_evict.c
-@@ -31,49 +31,6 @@
- #include "i915_drv.h"
- #include "i915_drm.h"
- 
--static struct drm_i915_gem_object *
--i915_gem_next_active_object(struct drm_device *dev,
--			    struct list_head **render_iter,
--			    struct list_head **bsd_iter)
--{
--	drm_i915_private_t *dev_priv = dev->dev_private;
--	struct drm_i915_gem_object *render_obj = NULL, *bsd_obj = NULL;
--
--	if (*render_iter != &dev_priv->render_ring.active_list)
--		render_obj = list_entry(*render_iter,
--					struct drm_i915_gem_object,
--					list);
--
--	if (HAS_BSD(dev)) {
--		if (*bsd_iter != &dev_priv->bsd_ring.active_list)
--			bsd_obj = list_entry(*bsd_iter,
--					     struct drm_i915_gem_object,
--					     list);
--
--		if (render_obj == NULL) {
--			*bsd_iter = (*bsd_iter)->next;
--			return bsd_obj;
--		}
--
--		if (bsd_obj == NULL) {
--			*render_iter = (*render_iter)->next;
--			return render_obj;
--		}
--
--		/* XXX can we handle seqno wrapping? */
--		if (render_obj->last_rendering_seqno < bsd_obj->last_rendering_seqno) {
--			*render_iter = (*render_iter)->next;
--			return render_obj;
--		} else {
--			*bsd_iter = (*bsd_iter)->next;
--			return bsd_obj;
--		}
--	} else {
--		*render_iter = (*render_iter)->next;
--		return render_obj;
--	}
--}
--
- static bool
- mark_free(struct drm_i915_gem_object *obj_priv,
- 	   struct list_head *unwind)
-@@ -83,18 +40,12 @@ mark_free(struct drm_i915_gem_object *obj_priv,
- 	return drm_mm_scan_add_block(obj_priv->gtt_space);
- }
- 
--#define i915_for_each_active_object(OBJ, R, B) \
--	*(R) = dev_priv->render_ring.active_list.next; \
--	*(B) = dev_priv->bsd_ring.active_list.next; \
--	while (((OBJ) = i915_gem_next_active_object(dev, (R), (B))) != NULL)
--
- int
- i915_gem_evict_something(struct drm_device *dev, int min_size, unsigned alignment)
- {
- 	drm_i915_private_t *dev_priv = dev->dev_private;
- 	struct list_head eviction_list, unwind_list;
- 	struct drm_i915_gem_object *obj_priv;
--	struct list_head *render_iter, *bsd_iter;
- 	int ret = 0;
- 
- 	i915_gem_retire_requests(dev);
-@@ -131,13 +82,13 @@ i915_gem_evict_something(struct drm_device *dev, int min_size, unsigned alignmen
- 	drm_mm_init_scan(&dev_priv->mm.gtt_space, min_size, alignment);
- 
- 	/* First see if there is a large enough contiguous idle region... */
--	list_for_each_entry(obj_priv, &dev_priv->mm.inactive_list, list) {
-+	list_for_each_entry(obj_priv, &dev_priv->mm.inactive_list, mm_list) {
- 		if (mark_free(obj_priv, &unwind_list))
- 			goto found;
- 	}
- 
- 	/* Now merge in the soon-to-be-expired objects... */
--	i915_for_each_active_object(obj_priv, &render_iter, &bsd_iter) {
-+	list_for_each_entry(obj_priv, &dev_priv->mm.active_list, mm_list) {
- 		/* Does the object require an outstanding flush? */
- 		if (obj_priv->base.write_domain || obj_priv->pin_count)
- 			continue;
-@@ -147,14 +98,14 @@ i915_gem_evict_something(struct drm_device *dev, int min_size, unsigned alignmen
- 	}
- 
- 	/* Finally add anything with a pending flush (in order of retirement) */
--	list_for_each_entry(obj_priv, &dev_priv->mm.flushing_list, list) {
-+	list_for_each_entry(obj_priv, &dev_priv->mm.flushing_list, mm_list) {
- 		if (obj_priv->pin_count)
- 			continue;
- 
- 		if (mark_free(obj_priv, &unwind_list))
- 			goto found;
- 	}
--	i915_for_each_active_object(obj_priv, &render_iter, &bsd_iter) {
-+	list_for_each_entry(obj_priv, &dev_priv->mm.active_list, mm_list) {
- 		if (! obj_priv->base.write_domain || obj_priv->pin_count)
- 			continue;
- 
-@@ -212,14 +163,9 @@ i915_gem_evict_everything(struct drm_device *dev)
- 	int ret;
- 	bool lists_empty;
- 
--	spin_lock(&dev_priv->mm.active_list_lock);
- 	lists_empty = (list_empty(&dev_priv->mm.inactive_list) &&
- 		       list_empty(&dev_priv->mm.flushing_list) &&
--		       list_empty(&dev_priv->render_ring.active_list) &&
--		       (!HAS_BSD(dev)
--			|| list_empty(&dev_priv->bsd_ring.active_list)));
--	spin_unlock(&dev_priv->mm.active_list_lock);
--
-+		       list_empty(&dev_priv->mm.active_list));
- 	if (lists_empty)
- 		return -ENOSPC;
- 
-@@ -234,13 +180,9 @@ i915_gem_evict_everything(struct drm_device *dev)
- 	if (ret)
- 		return ret;
- 
--	spin_lock(&dev_priv->mm.active_list_lock);
- 	lists_empty = (list_empty(&dev_priv->mm.inactive_list) &&
- 		       list_empty(&dev_priv->mm.flushing_list) &&
--		       list_empty(&dev_priv->render_ring.active_list) &&
--		       (!HAS_BSD(dev)
--			|| list_empty(&dev_priv->bsd_ring.active_list)));
--	spin_unlock(&dev_priv->mm.active_list_lock);
-+		       list_empty(&dev_priv->mm.active_list));
- 	BUG_ON(!lists_empty);
- 
- 	return 0;
-@@ -258,7 +200,7 @@ i915_gem_evict_inactive(struct drm_device *dev)
- 
- 		obj = &list_first_entry(&dev_priv->mm.inactive_list,
- 					struct drm_i915_gem_object,
--					list)->base;
-+					mm_list)->base;
- 
- 		ret = i915_gem_object_unbind(obj);
- 		if (ret != 0) {
-diff --git a/drivers/gpu/drm/i915/i915_gem_tiling.c b/drivers/gpu/drm/i915/i915_gem_tiling.c
-index 710eca7..af352de 100644
---- a/drivers/gpu/drm/i915/i915_gem_tiling.c
-+++ b/drivers/gpu/drm/i915/i915_gem_tiling.c
-@@ -92,13 +92,13 @@ i915_gem_detect_bit_6_swizzle(struct drm_device *dev)
- 	uint32_t swizzle_x = I915_BIT_6_SWIZZLE_UNKNOWN;
- 	uint32_t swizzle_y = I915_BIT_6_SWIZZLE_UNKNOWN;
- 
--	if (IS_IRONLAKE(dev) || IS_GEN6(dev)) {
-+	if (IS_GEN5(dev) || IS_GEN6(dev)) {
- 		/* On Ironlake whatever DRAM config, GPU always do
- 		 * same swizzling setup.
- 		 */
- 		swizzle_x = I915_BIT_6_SWIZZLE_9_10;
- 		swizzle_y = I915_BIT_6_SWIZZLE_9;
--	} else if (!IS_I9XX(dev)) {
-+	} else if (IS_GEN2(dev)) {
- 		/* As far as we know, the 865 doesn't have these bit 6
- 		 * swizzling issues.
- 		 */
-@@ -190,19 +190,19 @@ i915_tiling_ok(struct drm_device *dev, int stride, int size, int tiling_mode)
- 	if (tiling_mode == I915_TILING_NONE)
- 		return true;
- 
--	if (!IS_I9XX(dev) ||
-+	if (IS_GEN2(dev) ||
- 	    (tiling_mode == I915_TILING_Y && HAS_128_BYTE_Y_TILING(dev)))
- 		tile_width = 128;
- 	else
- 		tile_width = 512;
- 
- 	/* check maximum stride & object size */
--	if (IS_I965G(dev)) {
-+	if (INTEL_INFO(dev)->gen >= 4) {
- 		/* i965 stores the end address of the gtt mapping in the fence
- 		 * reg, so dont bother to check the size */
- 		if (stride / 128 > I965_FENCE_MAX_PITCH_VAL)
- 			return false;
--	} else if (IS_GEN3(dev) || IS_GEN2(dev)) {
-+	} else {
- 		if (stride > 8192)
- 			return false;
- 
-@@ -216,7 +216,7 @@ i915_tiling_ok(struct drm_device *dev, int stride, int size, int tiling_mode)
- 	}
- 
- 	/* 965+ just needs multiples of tile width */
--	if (IS_I965G(dev)) {
-+	if (INTEL_INFO(dev)->gen >= 4) {
- 		if (stride & (tile_width - 1))
- 			return false;
- 		return true;
-@@ -244,16 +244,18 @@ i915_gem_object_fence_offset_ok(struct drm_gem_object *obj, int tiling_mode)
- 	if (tiling_mode == I915_TILING_NONE)
- 		return true;
- 
--	if (!IS_I965G(dev)) {
--		if (obj_priv->gtt_offset & (obj->size - 1))
-+	if (INTEL_INFO(dev)->gen >= 4)
-+		return true;
-+
-+	if (obj_priv->gtt_offset & (obj->size - 1))
-+		return false;
-+
-+	if (IS_GEN3(dev)) {
-+		if (obj_priv->gtt_offset & ~I915_FENCE_START_MASK)
-+			return false;
-+	} else {
-+		if (obj_priv->gtt_offset & ~I830_FENCE_START_MASK)
- 			return false;
--		if (IS_I9XX(dev)) {
--			if (obj_priv->gtt_offset & ~I915_FENCE_START_MASK)
--				return false;
--		} else {
--			if (obj_priv->gtt_offset & ~I830_FENCE_START_MASK)
--				return false;
--		}
- 	}
- 
- 	return true;
-@@ -271,7 +273,11 @@ i915_gem_set_tiling(struct drm_device *dev, void *data,
- 	drm_i915_private_t *dev_priv = dev->dev_private;
- 	struct drm_gem_object *obj;
- 	struct drm_i915_gem_object *obj_priv;
--	int ret = 0;
-+	int ret;
-+
-+	ret = i915_gem_check_is_wedged(dev);
-+	if (ret)
-+		return ret;
- 
- 	obj = drm_gem_object_lookup(dev, file_priv, args->handle);
- 	if (obj == NULL)
-@@ -328,7 +334,7 @@ i915_gem_set_tiling(struct drm_device *dev, void *data,
- 		if (!i915_gem_object_fence_offset_ok(obj, args->tiling_mode))
- 			ret = i915_gem_object_unbind(obj);
- 		else if (obj_priv->fence_reg != I915_FENCE_REG_NONE)
--			ret = i915_gem_object_put_fence_reg(obj);
-+			ret = i915_gem_object_put_fence_reg(obj, true);
- 		else
- 			i915_gem_release_mmap(obj);
- 
-@@ -399,16 +405,14 @@ i915_gem_get_tiling(struct drm_device *dev, void *data,
-  * bit 17 of its physical address and therefore being interpreted differently
-  * by the GPU.
-  */
--static int
-+static void
- i915_gem_swizzle_page(struct page *page)
- {
-+	char temp[64];
- 	char *vaddr;
- 	int i;
--	char temp[64];
- 
- 	vaddr = kmap(page);
--	if (vaddr == NULL)
--		return -ENOMEM;
- 
- 	for (i = 0; i < PAGE_SIZE; i += 128) {
- 		memcpy(temp, &vaddr[i], 64);
-@@ -417,8 +421,6 @@ i915_gem_swizzle_page(struct page *page)
- 	}
- 
- 	kunmap(page);
--
--	return 0;
- }
- 
- void
-@@ -440,11 +442,7 @@ i915_gem_object_do_bit_17_swizzle(struct drm_gem_object *obj)
- 		char new_bit_17 = page_to_phys(obj_priv->pages[i]) >> 17;
- 		if ((new_bit_17 & 0x1) !=
- 		    (test_bit(i, obj_priv->bit_17) != 0)) {
--			int ret = i915_gem_swizzle_page(obj_priv->pages[i]);
--			if (ret != 0) {
--				DRM_ERROR("Failed to swizzle page\n");
--				return;
--			}
-+			i915_gem_swizzle_page(obj_priv->pages[i]);
- 			set_page_dirty(obj_priv->pages[i]);
- 		}
- 	}
-diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
-index 744225e..237b8bd 100644
---- a/drivers/gpu/drm/i915/i915_irq.c
-+++ b/drivers/gpu/drm/i915/i915_irq.c
-@@ -85,7 +85,7 @@ ironlake_disable_graphics_irq(drm_i915_private_t *dev_priv, u32 mask)
- }
- 
- /* For display hotplug interrupt */
--void
-+static void
- ironlake_enable_display_irq(drm_i915_private_t *dev_priv, u32 mask)
- {
- 	if ((dev_priv->irq_mask_reg & mask) != 0) {
-@@ -172,7 +172,7 @@ void intel_enable_asle (struct drm_device *dev)
- 	else {
- 		i915_enable_pipestat(dev_priv, 1,
- 				     PIPE_LEGACY_BLC_EVENT_ENABLE);
--		if (IS_I965G(dev))
-+		if (INTEL_INFO(dev)->gen >= 4)
- 			i915_enable_pipestat(dev_priv, 0,
- 					     PIPE_LEGACY_BLC_EVENT_ENABLE);
- 	}
-@@ -191,12 +191,7 @@ static int
- i915_pipe_enabled(struct drm_device *dev, int pipe)
- {
- 	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
--	unsigned long pipeconf = pipe ? PIPEBCONF : PIPEACONF;
--
--	if (I915_READ(pipeconf) & PIPEACONF_ENABLE)
--		return 1;
--
--	return 0;
-+	return I915_READ(PIPECONF(pipe)) & PIPECONF_ENABLE;
- }
- 
- /* Called from drm generic code, passed a 'crtc', which
-@@ -207,10 +202,7 @@ u32 i915_get_vblank_counter(struct drm_device *dev, int pipe)
- 	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
- 	unsigned long high_frame;
- 	unsigned long low_frame;
--	u32 high1, high2, low, count;
--
--	high_frame = pipe ? PIPEBFRAMEHIGH : PIPEAFRAMEHIGH;
--	low_frame = pipe ? PIPEBFRAMEPIXEL : PIPEAFRAMEPIXEL;
-+	u32 high1, high2, low;
- 
- 	if (!i915_pipe_enabled(dev, pipe)) {
- 		DRM_DEBUG_DRIVER("trying to get vblank count for disabled "
-@@ -218,23 +210,23 @@ u32 i915_get_vblank_counter(struct drm_device *dev, int pipe)
- 		return 0;
- 	}
- 
-+	high_frame = pipe ? PIPEBFRAMEHIGH : PIPEAFRAMEHIGH;
-+	low_frame = pipe ? PIPEBFRAMEPIXEL : PIPEAFRAMEPIXEL;
-+
- 	/*
- 	 * High & low register fields aren't synchronized, so make sure
- 	 * we get a low value that's stable across two reads of the high
- 	 * register.
- 	 */
- 	do {
--		high1 = ((I915_READ(high_frame) & PIPE_FRAME_HIGH_MASK) >>
--			 PIPE_FRAME_HIGH_SHIFT);
--		low =  ((I915_READ(low_frame) & PIPE_FRAME_LOW_MASK) >>
--			PIPE_FRAME_LOW_SHIFT);
--		high2 = ((I915_READ(high_frame) & PIPE_FRAME_HIGH_MASK) >>
--			 PIPE_FRAME_HIGH_SHIFT);
-+		high1 = I915_READ(high_frame) & PIPE_FRAME_HIGH_MASK;
-+		low   = I915_READ(low_frame)  & PIPE_FRAME_LOW_MASK;
-+		high2 = I915_READ(high_frame) & PIPE_FRAME_HIGH_MASK;
- 	} while (high1 != high2);
- 
--	count = (high1 << 8) | low;
--
--	return count;
-+	high1 >>= PIPE_FRAME_HIGH_SHIFT;
-+	low >>= PIPE_FRAME_LOW_SHIFT;
-+	return (high1 << 8) | low;
- }
- 
- u32 gm45_get_vblank_counter(struct drm_device *dev, int pipe)
-@@ -260,16 +252,12 @@ static void i915_hotplug_work_func(struct work_struct *work)
- 						    hotplug_work);
- 	struct drm_device *dev = dev_priv->dev;
- 	struct drm_mode_config *mode_config = &dev->mode_config;
--	struct drm_encoder *encoder;
--
--	if (mode_config->num_encoder) {
--		list_for_each_entry(encoder, &mode_config->encoder_list, head) {
--			struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder);
--	
--			if (intel_encoder->hot_plug)
--				(*intel_encoder->hot_plug) (intel_encoder);
--		}
--	}
-+	struct intel_encoder *encoder;
-+
-+	list_for_each_entry(encoder, &mode_config->encoder_list, base.head)
-+		if (encoder->hot_plug)
-+			encoder->hot_plug(encoder);
-+
- 	/* Just fire off a uevent and let userspace tell us what to do */
- 	drm_helper_hpd_irq_event(dev);
- }
-@@ -305,13 +293,30 @@ static void i915_handle_rps_change(struct drm_device *dev)
- 	return;
- }
- 
--irqreturn_t ironlake_irq_handler(struct drm_device *dev)
-+static void notify_ring(struct drm_device *dev,
-+			struct intel_ring_buffer *ring)
-+{
-+	struct drm_i915_private *dev_priv = dev->dev_private;
-+	u32 seqno = ring->get_seqno(dev, ring);
-+	ring->irq_gem_seqno = seqno;
-+	trace_i915_gem_request_complete(dev, seqno);
-+	wake_up_all(&ring->irq_queue);
-+	dev_priv->hangcheck_count = 0;
-+	mod_timer(&dev_priv->hangcheck_timer,
-+		  jiffies + msecs_to_jiffies(DRM_I915_HANGCHECK_PERIOD));
-+}
-+
-+static irqreturn_t ironlake_irq_handler(struct drm_device *dev)
- {
- 	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
- 	int ret = IRQ_NONE;
- 	u32 de_iir, gt_iir, de_ier, pch_iir;
-+	u32 hotplug_mask;
- 	struct drm_i915_master_private *master_priv;
--	struct intel_ring_buffer *render_ring = &dev_priv->render_ring;
-+	u32 bsd_usr_interrupt = GT_BSD_USER_INTERRUPT;
-+
-+	if (IS_GEN6(dev))
-+		bsd_usr_interrupt = GT_GEN6_BSD_USER_INTERRUPT;
- 
- 	/* disable master interrupt before clearing iir  */
- 	de_ier = I915_READ(DEIER);
-@@ -325,6 +330,11 @@ irqreturn_t ironlake_irq_handler(struct drm_device *dev)
- 	if (de_iir == 0 && gt_iir == 0 && pch_iir == 0)
- 		goto done;
- 
-+	if (HAS_PCH_CPT(dev))
-+		hotplug_mask = SDE_HOTPLUG_MASK_CPT;
-+	else
-+		hotplug_mask = SDE_HOTPLUG_MASK;
-+
- 	ret = IRQ_HANDLED;
- 
- 	if (dev->primary->master) {
-@@ -334,29 +344,24 @@ irqreturn_t ironlake_irq_handler(struct drm_device *dev)
- 				READ_BREADCRUMB(dev_priv);
- 	}
- 
--	if (gt_iir & GT_PIPE_NOTIFY) {
--		u32 seqno = render_ring->get_gem_seqno(dev, render_ring);
--		render_ring->irq_gem_seqno = seqno;
--		trace_i915_gem_request_complete(dev, seqno);
--		DRM_WAKEUP(&dev_priv->render_ring.irq_queue);
--		dev_priv->hangcheck_count = 0;
--		mod_timer(&dev_priv->hangcheck_timer, jiffies + DRM_I915_HANGCHECK_PERIOD);
--	}
--	if (gt_iir & GT_BSD_USER_INTERRUPT)
--		DRM_WAKEUP(&dev_priv->bsd_ring.irq_queue);
--
-+	if (gt_iir & GT_PIPE_NOTIFY)
-+		notify_ring(dev, &dev_priv->render_ring);
-+	if (gt_iir & bsd_usr_interrupt)
-+		notify_ring(dev, &dev_priv->bsd_ring);
-+	if (HAS_BLT(dev) && gt_iir & GT_BLT_USER_INTERRUPT)
-+		notify_ring(dev, &dev_priv->blt_ring);
- 
- 	if (de_iir & DE_GSE)
--		ironlake_opregion_gse_intr(dev);
-+		intel_opregion_gse_intr(dev);
- 
- 	if (de_iir & DE_PLANEA_FLIP_DONE) {
- 		intel_prepare_page_flip(dev, 0);
--		intel_finish_page_flip(dev, 0);
-+		intel_finish_page_flip_plane(dev, 0);
- 	}
- 
- 	if (de_iir & DE_PLANEB_FLIP_DONE) {
- 		intel_prepare_page_flip(dev, 1);
--		intel_finish_page_flip(dev, 1);
-+		intel_finish_page_flip_plane(dev, 1);
- 	}
- 
- 	if (de_iir & DE_PIPEA_VBLANK)
-@@ -366,10 +371,8 @@ irqreturn_t ironlake_irq_handler(struct drm_device *dev)
- 		drm_handle_vblank(dev, 1);
- 
- 	/* check event from PCH */
--	if ((de_iir & DE_PCH_EVENT) &&
--	    (pch_iir & SDE_HOTPLUG_MASK)) {
-+	if ((de_iir & DE_PCH_EVENT) && (pch_iir & hotplug_mask))
- 		queue_work(dev_priv->wq, &dev_priv->hotplug_work);
--	}
- 
- 	if (de_iir & DE_PCU_EVENT) {
- 		I915_WRITE16(MEMINTRSTS, I915_READ(MEMINTRSTS));
-@@ -404,23 +407,20 @@ static void i915_error_work_func(struct work_struct *work)
- 	char *reset_event[] = { "RESET=1", NULL };
- 	char *reset_done_event[] = { "ERROR=0", NULL };
- 
--	DRM_DEBUG_DRIVER("generating error event\n");
- 	kobject_uevent_env(&dev->primary->kdev.kobj, KOBJ_CHANGE, error_event);
- 
- 	if (atomic_read(&dev_priv->mm.wedged)) {
--		if (IS_I965G(dev)) {
--			DRM_DEBUG_DRIVER("resetting chip\n");
--			kobject_uevent_env(&dev->primary->kdev.kobj, KOBJ_CHANGE, reset_event);
--			if (!i965_reset(dev, GDRST_RENDER)) {
--				atomic_set(&dev_priv->mm.wedged, 0);
--				kobject_uevent_env(&dev->primary->kdev.kobj, KOBJ_CHANGE, reset_done_event);
--			}
--		} else {
--			DRM_DEBUG_DRIVER("reboot required\n");
-+		DRM_DEBUG_DRIVER("resetting chip\n");
-+		kobject_uevent_env(&dev->primary->kdev.kobj, KOBJ_CHANGE, reset_event);
-+		if (!i915_reset(dev, GRDOM_RENDER)) {
-+			atomic_set(&dev_priv->mm.wedged, 0);
-+			kobject_uevent_env(&dev->primary->kdev.kobj, KOBJ_CHANGE, reset_done_event);
- 		}
-+		complete_all(&dev_priv->error_completion);
- 	}
- }
- 
-+#ifdef CONFIG_DEBUG_FS
- static struct drm_i915_error_object *
- i915_error_object_create(struct drm_device *dev,
- 			 struct drm_gem_object *src)
-@@ -511,7 +511,7 @@ i915_get_bbaddr(struct drm_device *dev, u32 *ring)
- 
- 	if (IS_I830(dev) || IS_845G(dev))
- 		cmd = MI_BATCH_BUFFER;
--	else if (IS_I965G(dev))
-+	else if (INTEL_INFO(dev)->gen >= 4)
- 		cmd = (MI_BATCH_BUFFER_START | (2 << 6) |
- 		       MI_BATCH_NON_SECURE_I965);
- 	else
-@@ -584,13 +584,16 @@ static void i915_capture_error_state(struct drm_device *dev)
- 		return;
- 	}
- 
--	error->seqno = i915_get_gem_seqno(dev, &dev_priv->render_ring);
-+	DRM_DEBUG_DRIVER("generating error event\n");
-+
-+	error->seqno =
-+		dev_priv->render_ring.get_seqno(dev, &dev_priv->render_ring);
- 	error->eir = I915_READ(EIR);
- 	error->pgtbl_er = I915_READ(PGTBL_ER);
- 	error->pipeastat = I915_READ(PIPEASTAT);
- 	error->pipebstat = I915_READ(PIPEBSTAT);
- 	error->instpm = I915_READ(INSTPM);
--	if (!IS_I965G(dev)) {
-+	if (INTEL_INFO(dev)->gen < 4) {
- 		error->ipeir = I915_READ(IPEIR);
- 		error->ipehr = I915_READ(IPEHR);
- 		error->instdone = I915_READ(INSTDONE);
-@@ -612,9 +615,7 @@ static void i915_capture_error_state(struct drm_device *dev)
- 	batchbuffer[0] = NULL;
- 	batchbuffer[1] = NULL;
- 	count = 0;
--	list_for_each_entry(obj_priv,
--			&dev_priv->render_ring.active_list, list) {
--
-+	list_for_each_entry(obj_priv, &dev_priv->mm.active_list, mm_list) {
- 		struct drm_gem_object *obj = &obj_priv->base;
- 
- 		if (batchbuffer[0] == NULL &&
-@@ -631,7 +632,7 @@ static void i915_capture_error_state(struct drm_device *dev)
- 	}
- 	/* Scan the other lists for completeness for those bizarre errors. */
- 	if (batchbuffer[0] == NULL || batchbuffer[1] == NULL) {
--		list_for_each_entry(obj_priv, &dev_priv->mm.flushing_list, list) {
-+		list_for_each_entry(obj_priv, &dev_priv->mm.flushing_list, mm_list) {
- 			struct drm_gem_object *obj = &obj_priv->base;
- 
- 			if (batchbuffer[0] == NULL &&
-@@ -649,7 +650,7 @@ static void i915_capture_error_state(struct drm_device *dev)
- 		}
- 	}
- 	if (batchbuffer[0] == NULL || batchbuffer[1] == NULL) {
--		list_for_each_entry(obj_priv, &dev_priv->mm.inactive_list, list) {
-+		list_for_each_entry(obj_priv, &dev_priv->mm.inactive_list, mm_list) {
- 			struct drm_gem_object *obj = &obj_priv->base;
- 
- 			if (batchbuffer[0] == NULL &&
-@@ -668,7 +669,7 @@ static void i915_capture_error_state(struct drm_device *dev)
- 	}
- 
- 	/* We need to copy these to an anonymous buffer as the simplest
--	 * method to avoid being overwritten by userpace.
-+	 * method to avoid being overwritten by userspace.
- 	 */
- 	error->batchbuffer[0] = i915_error_object_create(dev, batchbuffer[0]);
- 	if (batchbuffer[1] != batchbuffer[0])
-@@ -690,8 +691,7 @@ static void i915_capture_error_state(struct drm_device *dev)
- 
- 	if (error->active_bo) {
- 		int i = 0;
--		list_for_each_entry(obj_priv,
--				&dev_priv->render_ring.active_list, list) {
-+		list_for_each_entry(obj_priv, &dev_priv->mm.active_list, mm_list) {
- 			struct drm_gem_object *obj = &obj_priv->base;
- 
- 			error->active_bo[i].size = obj->size;
-@@ -744,6 +744,9 @@ void i915_destroy_error_state(struct drm_device *dev)
- 	if (error)
- 		i915_error_state_free(dev, error);
- }
-+#else
-+#define i915_capture_error_state(x)
-+#endif
- 
- static void i915_report_and_clear_eir(struct drm_device *dev)
- {
-@@ -785,7 +788,7 @@ static void i915_report_and_clear_eir(struct drm_device *dev)
- 		}
- 	}
- 
--	if (IS_I9XX(dev)) {
-+	if (!IS_GEN2(dev)) {
- 		if (eir & I915_ERROR_PAGE_TABLE) {
- 			u32 pgtbl_err = I915_READ(PGTBL_ER);
- 			printk(KERN_ERR "page table error\n");
-@@ -811,7 +814,7 @@ static void i915_report_and_clear_eir(struct drm_device *dev)
- 		printk(KERN_ERR "instruction error\n");
- 		printk(KERN_ERR "  INSTPM: 0x%08x\n",
- 		       I915_READ(INSTPM));
--		if (!IS_I965G(dev)) {
-+		if (INTEL_INFO(dev)->gen < 4) {
- 			u32 ipeir = I915_READ(IPEIR);
- 
- 			printk(KERN_ERR "  IPEIR: 0x%08x\n",
-@@ -876,12 +879,17 @@ static void i915_handle_error(struct drm_device *dev, bool wedged)
- 	i915_report_and_clear_eir(dev);
- 
- 	if (wedged) {
-+		INIT_COMPLETION(dev_priv->error_completion);
- 		atomic_set(&dev_priv->mm.wedged, 1);
- 
- 		/*
- 		 * Wakeup waiting processes so they don't hang
- 		 */
--		DRM_WAKEUP(&dev_priv->render_ring.irq_queue);
-+		wake_up_all(&dev_priv->render_ring.irq_queue);
-+		if (HAS_BSD(dev))
-+			wake_up_all(&dev_priv->bsd_ring.irq_queue);
-+		if (HAS_BLT(dev))
-+			wake_up_all(&dev_priv->blt_ring.irq_queue);
- 	}
- 
- 	queue_work(dev_priv->wq, &dev_priv->error_work);
-@@ -912,7 +920,7 @@ static void i915_pageflip_stall_check(struct drm_device *dev, int pipe)
- 
- 	/* Potential stall - if we see that the flip has happened, assume a missed interrupt */
- 	obj_priv = to_intel_bo(work->pending_flip_obj);
--	if(IS_I965G(dev)) {
-+	if (INTEL_INFO(dev)->gen >= 4) {
- 		int dspsurf = intel_crtc->plane == 0 ? DSPASURF : DSPBSURF;
- 		stall_detected = I915_READ(dspsurf) == obj_priv->gtt_offset;
- 	} else {
-@@ -942,7 +950,6 @@ irqreturn_t i915_driver_irq_handler(DRM_IRQ_ARGS)
- 	unsigned long irqflags;
- 	int irq_received;
- 	int ret = IRQ_NONE;
--	struct intel_ring_buffer *render_ring = &dev_priv->render_ring;
- 
- 	atomic_inc(&dev_priv->irq_received);
- 
-@@ -951,7 +958,7 @@ irqreturn_t i915_driver_irq_handler(DRM_IRQ_ARGS)
- 
- 	iir = I915_READ(IIR);
- 
--	if (IS_I965G(dev))
-+	if (INTEL_INFO(dev)->gen >= 4)
- 		vblank_status = PIPE_START_VBLANK_INTERRUPT_STATUS;
- 	else
- 		vblank_status = PIPE_VBLANK_INTERRUPT_STATUS;
-@@ -1019,18 +1026,10 @@ irqreturn_t i915_driver_irq_handler(DRM_IRQ_ARGS)
- 					READ_BREADCRUMB(dev_priv);
- 		}
- 
--		if (iir & I915_USER_INTERRUPT) {
--			u32 seqno =
--				render_ring->get_gem_seqno(dev, render_ring);
--			render_ring->irq_gem_seqno = seqno;
--			trace_i915_gem_request_complete(dev, seqno);
--			DRM_WAKEUP(&dev_priv->render_ring.irq_queue);
--			dev_priv->hangcheck_count = 0;
--			mod_timer(&dev_priv->hangcheck_timer, jiffies + DRM_I915_HANGCHECK_PERIOD);
--		}
--
-+		if (iir & I915_USER_INTERRUPT)
-+			notify_ring(dev, &dev_priv->render_ring);
- 		if (HAS_BSD(dev) && (iir & I915_BSD_USER_INTERRUPT))
--			DRM_WAKEUP(&dev_priv->bsd_ring.irq_queue);
-+			notify_ring(dev, &dev_priv->bsd_ring);
- 
- 		if (iir & I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT) {
- 			intel_prepare_page_flip(dev, 0);
-@@ -1065,7 +1064,7 @@ irqreturn_t i915_driver_irq_handler(DRM_IRQ_ARGS)
- 		if ((pipea_stats & PIPE_LEGACY_BLC_EVENT_STATUS) ||
- 		    (pipeb_stats & PIPE_LEGACY_BLC_EVENT_STATUS) ||
- 		    (iir & I915_ASLE_INTERRUPT))
--			opregion_asle_intr(dev);
-+			intel_opregion_asle_intr(dev);
- 
- 		/* With MSI, interrupts are only generated when iir
- 		 * transitions from zero to nonzero.  If another bit got
-@@ -1207,18 +1206,15 @@ int i915_enable_vblank(struct drm_device *dev, int pipe)
- {
- 	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
- 	unsigned long irqflags;
--	int pipeconf_reg = (pipe == 0) ? PIPEACONF : PIPEBCONF;
--	u32 pipeconf;
- 
--	pipeconf = I915_READ(pipeconf_reg);
--	if (!(pipeconf & PIPEACONF_ENABLE))
-+	if (!i915_pipe_enabled(dev, pipe))
- 		return -EINVAL;
- 
- 	spin_lock_irqsave(&dev_priv->user_irq_lock, irqflags);
- 	if (HAS_PCH_SPLIT(dev))
- 		ironlake_enable_display_irq(dev_priv, (pipe == 0) ? 
- 					    DE_PIPEA_VBLANK: DE_PIPEB_VBLANK);
--	else if (IS_I965G(dev))
-+	else if (INTEL_INFO(dev)->gen >= 4)
- 		i915_enable_pipestat(dev_priv, pipe,
- 				     PIPE_START_VBLANK_INTERRUPT_ENABLE);
- 	else
-@@ -1252,7 +1248,7 @@ void i915_enable_interrupt (struct drm_device *dev)
- 	struct drm_i915_private *dev_priv = dev->dev_private;
- 
- 	if (!HAS_PCH_SPLIT(dev))
--		opregion_enable_asle(dev);
-+		intel_opregion_enable_asle(dev);
- 	dev_priv->irq_enabled = 1;
- }
- 
-@@ -1311,7 +1307,7 @@ int i915_vblank_swap(struct drm_device *dev, void *data,
- 	return -EINVAL;
- }
- 
--struct drm_i915_gem_request *
-+static struct drm_i915_gem_request *
- i915_get_tail_request(struct drm_device *dev)
- {
- 	drm_i915_private_t *dev_priv = dev->dev_private;
-@@ -1331,11 +1327,7 @@ void i915_hangcheck_elapsed(unsigned long data)
- 	drm_i915_private_t *dev_priv = dev->dev_private;
- 	uint32_t acthd, instdone, instdone1;
- 
--	/* No reset support on this chip yet. */
--	if (IS_GEN6(dev))
--		return;
--
--	if (!IS_I965G(dev)) {
-+	if (INTEL_INFO(dev)->gen < 4) {
- 		acthd = I915_READ(ACTHD);
- 		instdone = I915_READ(INSTDONE);
- 		instdone1 = 0;
-@@ -1347,9 +1339,8 @@ void i915_hangcheck_elapsed(unsigned long data)
- 
- 	/* If all work is done then ACTHD clearly hasn't advanced. */
- 	if (list_empty(&dev_priv->render_ring.request_list) ||
--		i915_seqno_passed(i915_get_gem_seqno(dev,
--				&dev_priv->render_ring),
--			i915_get_tail_request(dev)->seqno)) {
-+		i915_seqno_passed(dev_priv->render_ring.get_seqno(dev, &dev_priv->render_ring),
-+				  i915_get_tail_request(dev)->seqno)) {
- 		bool missed_wakeup = false;
- 
- 		dev_priv->hangcheck_count = 0;
-@@ -1357,13 +1348,19 @@ void i915_hangcheck_elapsed(unsigned long data)
- 		/* Issue a wake-up to catch stuck h/w. */
- 		if (dev_priv->render_ring.waiting_gem_seqno &&
- 		    waitqueue_active(&dev_priv->render_ring.irq_queue)) {
--			DRM_WAKEUP(&dev_priv->render_ring.irq_queue);
-+			wake_up_all(&dev_priv->render_ring.irq_queue);
- 			missed_wakeup = true;
- 		}
- 
- 		if (dev_priv->bsd_ring.waiting_gem_seqno &&
- 		    waitqueue_active(&dev_priv->bsd_ring.irq_queue)) {
--			DRM_WAKEUP(&dev_priv->bsd_ring.irq_queue);
-+			wake_up_all(&dev_priv->bsd_ring.irq_queue);
-+			missed_wakeup = true;
-+		}
-+
-+		if (dev_priv->blt_ring.waiting_gem_seqno &&
-+		    waitqueue_active(&dev_priv->blt_ring.irq_queue)) {
-+			wake_up_all(&dev_priv->blt_ring.irq_queue);
- 			missed_wakeup = true;
- 		}
- 
-@@ -1377,6 +1374,21 @@ void i915_hangcheck_elapsed(unsigned long data)
- 	    dev_priv->last_instdone1 == instdone1) {
- 		if (dev_priv->hangcheck_count++ > 1) {
- 			DRM_ERROR("Hangcheck timer elapsed... GPU hung\n");
-+
-+			if (!IS_GEN2(dev)) {
-+				/* Is the chip hanging on a WAIT_FOR_EVENT?
-+				 * If so we can simply poke the RB_WAIT bit
-+				 * and break the hang. This should work on
-+				 * all but the second generation chipsets.
-+				 */
-+				u32 tmp = I915_READ(PRB0_CTL);
-+				if (tmp & RING_WAIT) {
-+					I915_WRITE(PRB0_CTL, tmp);
-+					POSTING_READ(PRB0_CTL);
-+					goto out;
-+				}
-+			}
-+
- 			i915_handle_error(dev, true);
- 			return;
- 		}
-@@ -1388,8 +1400,10 @@ void i915_hangcheck_elapsed(unsigned long data)
- 		dev_priv->last_instdone1 = instdone1;
- 	}
- 
-+out:
- 	/* Reset timer case chip hangs without another request being added */
--	mod_timer(&dev_priv->hangcheck_timer, jiffies + DRM_I915_HANGCHECK_PERIOD);
-+	mod_timer(&dev_priv->hangcheck_timer,
-+		  jiffies + msecs_to_jiffies(DRM_I915_HANGCHECK_PERIOD));
- }
- 
- /* drm_dma.h hooks
-@@ -1424,8 +1438,7 @@ static int ironlake_irq_postinstall(struct drm_device *dev)
- 	u32 display_mask = DE_MASTER_IRQ_CONTROL | DE_GSE | DE_PCH_EVENT |
- 			   DE_PLANEA_FLIP_DONE | DE_PLANEB_FLIP_DONE;
- 	u32 render_mask = GT_PIPE_NOTIFY | GT_BSD_USER_INTERRUPT;
--	u32 hotplug_mask = SDE_CRT_HOTPLUG | SDE_PORTB_HOTPLUG |
--			   SDE_PORTC_HOTPLUG | SDE_PORTD_HOTPLUG;
-+	u32 hotplug_mask;
- 
- 	dev_priv->irq_mask_reg = ~display_mask;
- 	dev_priv->de_irq_enable_reg = display_mask | DE_PIPEA_VBLANK | DE_PIPEB_VBLANK;
-@@ -1436,20 +1449,35 @@ static int ironlake_irq_postinstall(struct drm_device *dev)
- 	I915_WRITE(DEIER, dev_priv->de_irq_enable_reg);
- 	(void) I915_READ(DEIER);
- 
--	/* Gen6 only needs render pipe_control now */
--	if (IS_GEN6(dev))
--		render_mask = GT_PIPE_NOTIFY;
-+	if (IS_GEN6(dev)) {
-+		render_mask =
-+			GT_PIPE_NOTIFY |
-+			GT_GEN6_BSD_USER_INTERRUPT |
-+			GT_BLT_USER_INTERRUPT;
-+	}
- 
- 	dev_priv->gt_irq_mask_reg = ~render_mask;
- 	dev_priv->gt_irq_enable_reg = render_mask;
- 
- 	I915_WRITE(GTIIR, I915_READ(GTIIR));
- 	I915_WRITE(GTIMR, dev_priv->gt_irq_mask_reg);
--	if (IS_GEN6(dev))
-+	if (IS_GEN6(dev)) {
- 		I915_WRITE(GEN6_RENDER_IMR, ~GEN6_RENDER_PIPE_CONTROL_NOTIFY_INTERRUPT);
-+		I915_WRITE(GEN6_BSD_IMR, ~GEN6_BSD_IMR_USER_INTERRUPT);
-+		I915_WRITE(GEN6_BLITTER_IMR, ~GEN6_BLITTER_USER_INTERRUPT);
-+	}
-+
- 	I915_WRITE(GTIER, dev_priv->gt_irq_enable_reg);
- 	(void) I915_READ(GTIER);
- 
-+	if (HAS_PCH_CPT(dev)) {
-+		hotplug_mask = SDE_CRT_HOTPLUG_CPT | SDE_PORTB_HOTPLUG_CPT  |
-+			       SDE_PORTC_HOTPLUG_CPT | SDE_PORTD_HOTPLUG_CPT ;
-+	} else {
-+		hotplug_mask = SDE_CRT_HOTPLUG | SDE_PORTB_HOTPLUG |
-+			       SDE_PORTC_HOTPLUG | SDE_PORTD_HOTPLUG;
-+	}
-+
- 	dev_priv->pch_irq_mask_reg = ~hotplug_mask;
- 	dev_priv->pch_irq_enable_reg = hotplug_mask;
- 
-@@ -1506,9 +1534,10 @@ int i915_driver_irq_postinstall(struct drm_device *dev)
- 	u32 error_mask;
- 
- 	DRM_INIT_WAITQUEUE(&dev_priv->render_ring.irq_queue);
--
- 	if (HAS_BSD(dev))
- 		DRM_INIT_WAITQUEUE(&dev_priv->bsd_ring.irq_queue);
-+	if (HAS_BLT(dev))
-+		DRM_INIT_WAITQUEUE(&dev_priv->blt_ring.irq_queue);
- 
- 	dev_priv->vblank_pipe = DRM_I915_VBLANK_PIPE_A | DRM_I915_VBLANK_PIPE_B;
- 
-@@ -1578,7 +1607,7 @@ int i915_driver_irq_postinstall(struct drm_device *dev)
- 		I915_WRITE(PORT_HOTPLUG_EN, hotplug_en);
- 	}
- 
--	opregion_enable_asle(dev);
-+	intel_opregion_enable_asle(dev);
- 
- 	return 0;
- }
-diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
-index 4f5e155..25ed911 100644
---- a/drivers/gpu/drm/i915/i915_reg.h
-+++ b/drivers/gpu/drm/i915/i915_reg.h
-@@ -25,52 +25,16 @@
- #ifndef _I915_REG_H_
- #define _I915_REG_H_
- 
-+#define _PIPE(pipe, a, b) ((a) + (pipe)*((b)-(a)))
-+
- /*
-  * The Bridge device's PCI config space has information about the
-  * fb aperture size and the amount of pre-reserved memory.
-+ * This is all handled in the intel-gtt.ko module. i915.ko only
-+ * cares about the vga bit for the vga rbiter.
-  */
- #define INTEL_GMCH_CTRL		0x52
- #define INTEL_GMCH_VGA_DISABLE  (1 << 1)
--#define INTEL_GMCH_ENABLED	0x4
--#define INTEL_GMCH_MEM_MASK	0x1
--#define INTEL_GMCH_MEM_64M	0x1
--#define INTEL_GMCH_MEM_128M	0
--
--#define INTEL_GMCH_GMS_MASK		(0xf << 4)
--#define INTEL_855_GMCH_GMS_DISABLED	(0x0 << 4)
--#define INTEL_855_GMCH_GMS_STOLEN_1M	(0x1 << 4)
--#define INTEL_855_GMCH_GMS_STOLEN_4M	(0x2 << 4)
--#define INTEL_855_GMCH_GMS_STOLEN_8M	(0x3 << 4)
--#define INTEL_855_GMCH_GMS_STOLEN_16M	(0x4 << 4)
--#define INTEL_855_GMCH_GMS_STOLEN_32M	(0x5 << 4)
--
--#define INTEL_915G_GMCH_GMS_STOLEN_48M	(0x6 << 4)
--#define INTEL_915G_GMCH_GMS_STOLEN_64M	(0x7 << 4)
--#define INTEL_GMCH_GMS_STOLEN_128M	(0x8 << 4)
--#define INTEL_GMCH_GMS_STOLEN_256M	(0x9 << 4)
--#define INTEL_GMCH_GMS_STOLEN_96M	(0xa << 4)
--#define INTEL_GMCH_GMS_STOLEN_160M	(0xb << 4)
--#define INTEL_GMCH_GMS_STOLEN_224M	(0xc << 4)
--#define INTEL_GMCH_GMS_STOLEN_352M	(0xd << 4)
--
--#define SNB_GMCH_CTRL	0x50
--#define SNB_GMCH_GMS_STOLEN_MASK	0xF8
--#define SNB_GMCH_GMS_STOLEN_32M		(1 << 3)
--#define SNB_GMCH_GMS_STOLEN_64M		(2 << 3)
--#define SNB_GMCH_GMS_STOLEN_96M		(3 << 3)
--#define SNB_GMCH_GMS_STOLEN_128M	(4 << 3)
--#define SNB_GMCH_GMS_STOLEN_160M	(5 << 3)
--#define SNB_GMCH_GMS_STOLEN_192M	(6 << 3)
--#define SNB_GMCH_GMS_STOLEN_224M	(7 << 3)
--#define SNB_GMCH_GMS_STOLEN_256M	(8 << 3)
--#define SNB_GMCH_GMS_STOLEN_288M	(9 << 3)
--#define SNB_GMCH_GMS_STOLEN_320M	(0xa << 3)
--#define SNB_GMCH_GMS_STOLEN_352M	(0xb << 3)
--#define SNB_GMCH_GMS_STOLEN_384M	(0xc << 3)
--#define SNB_GMCH_GMS_STOLEN_416M	(0xd << 3)
--#define SNB_GMCH_GMS_STOLEN_448M	(0xe << 3)
--#define SNB_GMCH_GMS_STOLEN_480M	(0xf << 3)
--#define SNB_GMCH_GMS_STOLEN_512M	(0x10 << 3)
- 
- /* PCI config space */
- 
-@@ -106,10 +70,13 @@
- #define   I915_GC_RENDER_CLOCK_200_MHZ	(1 << 0)
- #define   I915_GC_RENDER_CLOCK_333_MHZ	(4 << 0)
- #define LBB	0xf4
--#define GDRST 0xc0
--#define  GDRST_FULL	(0<<2)
--#define  GDRST_RENDER	(1<<2)
--#define  GDRST_MEDIA	(3<<2)
-+
-+/* Graphics reset regs */
-+#define I965_GDRST 0xc0 /* PCI config register */
-+#define ILK_GDSR 0x2ca4 /* MCHBAR offset */
-+#define  GRDOM_FULL	(0<<2)
-+#define  GRDOM_RENDER	(1<<2)
-+#define  GRDOM_MEDIA	(3<<2)
- 
- /* VGA stuff */
- 
-@@ -192,11 +159,11 @@
- #define MI_STORE_DWORD_INDEX	MI_INSTR(0x21, 1)
- #define   MI_STORE_DWORD_INDEX_SHIFT 2
- #define MI_LOAD_REGISTER_IMM	MI_INSTR(0x22, 1)
-+#define MI_FLUSH_DW		MI_INSTR(0x26, 2) /* for GEN6 */
- #define MI_BATCH_BUFFER		MI_INSTR(0x30, 1)
- #define   MI_BATCH_NON_SECURE	(1)
- #define   MI_BATCH_NON_SECURE_I965 (1<<8)
- #define MI_BATCH_BUFFER_START	MI_INSTR(0x31, 0)
--
- /*
-  * 3D instructions used by the kernel
-  */
-@@ -249,6 +216,16 @@
- #define   PIPE_CONTROL_GLOBAL_GTT (1<<2) /* in addr dword */
- #define   PIPE_CONTROL_STALL_EN	(1<<1) /* in addr word, Ironlake+ only */
- 
-+
-+/*
-+ * Reset registers
-+ */
-+#define DEBUG_RESET_I830		0x6070
-+#define  DEBUG_RESET_FULL		(1<<7)
-+#define  DEBUG_RESET_RENDER		(1<<8)
-+#define  DEBUG_RESET_DISPLAY		(1<<9)
-+
-+
- /*
-  * Fence registers
-  */
-@@ -283,6 +260,17 @@
- #define PRB0_HEAD	0x02034
- #define PRB0_START	0x02038
- #define PRB0_CTL	0x0203c
-+#define RENDER_RING_BASE	0x02000
-+#define BSD_RING_BASE		0x04000
-+#define GEN6_BSD_RING_BASE	0x12000
-+#define BLT_RING_BASE		0x22000
-+#define RING_TAIL(base)		((base)+0x30)
-+#define RING_HEAD(base)		((base)+0x34)
-+#define RING_START(base)	((base)+0x38)
-+#define RING_CTL(base)		((base)+0x3c)
-+#define RING_HWS_PGA(base)	((base)+0x80)
-+#define RING_HWS_PGA_GEN6(base)	((base)+0x2080)
-+#define RING_ACTHD(base)	((base)+0x74)
- #define   TAIL_ADDR		0x001FFFF8
- #define   HEAD_WRAP_COUNT	0xFFE00000
- #define   HEAD_WRAP_ONE		0x00200000
-@@ -295,6 +283,8 @@
- #define   RING_VALID_MASK	0x00000001
- #define   RING_VALID		0x00000001
- #define   RING_INVALID		0x00000000
-+#define   RING_WAIT_I8XX	(1<<0) /* gen2, PRBx_HEAD */
-+#define   RING_WAIT		(1<<11) /* gen3+, PRBx_CTL */
- #define PRB1_TAIL	0x02040 /* 915+ only */
- #define PRB1_HEAD	0x02044 /* 915+ only */
- #define PRB1_START	0x02048 /* 915+ only */
-@@ -306,7 +296,6 @@
- #define INSTDONE1	0x0207c /* 965+ only */
- #define ACTHD_I965	0x02074
- #define HWS_PGA		0x02080
--#define HWS_PGA_GEN6	0x04080
- #define HWS_ADDRESS_MASK	0xfffff000
- #define HWS_START_ADDRESS_SHIFT	4
- #define PWRCTXA		0x2088 /* 965GM+ only */
-@@ -464,17 +453,17 @@
- #define   GEN6_BLITTER_COMMAND_PARSER_MASTER_ERROR	(1 << 25)
- #define   GEN6_BLITTER_SYNC_STATUS			(1 << 24)
- #define   GEN6_BLITTER_USER_INTERRUPT			(1 << 22)
--/*
-- * BSD (bit stream decoder instruction and interrupt control register defines
-- * (G4X and Ironlake only)
-- */
- 
--#define BSD_RING_TAIL          0x04030
--#define BSD_RING_HEAD          0x04034
--#define BSD_RING_START         0x04038
--#define BSD_RING_CTL           0x0403c
--#define BSD_RING_ACTHD         0x04074
--#define BSD_HWS_PGA            0x04080
-+#define GEN6_BSD_SLEEP_PSMI_CONTROL	0x12050
-+#define   GEN6_BSD_SLEEP_PSMI_CONTROL_RC_ILDL_MESSAGE_MODIFY_MASK	(1 << 16)
-+#define   GEN6_BSD_SLEEP_PSMI_CONTROL_RC_ILDL_MESSAGE_DISABLE		(1 << 0)
-+#define   GEN6_BSD_SLEEP_PSMI_CONTROL_RC_ILDL_MESSAGE_ENABLE		0
-+#define   GEN6_BSD_SLEEP_PSMI_CONTROL_IDLE_INDICATOR			(1 << 3)
-+
-+#define GEN6_BSD_IMR			0x120a8
-+#define   GEN6_BSD_IMR_USER_INTERRUPT	(1 << 12)
-+
-+#define GEN6_BSD_RNCID			0x12198
- 
- /*
-  * Framebuffer compression (915+ only)
-@@ -579,12 +568,51 @@
- # define GPIO_DATA_VAL_IN		(1 << 12)
- # define GPIO_DATA_PULLUP_DISABLE	(1 << 13)
- 
--#define GMBUS0			0x5100
--#define GMBUS1			0x5104
--#define GMBUS2			0x5108
--#define GMBUS3			0x510c
--#define GMBUS4			0x5110
--#define GMBUS5			0x5120
-+#define GMBUS0			0x5100 /* clock/port select */
-+#define   GMBUS_RATE_100KHZ	(0<<8)
-+#define   GMBUS_RATE_50KHZ	(1<<8)
-+#define   GMBUS_RATE_400KHZ	(2<<8) /* reserved on Pineview */
-+#define   GMBUS_RATE_1MHZ	(3<<8) /* reserved on Pineview */
-+#define   GMBUS_HOLD_EXT	(1<<7) /* 300ns hold time, rsvd on Pineview */
-+#define   GMBUS_PORT_DISABLED	0
-+#define   GMBUS_PORT_SSC	1
-+#define   GMBUS_PORT_VGADDC	2
-+#define   GMBUS_PORT_PANEL	3
-+#define   GMBUS_PORT_DPC	4 /* HDMIC */
-+#define   GMBUS_PORT_DPB	5 /* SDVO, HDMIB */
-+				  /* 6 reserved */
-+#define   GMBUS_PORT_DPD	7 /* HDMID */
-+#define   GMBUS_NUM_PORTS       8
-+#define GMBUS1			0x5104 /* command/status */
-+#define   GMBUS_SW_CLR_INT	(1<<31)
-+#define   GMBUS_SW_RDY		(1<<30)
-+#define   GMBUS_ENT		(1<<29) /* enable timeout */
-+#define   GMBUS_CYCLE_NONE	(0<<25)
-+#define   GMBUS_CYCLE_WAIT	(1<<25)
-+#define   GMBUS_CYCLE_INDEX	(2<<25)
-+#define   GMBUS_CYCLE_STOP	(4<<25)
-+#define   GMBUS_BYTE_COUNT_SHIFT 16
-+#define   GMBUS_SLAVE_INDEX_SHIFT 8
-+#define   GMBUS_SLAVE_ADDR_SHIFT 1
-+#define   GMBUS_SLAVE_READ	(1<<0)
-+#define   GMBUS_SLAVE_WRITE	(0<<0)
-+#define GMBUS2			0x5108 /* status */
-+#define   GMBUS_INUSE		(1<<15)
-+#define   GMBUS_HW_WAIT_PHASE	(1<<14)
-+#define   GMBUS_STALL_TIMEOUT	(1<<13)
-+#define   GMBUS_INT		(1<<12)
-+#define   GMBUS_HW_RDY		(1<<11)
-+#define   GMBUS_SATOER		(1<<10)
-+#define   GMBUS_ACTIVE		(1<<9)
-+#define GMBUS3			0x510c /* data buffer bytes 3-0 */
-+#define GMBUS4			0x5110 /* interrupt mask (Pineview+) */
-+#define   GMBUS_SLAVE_TIMEOUT_EN (1<<4)
-+#define   GMBUS_NAK_EN		(1<<3)
-+#define   GMBUS_IDLE_EN		(1<<2)
-+#define   GMBUS_HW_WAIT_EN	(1<<1)
-+#define   GMBUS_HW_RDY_EN	(1<<0)
-+#define GMBUS5			0x5120 /* byte index */
-+#define   GMBUS_2BYTE_INDEX_EN	(1<<31)
- 
- /*
-  * Clock control & power management
-@@ -603,6 +631,7 @@
- #define   VGA1_PD_P1_MASK	(0x1f << 8)
- #define DPLL_A	0x06014
- #define DPLL_B	0x06018
-+#define DPLL(pipe) _PIPE(pipe, DPLL_A, DPLL_B)
- #define   DPLL_VCO_ENABLE		(1 << 31)
- #define   DPLL_DVO_HIGH_SPEED		(1 << 30)
- #define   DPLL_SYNCLOCK_ENABLE		(1 << 29)
-@@ -633,31 +662,6 @@
- #define LVDS			0x61180
- #define LVDS_ON			(1<<31)
- 
--#define ADPA			0x61100
--#define ADPA_DPMS_MASK		(~(3<<10))
--#define ADPA_DPMS_ON		(0<<10)
--#define ADPA_DPMS_SUSPEND	(1<<10)
--#define ADPA_DPMS_STANDBY	(2<<10)
--#define ADPA_DPMS_OFF		(3<<10)
--
--#define RING_TAIL		0x00
--#define TAIL_ADDR		0x001FFFF8
--#define RING_HEAD		0x04
--#define HEAD_WRAP_COUNT		0xFFE00000
--#define HEAD_WRAP_ONE		0x00200000
--#define HEAD_ADDR		0x001FFFFC
--#define RING_START		0x08
--#define START_ADDR		0xFFFFF000
--#define RING_LEN		0x0C
--#define RING_NR_PAGES		0x001FF000
--#define RING_REPORT_MASK	0x00000006
--#define RING_REPORT_64K		0x00000002
--#define RING_REPORT_128K	0x00000004
--#define RING_NO_REPORT		0x00000000
--#define RING_VALID_MASK		0x00000001
--#define RING_VALID		0x00000001
--#define RING_INVALID		0x00000000
--
- /* Scratch pad debug 0 reg:
-  */
- #define   DPLL_FPA01_P1_POST_DIV_MASK_I830	0x001f0000
-@@ -736,10 +740,13 @@
- #define   DPLL_MD_VGA_UDI_MULTIPLIER_MASK	0x0000003f
- #define   DPLL_MD_VGA_UDI_MULTIPLIER_SHIFT	0
- #define DPLL_B_MD 0x06020 /* 965+ only */
-+#define DPLL_MD(pipe) _PIPE(pipe, DPLL_A_MD, DPLL_B_MD)
- #define FPA0	0x06040
- #define FPA1	0x06044
- #define FPB0	0x06048
- #define FPB1	0x0604c
-+#define FP0(pipe) _PIPE(pipe, FPA0, FPB0)
-+#define FP1(pipe) _PIPE(pipe, FPA1, FPB1)
- #define   FP_N_DIV_MASK		0x003f0000
- #define   FP_N_PINEVIEW_DIV_MASK	0x00ff0000
- #define   FP_N_DIV_SHIFT		16
-@@ -760,6 +767,7 @@
- #define   DPLLA_TEST_M_BYPASS		(1 << 2)
- #define   DPLLA_INPUT_BUFFER_ENABLE	(1 << 0)
- #define D_STATE		0x6104
-+#define  DSTATE_GFX_RESET_I830			(1<<6)
- #define  DSTATE_PLL_D3_OFF			(1<<3)
- #define  DSTATE_GFX_CLOCK_GATING		(1<<1)
- #define  DSTATE_DOT_CLOCK_GATING		(1<<0)
-@@ -926,6 +934,8 @@
- #define CLKCFG_MEM_800					(3 << 4)
- #define CLKCFG_MEM_MASK					(7 << 4)
- 
-+#define TSC1			0x11001
-+#define   TSE			(1<<0)
- #define TR1			0x11006
- #define TSFS			0x11020
- #define   TSFS_SLOPE_MASK	0x0000ff00
-@@ -1070,6 +1080,8 @@
- #define   MEMSTAT_SRC_CTL_STDBY 3
- #define RCPREVBSYTUPAVG		0x113b8
- #define RCPREVBSYTDNAVG		0x113bc
-+#define PMMISC			0x11214
-+#define   MCPPCE_EN		(1<<0) /* enable PM_MSG from PCH->MPC */
- #define SDEW			0x1124c
- #define CSIEW0			0x11250
- #define CSIEW1			0x11254
-@@ -1150,6 +1162,15 @@
- #define PIPEBSRC	0x6101c
- #define BCLRPAT_B	0x61020
- 
-+#define HTOTAL(pipe) _PIPE(pipe, HTOTAL_A, HTOTAL_B)
-+#define HBLANK(pipe) _PIPE(pipe, HBLANK_A, HBLANK_B)
-+#define HSYNC(pipe) _PIPE(pipe, HSYNC_A, HSYNC_B)
-+#define VTOTAL(pipe) _PIPE(pipe, VTOTAL_A, VTOTAL_B)
-+#define VBLANK(pipe) _PIPE(pipe, VBLANK_A, VBLANK_B)
-+#define VSYNC(pipe) _PIPE(pipe, VSYNC_A, VSYNC_B)
-+#define PIPESRC(pipe) _PIPE(pipe, PIPEASRC, PIPEBSRC)
-+#define BCLRPAT(pipe) _PIPE(pipe, BCLRPAT_A, BCLRPAT_B)
-+
- /* VGA port control */
- #define ADPA			0x61100
- #define   ADPA_DAC_ENABLE	(1<<31)
-@@ -1173,6 +1194,7 @@
- #define   ADPA_DPMS_STANDBY	(2<<10)
- #define   ADPA_DPMS_OFF		(3<<10)
- 
-+
- /* Hotplug control (945+ only) */
- #define PORT_HOTPLUG_EN		0x61110
- #define   HDMIB_HOTPLUG_INT_EN			(1 << 29)
-@@ -1331,6 +1353,22 @@
- #define   LVDS_B0B3_POWER_DOWN		(0 << 2)
- #define   LVDS_B0B3_POWER_UP		(3 << 2)
- 
-+/* Video Data Island Packet control */
-+#define VIDEO_DIP_DATA		0x61178
-+#define VIDEO_DIP_CTL		0x61170
-+#define   VIDEO_DIP_ENABLE		(1 << 31)
-+#define   VIDEO_DIP_PORT_B		(1 << 29)
-+#define   VIDEO_DIP_PORT_C		(2 << 29)
-+#define   VIDEO_DIP_ENABLE_AVI		(1 << 21)
-+#define   VIDEO_DIP_ENABLE_VENDOR	(2 << 21)
-+#define   VIDEO_DIP_ENABLE_SPD		(8 << 21)
-+#define   VIDEO_DIP_SELECT_AVI		(0 << 19)
-+#define   VIDEO_DIP_SELECT_VENDOR	(1 << 19)
-+#define   VIDEO_DIP_SELECT_SPD		(3 << 19)
-+#define   VIDEO_DIP_FREQ_ONCE		(0 << 16)
-+#define   VIDEO_DIP_FREQ_VSYNC		(1 << 16)
-+#define   VIDEO_DIP_FREQ_2VSYNC		(2 << 16)
-+
- /* Panel power sequencing */
- #define PP_STATUS	0x61200
- #define   PP_ON		(1 << 31)
-@@ -1346,6 +1384,9 @@
- #define   PP_SEQUENCE_ON	(1 << 28)
- #define   PP_SEQUENCE_OFF	(2 << 28)
- #define   PP_SEQUENCE_MASK	0x30000000
-+#define   PP_CYCLE_DELAY_ACTIVE	(1 << 27)
-+#define   PP_SEQUENCE_STATE_ON_IDLE (1 << 3)
-+#define   PP_SEQUENCE_STATE_MASK 0x0000000f
- #define PP_CONTROL	0x61204
- #define   POWER_TARGET_ON	(1 << 0)
- #define PP_ON_DELAYS	0x61208
-@@ -1481,6 +1522,7 @@
- # define TV_TEST_MODE_MASK		(7 << 0)
- 
- #define TV_DAC			0x68004
-+# define TV_DAC_SAVE		0x00ffff00
- /**
-  * Reports that DAC state change logic has reported change (RO).
-  *
-@@ -2075,29 +2117,35 @@
- 
- /* Display & cursor control */
- 
--/* dithering flag on Ironlake */
--#define PIPE_ENABLE_DITHER		(1 << 4)
--#define PIPE_DITHER_TYPE_MASK		(3 << 2)
--#define PIPE_DITHER_TYPE_SPATIAL	(0 << 2)
--#define PIPE_DITHER_TYPE_ST01		(1 << 2)
- /* Pipe A */
- #define PIPEADSL		0x70000
--#define   DSL_LINEMASK	       	0x00000fff
-+#define   DSL_LINEMASK		0x00000fff
- #define PIPEACONF		0x70008
--#define   PIPEACONF_ENABLE	(1<<31)
--#define   PIPEACONF_DISABLE	0
--#define   PIPEACONF_DOUBLE_WIDE	(1<<30)
-+#define   PIPECONF_ENABLE	(1<<31)
-+#define   PIPECONF_DISABLE	0
-+#define   PIPECONF_DOUBLE_WIDE	(1<<30)
- #define   I965_PIPECONF_ACTIVE	(1<<30)
--#define   PIPEACONF_SINGLE_WIDE	0
--#define   PIPEACONF_PIPE_UNLOCKED 0
--#define   PIPEACONF_PIPE_LOCKED	(1<<25)
--#define   PIPEACONF_PALETTE	0
--#define   PIPEACONF_GAMMA		(1<<24)
-+#define   PIPECONF_SINGLE_WIDE	0
-+#define   PIPECONF_PIPE_UNLOCKED 0
-+#define   PIPECONF_PIPE_LOCKED	(1<<25)
-+#define   PIPECONF_PALETTE	0
-+#define   PIPECONF_GAMMA		(1<<24)
- #define   PIPECONF_FORCE_BORDER	(1<<25)
- #define   PIPECONF_PROGRESSIVE	(0 << 21)
- #define   PIPECONF_INTERLACE_W_FIELD_INDICATION	(6 << 21)
- #define   PIPECONF_INTERLACE_FIELD_0_ONLY		(7 << 21)
- #define   PIPECONF_CXSR_DOWNCLOCK	(1<<16)
-+#define   PIPECONF_BPP_MASK	(0x000000e0)
-+#define   PIPECONF_BPP_8	(0<<5)
-+#define   PIPECONF_BPP_10	(1<<5)
-+#define   PIPECONF_BPP_6	(2<<5)
-+#define   PIPECONF_BPP_12	(3<<5)
-+#define   PIPECONF_DITHER_EN	(1<<4)
-+#define   PIPECONF_DITHER_TYPE_MASK (0x0000000c)
-+#define   PIPECONF_DITHER_TYPE_SP (0<<2)
-+#define   PIPECONF_DITHER_TYPE_ST1 (1<<2)
-+#define   PIPECONF_DITHER_TYPE_ST2 (2<<2)
-+#define   PIPECONF_DITHER_TYPE_TEMP (3<<2)
- #define PIPEASTAT		0x70024
- #define   PIPE_FIFO_UNDERRUN_STATUS		(1UL<<31)
- #define   PIPE_CRC_ERROR_ENABLE			(1UL<<29)
-@@ -2128,12 +2176,15 @@
- #define   PIPE_START_VBLANK_INTERRUPT_STATUS	(1UL<<2) /* 965 or later */
- #define   PIPE_VBLANK_INTERRUPT_STATUS		(1UL<<1)
- #define   PIPE_OVERLAY_UPDATED_STATUS		(1UL<<0)
--#define   PIPE_BPC_MASK 			(7 << 5) /* Ironlake */
-+#define   PIPE_BPC_MASK				(7 << 5) /* Ironlake */
- #define   PIPE_8BPC				(0 << 5)
- #define   PIPE_10BPC				(1 << 5)
- #define   PIPE_6BPC				(2 << 5)
- #define   PIPE_12BPC				(3 << 5)
- 
-+#define PIPECONF(pipe) _PIPE(pipe, PIPEACONF, PIPEBCONF)
-+#define PIPEDSL(pipe)  _PIPE(pipe, PIPEADSL, PIPEBDSL)
-+
- #define DSPARB			0x70030
- #define   DSPARB_CSTART_MASK	(0x7f << 7)
- #define   DSPARB_CSTART_SHIFT	7
-@@ -2206,8 +2257,8 @@
- #define  WM1_LP_SR_EN		(1<<31)
- #define  WM1_LP_LATENCY_SHIFT	24
- #define  WM1_LP_LATENCY_MASK	(0x7f<<24)
--#define  WM1_LP_FBC_LP1_MASK	(0xf<<20)
--#define  WM1_LP_FBC_LP1_SHIFT	20
-+#define  WM1_LP_FBC_MASK	(0xf<<20)
-+#define  WM1_LP_FBC_SHIFT	20
- #define  WM1_LP_SR_MASK		(0x1ff<<8)
- #define  WM1_LP_SR_SHIFT	8
- #define  WM1_LP_CURSOR_MASK	(0x3f)
-@@ -2333,6 +2384,14 @@
- #define DSPASURF		0x7019C /* 965+ only */
- #define DSPATILEOFF		0x701A4 /* 965+ only */
- 
-+#define DSPCNTR(plane) _PIPE(plane, DSPACNTR, DSPBCNTR)
-+#define DSPADDR(plane) _PIPE(plane, DSPAADDR, DSPBADDR)
-+#define DSPSTRIDE(plane) _PIPE(plane, DSPASTRIDE, DSPBSTRIDE)
-+#define DSPPOS(plane) _PIPE(plane, DSPAPOS, DSPBPOS)
-+#define DSPSIZE(plane) _PIPE(plane, DSPASIZE, DSPBSIZE)
-+#define DSPSURF(plane) _PIPE(plane, DSPASURF, DSPBSURF)
-+#define DSPTILEOFF(plane) _PIPE(plane, DSPATILEOFF, DSPBTILEOFF)
-+
- /* VBIOS flags */
- #define SWF00			0x71410
- #define SWF01			0x71414
-@@ -2397,6 +2456,7 @@
- #define  RR_HW_HIGH_POWER_FRAMES_MASK   0xff00
- 
- #define FDI_PLL_BIOS_0  0x46000
-+#define  FDI_PLL_FB_CLOCK_MASK  0xff
- #define FDI_PLL_BIOS_1  0x46004
- #define FDI_PLL_BIOS_2  0x46008
- #define DISPLAY_PORT_PLL_BIOS_0         0x4600c
-@@ -2420,46 +2480,47 @@
- #define PIPEA_DATA_M1           0x60030
- #define  TU_SIZE(x)             (((x)-1) << 25) /* default size 64 */
- #define  TU_SIZE_MASK           0x7e000000
--#define  PIPEA_DATA_M1_OFFSET   0
-+#define  PIPE_DATA_M1_OFFSET    0
- #define PIPEA_DATA_N1           0x60034
--#define  PIPEA_DATA_N1_OFFSET   0
-+#define  PIPE_DATA_N1_OFFSET    0
- 
- #define PIPEA_DATA_M2           0x60038
--#define  PIPEA_DATA_M2_OFFSET   0
-+#define  PIPE_DATA_M2_OFFSET    0
- #define PIPEA_DATA_N2           0x6003c
--#define  PIPEA_DATA_N2_OFFSET   0
-+#define  PIPE_DATA_N2_OFFSET    0
- 
- #define PIPEA_LINK_M1           0x60040
--#define  PIPEA_LINK_M1_OFFSET   0
-+#define  PIPE_LINK_M1_OFFSET    0
- #define PIPEA_LINK_N1           0x60044
--#define  PIPEA_LINK_N1_OFFSET   0
-+#define  PIPE_LINK_N1_OFFSET    0
- 
- #define PIPEA_LINK_M2           0x60048
--#define  PIPEA_LINK_M2_OFFSET   0
-+#define  PIPE_LINK_M2_OFFSET    0
- #define PIPEA_LINK_N2           0x6004c
--#define  PIPEA_LINK_N2_OFFSET   0
-+#define  PIPE_LINK_N2_OFFSET    0
- 
- /* PIPEB timing regs are same start from 0x61000 */
- 
- #define PIPEB_DATA_M1           0x61030
--#define  PIPEB_DATA_M1_OFFSET   0
- #define PIPEB_DATA_N1           0x61034
--#define  PIPEB_DATA_N1_OFFSET   0
- 
- #define PIPEB_DATA_M2           0x61038
--#define  PIPEB_DATA_M2_OFFSET   0
- #define PIPEB_DATA_N2           0x6103c
--#define  PIPEB_DATA_N2_OFFSET   0
- 
- #define PIPEB_LINK_M1           0x61040
--#define  PIPEB_LINK_M1_OFFSET   0
- #define PIPEB_LINK_N1           0x61044
--#define  PIPEB_LINK_N1_OFFSET   0
- 
- #define PIPEB_LINK_M2           0x61048
--#define  PIPEB_LINK_M2_OFFSET   0
- #define PIPEB_LINK_N2           0x6104c
--#define  PIPEB_LINK_N2_OFFSET   0
-+
-+#define PIPE_DATA_M1(pipe) _PIPE(pipe, PIPEA_DATA_M1, PIPEB_DATA_M1)
-+#define PIPE_DATA_N1(pipe) _PIPE(pipe, PIPEA_DATA_N1, PIPEB_DATA_N1)
-+#define PIPE_DATA_M2(pipe) _PIPE(pipe, PIPEA_DATA_M2, PIPEB_DATA_M2)
-+#define PIPE_DATA_N2(pipe) _PIPE(pipe, PIPEA_DATA_N2, PIPEB_DATA_N2)
-+#define PIPE_LINK_M1(pipe) _PIPE(pipe, PIPEA_LINK_M1, PIPEB_LINK_M1)
-+#define PIPE_LINK_N1(pipe) _PIPE(pipe, PIPEA_LINK_N1, PIPEB_LINK_N1)
-+#define PIPE_LINK_M2(pipe) _PIPE(pipe, PIPEA_LINK_M2, PIPEB_LINK_M2)
-+#define PIPE_LINK_N2(pipe) _PIPE(pipe, PIPEA_LINK_N2, PIPEB_LINK_N2)
- 
- /* CPU panel fitter */
- #define PFA_CTL_1               0x68080
-@@ -2516,7 +2577,8 @@
- #define GT_SYNC_STATUS          (1 << 2)
- #define GT_USER_INTERRUPT       (1 << 0)
- #define GT_BSD_USER_INTERRUPT   (1 << 5)
--
-+#define GT_GEN6_BSD_USER_INTERRUPT	(1 << 12)
-+#define GT_BLT_USER_INTERRUPT	(1 << 22)
- 
- #define GTISR   0x44010
- #define GTIMR   0x44014
-@@ -2551,6 +2613,10 @@
- #define SDE_PORTD_HOTPLUG_CPT	(1 << 23)
- #define SDE_PORTC_HOTPLUG_CPT	(1 << 22)
- #define SDE_PORTB_HOTPLUG_CPT	(1 << 21)
-+#define SDE_HOTPLUG_MASK_CPT	(SDE_CRT_HOTPLUG_CPT |		\
-+				 SDE_PORTD_HOTPLUG_CPT |	\
-+				 SDE_PORTC_HOTPLUG_CPT |	\
-+				 SDE_PORTB_HOTPLUG_CPT)
- 
- #define SDEISR  0xc4000
- #define SDEIMR  0xc4004
-@@ -2600,11 +2666,14 @@
- 
- #define PCH_DPLL_A              0xc6014
- #define PCH_DPLL_B              0xc6018
-+#define PCH_DPLL(pipe) _PIPE(pipe, PCH_DPLL_A, PCH_DPLL_B)
- 
- #define PCH_FPA0                0xc6040
- #define PCH_FPA1                0xc6044
- #define PCH_FPB0                0xc6048
- #define PCH_FPB1                0xc604c
-+#define PCH_FP0(pipe) _PIPE(pipe, PCH_FPA0, PCH_FPB0)
-+#define PCH_FP1(pipe) _PIPE(pipe, PCH_FPA1, PCH_FPB1)
- 
- #define PCH_DPLL_TEST           0xc606c
- 
-@@ -2690,6 +2759,13 @@
- #define TRANS_VBLANK_B          0xe1010
- #define TRANS_VSYNC_B           0xe1014
- 
-+#define TRANS_HTOTAL(pipe) _PIPE(pipe, TRANS_HTOTAL_A, TRANS_HTOTAL_B)
-+#define TRANS_HBLANK(pipe) _PIPE(pipe, TRANS_HBLANK_A, TRANS_HBLANK_B)
-+#define TRANS_HSYNC(pipe) _PIPE(pipe, TRANS_HSYNC_A, TRANS_HSYNC_B)
-+#define TRANS_VTOTAL(pipe) _PIPE(pipe, TRANS_VTOTAL_A, TRANS_VTOTAL_B)
-+#define TRANS_VBLANK(pipe) _PIPE(pipe, TRANS_VBLANK_A, TRANS_VBLANK_B)
-+#define TRANS_VSYNC(pipe) _PIPE(pipe, TRANS_VSYNC_A, TRANS_VSYNC_B)
-+
- #define TRANSB_DATA_M1          0xe1030
- #define TRANSB_DATA_N1          0xe1034
- #define TRANSB_DATA_M2          0xe1038
-@@ -2701,6 +2777,7 @@
- 
- #define TRANSACONF              0xf0008
- #define TRANSBCONF              0xf1008
-+#define TRANSCONF(plane) _PIPE(plane, TRANSACONF, TRANSBCONF)
- #define  TRANS_DISABLE          (0<<31)
- #define  TRANS_ENABLE           (1<<31)
- #define  TRANS_STATE_MASK       (1<<30)
-@@ -2721,10 +2798,15 @@
- #define FDI_RXA_CHICKEN         0xc200c
- #define FDI_RXB_CHICKEN         0xc2010
- #define  FDI_RX_PHASE_SYNC_POINTER_ENABLE       (1)
-+#define FDI_RX_CHICKEN(pipe) _PIPE(pipe, FDI_RXA_CHICKEN, FDI_RXB_CHICKEN)
-+
-+#define SOUTH_DSPCLK_GATE_D	0xc2020
-+#define  PCH_DPLSUNIT_CLOCK_GATE_DISABLE (1<<29)
- 
- /* CPU: FDI_TX */
- #define FDI_TXA_CTL             0x60100
- #define FDI_TXB_CTL             0x61100
-+#define FDI_TX_CTL(pipe) _PIPE(pipe, FDI_TXA_CTL, FDI_TXB_CTL)
- #define  FDI_TX_DISABLE         (0<<31)
- #define  FDI_TX_ENABLE          (1<<31)
- #define  FDI_LINK_TRAIN_PATTERN_1       (0<<28)
-@@ -2766,8 +2848,8 @@
- /* FDI_RX, FDI_X is hard-wired to Transcoder_X */
- #define FDI_RXA_CTL             0xf000c
- #define FDI_RXB_CTL             0xf100c
-+#define FDI_RX_CTL(pipe) _PIPE(pipe, FDI_RXA_CTL, FDI_RXB_CTL)
- #define  FDI_RX_ENABLE          (1<<31)
--#define  FDI_RX_DISABLE         (0<<31)
- /* train, dp width same as FDI_TX */
- #define  FDI_DP_PORT_WIDTH_X8           (7<<19)
- #define  FDI_8BPC                       (0<<16)
-@@ -2782,8 +2864,7 @@
- #define  FDI_FS_ERR_REPORT_ENABLE       (1<<9)
- #define  FDI_FE_ERR_REPORT_ENABLE       (1<<8)
- #define  FDI_RX_ENHANCE_FRAME_ENABLE    (1<<6)
--#define  FDI_SEL_RAWCLK                 (0<<4)
--#define  FDI_SEL_PCDCLK                 (1<<4)
-+#define  FDI_PCDCLK	                (1<<4)
- /* CPT */
- #define  FDI_AUTO_TRAINING			(1<<10)
- #define  FDI_LINK_TRAIN_PATTERN_1_CPT		(0<<8)
-@@ -2798,6 +2879,9 @@
- #define FDI_RXA_TUSIZE2         0xf0038
- #define FDI_RXB_TUSIZE1         0xf1030
- #define FDI_RXB_TUSIZE2         0xf1038
-+#define FDI_RX_MISC(pipe) _PIPE(pipe, FDI_RXA_MISC, FDI_RXB_MISC)
-+#define FDI_RX_TUSIZE1(pipe) _PIPE(pipe, FDI_RXA_TUSIZE1, FDI_RXB_TUSIZE1)
-+#define FDI_RX_TUSIZE2(pipe) _PIPE(pipe, FDI_RXA_TUSIZE2, FDI_RXB_TUSIZE2)
- 
- /* FDI_RX interrupt register format */
- #define FDI_RX_INTER_LANE_ALIGN         (1<<10)
-@@ -2816,6 +2900,8 @@
- #define FDI_RXA_IMR             0xf0018
- #define FDI_RXB_IIR             0xf1014
- #define FDI_RXB_IMR             0xf1018
-+#define FDI_RX_IIR(pipe) _PIPE(pipe, FDI_RXA_IIR, FDI_RXB_IIR)
-+#define FDI_RX_IMR(pipe) _PIPE(pipe, FDI_RXA_IMR, FDI_RXB_IMR)
- 
- #define FDI_PLL_CTL_1           0xfe000
- #define FDI_PLL_CTL_2           0xfe004
-@@ -2935,6 +3021,7 @@
- #define TRANS_DP_CTL_A		0xe0300
- #define TRANS_DP_CTL_B		0xe1300
- #define TRANS_DP_CTL_C		0xe2300
-+#define TRANS_DP_CTL(pipe)	(TRANS_DP_CTL_A + (pipe) * 0x01000)
- #define  TRANS_DP_OUTPUT_ENABLE	(1<<31)
- #define  TRANS_DP_PORT_SEL_B	(0<<29)
- #define  TRANS_DP_PORT_SEL_C	(1<<29)
-diff --git a/drivers/gpu/drm/i915/i915_suspend.c b/drivers/gpu/drm/i915/i915_suspend.c
-index 31f0858..454c064 100644
---- a/drivers/gpu/drm/i915/i915_suspend.c
-+++ b/drivers/gpu/drm/i915/i915_suspend.c
-@@ -256,7 +256,7 @@ static void i915_save_modeset_reg(struct drm_device *dev)
- 		dev_priv->saveFPA1 = I915_READ(FPA1);
- 		dev_priv->saveDPLL_A = I915_READ(DPLL_A);
- 	}
--	if (IS_I965G(dev) && !HAS_PCH_SPLIT(dev))
-+	if (INTEL_INFO(dev)->gen >= 4 && !HAS_PCH_SPLIT(dev))
- 		dev_priv->saveDPLL_A_MD = I915_READ(DPLL_A_MD);
- 	dev_priv->saveHTOTAL_A = I915_READ(HTOTAL_A);
- 	dev_priv->saveHBLANK_A = I915_READ(HBLANK_A);
-@@ -294,7 +294,7 @@ static void i915_save_modeset_reg(struct drm_device *dev)
- 	dev_priv->saveDSPASIZE = I915_READ(DSPASIZE);
- 	dev_priv->saveDSPAPOS = I915_READ(DSPAPOS);
- 	dev_priv->saveDSPAADDR = I915_READ(DSPAADDR);
--	if (IS_I965G(dev)) {
-+	if (INTEL_INFO(dev)->gen >= 4) {
- 		dev_priv->saveDSPASURF = I915_READ(DSPASURF);
- 		dev_priv->saveDSPATILEOFF = I915_READ(DSPATILEOFF);
- 	}
-@@ -313,7 +313,7 @@ static void i915_save_modeset_reg(struct drm_device *dev)
- 		dev_priv->saveFPB1 = I915_READ(FPB1);
- 		dev_priv->saveDPLL_B = I915_READ(DPLL_B);
- 	}
--	if (IS_I965G(dev) && !HAS_PCH_SPLIT(dev))
-+	if (INTEL_INFO(dev)->gen >= 4 && !HAS_PCH_SPLIT(dev))
- 		dev_priv->saveDPLL_B_MD = I915_READ(DPLL_B_MD);
- 	dev_priv->saveHTOTAL_B = I915_READ(HTOTAL_B);
- 	dev_priv->saveHBLANK_B = I915_READ(HBLANK_B);
-@@ -351,7 +351,7 @@ static void i915_save_modeset_reg(struct drm_device *dev)
- 	dev_priv->saveDSPBSIZE = I915_READ(DSPBSIZE);
- 	dev_priv->saveDSPBPOS = I915_READ(DSPBPOS);
- 	dev_priv->saveDSPBADDR = I915_READ(DSPBADDR);
--	if (IS_I965GM(dev) || IS_GM45(dev)) {
-+	if (INTEL_INFO(dev)->gen >= 4) {
- 		dev_priv->saveDSPBSURF = I915_READ(DSPBSURF);
- 		dev_priv->saveDSPBTILEOFF = I915_READ(DSPBTILEOFF);
- 	}
-@@ -404,7 +404,7 @@ static void i915_restore_modeset_reg(struct drm_device *dev)
- 	I915_WRITE(dpll_a_reg, dev_priv->saveDPLL_A);
- 	POSTING_READ(dpll_a_reg);
- 	udelay(150);
--	if (IS_I965G(dev) && !HAS_PCH_SPLIT(dev)) {
-+	if (INTEL_INFO(dev)->gen >= 4 && !HAS_PCH_SPLIT(dev)) {
- 		I915_WRITE(DPLL_A_MD, dev_priv->saveDPLL_A_MD);
- 		POSTING_READ(DPLL_A_MD);
- 	}
-@@ -448,7 +448,7 @@ static void i915_restore_modeset_reg(struct drm_device *dev)
- 	I915_WRITE(PIPEASRC, dev_priv->savePIPEASRC);
- 	I915_WRITE(DSPAADDR, dev_priv->saveDSPAADDR);
- 	I915_WRITE(DSPASTRIDE, dev_priv->saveDSPASTRIDE);
--	if (IS_I965G(dev)) {
-+	if (INTEL_INFO(dev)->gen >= 4) {
- 		I915_WRITE(DSPASURF, dev_priv->saveDSPASURF);
- 		I915_WRITE(DSPATILEOFF, dev_priv->saveDSPATILEOFF);
- 	}
-@@ -473,7 +473,7 @@ static void i915_restore_modeset_reg(struct drm_device *dev)
- 	I915_WRITE(dpll_b_reg, dev_priv->saveDPLL_B);
- 	POSTING_READ(dpll_b_reg);
- 	udelay(150);
--	if (IS_I965G(dev) && !HAS_PCH_SPLIT(dev)) {
-+	if (INTEL_INFO(dev)->gen >= 4 && !HAS_PCH_SPLIT(dev)) {
- 		I915_WRITE(DPLL_B_MD, dev_priv->saveDPLL_B_MD);
- 		POSTING_READ(DPLL_B_MD);
- 	}
-@@ -517,7 +517,7 @@ static void i915_restore_modeset_reg(struct drm_device *dev)
- 	I915_WRITE(PIPEBSRC, dev_priv->savePIPEBSRC);
- 	I915_WRITE(DSPBADDR, dev_priv->saveDSPBADDR);
- 	I915_WRITE(DSPBSTRIDE, dev_priv->saveDSPBSTRIDE);
--	if (IS_I965G(dev)) {
-+	if (INTEL_INFO(dev)->gen >= 4) {
- 		I915_WRITE(DSPBSURF, dev_priv->saveDSPBSURF);
- 		I915_WRITE(DSPBTILEOFF, dev_priv->saveDSPBTILEOFF);
- 	}
-@@ -550,7 +550,7 @@ void i915_save_display(struct drm_device *dev)
- 	dev_priv->saveCURBCNTR = I915_READ(CURBCNTR);
- 	dev_priv->saveCURBPOS = I915_READ(CURBPOS);
- 	dev_priv->saveCURBBASE = I915_READ(CURBBASE);
--	if (!IS_I9XX(dev))
-+	if (IS_GEN2(dev))
- 		dev_priv->saveCURSIZE = I915_READ(CURSIZE);
- 
- 	/* CRT state */
-@@ -573,7 +573,7 @@ void i915_save_display(struct drm_device *dev)
- 		dev_priv->savePFIT_PGM_RATIOS = I915_READ(PFIT_PGM_RATIOS);
- 		dev_priv->saveBLC_PWM_CTL = I915_READ(BLC_PWM_CTL);
- 		dev_priv->saveBLC_HIST_CTL = I915_READ(BLC_HIST_CTL);
--		if (IS_I965G(dev))
-+		if (INTEL_INFO(dev)->gen >= 4)
- 			dev_priv->saveBLC_PWM_CTL2 = I915_READ(BLC_PWM_CTL2);
- 		if (IS_MOBILE(dev) && !IS_I830(dev))
- 			dev_priv->saveLVDS = I915_READ(LVDS);
-@@ -664,7 +664,7 @@ void i915_restore_display(struct drm_device *dev)
- 	I915_WRITE(CURBPOS, dev_priv->saveCURBPOS);
- 	I915_WRITE(CURBCNTR, dev_priv->saveCURBCNTR);
- 	I915_WRITE(CURBBASE, dev_priv->saveCURBBASE);
--	if (!IS_I9XX(dev))
-+	if (IS_GEN2(dev))
- 		I915_WRITE(CURSIZE, dev_priv->saveCURSIZE);
- 
- 	/* CRT state */
-@@ -674,7 +674,7 @@ void i915_restore_display(struct drm_device *dev)
- 		I915_WRITE(ADPA, dev_priv->saveADPA);
- 
- 	/* LVDS state */
--	if (IS_I965G(dev) && !HAS_PCH_SPLIT(dev))
-+	if (INTEL_INFO(dev)->gen >= 4 && !HAS_PCH_SPLIT(dev))
- 		I915_WRITE(BLC_PWM_CTL2, dev_priv->saveBLC_PWM_CTL2);
- 
- 	if (HAS_PCH_SPLIT(dev)) {
-@@ -862,8 +862,10 @@ int i915_restore_state(struct drm_device *dev)
- 	/* Clock gating state */
- 	intel_init_clock_gating(dev);
- 
--	if (HAS_PCH_SPLIT(dev))
-+	if (HAS_PCH_SPLIT(dev)) {
- 		ironlake_enable_drps(dev);
-+		intel_init_emon(dev);
-+	}
- 
- 	/* Cache mode state */
- 	I915_WRITE (CACHE_MODE_0, dev_priv->saveCACHE_MODE_0 | 0xffff0000);
-@@ -878,9 +880,7 @@ int i915_restore_state(struct drm_device *dev)
- 	for (i = 0; i < 3; i++)
- 		I915_WRITE(SWF30 + (i << 2), dev_priv->saveSWF2[i]);
- 
--	/* I2C state */
--	intel_i2c_reset_gmbus(dev);
-+	intel_i2c_reset(dev);
- 
- 	return 0;
- }
--
-diff --git a/drivers/gpu/drm/i915/intel_acpi.c b/drivers/gpu/drm/i915/intel_acpi.c
-new file mode 100644
-index 0000000..65c88f9
---- /dev/null
-+++ b/drivers/gpu/drm/i915/intel_acpi.c
-@@ -0,0 +1,286 @@
-+/*
-+ * Intel ACPI functions
-+ *
-+ * _DSM related code stolen from nouveau_acpi.c.
-+ */
-+#include <linux/pci.h>
-+#include <linux/acpi.h>
-+#include <linux/vga_switcheroo.h>
-+#include <acpi/acpi_drivers.h>
-+
-+#include "drmP.h"
-+
-+#define INTEL_DSM_REVISION_ID 1 /* For Calpella anyway... */
-+
-+#define INTEL_DSM_FN_SUPPORTED_FUNCTIONS 0 /* No args */
-+#define INTEL_DSM_FN_PLATFORM_MUX_INFO 1 /* No args */
-+
-+static struct intel_dsm_priv {
-+	acpi_handle dhandle;
-+} intel_dsm_priv;
-+
-+static const u8 intel_dsm_guid[] = {
-+	0xd3, 0x73, 0xd8, 0x7e,
-+	0xd0, 0xc2,
-+	0x4f, 0x4e,
-+	0xa8, 0x54,
-+	0x0f, 0x13, 0x17, 0xb0, 0x1c, 0x2c
-+};
-+
-+static int intel_dsm(acpi_handle handle, int func, int arg)
-+{
-+	struct acpi_buffer output = { ACPI_ALLOCATE_BUFFER, NULL };
-+	struct acpi_object_list input;
-+	union acpi_object params[4];
-+	union acpi_object *obj;
-+	u32 result;
-+	int ret = 0;
-+
-+	input.count = 4;
-+	input.pointer = params;
-+	params[0].type = ACPI_TYPE_BUFFER;
-+	params[0].buffer.length = sizeof(intel_dsm_guid);
-+	params[0].buffer.pointer = (char *)intel_dsm_guid;
-+	params[1].type = ACPI_TYPE_INTEGER;
-+	params[1].integer.value = INTEL_DSM_REVISION_ID;
-+	params[2].type = ACPI_TYPE_INTEGER;
-+	params[2].integer.value = func;
-+	params[3].type = ACPI_TYPE_INTEGER;
-+	params[3].integer.value = arg;
-+
-+	ret = acpi_evaluate_object(handle, "_DSM", &input, &output);
-+	if (ret) {
-+		DRM_DEBUG_DRIVER("failed to evaluate _DSM: %d\n", ret);
-+		return ret;
-+	}
-+
-+	obj = (union acpi_object *)output.pointer;
-+
-+	result = 0;
-+	switch (obj->type) {
-+	case ACPI_TYPE_INTEGER:
-+		result = obj->integer.value;
-+		break;
-+
-+	case ACPI_TYPE_BUFFER:
-+		if (obj->buffer.length == 4) {
-+			result =(obj->buffer.pointer[0] |
-+				(obj->buffer.pointer[1] <<  8) |
-+				(obj->buffer.pointer[2] << 16) |
-+				(obj->buffer.pointer[3] << 24));
-+			break;
-+		}
-+	default:
-+		ret = -EINVAL;
-+		break;
-+	}
-+	if (result == 0x80000002)
-+		ret = -ENODEV;
-+
-+	kfree(output.pointer);
-+	return ret;
-+}
-+
-+static char *intel_dsm_port_name(u8 id)
-+{
-+	switch (id) {
-+	case 0:
-+		return "Reserved";
-+	case 1:
-+		return "Analog VGA";
-+	case 2:
-+		return "LVDS";
-+	case 3:
-+		return "Reserved";
-+	case 4:
-+		return "HDMI/DVI_B";
-+	case 5:
-+		return "HDMI/DVI_C";
-+	case 6:
-+		return "HDMI/DVI_D";
-+	case 7:
-+		return "DisplayPort_A";
-+	case 8:
-+		return "DisplayPort_B";
-+	case 9:
-+		return "DisplayPort_C";
-+	case 0xa:
-+		return "DisplayPort_D";
-+	case 0xb:
-+	case 0xc:
-+	case 0xd:
-+		return "Reserved";
-+	case 0xe:
-+		return "WiDi";
-+	default:
-+		return "bad type";
-+	}
-+}
-+
-+static char *intel_dsm_mux_type(u8 type)
-+{
-+	switch (type) {
-+	case 0:
-+		return "unknown";
-+	case 1:
-+		return "No MUX, iGPU only";
-+	case 2:
-+		return "No MUX, dGPU only";
-+	case 3:
-+		return "MUXed between iGPU and dGPU";
-+	default:
-+		return "bad type";
-+	}
-+}
-+
-+static void intel_dsm_platform_mux_info(void)
-+{
-+	struct acpi_buffer output = { ACPI_ALLOCATE_BUFFER, NULL };
-+	struct acpi_object_list input;
-+	union acpi_object params[4];
-+	union acpi_object *pkg;
-+	int i, ret;
-+
-+	input.count = 4;
-+	input.pointer = params;
-+	params[0].type = ACPI_TYPE_BUFFER;
-+	params[0].buffer.length = sizeof(intel_dsm_guid);
-+	params[0].buffer.pointer = (char *)intel_dsm_guid;
-+	params[1].type = ACPI_TYPE_INTEGER;
-+	params[1].integer.value = INTEL_DSM_REVISION_ID;
-+	params[2].type = ACPI_TYPE_INTEGER;
-+	params[2].integer.value = INTEL_DSM_FN_PLATFORM_MUX_INFO;
-+	params[3].type = ACPI_TYPE_INTEGER;
-+	params[3].integer.value = 0;
-+
-+	ret = acpi_evaluate_object(intel_dsm_priv.dhandle, "_DSM", &input,
-+				   &output);
-+	if (ret) {
-+		DRM_DEBUG_DRIVER("failed to evaluate _DSM: %d\n", ret);
-+		goto out;
-+	}
-+
-+	pkg = (union acpi_object *)output.pointer;
-+
-+	if (pkg->type == ACPI_TYPE_PACKAGE) {
-+		union acpi_object *connector_count = &pkg->package.elements[0];
-+		DRM_DEBUG_DRIVER("MUX info connectors: %lld\n",
-+			  (unsigned long long)connector_count->integer.value);
-+		for (i = 1; i < pkg->package.count; i++) {
-+			union acpi_object *obj = &pkg->package.elements[i];
-+			union acpi_object *connector_id =
-+				&obj->package.elements[0];
-+			union acpi_object *info = &obj->package.elements[1];
-+			DRM_DEBUG_DRIVER("Connector id: 0x%016llx\n",
-+				  (unsigned long long)connector_id->integer.value);
-+			DRM_DEBUG_DRIVER("  port id: %s\n",
-+			       intel_dsm_port_name(info->buffer.pointer[0]));
-+			DRM_DEBUG_DRIVER("  display mux info: %s\n",
-+			       intel_dsm_mux_type(info->buffer.pointer[1]));
-+			DRM_DEBUG_DRIVER("  aux/dc mux info: %s\n",
-+			       intel_dsm_mux_type(info->buffer.pointer[2]));
-+			DRM_DEBUG_DRIVER("  hpd mux info: %s\n",
-+			       intel_dsm_mux_type(info->buffer.pointer[3]));
-+		}
-+	} else {
-+		DRM_ERROR("MUX INFO call failed\n");
-+	}
-+
-+out:
-+	kfree(output.pointer);
-+}
-+
-+static int intel_dsm_switchto(enum vga_switcheroo_client_id id)
-+{
-+	return 0;
-+}
-+
-+static int intel_dsm_power_state(enum vga_switcheroo_client_id id,
-+				 enum vga_switcheroo_state state)
-+{
-+	return 0;
-+}
-+
-+static int intel_dsm_init(void)
-+{
-+	return 0;
-+}
-+
-+static int intel_dsm_get_client_id(struct pci_dev *pdev)
-+{
-+	if (intel_dsm_priv.dhandle == DEVICE_ACPI_HANDLE(&pdev->dev))
-+		return VGA_SWITCHEROO_IGD;
-+	else
-+		return VGA_SWITCHEROO_DIS;
-+}
-+
-+static struct vga_switcheroo_handler intel_dsm_handler = {
-+	.switchto = intel_dsm_switchto,
-+	.power_state = intel_dsm_power_state,
-+	.init = intel_dsm_init,
-+	.get_client_id = intel_dsm_get_client_id,
-+};
-+
-+static bool intel_dsm_pci_probe(struct pci_dev *pdev)
-+{
-+	acpi_handle dhandle, intel_handle;
-+	acpi_status status;
-+	int ret;
-+
-+	dhandle = DEVICE_ACPI_HANDLE(&pdev->dev);
-+	if (!dhandle)
-+		return false;
-+
-+	status = acpi_get_handle(dhandle, "_DSM", &intel_handle);
-+	if (ACPI_FAILURE(status)) {
-+		DRM_DEBUG_KMS("no _DSM method for intel device\n");
-+		return false;
-+	}
-+
-+	ret = intel_dsm(dhandle, INTEL_DSM_FN_SUPPORTED_FUNCTIONS, 0);
-+	if (ret < 0) {
-+		DRM_ERROR("failed to get supported _DSM functions\n");
-+		return false;
-+	}
-+
-+	intel_dsm_priv.dhandle = dhandle;
-+
-+	intel_dsm_platform_mux_info();
-+	return true;
-+}
-+
-+static bool intel_dsm_detect(void)
-+{
-+	char acpi_method_name[255] = { 0 };
-+	struct acpi_buffer buffer = {sizeof(acpi_method_name), acpi_method_name};
-+	struct pci_dev *pdev = NULL;
-+	bool has_dsm = false;
-+	int vga_count = 0;
-+
-+	while ((pdev = pci_get_class(PCI_CLASS_DISPLAY_VGA << 8, pdev)) != NULL) {
-+		vga_count++;
-+		has_dsm |= intel_dsm_pci_probe(pdev);
-+	}
-+
-+	if (vga_count == 2 && has_dsm) {
-+		acpi_get_name(intel_dsm_priv.dhandle, ACPI_FULL_PATHNAME, &buffer);
-+		DRM_DEBUG_DRIVER("VGA switcheroo: detected DSM switching method %s handle\n",
-+				 acpi_method_name);
-+		return true;
-+	}
-+
-+	return false;
-+}
-+
-+void intel_register_dsm_handler(void)
-+{
-+	if (!intel_dsm_detect())
-+		return;
-+
-+	vga_switcheroo_register_handler(&intel_dsm_handler);
-+}
-+
-+void intel_unregister_dsm_handler(void)
-+{
-+	vga_switcheroo_unregister_handler();
-+}
-diff --git a/drivers/gpu/drm/i915/intel_bios.c b/drivers/gpu/drm/i915/intel_bios.c
-index 96f75d7..b0b1200 100644
---- a/drivers/gpu/drm/i915/intel_bios.c
-+++ b/drivers/gpu/drm/i915/intel_bios.c
-@@ -24,6 +24,7 @@
-  *    Eric Anholt <eric@anholt.net>
-  *
-  */
-+#include <drm/drm_dp_helper.h>
- #include "drmP.h"
- #include "drm.h"
- #include "i915_drm.h"
-@@ -129,10 +130,6 @@ parse_lfp_panel_data(struct drm_i915_private *dev_priv,
- 	int i, temp_downclock;
- 	struct drm_display_mode *temp_mode;
- 
--	/* Defaults if we can't find VBT info */
--	dev_priv->lvds_dither = 0;
--	dev_priv->lvds_vbt = 0;
--
- 	lvds_options = find_section(bdb, BDB_LVDS_OPTIONS);
- 	if (!lvds_options)
- 		return;
-@@ -140,6 +137,7 @@ parse_lfp_panel_data(struct drm_i915_private *dev_priv,
- 	dev_priv->lvds_dither = lvds_options->pixel_dither;
- 	if (lvds_options->panel_type == 0xff)
- 		return;
-+
- 	panel_type = lvds_options->panel_type;
- 
- 	lvds_lfp_data = find_section(bdb, BDB_LVDS_LFP_DATA);
-@@ -169,6 +167,8 @@ parse_lfp_panel_data(struct drm_i915_private *dev_priv,
- 			((unsigned char *)entry + dvo_timing_offset);
- 
- 	panel_fixed_mode = kzalloc(sizeof(*panel_fixed_mode), GFP_KERNEL);
-+	if (!panel_fixed_mode)
-+		return;
- 
- 	fill_detail_timing_data(panel_fixed_mode, dvo_timing);
- 
-@@ -230,8 +230,6 @@ parse_sdvo_panel_data(struct drm_i915_private *dev_priv,
- 	struct lvds_dvo_timing *dvo_timing;
- 	struct drm_display_mode *panel_fixed_mode;
- 
--	dev_priv->sdvo_lvds_vbt_mode = NULL;
--
- 	sdvo_lvds_options = find_section(bdb, BDB_SDVO_LVDS_OPTIONS);
- 	if (!sdvo_lvds_options)
- 		return;
-@@ -260,10 +258,6 @@ parse_general_features(struct drm_i915_private *dev_priv,
- 	struct drm_device *dev = dev_priv->dev;
- 	struct bdb_general_features *general;
- 
--	/* Set sensible defaults in case we can't find the general block */
--	dev_priv->int_tv_support = 1;
--	dev_priv->int_crt_support = 1;
--
- 	general = find_section(bdb, BDB_GENERAL_FEATURES);
- 	if (general) {
- 		dev_priv->int_tv_support = general->int_tv_support;
-@@ -271,10 +265,10 @@ parse_general_features(struct drm_i915_private *dev_priv,
- 		dev_priv->lvds_use_ssc = general->enable_ssc;
- 
- 		if (dev_priv->lvds_use_ssc) {
--			if (IS_I85X(dev_priv->dev))
-+			if (IS_I85X(dev))
- 				dev_priv->lvds_ssc_freq =
- 					general->ssc_freq ? 66 : 48;
--			else if (IS_IRONLAKE(dev_priv->dev) || IS_GEN6(dev))
-+			else if (IS_GEN5(dev) || IS_GEN6(dev))
- 				dev_priv->lvds_ssc_freq =
- 					general->ssc_freq ? 100 : 120;
- 			else
-@@ -289,14 +283,6 @@ parse_general_definitions(struct drm_i915_private *dev_priv,
- 			  struct bdb_header *bdb)
- {
- 	struct bdb_general_definitions *general;
--	const int crt_bus_map_table[] = {
--		GPIOB,
--		GPIOA,
--		GPIOC,
--		GPIOD,
--		GPIOE,
--		GPIOF,
--	};
- 
- 	general = find_section(bdb, BDB_GENERAL_DEFINITIONS);
- 	if (general) {
-@@ -304,10 +290,8 @@ parse_general_definitions(struct drm_i915_private *dev_priv,
- 		if (block_size >= sizeof(*general)) {
- 			int bus_pin = general->crt_ddc_gmbus_pin;
- 			DRM_DEBUG_KMS("crt_ddc_bus_pin: %d\n", bus_pin);
--			if ((bus_pin >= 1) && (bus_pin <= 6)) {
--				dev_priv->crt_ddc_bus =
--					crt_bus_map_table[bus_pin-1];
--			}
-+			if (bus_pin >= 1 && bus_pin <= 6)
-+				dev_priv->crt_ddc_pin = bus_pin;
- 		} else {
- 			DRM_DEBUG_KMS("BDB_GD too small (%d). Invalid.\n",
- 				  block_size);
-@@ -317,7 +301,7 @@ parse_general_definitions(struct drm_i915_private *dev_priv,
- 
- static void
- parse_sdvo_device_mapping(struct drm_i915_private *dev_priv,
--		       struct bdb_header *bdb)
-+			  struct bdb_header *bdb)
- {
- 	struct sdvo_device_mapping *p_mapping;
- 	struct bdb_general_definitions *p_defs;
-@@ -327,7 +311,7 @@ parse_sdvo_device_mapping(struct drm_i915_private *dev_priv,
- 
- 	p_defs = find_section(bdb, BDB_GENERAL_DEFINITIONS);
- 	if (!p_defs) {
--		DRM_DEBUG_KMS("No general definition block is found\n");
-+		DRM_DEBUG_KMS("No general definition block is found, unable to construct sdvo mapping.\n");
- 		return;
- 	}
- 	/* judge whether the size of child device meets the requirements.
-@@ -377,7 +361,16 @@ parse_sdvo_device_mapping(struct drm_i915_private *dev_priv,
- 			p_mapping->slave_addr = p_child->slave_addr;
- 			p_mapping->dvo_wiring = p_child->dvo_wiring;
- 			p_mapping->ddc_pin = p_child->ddc_pin;
-+			p_mapping->i2c_pin = p_child->i2c_pin;
-+			p_mapping->i2c_speed = p_child->i2c_speed;
- 			p_mapping->initialized = 1;
-+			DRM_DEBUG_KMS("SDVO device: dvo=%x, addr=%x, wiring=%d, ddc_pin=%d, i2c_pin=%d, i2c_speed=%d\n",
-+				      p_mapping->dvo_port,
-+				      p_mapping->slave_addr,
-+				      p_mapping->dvo_wiring,
-+				      p_mapping->ddc_pin,
-+				      p_mapping->i2c_pin,
-+				      p_mapping->i2c_speed);
- 		} else {
- 			DRM_DEBUG_KMS("Maybe one SDVO port is shared by "
- 					 "two SDVO device.\n");
-@@ -409,14 +402,11 @@ parse_driver_features(struct drm_i915_private *dev_priv,
- 	if (!driver)
- 		return;
- 
--	if (driver && SUPPORTS_EDP(dev) &&
--	    driver->lvds_config == BDB_DRIVER_FEATURE_EDP) {
--		dev_priv->edp_support = 1;
--	} else {
--		dev_priv->edp_support = 0;
--	}
-+	if (SUPPORTS_EDP(dev) &&
-+	    driver->lvds_config == BDB_DRIVER_FEATURE_EDP)
-+		dev_priv->edp.support = 1;
- 
--	if (driver && driver->dual_frequency)
-+	if (driver->dual_frequency)
- 		dev_priv->render_reclock_avail = true;
- }
- 
-@@ -424,27 +414,78 @@ static void
- parse_edp(struct drm_i915_private *dev_priv, struct bdb_header *bdb)
- {
- 	struct bdb_edp *edp;
-+	struct edp_power_seq *edp_pps;
-+	struct edp_link_params *edp_link_params;
- 
- 	edp = find_section(bdb, BDB_EDP);
- 	if (!edp) {
--		if (SUPPORTS_EDP(dev_priv->dev) && dev_priv->edp_support) {
-+		if (SUPPORTS_EDP(dev_priv->dev) && dev_priv->edp.support) {
- 			DRM_DEBUG_KMS("No eDP BDB found but eDP panel "
--				      "supported, assume 18bpp panel color "
--				      "depth.\n");
--			dev_priv->edp_bpp = 18;
-+				      "supported, assume %dbpp panel color "
-+				      "depth.\n",
-+				      dev_priv->edp.bpp);
- 		}
- 		return;
- 	}
- 
- 	switch ((edp->color_depth >> (panel_type * 2)) & 3) {
- 	case EDP_18BPP:
--		dev_priv->edp_bpp = 18;
-+		dev_priv->edp.bpp = 18;
- 		break;
- 	case EDP_24BPP:
--		dev_priv->edp_bpp = 24;
-+		dev_priv->edp.bpp = 24;
- 		break;
- 	case EDP_30BPP:
--		dev_priv->edp_bpp = 30;
-+		dev_priv->edp.bpp = 30;
-+		break;
-+	}
-+
-+	/* Get the eDP sequencing and link info */
-+	edp_pps = &edp->power_seqs[panel_type];
-+	edp_link_params = &edp->link_params[panel_type];
-+
-+	dev_priv->edp.pps = *edp_pps;
-+
-+	dev_priv->edp.rate = edp_link_params->rate ? DP_LINK_BW_2_7 :
-+		DP_LINK_BW_1_62;
-+	switch (edp_link_params->lanes) {
-+	case 0:
-+		dev_priv->edp.lanes = 1;
-+		break;
-+	case 1:
-+		dev_priv->edp.lanes = 2;
-+		break;
-+	case 3:
-+	default:
-+		dev_priv->edp.lanes = 4;
-+		break;
-+	}
-+	switch (edp_link_params->preemphasis) {
-+	case 0:
-+		dev_priv->edp.preemphasis = DP_TRAIN_PRE_EMPHASIS_0;
-+		break;
-+	case 1:
-+		dev_priv->edp.preemphasis = DP_TRAIN_PRE_EMPHASIS_3_5;
-+		break;
-+	case 2:
-+		dev_priv->edp.preemphasis = DP_TRAIN_PRE_EMPHASIS_6;
-+		break;
-+	case 3:
-+		dev_priv->edp.preemphasis = DP_TRAIN_PRE_EMPHASIS_9_5;
-+		break;
-+	}
-+	switch (edp_link_params->vswing) {
-+	case 0:
-+		dev_priv->edp.vswing = DP_TRAIN_VOLTAGE_SWING_400;
-+		break;
-+	case 1:
-+		dev_priv->edp.vswing = DP_TRAIN_VOLTAGE_SWING_600;
-+		break;
-+	case 2:
-+		dev_priv->edp.vswing = DP_TRAIN_VOLTAGE_SWING_800;
-+		break;
-+	case 3:
-+		dev_priv->edp.vswing = DP_TRAIN_VOLTAGE_SWING_1200;
- 		break;
- 	}
- }
-@@ -460,7 +501,7 @@ parse_device_mapping(struct drm_i915_private *dev_priv,
- 
- 	p_defs = find_section(bdb, BDB_GENERAL_DEFINITIONS);
- 	if (!p_defs) {
--		DRM_DEBUG_KMS("No general definition block is found\n");
-+		DRM_DEBUG_KMS("No general definition block is found, no devices defined.\n");
- 		return;
- 	}
- 	/* judge whether the size of child device meets the requirements.
-@@ -513,50 +554,83 @@ parse_device_mapping(struct drm_i915_private *dev_priv,
- 	}
- 	return;
- }
-+
-+static void
-+init_vbt_defaults(struct drm_i915_private *dev_priv)
-+{
-+	dev_priv->crt_ddc_pin = GMBUS_PORT_VGADDC;
-+
-+	/* LFP panel data */
-+	dev_priv->lvds_dither = 1;
-+	dev_priv->lvds_vbt = 0;
-+
-+	/* SDVO panel data */
-+	dev_priv->sdvo_lvds_vbt_mode = NULL;
-+
-+	/* general features */
-+	dev_priv->int_tv_support = 1;
-+	dev_priv->int_crt_support = 1;
-+	dev_priv->lvds_use_ssc = 0;
-+
-+	/* eDP data */
-+	dev_priv->edp.bpp = 18;
-+}
-+
- /**
-- * intel_init_bios - initialize VBIOS settings & find VBT
-+ * intel_parse_bios - find VBT and initialize settings from the BIOS
-  * @dev: DRM device
-  *
-  * Loads the Video BIOS and checks that the VBT exists.  Sets scratch registers
-  * to appropriate values.
-  *
-- * VBT existence is a sanity check that is relied on by other i830_bios.c code.
-- * Note that it would be better to use a BIOS call to get the VBT, as BIOSes may
-- * feed an updated VBT back through that, compared to what we'll fetch using
-- * this method of groping around in the BIOS data.
-- *
-  * Returns 0 on success, nonzero on failure.
-  */
- bool
--intel_init_bios(struct drm_device *dev)
-+intel_parse_bios(struct drm_device *dev)
- {
- 	struct drm_i915_private *dev_priv = dev->dev_private;
- 	struct pci_dev *pdev = dev->pdev;
--	struct vbt_header *vbt = NULL;
--	struct bdb_header *bdb;
--	u8 __iomem *bios;
--	size_t size;
--	int i;
--
--	bios = pci_map_rom(pdev, &size);
--	if (!bios)
--		return -1;
--
--	/* Scour memory looking for the VBT signature */
--	for (i = 0; i + 4 < size; i++) {
--		if (!memcmp(bios + i, "$VBT", 4)) {
--			vbt = (struct vbt_header *)(bios + i);
--			break;
--		}
-+	struct bdb_header *bdb = NULL;
-+	u8 __iomem *bios = NULL;
-+
-+	init_vbt_defaults(dev_priv);
-+
-+	/* XXX Should this validation be moved to intel_opregion.c? */
-+	if (dev_priv->opregion.vbt) {
-+		struct vbt_header *vbt = dev_priv->opregion.vbt;
-+		if (memcmp(vbt->signature, "$VBT", 4) == 0) {
-+			DRM_DEBUG_DRIVER("Using VBT from OpRegion: %20s\n",
-+					 vbt->signature);
-+			bdb = (struct bdb_header *)((char *)vbt + vbt->bdb_offset);
-+		} else
-+			dev_priv->opregion.vbt = NULL;
- 	}
- 
--	if (!vbt) {
--		DRM_ERROR("VBT signature missing\n");
--		pci_unmap_rom(pdev, bios);
--		return -1;
--	}
-+	if (bdb == NULL) {
-+		struct vbt_header *vbt = NULL;
-+		size_t size;
-+		int i;
- 
--	bdb = (struct bdb_header *)(bios + i + vbt->bdb_offset);
-+		bios = pci_map_rom(pdev, &size);
-+		if (!bios)
-+			return -1;
-+
-+		/* Scour memory looking for the VBT signature */
-+		for (i = 0; i + 4 < size; i++) {
-+			if (!memcmp(bios + i, "$VBT", 4)) {
-+				vbt = (struct vbt_header *)(bios + i);
-+				break;
-+			}
-+		}
-+
-+		if (!vbt) {
-+			DRM_ERROR("VBT signature missing\n");
-+			pci_unmap_rom(pdev, bios);
-+			return -1;
-+		}
-+
-+		bdb = (struct bdb_header *)(bios + i + vbt->bdb_offset);
-+	}
- 
- 	/* Grab useful general definitions */
- 	parse_general_features(dev_priv, bdb);
-@@ -568,7 +642,25 @@ intel_init_bios(struct drm_device *dev)
- 	parse_driver_features(dev_priv, bdb);
- 	parse_edp(dev_priv, bdb);
- 
--	pci_unmap_rom(pdev, bios);
-+	if (bios)
-+		pci_unmap_rom(pdev, bios);
- 
- 	return 0;
- }
-+
-+/* Ensure that vital registers have been initialised, even if the BIOS
-+ * is absent or just failing to do its job.
-+ */
-+void intel_setup_bios(struct drm_device *dev)
-+{
-+	struct drm_i915_private *dev_priv = dev->dev_private;
-+
-+	 /* Set the Panel Power On/Off timings if uninitialized. */
-+	if ((I915_READ(PP_ON_DELAYS) == 0) && (I915_READ(PP_OFF_DELAYS) == 0)) {
-+		/* Set T2 to 40ms and T5 to 200ms */
-+		I915_WRITE(PP_ON_DELAYS, 0x019007d0);
-+
-+		/* Set T3 to 35ms and Tx to 200ms */
-+		I915_WRITE(PP_OFF_DELAYS, 0x015e07d0);
-+	}
-+}
-diff --git a/drivers/gpu/drm/i915/intel_bios.h b/drivers/gpu/drm/i915/intel_bios.h
-index 4c18514..5f8e4ed 100644
---- a/drivers/gpu/drm/i915/intel_bios.h
-+++ b/drivers/gpu/drm/i915/intel_bios.h
-@@ -197,7 +197,8 @@ struct bdb_general_features {
- struct child_device_config {
- 	u16 handle;
- 	u16 device_type;
--	u8  device_id[10]; /* See DEVICE_TYPE_* above */
-+	u8  i2c_speed;
-+	u8  rsvd[9];
- 	u16 addin_offset;
- 	u8  dvo_port; /* See Device_PORT_* above */
- 	u8  i2c_pin;
-@@ -466,7 +467,8 @@ struct bdb_edp {
- 	struct edp_link_params link_params[16];
- } __attribute__ ((packed));
- 
--bool intel_init_bios(struct drm_device *dev);
-+void intel_setup_bios(struct drm_device *dev);
-+bool intel_parse_bios(struct drm_device *dev);
- 
- /*
-  * Driver<->VBIOS interaction occurs through scratch bits in
-diff --git a/drivers/gpu/drm/i915/intel_crt.c b/drivers/gpu/drm/i915/intel_crt.c
-index 197d4f3..c55c770 100644
---- a/drivers/gpu/drm/i915/intel_crt.c
-+++ b/drivers/gpu/drm/i915/intel_crt.c
-@@ -79,7 +79,7 @@ static int intel_crt_mode_valid(struct drm_connector *connector,
- 	if (mode->clock < 25000)
- 		return MODE_CLOCK_LOW;
- 
--	if (!IS_I9XX(dev))
-+	if (IS_GEN2(dev))
- 		max_clock = 350000;
- 	else
- 		max_clock = 400000;
-@@ -123,7 +123,7 @@ static void intel_crt_mode_set(struct drm_encoder *encoder,
- 	 * Disable separate mode multiplier used when cloning SDVO to CRT
- 	 * XXX this needs to be adjusted when we really are cloning
- 	 */
--	if (IS_I965G(dev) && !HAS_PCH_SPLIT(dev)) {
-+	if (INTEL_INFO(dev)->gen >= 4 && !HAS_PCH_SPLIT(dev)) {
- 		dpll_md = I915_READ(dpll_md_reg);
- 		I915_WRITE(dpll_md_reg,
- 			   dpll_md & ~DPLL_MD_UDI_MULTIPLIER_MASK);
-@@ -187,11 +187,12 @@ static bool intel_ironlake_crt_detect_hotplug(struct drm_connector *connector)
- 	I915_WRITE(PCH_ADPA, adpa);
- 
- 	if (wait_for((I915_READ(PCH_ADPA) & ADPA_CRT_HOTPLUG_FORCE_TRIGGER) == 0,
--		     1000, 1))
-+		     1000))
- 		DRM_DEBUG_KMS("timed out waiting for FORCE_TRIGGER");
- 
- 	if (turn_off_dac) {
--		I915_WRITE(PCH_ADPA, temp);
-+		/* Make sure hotplug is enabled */
-+		I915_WRITE(PCH_ADPA, temp | ADPA_CRT_HOTPLUG_ENABLE);
- 		(void)I915_READ(PCH_ADPA);
- 	}
- 
-@@ -244,7 +245,7 @@ static bool intel_crt_detect_hotplug(struct drm_connector *connector)
- 		/* wait for FORCE_DETECT to go off */
- 		if (wait_for((I915_READ(PORT_HOTPLUG_EN) &
- 			      CRT_HOTPLUG_FORCE_DETECT) == 0,
--			     1000, 1))
-+			     1000))
- 			DRM_DEBUG_KMS("timed out waiting for FORCE_DETECT to go off");
- 	}
- 
-@@ -261,21 +262,47 @@ static bool intel_crt_detect_hotplug(struct drm_connector *connector)
- 	return ret;
- }
- 
-+static bool intel_crt_ddc_probe(struct drm_i915_private *dev_priv, int ddc_bus)
-+{
-+	u8 buf;
-+	struct i2c_msg msgs[] = {
-+		{
-+			.addr = 0xA0,
-+			.flags = 0,
-+			.len = 1,
-+			.buf = &buf,
-+		},
-+	};
-+	/* DDC monitor detect: Does it ACK a write to 0xA0? */
-+	return i2c_transfer(&dev_priv->gmbus[ddc_bus].adapter, msgs, 1) == 1;
-+}
-+
- static bool intel_crt_detect_ddc(struct drm_encoder *encoder)
- {
--	struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder);
-+	struct intel_encoder *intel_encoder = to_intel_encoder(encoder);
-+	struct drm_i915_private *dev_priv = encoder->dev->dev_private;
- 
- 	/* CRT should always be at 0, but check anyway */
- 	if (intel_encoder->type != INTEL_OUTPUT_ANALOG)
- 		return false;
- 
--	return intel_ddc_probe(intel_encoder);
-+	if (intel_crt_ddc_probe(dev_priv, dev_priv->crt_ddc_pin)) {
-+		DRM_DEBUG_KMS("CRT detected via DDC:0xa0\n");
-+		return true;
-+	}
-+
-+	if (intel_ddc_probe(intel_encoder, dev_priv->crt_ddc_pin)) {
-+		DRM_DEBUG_KMS("CRT detected via DDC:0x50 [EDID]\n");
-+		return true;
-+	}
-+
-+	return false;
- }
- 
- static enum drm_connector_status
- intel_crt_load_detect(struct drm_crtc *crtc, struct intel_encoder *intel_encoder)
- {
--	struct drm_encoder *encoder = &intel_encoder->enc;
-+	struct drm_encoder *encoder = &intel_encoder->base;
- 	struct drm_device *dev = encoder->dev;
- 	struct drm_i915_private *dev_priv = dev->dev_private;
- 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
-@@ -295,6 +322,8 @@ intel_crt_load_detect(struct drm_crtc *crtc, struct intel_encoder *intel_encoder
- 	uint8_t	st00;
- 	enum drm_connector_status status;
- 
-+	DRM_DEBUG_KMS("starting load-detect on CRT\n");
-+
- 	if (pipe == 0) {
- 		bclrpat_reg = BCLRPAT_A;
- 		vtotal_reg = VTOTAL_A;
-@@ -324,9 +353,10 @@ intel_crt_load_detect(struct drm_crtc *crtc, struct intel_encoder *intel_encoder
- 	/* Set the border color to purple. */
- 	I915_WRITE(bclrpat_reg, 0x500050);
- 
--	if (IS_I9XX(dev)) {
-+	if (!IS_GEN2(dev)) {
- 		uint32_t pipeconf = I915_READ(pipeconf_reg);
- 		I915_WRITE(pipeconf_reg, pipeconf | PIPECONF_FORCE_BORDER);
-+		POSTING_READ(pipeconf_reg);
- 		/* Wait for next Vblank to substitue
- 		 * border color for Color info */
- 		intel_wait_for_vblank(dev, pipe);
-@@ -404,34 +434,37 @@ static enum drm_connector_status
- intel_crt_detect(struct drm_connector *connector, bool force)
- {
- 	struct drm_device *dev = connector->dev;
--	struct drm_encoder *encoder = intel_attached_encoder(connector);
--	struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder);
-+	struct intel_encoder *encoder = intel_attached_encoder(connector);
- 	struct drm_crtc *crtc;
- 	int dpms_mode;
- 	enum drm_connector_status status;
- 
--	if (IS_I9XX(dev) && !IS_I915G(dev) && !IS_I915GM(dev)) {
--		if (intel_crt_detect_hotplug(connector))
-+	if (I915_HAS_HOTPLUG(dev)) {
-+		if (intel_crt_detect_hotplug(connector)) {
-+			DRM_DEBUG_KMS("CRT detected via hotplug\n");
- 			return connector_status_connected;
--		else
-+		} else
- 			return connector_status_disconnected;
- 	}
- 
--	if (intel_crt_detect_ddc(encoder))
-+	if (intel_crt_detect_ddc(&encoder->base))
- 		return connector_status_connected;
- 
- 	if (!force)
- 		return connector->status;
- 
- 	/* for pre-945g platforms use load detect */
--	if (encoder->crtc && encoder->crtc->enabled) {
--		status = intel_crt_load_detect(encoder->crtc, intel_encoder);
-+	if (encoder->base.crtc && encoder->base.crtc->enabled) {
-+		status = intel_crt_load_detect(encoder->base.crtc, encoder);
- 	} else {
--		crtc = intel_get_load_detect_pipe(intel_encoder, connector,
-+		crtc = intel_get_load_detect_pipe(encoder, connector,
- 						  NULL, &dpms_mode);
- 		if (crtc) {
--			status = intel_crt_load_detect(crtc, intel_encoder);
--			intel_release_load_detect_pipe(intel_encoder,
-+			if (intel_crt_detect_ddc(&encoder->base))
-+				status = connector_status_connected;
-+			else
-+				status = intel_crt_load_detect(crtc, encoder);
-+			intel_release_load_detect_pipe(encoder,
- 						       connector, dpms_mode);
- 		} else
- 			status = connector_status_unknown;
-@@ -449,32 +482,18 @@ static void intel_crt_destroy(struct drm_connector *connector)
- 
- static int intel_crt_get_modes(struct drm_connector *connector)
- {
--	int ret;
--	struct drm_encoder *encoder = intel_attached_encoder(connector);
--	struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder);
--	struct i2c_adapter *ddc_bus;
- 	struct drm_device *dev = connector->dev;
-+	struct drm_i915_private *dev_priv = dev->dev_private;
-+	int ret;
- 
--
--	ret = intel_ddc_get_modes(connector, intel_encoder->ddc_bus);
-+	ret = intel_ddc_get_modes(connector,
-+				 &dev_priv->gmbus[dev_priv->crt_ddc_pin].adapter);
- 	if (ret || !IS_G4X(dev))
--		goto end;
-+		return ret;
- 
- 	/* Try to probe digital port for output in DVI-I -> VGA mode. */
--	ddc_bus = intel_i2c_create(connector->dev, GPIOD, "CRTDDC_D");
--
--	if (!ddc_bus) {
--		dev_printk(KERN_ERR, &connector->dev->pdev->dev,
--			   "DDC bus registration failed for CRTDDC_D.\n");
--		goto end;
--	}
--	/* Try to get modes by GPIOD port */
--	ret = intel_ddc_get_modes(connector, ddc_bus);
--	intel_i2c_destroy(ddc_bus);
--
--end:
--	return ret;
--
-+	return intel_ddc_get_modes(connector,
-+				   &dev_priv->gmbus[GMBUS_PORT_DPB].adapter);
- }
- 
- static int intel_crt_set_property(struct drm_connector *connector,
-@@ -507,7 +526,7 @@ static const struct drm_connector_funcs intel_crt_connector_funcs = {
- static const struct drm_connector_helper_funcs intel_crt_connector_helper_funcs = {
- 	.mode_valid = intel_crt_mode_valid,
- 	.get_modes = intel_crt_get_modes,
--	.best_encoder = intel_attached_encoder,
-+	.best_encoder = intel_best_encoder,
- };
- 
- static const struct drm_encoder_funcs intel_crt_enc_funcs = {
-@@ -520,7 +539,6 @@ void intel_crt_init(struct drm_device *dev)
- 	struct intel_encoder *intel_encoder;
- 	struct intel_connector *intel_connector;
- 	struct drm_i915_private *dev_priv = dev->dev_private;
--	u32 i2c_reg;
- 
- 	intel_encoder = kzalloc(sizeof(struct intel_encoder), GFP_KERNEL);
- 	if (!intel_encoder)
-@@ -536,27 +554,10 @@ void intel_crt_init(struct drm_device *dev)
- 	drm_connector_init(dev, &intel_connector->base,
- 			   &intel_crt_connector_funcs, DRM_MODE_CONNECTOR_VGA);
- 
--	drm_encoder_init(dev, &intel_encoder->enc, &intel_crt_enc_funcs,
-+	drm_encoder_init(dev, &intel_encoder->base, &intel_crt_enc_funcs,
- 			 DRM_MODE_ENCODER_DAC);
- 
--	drm_mode_connector_attach_encoder(&intel_connector->base,
--					  &intel_encoder->enc);
--
--	/* Set up the DDC bus. */
--	if (HAS_PCH_SPLIT(dev))
--		i2c_reg = PCH_GPIOA;
--	else {
--		i2c_reg = GPIOA;
--		/* Use VBT information for CRT DDC if available */
--		if (dev_priv->crt_ddc_bus != 0)
--			i2c_reg = dev_priv->crt_ddc_bus;
--	}
--	intel_encoder->ddc_bus = intel_i2c_create(dev, i2c_reg, "CRTDDC_A");
--	if (!intel_encoder->ddc_bus) {
--		dev_printk(KERN_ERR, &dev->pdev->dev, "DDC bus registration "
--			   "failed.\n");
--		return;
--	}
-+	intel_connector_attach_encoder(intel_connector, intel_encoder);
- 
- 	intel_encoder->type = INTEL_OUTPUT_ANALOG;
- 	intel_encoder->clone_mask = (1 << INTEL_SDVO_NON_TV_CLONE_BIT) |
-@@ -566,7 +567,7 @@ void intel_crt_init(struct drm_device *dev)
- 	connector->interlace_allowed = 1;
- 	connector->doublescan_allowed = 0;
- 
--	drm_encoder_helper_add(&intel_encoder->enc, &intel_crt_helper_funcs);
-+	drm_encoder_helper_add(&intel_encoder->base, &intel_crt_helper_funcs);
- 	drm_connector_helper_add(connector, &intel_crt_connector_helper_funcs);
- 
- 	drm_sysfs_connector_add(connector);
-diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
-index 9792285..0cece04 100644
---- a/drivers/gpu/drm/i915/intel_display.c
-+++ b/drivers/gpu/drm/i915/intel_display.c
-@@ -43,8 +43,8 @@
- 
- bool intel_pipe_has_type (struct drm_crtc *crtc, int type);
- static void intel_update_watermarks(struct drm_device *dev);
--static void intel_increase_pllclock(struct drm_crtc *crtc, bool schedule);
--static void intel_crtc_update_cursor(struct drm_crtc *crtc);
-+static void intel_increase_pllclock(struct drm_crtc *crtc);
-+static void intel_crtc_update_cursor(struct drm_crtc *crtc, bool on);
- 
- typedef struct {
-     /* given values */
-@@ -342,6 +342,16 @@ static bool
- intel_find_pll_ironlake_dp(const intel_limit_t *, struct drm_crtc *crtc,
- 			   int target, int refclk, intel_clock_t *best_clock);
- 
-+static inline u32 /* units of 100MHz */
-+intel_fdi_link_freq(struct drm_device *dev)
-+{
-+	if (IS_GEN5(dev)) {
-+		struct drm_i915_private *dev_priv = dev->dev_private;
-+		return (I915_READ(FDI_PLL_BIOS_0) & FDI_PLL_FB_CLOCK_MASK) + 2;
-+	} else
-+		return 27;
-+}
-+
- static const intel_limit_t intel_limits_i8xx_dvo = {
-         .dot = { .min = I8XX_DOT_MIN,		.max = I8XX_DOT_MAX },
-         .vco = { .min = I8XX_VCO_MIN,		.max = I8XX_VCO_MAX },
-@@ -701,16 +711,16 @@ static const intel_limit_t *intel_limit(struct drm_crtc *crtc)
- 		limit = intel_ironlake_limit(crtc);
- 	else if (IS_G4X(dev)) {
- 		limit = intel_g4x_limit(crtc);
--	} else if (IS_I9XX(dev) && !IS_PINEVIEW(dev)) {
--		if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS))
--			limit = &intel_limits_i9xx_lvds;
--		else
--			limit = &intel_limits_i9xx_sdvo;
- 	} else if (IS_PINEVIEW(dev)) {
- 		if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS))
- 			limit = &intel_limits_pineview_lvds;
- 		else
- 			limit = &intel_limits_pineview_sdvo;
-+	} else if (!IS_GEN2(dev)) {
-+		if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS))
-+			limit = &intel_limits_i9xx_lvds;
-+		else
-+			limit = &intel_limits_i9xx_sdvo;
- 	} else {
- 		if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS))
- 			limit = &intel_limits_i8xx_lvds;
-@@ -744,20 +754,17 @@ static void intel_clock(struct drm_device *dev, int refclk, intel_clock_t *clock
- /**
-  * Returns whether any output on the specified pipe is of the specified type
-  */
--bool intel_pipe_has_type (struct drm_crtc *crtc, int type)
-+bool intel_pipe_has_type(struct drm_crtc *crtc, int type)
- {
--    struct drm_device *dev = crtc->dev;
--    struct drm_mode_config *mode_config = &dev->mode_config;
--    struct drm_encoder *l_entry;
-+	struct drm_device *dev = crtc->dev;
-+	struct drm_mode_config *mode_config = &dev->mode_config;
-+	struct intel_encoder *encoder;
- 
--    list_for_each_entry(l_entry, &mode_config->encoder_list, head) {
--	    if (l_entry && l_entry->crtc == crtc) {
--		    struct intel_encoder *intel_encoder = enc_to_intel_encoder(l_entry);
--		    if (intel_encoder->type == type)
--			    return true;
--	    }
--    }
--    return false;
-+	list_for_each_entry(encoder, &mode_config->encoder_list, base.head)
-+		if (encoder->base.crtc == crtc && encoder->type == type)
-+			return true;
-+
-+	return false;
- }
- 
- #define INTELPllInvalid(s)   do { /* DRM_DEBUG(s); */ return false; } while (0)
-@@ -928,10 +935,6 @@ intel_find_pll_ironlake_dp(const intel_limit_t *limit, struct drm_crtc *crtc,
- 	struct drm_device *dev = crtc->dev;
- 	intel_clock_t clock;
- 
--	/* return directly when it is eDP */
--	if (HAS_eDP)
--		return true;
--
- 	if (target < 200000) {
- 		clock.n = 1;
- 		clock.p1 = 2;
-@@ -955,26 +958,26 @@ static bool
- intel_find_pll_g4x_dp(const intel_limit_t *limit, struct drm_crtc *crtc,
- 		      int target, int refclk, intel_clock_t *best_clock)
- {
--    intel_clock_t clock;
--    if (target < 200000) {
--	clock.p1 = 2;
--	clock.p2 = 10;
--	clock.n = 2;
--	clock.m1 = 23;
--	clock.m2 = 8;
--    } else {
--	clock.p1 = 1;
--	clock.p2 = 10;
--	clock.n = 1;
--	clock.m1 = 14;
--	clock.m2 = 2;
--    }
--    clock.m = 5 * (clock.m1 + 2) + (clock.m2 + 2);
--    clock.p = (clock.p1 * clock.p2);
--    clock.dot = 96000 * clock.m / (clock.n + 2) / clock.p;
--    clock.vco = 0;
--    memcpy(best_clock, &clock, sizeof(intel_clock_t));
--    return true;
-+	intel_clock_t clock;
-+	if (target < 200000) {
-+		clock.p1 = 2;
-+		clock.p2 = 10;
-+		clock.n = 2;
-+		clock.m1 = 23;
-+		clock.m2 = 8;
-+	} else {
-+		clock.p1 = 1;
-+		clock.p2 = 10;
-+		clock.n = 1;
-+		clock.m1 = 14;
-+		clock.m2 = 2;
-+	}
-+	clock.m = 5 * (clock.m1 + 2) + (clock.m2 + 2);
-+	clock.p = (clock.p1 * clock.p2);
-+	clock.dot = 96000 * clock.m / (clock.n + 2) / clock.p;
-+	clock.vco = 0;
-+	memcpy(best_clock, &clock, sizeof(intel_clock_t));
-+	return true;
- }
- 
- /**
-@@ -1007,9 +1010,9 @@ void intel_wait_for_vblank(struct drm_device *dev, int pipe)
- 		   I915_READ(pipestat_reg) | PIPE_VBLANK_INTERRUPT_STATUS);
- 
- 	/* Wait for vblank interrupt bit to set */
--	if (wait_for((I915_READ(pipestat_reg) &
--		      PIPE_VBLANK_INTERRUPT_STATUS),
--		     50, 0))
-+	if (wait_for(I915_READ(pipestat_reg) &
-+		     PIPE_VBLANK_INTERRUPT_STATUS,
-+		     50))
- 		DRM_DEBUG_KMS("vblank wait timed out\n");
- }
- 
-@@ -1028,36 +1031,35 @@ void intel_wait_for_vblank(struct drm_device *dev, int pipe)
-  * Otherwise:
-  *   wait for the display line value to settle (it usually
-  *   ends up stopping at the start of the next frame).
-- *  
-+ *
-  */
--static void intel_wait_for_pipe_off(struct drm_device *dev, int pipe)
-+void intel_wait_for_pipe_off(struct drm_device *dev, int pipe)
- {
- 	struct drm_i915_private *dev_priv = dev->dev_private;
- 
- 	if (INTEL_INFO(dev)->gen >= 4) {
--		int pipeconf_reg = (pipe == 0 ? PIPEACONF : PIPEBCONF);
-+		int reg = PIPECONF(pipe);
- 
- 		/* Wait for the Pipe State to go off */
--		if (wait_for((I915_READ(pipeconf_reg) & I965_PIPECONF_ACTIVE) == 0,
--			     100, 0))
-+		if (wait_for((I915_READ(reg) & I965_PIPECONF_ACTIVE) == 0,
-+			     100))
- 			DRM_DEBUG_KMS("pipe_off wait timed out\n");
- 	} else {
- 		u32 last_line;
--		int pipedsl_reg = (pipe == 0 ? PIPEADSL : PIPEBDSL);
-+		int reg = PIPEDSL(pipe);
- 		unsigned long timeout = jiffies + msecs_to_jiffies(100);
- 
- 		/* Wait for the display line to settle */
- 		do {
--			last_line = I915_READ(pipedsl_reg) & DSL_LINEMASK;
-+			last_line = I915_READ(reg) & DSL_LINEMASK;
- 			mdelay(5);
--		} while (((I915_READ(pipedsl_reg) & DSL_LINEMASK) != last_line) &&
-+		} while (((I915_READ(reg) & DSL_LINEMASK) != last_line) &&
- 			 time_after(timeout, jiffies));
- 		if (time_after(jiffies, timeout))
- 			DRM_DEBUG_KMS("pipe_off wait timed out\n");
- 	}
- }
- 
--/* Parameters have changed, update FBC info */
- static void i8xx_enable_fbc(struct drm_crtc *crtc, unsigned long interval)
- {
- 	struct drm_device *dev = crtc->dev;
-@@ -1069,6 +1071,14 @@ static void i8xx_enable_fbc(struct drm_crtc *crtc, unsigned long interval)
- 	int plane, i;
- 	u32 fbc_ctl, fbc_ctl2;
- 
-+	if (fb->pitch == dev_priv->cfb_pitch &&
-+	    obj_priv->fence_reg == dev_priv->cfb_fence &&
-+	    intel_crtc->plane == dev_priv->cfb_plane &&
-+	    I915_READ(FBC_CONTROL) & FBC_CTL_EN)
-+		return;
-+
-+	i8xx_disable_fbc(dev);
-+
- 	dev_priv->cfb_pitch = dev_priv->cfb_size / FBC_LL_SIZE;
- 
- 	if (fb->pitch < dev_priv->cfb_pitch)
-@@ -1102,7 +1112,7 @@ static void i8xx_enable_fbc(struct drm_crtc *crtc, unsigned long interval)
- 	I915_WRITE(FBC_CONTROL, fbc_ctl);
- 
- 	DRM_DEBUG_KMS("enabled FBC, pitch %ld, yoff %d, plane %d, ",
--		  dev_priv->cfb_pitch, crtc->y, dev_priv->cfb_plane);
-+		      dev_priv->cfb_pitch, crtc->y, dev_priv->cfb_plane);
- }
- 
- void i8xx_disable_fbc(struct drm_device *dev)
-@@ -1110,19 +1120,16 @@ void i8xx_disable_fbc(struct drm_device *dev)
- 	struct drm_i915_private *dev_priv = dev->dev_private;
- 	u32 fbc_ctl;
- 
--	if (!I915_HAS_FBC(dev))
--		return;
--
--	if (!(I915_READ(FBC_CONTROL) & FBC_CTL_EN))
--		return;	/* Already off, just return */
--
- 	/* Disable compression */
- 	fbc_ctl = I915_READ(FBC_CONTROL);
-+	if ((fbc_ctl & FBC_CTL_EN) == 0)
-+		return;
-+
- 	fbc_ctl &= ~FBC_CTL_EN;
- 	I915_WRITE(FBC_CONTROL, fbc_ctl);
- 
- 	/* Wait for compressing bit to clear */
--	if (wait_for((I915_READ(FBC_STATUS) & FBC_STAT_COMPRESSING) == 0, 10, 0)) {
-+	if (wait_for((I915_READ(FBC_STATUS) & FBC_STAT_COMPRESSING) == 0, 10)) {
- 		DRM_DEBUG_KMS("FBC idle timed out\n");
- 		return;
- 	}
-@@ -1145,14 +1152,27 @@ static void g4x_enable_fbc(struct drm_crtc *crtc, unsigned long interval)
- 	struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
- 	struct drm_i915_gem_object *obj_priv = to_intel_bo(intel_fb->obj);
- 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
--	int plane = (intel_crtc->plane == 0 ? DPFC_CTL_PLANEA :
--		     DPFC_CTL_PLANEB);
-+	int plane = intel_crtc->plane == 0 ? DPFC_CTL_PLANEA : DPFC_CTL_PLANEB;
- 	unsigned long stall_watermark = 200;
- 	u32 dpfc_ctl;
- 
-+	dpfc_ctl = I915_READ(DPFC_CONTROL);
-+	if (dpfc_ctl & DPFC_CTL_EN) {
-+		if (dev_priv->cfb_pitch == dev_priv->cfb_pitch / 64 - 1 &&
-+		    dev_priv->cfb_fence == obj_priv->fence_reg &&
-+		    dev_priv->cfb_plane == intel_crtc->plane &&
-+		    dev_priv->cfb_y == crtc->y)
-+			return;
-+
-+		I915_WRITE(DPFC_CONTROL, dpfc_ctl & ~DPFC_CTL_EN);
-+		POSTING_READ(DPFC_CONTROL);
-+		intel_wait_for_vblank(dev, intel_crtc->pipe);
-+	}
-+
- 	dev_priv->cfb_pitch = (dev_priv->cfb_pitch / 64) - 1;
- 	dev_priv->cfb_fence = obj_priv->fence_reg;
- 	dev_priv->cfb_plane = intel_crtc->plane;
-+	dev_priv->cfb_y = crtc->y;
- 
- 	dpfc_ctl = plane | DPFC_SR_EN | DPFC_CTL_LIMIT_1X;
- 	if (obj_priv->tiling_mode != I915_TILING_NONE) {
-@@ -1162,7 +1182,6 @@ static void g4x_enable_fbc(struct drm_crtc *crtc, unsigned long interval)
- 		I915_WRITE(DPFC_CHICKEN, ~DPFC_HT_MODIFY);
- 	}
- 
--	I915_WRITE(DPFC_CONTROL, dpfc_ctl);
- 	I915_WRITE(DPFC_RECOMP_CTL, DPFC_RECOMP_STALL_EN |
- 		   (stall_watermark << DPFC_RECOMP_STALL_WM_SHIFT) |
- 		   (interval << DPFC_RECOMP_TIMER_COUNT_SHIFT));
-@@ -1181,10 +1200,12 @@ void g4x_disable_fbc(struct drm_device *dev)
- 
- 	/* Disable compression */
- 	dpfc_ctl = I915_READ(DPFC_CONTROL);
--	dpfc_ctl &= ~DPFC_CTL_EN;
--	I915_WRITE(DPFC_CONTROL, dpfc_ctl);
-+	if (dpfc_ctl & DPFC_CTL_EN) {
-+		dpfc_ctl &= ~DPFC_CTL_EN;
-+		I915_WRITE(DPFC_CONTROL, dpfc_ctl);
- 
--	DRM_DEBUG_KMS("disabled FBC\n");
-+		DRM_DEBUG_KMS("disabled FBC\n");
-+	}
- }
- 
- static bool g4x_fbc_enabled(struct drm_device *dev)
-@@ -1202,16 +1223,30 @@ static void ironlake_enable_fbc(struct drm_crtc *crtc, unsigned long interval)
- 	struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
- 	struct drm_i915_gem_object *obj_priv = to_intel_bo(intel_fb->obj);
- 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
--	int plane = (intel_crtc->plane == 0) ? DPFC_CTL_PLANEA :
--					       DPFC_CTL_PLANEB;
-+	int plane = intel_crtc->plane == 0 ? DPFC_CTL_PLANEA : DPFC_CTL_PLANEB;
- 	unsigned long stall_watermark = 200;
- 	u32 dpfc_ctl;
- 
-+	dpfc_ctl = I915_READ(ILK_DPFC_CONTROL);
-+	if (dpfc_ctl & DPFC_CTL_EN) {
-+		if (dev_priv->cfb_pitch == dev_priv->cfb_pitch / 64 - 1 &&
-+		    dev_priv->cfb_fence == obj_priv->fence_reg &&
-+		    dev_priv->cfb_plane == intel_crtc->plane &&
-+		    dev_priv->cfb_offset == obj_priv->gtt_offset &&
-+		    dev_priv->cfb_y == crtc->y)
-+			return;
-+
-+		I915_WRITE(ILK_DPFC_CONTROL, dpfc_ctl & ~DPFC_CTL_EN);
-+		POSTING_READ(ILK_DPFC_CONTROL);
-+		intel_wait_for_vblank(dev, intel_crtc->pipe);
-+	}
-+
- 	dev_priv->cfb_pitch = (dev_priv->cfb_pitch / 64) - 1;
- 	dev_priv->cfb_fence = obj_priv->fence_reg;
- 	dev_priv->cfb_plane = intel_crtc->plane;
-+	dev_priv->cfb_offset = obj_priv->gtt_offset;
-+	dev_priv->cfb_y = crtc->y;
- 
--	dpfc_ctl = I915_READ(ILK_DPFC_CONTROL);
- 	dpfc_ctl &= DPFC_RESERVED;
- 	dpfc_ctl |= (plane | DPFC_CTL_LIMIT_1X);
- 	if (obj_priv->tiling_mode != I915_TILING_NONE) {
-@@ -1221,15 +1256,13 @@ static void ironlake_enable_fbc(struct drm_crtc *crtc, unsigned long interval)
- 		I915_WRITE(ILK_DPFC_CHICKEN, ~DPFC_HT_MODIFY);
- 	}
- 
--	I915_WRITE(ILK_DPFC_CONTROL, dpfc_ctl);
- 	I915_WRITE(ILK_DPFC_RECOMP_CTL, DPFC_RECOMP_STALL_EN |
- 		   (stall_watermark << DPFC_RECOMP_STALL_WM_SHIFT) |
- 		   (interval << DPFC_RECOMP_TIMER_COUNT_SHIFT));
- 	I915_WRITE(ILK_DPFC_FENCE_YOFF, crtc->y);
- 	I915_WRITE(ILK_FBC_RT_BASE, obj_priv->gtt_offset | ILK_FBC_RT_VALID);
- 	/* enable it... */
--	I915_WRITE(ILK_DPFC_CONTROL, I915_READ(ILK_DPFC_CONTROL) |
--		   DPFC_CTL_EN);
-+	I915_WRITE(ILK_DPFC_CONTROL, dpfc_ctl | DPFC_CTL_EN);
- 
- 	DRM_DEBUG_KMS("enabled fbc on plane %d\n", intel_crtc->plane);
- }
-@@ -1241,10 +1274,12 @@ void ironlake_disable_fbc(struct drm_device *dev)
- 
- 	/* Disable compression */
- 	dpfc_ctl = I915_READ(ILK_DPFC_CONTROL);
--	dpfc_ctl &= ~DPFC_CTL_EN;
--	I915_WRITE(ILK_DPFC_CONTROL, dpfc_ctl);
-+	if (dpfc_ctl & DPFC_CTL_EN) {
-+		dpfc_ctl &= ~DPFC_CTL_EN;
-+		I915_WRITE(ILK_DPFC_CONTROL, dpfc_ctl);
- 
--	DRM_DEBUG_KMS("disabled FBC\n");
-+		DRM_DEBUG_KMS("disabled FBC\n");
-+	}
- }
- 
- static bool ironlake_fbc_enabled(struct drm_device *dev)
-@@ -1286,8 +1321,7 @@ void intel_disable_fbc(struct drm_device *dev)
- 
- /**
-  * intel_update_fbc - enable/disable FBC as needed
-- * @crtc: CRTC to point the compressor at
-- * @mode: mode in use
-+ * @dev: the drm_device
-  *
-  * Set up the framebuffer compression hardware at mode set time.  We
-  * enable it if possible:
-@@ -1304,18 +1338,14 @@ void intel_disable_fbc(struct drm_device *dev)
-  *
-  * We need to enable/disable FBC on a global basis.
-  */
--static void intel_update_fbc(struct drm_crtc *crtc,
--			     struct drm_display_mode *mode)
-+static void intel_update_fbc(struct drm_device *dev)
- {
--	struct drm_device *dev = crtc->dev;
- 	struct drm_i915_private *dev_priv = dev->dev_private;
--	struct drm_framebuffer *fb = crtc->fb;
-+	struct drm_crtc *crtc = NULL, *tmp_crtc;
-+	struct intel_crtc *intel_crtc;
-+	struct drm_framebuffer *fb;
- 	struct intel_framebuffer *intel_fb;
- 	struct drm_i915_gem_object *obj_priv;
--	struct drm_crtc *tmp_crtc;
--	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
--	int plane = intel_crtc->plane;
--	int crtcs_enabled = 0;
- 
- 	DRM_DEBUG_KMS("\n");
- 
-@@ -1325,12 +1355,6 @@ static void intel_update_fbc(struct drm_crtc *crtc,
- 	if (!I915_HAS_FBC(dev))
- 		return;
- 
--	if (!crtc->fb)
--		return;
--
--	intel_fb = to_intel_framebuffer(fb);
--	obj_priv = to_intel_bo(intel_fb->obj);
--
- 	/*
- 	 * If FBC is already on, we just have to verify that we can
- 	 * keep it that way...
-@@ -1341,35 +1365,47 @@ static void intel_update_fbc(struct drm_crtc *crtc,
- 	 *   - going to an unsupported config (interlace, pixel multiply, etc.)
- 	 */
- 	list_for_each_entry(tmp_crtc, &dev->mode_config.crtc_list, head) {
--		if (tmp_crtc->enabled)
--			crtcs_enabled++;
-+		if (tmp_crtc->enabled) {
-+			if (crtc) {
-+				DRM_DEBUG_KMS("more than one pipe active, disabling compression\n");
-+				dev_priv->no_fbc_reason = FBC_MULTIPLE_PIPES;
-+				goto out_disable;
-+			}
-+			crtc = tmp_crtc;
-+		}
- 	}
--	DRM_DEBUG_KMS("%d pipes active\n", crtcs_enabled);
--	if (crtcs_enabled > 1) {
--		DRM_DEBUG_KMS("more than one pipe active, disabling compression\n");
--		dev_priv->no_fbc_reason = FBC_MULTIPLE_PIPES;
-+
-+	if (!crtc || crtc->fb == NULL) {
-+		DRM_DEBUG_KMS("no output, disabling\n");
-+		dev_priv->no_fbc_reason = FBC_NO_OUTPUT;
- 		goto out_disable;
- 	}
-+
-+	intel_crtc = to_intel_crtc(crtc);
-+	fb = crtc->fb;
-+	intel_fb = to_intel_framebuffer(fb);
-+	obj_priv = to_intel_bo(intel_fb->obj);
-+
- 	if (intel_fb->obj->size > dev_priv->cfb_size) {
- 		DRM_DEBUG_KMS("framebuffer too large, disabling "
--				"compression\n");
-+			      "compression\n");
- 		dev_priv->no_fbc_reason = FBC_STOLEN_TOO_SMALL;
- 		goto out_disable;
- 	}
--	if ((mode->flags & DRM_MODE_FLAG_INTERLACE) ||
--	    (mode->flags & DRM_MODE_FLAG_DBLSCAN)) {
-+	if ((crtc->mode.flags & DRM_MODE_FLAG_INTERLACE) ||
-+	    (crtc->mode.flags & DRM_MODE_FLAG_DBLSCAN)) {
- 		DRM_DEBUG_KMS("mode incompatible with compression, "
--				"disabling\n");
-+			      "disabling\n");
- 		dev_priv->no_fbc_reason = FBC_UNSUPPORTED_MODE;
- 		goto out_disable;
- 	}
--	if ((mode->hdisplay > 2048) ||
--	    (mode->vdisplay > 1536)) {
-+	if ((crtc->mode.hdisplay > 2048) ||
-+	    (crtc->mode.vdisplay > 1536)) {
- 		DRM_DEBUG_KMS("mode too large for compression, disabling\n");
- 		dev_priv->no_fbc_reason = FBC_MODE_TOO_LARGE;
- 		goto out_disable;
- 	}
--	if ((IS_I915GM(dev) || IS_I945GM(dev)) && plane != 0) {
-+	if ((IS_I915GM(dev) || IS_I945GM(dev)) && intel_crtc->plane != 0) {
- 		DRM_DEBUG_KMS("plane not 0, disabling compression\n");
- 		dev_priv->no_fbc_reason = FBC_BAD_PLANE;
- 		goto out_disable;
-@@ -1384,18 +1420,7 @@ static void intel_update_fbc(struct drm_crtc *crtc,
- 	if (in_dbg_master())
- 		goto out_disable;
- 
--	if (intel_fbc_enabled(dev)) {
--		/* We can re-enable it in this case, but need to update pitch */
--		if ((fb->pitch > dev_priv->cfb_pitch) ||
--		    (obj_priv->fence_reg != dev_priv->cfb_fence) ||
--		    (plane != dev_priv->cfb_plane))
--			intel_disable_fbc(dev);
--	}
--
--	/* Now try to turn it back on if possible */
--	if (!intel_fbc_enabled(dev))
--		intel_enable_fbc(crtc, 500);
--
-+	intel_enable_fbc(crtc, 500);
- 	return;
- 
- out_disable:
-@@ -1407,7 +1432,9 @@ out_disable:
- }
- 
- int
--intel_pin_and_fence_fb_obj(struct drm_device *dev, struct drm_gem_object *obj)
-+intel_pin_and_fence_fb_obj(struct drm_device *dev,
-+			   struct drm_gem_object *obj,
-+			   bool pipelined)
- {
- 	struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
- 	u32 alignment;
-@@ -1417,7 +1444,7 @@ intel_pin_and_fence_fb_obj(struct drm_device *dev, struct drm_gem_object *obj)
- 	case I915_TILING_NONE:
- 		if (IS_BROADWATER(dev) || IS_CRESTLINE(dev))
- 			alignment = 128 * 1024;
--		else if (IS_I965G(dev))
-+		else if (INTEL_INFO(dev)->gen >= 4)
- 			alignment = 4 * 1024;
- 		else
- 			alignment = 64 * 1024;
-@@ -1435,9 +1462,13 @@ intel_pin_and_fence_fb_obj(struct drm_device *dev, struct drm_gem_object *obj)
- 	}
- 
- 	ret = i915_gem_object_pin(obj, alignment);
--	if (ret != 0)
-+	if (ret)
- 		return ret;
- 
-+	ret = i915_gem_object_set_to_display_plane(obj, pipelined);
-+	if (ret)
-+		goto err_unpin;
-+
- 	/* Install a fence for tiled scan-out. Pre-i965 always needs a
- 	 * fence, whereas 965+ only requires a fence if using
- 	 * framebuffer compression.  For simplicity, we always install
-@@ -1445,14 +1476,16 @@ intel_pin_and_fence_fb_obj(struct drm_device *dev, struct drm_gem_object *obj)
- 	 */
- 	if (obj_priv->fence_reg == I915_FENCE_REG_NONE &&
- 	    obj_priv->tiling_mode != I915_TILING_NONE) {
--		ret = i915_gem_object_get_fence_reg(obj);
--		if (ret != 0) {
--			i915_gem_object_unpin(obj);
--			return ret;
--		}
-+		ret = i915_gem_object_get_fence_reg(obj, false);
-+		if (ret)
-+			goto err_unpin;
- 	}
- 
- 	return 0;
-+
-+err_unpin:
-+	i915_gem_object_unpin(obj);
-+	return ret;
- }
- 
- /* Assume fb object is pinned & idle & fenced and just update base pointers */
-@@ -1468,12 +1501,8 @@ intel_pipe_set_base_atomic(struct drm_crtc *crtc, struct drm_framebuffer *fb,
- 	struct drm_gem_object *obj;
- 	int plane = intel_crtc->plane;
- 	unsigned long Start, Offset;
--	int dspbase = (plane == 0 ? DSPAADDR : DSPBADDR);
--	int dspsurf = (plane == 0 ? DSPASURF : DSPBSURF);
--	int dspstride = (plane == 0) ? DSPASTRIDE : DSPBSTRIDE;
--	int dsptileoff = (plane == 0 ? DSPATILEOFF : DSPBTILEOFF);
--	int dspcntr_reg = (plane == 0) ? DSPACNTR : DSPBCNTR;
- 	u32 dspcntr;
-+	u32 reg;
- 
- 	switch (plane) {
- 	case 0:
-@@ -1488,7 +1517,8 @@ intel_pipe_set_base_atomic(struct drm_crtc *crtc, struct drm_framebuffer *fb,
- 	obj = intel_fb->obj;
- 	obj_priv = to_intel_bo(obj);
- 
--	dspcntr = I915_READ(dspcntr_reg);
-+	reg = DSPCNTR(plane);
-+	dspcntr = I915_READ(reg);
- 	/* Mask out pixel format bits in case we change it */
- 	dspcntr &= ~DISPPLANE_PIXFORMAT_MASK;
- 	switch (fb->bits_per_pixel) {
-@@ -1509,7 +1539,7 @@ intel_pipe_set_base_atomic(struct drm_crtc *crtc, struct drm_framebuffer *fb,
- 		DRM_ERROR("Unknown color depth\n");
- 		return -EINVAL;
- 	}
--	if (IS_I965G(dev)) {
-+	if (INTEL_INFO(dev)->gen >= 4) {
- 		if (obj_priv->tiling_mode != I915_TILING_NONE)
- 			dspcntr |= DISPPLANE_TILED;
- 		else
-@@ -1520,28 +1550,24 @@ intel_pipe_set_base_atomic(struct drm_crtc *crtc, struct drm_framebuffer *fb,
- 		/* must disable */
- 		dspcntr |= DISPPLANE_TRICKLE_FEED_DISABLE;
- 
--	I915_WRITE(dspcntr_reg, dspcntr);
-+	I915_WRITE(reg, dspcntr);
- 
- 	Start = obj_priv->gtt_offset;
- 	Offset = y * fb->pitch + x * (fb->bits_per_pixel / 8);
- 
- 	DRM_DEBUG_KMS("Writing base %08lX %08lX %d %d %d\n",
- 		      Start, Offset, x, y, fb->pitch);
--	I915_WRITE(dspstride, fb->pitch);
--	if (IS_I965G(dev)) {
--		I915_WRITE(dspsurf, Start);
--		I915_WRITE(dsptileoff, (y << 16) | x);
--		I915_WRITE(dspbase, Offset);
--	} else {
--		I915_WRITE(dspbase, Start + Offset);
--	}
--	POSTING_READ(dspbase);
--
--	if (IS_I965G(dev) || plane == 0)
--		intel_update_fbc(crtc, &crtc->mode);
-+	I915_WRITE(DSPSTRIDE(plane), fb->pitch);
-+	if (INTEL_INFO(dev)->gen >= 4) {
-+		I915_WRITE(DSPSURF(plane), Start);
-+		I915_WRITE(DSPTILEOFF(plane), (y << 16) | x);
-+		I915_WRITE(DSPADDR(plane), Offset);
-+	} else
-+		I915_WRITE(DSPADDR(plane), Start + Offset);
-+	POSTING_READ(reg);
- 
--	intel_wait_for_vblank(dev, intel_crtc->pipe);
--	intel_increase_pllclock(crtc, true);
-+	intel_update_fbc(dev);
-+	intel_increase_pllclock(crtc);
- 
- 	return 0;
- }
-@@ -1553,11 +1579,6 @@ intel_pipe_set_base(struct drm_crtc *crtc, int x, int y,
- 	struct drm_device *dev = crtc->dev;
- 	struct drm_i915_master_private *master_priv;
- 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
--	struct intel_framebuffer *intel_fb;
--	struct drm_i915_gem_object *obj_priv;
--	struct drm_gem_object *obj;
--	int pipe = intel_crtc->pipe;
--	int plane = intel_crtc->plane;
- 	int ret;
- 
- 	/* no fb bound */
-@@ -1566,45 +1587,41 @@ intel_pipe_set_base(struct drm_crtc *crtc, int x, int y,
- 		return 0;
- 	}
- 
--	switch (plane) {
-+	switch (intel_crtc->plane) {
- 	case 0:
- 	case 1:
- 		break;
- 	default:
--		DRM_ERROR("Can't update plane %d in SAREA\n", plane);
- 		return -EINVAL;
- 	}
- 
--	intel_fb = to_intel_framebuffer(crtc->fb);
--	obj = intel_fb->obj;
--	obj_priv = to_intel_bo(obj);
--
- 	mutex_lock(&dev->struct_mutex);
--	ret = intel_pin_and_fence_fb_obj(dev, obj);
-+	ret = intel_pin_and_fence_fb_obj(dev,
-+					 to_intel_framebuffer(crtc->fb)->obj,
-+					 false);
- 	if (ret != 0) {
- 		mutex_unlock(&dev->struct_mutex);
- 		return ret;
- 	}
- 
--	ret = i915_gem_object_set_to_display_plane(obj);
--	if (ret != 0) {
--		i915_gem_object_unpin(obj);
--		mutex_unlock(&dev->struct_mutex);
--		return ret;
-+	if (old_fb) {
-+		struct drm_i915_private *dev_priv = dev->dev_private;
-+		struct drm_gem_object *obj = to_intel_framebuffer(old_fb)->obj;
-+		struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
-+
-+		wait_event(dev_priv->pending_flip_queue,
-+			   atomic_read(&obj_priv->pending_flip) == 0);
- 	}
- 
- 	ret = intel_pipe_set_base_atomic(crtc, crtc->fb, x, y);
- 	if (ret) {
--		i915_gem_object_unpin(obj);
-+		i915_gem_object_unpin(to_intel_framebuffer(crtc->fb)->obj);
- 		mutex_unlock(&dev->struct_mutex);
- 		return ret;
- 	}
- 
--	if (old_fb) {
--		intel_fb = to_intel_framebuffer(old_fb);
--		obj_priv = to_intel_bo(intel_fb->obj);
--		i915_gem_object_unpin(intel_fb->obj);
--	}
-+	if (old_fb)
-+		i915_gem_object_unpin(to_intel_framebuffer(old_fb)->obj);
- 
- 	mutex_unlock(&dev->struct_mutex);
- 
-@@ -1615,7 +1632,7 @@ intel_pipe_set_base(struct drm_crtc *crtc, int x, int y,
- 	if (!master_priv->sarea_priv)
- 		return 0;
- 
--	if (pipe) {
-+	if (intel_crtc->pipe) {
- 		master_priv->sarea_priv->pipeB_x = x;
- 		master_priv->sarea_priv->pipeB_y = y;
- 	} else {
-@@ -1626,7 +1643,7 @@ intel_pipe_set_base(struct drm_crtc *crtc, int x, int y,
- 	return 0;
- }
- 
--static void ironlake_set_pll_edp (struct drm_crtc *crtc, int clock)
-+static void ironlake_set_pll_edp(struct drm_crtc *crtc, int clock)
- {
- 	struct drm_device *dev = crtc->dev;
- 	struct drm_i915_private *dev_priv = dev->dev_private;
-@@ -1659,9 +1676,41 @@ static void ironlake_set_pll_edp (struct drm_crtc *crtc, int clock)
- 	}
- 	I915_WRITE(DP_A, dpa_ctl);
- 
-+	POSTING_READ(DP_A);
- 	udelay(500);
- }
- 
-+static void intel_fdi_normal_train(struct drm_crtc *crtc)
-+{
-+	struct drm_device *dev = crtc->dev;
-+	struct drm_i915_private *dev_priv = dev->dev_private;
-+	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
-+	int pipe = intel_crtc->pipe;
-+	u32 reg, temp;
-+
-+	/* enable normal train */
-+	reg = FDI_TX_CTL(pipe);
-+	temp = I915_READ(reg);
-+	temp &= ~FDI_LINK_TRAIN_NONE;
-+	temp |= FDI_LINK_TRAIN_NONE | FDI_TX_ENHANCE_FRAME_ENABLE;
-+	I915_WRITE(reg, temp);
-+
-+	reg = FDI_RX_CTL(pipe);
-+	temp = I915_READ(reg);
-+	if (HAS_PCH_CPT(dev)) {
-+		temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
-+		temp |= FDI_LINK_TRAIN_NORMAL_CPT;
-+	} else {
-+		temp &= ~FDI_LINK_TRAIN_NONE;
-+		temp |= FDI_LINK_TRAIN_NONE;
-+	}
-+	I915_WRITE(reg, temp | FDI_RX_ENHANCE_FRAME_ENABLE);
-+
-+	/* wait one idle pattern time */
-+	POSTING_READ(reg);
-+	udelay(1000);
-+}
-+
- /* The FDI link training functions for ILK/Ibexpeak. */
- static void ironlake_fdi_link_train(struct drm_crtc *crtc)
- {
-@@ -1669,84 +1718,88 @@ static void ironlake_fdi_link_train(struct drm_crtc *crtc)
- 	struct drm_i915_private *dev_priv = dev->dev_private;
- 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
- 	int pipe = intel_crtc->pipe;
--	int fdi_tx_reg = (pipe == 0) ? FDI_TXA_CTL : FDI_TXB_CTL;
--	int fdi_rx_reg = (pipe == 0) ? FDI_RXA_CTL : FDI_RXB_CTL;
--	int fdi_rx_iir_reg = (pipe == 0) ? FDI_RXA_IIR : FDI_RXB_IIR;
--	int fdi_rx_imr_reg = (pipe == 0) ? FDI_RXA_IMR : FDI_RXB_IMR;
--	u32 temp, tries = 0;
-+	u32 reg, temp, tries;
- 
- 	/* Train 1: unmask FDI RX Interrupt symbol_lock and bit_lock bit
- 	   for train result */
--	temp = I915_READ(fdi_rx_imr_reg);
-+	reg = FDI_RX_IMR(pipe);
-+	temp = I915_READ(reg);
- 	temp &= ~FDI_RX_SYMBOL_LOCK;
- 	temp &= ~FDI_RX_BIT_LOCK;
--	I915_WRITE(fdi_rx_imr_reg, temp);
--	I915_READ(fdi_rx_imr_reg);
-+	I915_WRITE(reg, temp);
-+	I915_READ(reg);
- 	udelay(150);
- 
- 	/* enable CPU FDI TX and PCH FDI RX */
--	temp = I915_READ(fdi_tx_reg);
--	temp |= FDI_TX_ENABLE;
-+	reg = FDI_TX_CTL(pipe);
-+	temp = I915_READ(reg);
- 	temp &= ~(7 << 19);
- 	temp |= (intel_crtc->fdi_lanes - 1) << 19;
- 	temp &= ~FDI_LINK_TRAIN_NONE;
- 	temp |= FDI_LINK_TRAIN_PATTERN_1;
--	I915_WRITE(fdi_tx_reg, temp);
--	I915_READ(fdi_tx_reg);
-+	I915_WRITE(reg, temp | FDI_TX_ENABLE);
- 
--	temp = I915_READ(fdi_rx_reg);
-+	reg = FDI_RX_CTL(pipe);
-+	temp = I915_READ(reg);
- 	temp &= ~FDI_LINK_TRAIN_NONE;
- 	temp |= FDI_LINK_TRAIN_PATTERN_1;
--	I915_WRITE(fdi_rx_reg, temp | FDI_RX_ENABLE);
--	I915_READ(fdi_rx_reg);
-+	I915_WRITE(reg, temp | FDI_RX_ENABLE);
-+
-+	POSTING_READ(reg);
- 	udelay(150);
- 
-+	/* Ironlake workaround, enable clock pointer after FDI enable */
-+	I915_WRITE(FDI_RX_CHICKEN(pipe), FDI_RX_PHASE_SYNC_POINTER_ENABLE);
-+
-+	reg = FDI_RX_IIR(pipe);
- 	for (tries = 0; tries < 5; tries++) {
--		temp = I915_READ(fdi_rx_iir_reg);
-+		temp = I915_READ(reg);
- 		DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);
- 
- 		if ((temp & FDI_RX_BIT_LOCK)) {
- 			DRM_DEBUG_KMS("FDI train 1 done.\n");
--			I915_WRITE(fdi_rx_iir_reg,
--				   temp | FDI_RX_BIT_LOCK);
-+			I915_WRITE(reg, temp | FDI_RX_BIT_LOCK);
- 			break;
- 		}
- 	}
- 	if (tries == 5)
--		DRM_DEBUG_KMS("FDI train 1 fail!\n");
-+		DRM_ERROR("FDI train 1 fail!\n");
- 
- 	/* Train 2 */
--	temp = I915_READ(fdi_tx_reg);
-+	reg = FDI_TX_CTL(pipe);
-+	temp = I915_READ(reg);
- 	temp &= ~FDI_LINK_TRAIN_NONE;
- 	temp |= FDI_LINK_TRAIN_PATTERN_2;
--	I915_WRITE(fdi_tx_reg, temp);
-+	I915_WRITE(reg, temp);
- 
--	temp = I915_READ(fdi_rx_reg);
-+	reg = FDI_RX_CTL(pipe);
-+	temp = I915_READ(reg);
- 	temp &= ~FDI_LINK_TRAIN_NONE;
- 	temp |= FDI_LINK_TRAIN_PATTERN_2;
--	I915_WRITE(fdi_rx_reg, temp);
--	udelay(150);
-+	I915_WRITE(reg, temp);
- 
--	tries = 0;
-+	POSTING_READ(reg);
-+	udelay(150);
- 
-+	reg = FDI_RX_IIR(pipe);
- 	for (tries = 0; tries < 5; tries++) {
--		temp = I915_READ(fdi_rx_iir_reg);
-+		temp = I915_READ(reg);
- 		DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);
- 
- 		if (temp & FDI_RX_SYMBOL_LOCK) {
--			I915_WRITE(fdi_rx_iir_reg,
--				   temp | FDI_RX_SYMBOL_LOCK);
-+			I915_WRITE(reg, temp | FDI_RX_SYMBOL_LOCK);
- 			DRM_DEBUG_KMS("FDI train 2 done.\n");
- 			break;
- 		}
- 	}
- 	if (tries == 5)
--		DRM_DEBUG_KMS("FDI train 2 fail!\n");
-+		DRM_ERROR("FDI train 2 fail!\n");
- 
- 	DRM_DEBUG_KMS("FDI train done\n");
-+
- }
- 
--static int snb_b_fdi_train_param [] = {
-+static const int snb_b_fdi_train_param[] = {
- 	FDI_LINK_TRAIN_400MV_0DB_SNB_B,
- 	FDI_LINK_TRAIN_400MV_6DB_SNB_B,
- 	FDI_LINK_TRAIN_600MV_3_5DB_SNB_B,
-@@ -1760,24 +1813,22 @@ static void gen6_fdi_link_train(struct drm_crtc *crtc)
- 	struct drm_i915_private *dev_priv = dev->dev_private;
- 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
- 	int pipe = intel_crtc->pipe;
--	int fdi_tx_reg = (pipe == 0) ? FDI_TXA_CTL : FDI_TXB_CTL;
--	int fdi_rx_reg = (pipe == 0) ? FDI_RXA_CTL : FDI_RXB_CTL;
--	int fdi_rx_iir_reg = (pipe == 0) ? FDI_RXA_IIR : FDI_RXB_IIR;
--	int fdi_rx_imr_reg = (pipe == 0) ? FDI_RXA_IMR : FDI_RXB_IMR;
--	u32 temp, i;
-+	u32 reg, temp, i;
- 
- 	/* Train 1: unmask FDI RX Interrupt symbol_lock and bit_lock bit
- 	   for train result */
--	temp = I915_READ(fdi_rx_imr_reg);
-+	reg = FDI_RX_IMR(pipe);
-+	temp = I915_READ(reg);
- 	temp &= ~FDI_RX_SYMBOL_LOCK;
- 	temp &= ~FDI_RX_BIT_LOCK;
--	I915_WRITE(fdi_rx_imr_reg, temp);
--	I915_READ(fdi_rx_imr_reg);
-+	I915_WRITE(reg, temp);
-+
-+	POSTING_READ(reg);
- 	udelay(150);
- 
- 	/* enable CPU FDI TX and PCH FDI RX */
--	temp = I915_READ(fdi_tx_reg);
--	temp |= FDI_TX_ENABLE;
-+	reg = FDI_TX_CTL(pipe);
-+	temp = I915_READ(reg);
- 	temp &= ~(7 << 19);
- 	temp |= (intel_crtc->fdi_lanes - 1) << 19;
- 	temp &= ~FDI_LINK_TRAIN_NONE;
-@@ -1785,10 +1836,10 @@ static void gen6_fdi_link_train(struct drm_crtc *crtc)
- 	temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
- 	/* SNB-B */
- 	temp |= FDI_LINK_TRAIN_400MV_0DB_SNB_B;
--	I915_WRITE(fdi_tx_reg, temp);
--	I915_READ(fdi_tx_reg);
-+	I915_WRITE(reg, temp | FDI_TX_ENABLE);
- 
--	temp = I915_READ(fdi_rx_reg);
-+	reg = FDI_RX_CTL(pipe);
-+	temp = I915_READ(reg);
- 	if (HAS_PCH_CPT(dev)) {
- 		temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
- 		temp |= FDI_LINK_TRAIN_PATTERN_1_CPT;
-@@ -1796,32 +1847,37 @@ static void gen6_fdi_link_train(struct drm_crtc *crtc)
- 		temp &= ~FDI_LINK_TRAIN_NONE;
- 		temp |= FDI_LINK_TRAIN_PATTERN_1;
- 	}
--	I915_WRITE(fdi_rx_reg, temp | FDI_RX_ENABLE);
--	I915_READ(fdi_rx_reg);
-+	I915_WRITE(reg, temp | FDI_RX_ENABLE);
-+
-+	POSTING_READ(reg);
- 	udelay(150);
- 
- 	for (i = 0; i < 4; i++) {
--		temp = I915_READ(fdi_tx_reg);
-+		reg = FDI_TX_CTL(pipe);
-+		temp = I915_READ(reg);
- 		temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
- 		temp |= snb_b_fdi_train_param[i];
--		I915_WRITE(fdi_tx_reg, temp);
-+		I915_WRITE(reg, temp);
-+
-+		POSTING_READ(reg);
- 		udelay(500);
- 
--		temp = I915_READ(fdi_rx_iir_reg);
-+		reg = FDI_RX_IIR(pipe);
-+		temp = I915_READ(reg);
- 		DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);
- 
- 		if (temp & FDI_RX_BIT_LOCK) {
--			I915_WRITE(fdi_rx_iir_reg,
--				   temp | FDI_RX_BIT_LOCK);
-+			I915_WRITE(reg, temp | FDI_RX_BIT_LOCK);
- 			DRM_DEBUG_KMS("FDI train 1 done.\n");
- 			break;
- 		}
- 	}
- 	if (i == 4)
--		DRM_DEBUG_KMS("FDI train 1 fail!\n");
-+		DRM_ERROR("FDI train 1 fail!\n");
- 
- 	/* Train 2 */
--	temp = I915_READ(fdi_tx_reg);
-+	reg = FDI_TX_CTL(pipe);
-+	temp = I915_READ(reg);
- 	temp &= ~FDI_LINK_TRAIN_NONE;
- 	temp |= FDI_LINK_TRAIN_PATTERN_2;
- 	if (IS_GEN6(dev)) {
-@@ -1829,9 +1885,10 @@ static void gen6_fdi_link_train(struct drm_crtc *crtc)
- 		/* SNB-B */
- 		temp |= FDI_LINK_TRAIN_400MV_0DB_SNB_B;
- 	}
--	I915_WRITE(fdi_tx_reg, temp);
-+	I915_WRITE(reg, temp);
- 
--	temp = I915_READ(fdi_rx_reg);
-+	reg = FDI_RX_CTL(pipe);
-+	temp = I915_READ(reg);
- 	if (HAS_PCH_CPT(dev)) {
- 		temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
- 		temp |= FDI_LINK_TRAIN_PATTERN_2_CPT;
-@@ -1839,535 +1896,596 @@ static void gen6_fdi_link_train(struct drm_crtc *crtc)
- 		temp &= ~FDI_LINK_TRAIN_NONE;
- 		temp |= FDI_LINK_TRAIN_PATTERN_2;
- 	}
--	I915_WRITE(fdi_rx_reg, temp);
-+	I915_WRITE(reg, temp);
-+
-+	POSTING_READ(reg);
- 	udelay(150);
- 
- 	for (i = 0; i < 4; i++) {
--		temp = I915_READ(fdi_tx_reg);
-+		reg = FDI_TX_CTL(pipe);
-+		temp = I915_READ(reg);
- 		temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
- 		temp |= snb_b_fdi_train_param[i];
--		I915_WRITE(fdi_tx_reg, temp);
-+		I915_WRITE(reg, temp);
-+
-+		POSTING_READ(reg);
- 		udelay(500);
- 
--		temp = I915_READ(fdi_rx_iir_reg);
-+		reg = FDI_RX_IIR(pipe);
-+		temp = I915_READ(reg);
- 		DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);
- 
- 		if (temp & FDI_RX_SYMBOL_LOCK) {
--			I915_WRITE(fdi_rx_iir_reg,
--				   temp | FDI_RX_SYMBOL_LOCK);
-+			I915_WRITE(reg, temp | FDI_RX_SYMBOL_LOCK);
- 			DRM_DEBUG_KMS("FDI train 2 done.\n");
- 			break;
- 		}
- 	}
- 	if (i == 4)
--		DRM_DEBUG_KMS("FDI train 2 fail!\n");
-+		DRM_ERROR("FDI train 2 fail!\n");
- 
- 	DRM_DEBUG_KMS("FDI train done.\n");
- }
- 
--static void ironlake_crtc_dpms(struct drm_crtc *crtc, int mode)
-+static void ironlake_fdi_enable(struct drm_crtc *crtc)
- {
- 	struct drm_device *dev = crtc->dev;
- 	struct drm_i915_private *dev_priv = dev->dev_private;
- 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
- 	int pipe = intel_crtc->pipe;
--	int plane = intel_crtc->plane;
--	int pch_dpll_reg = (pipe == 0) ? PCH_DPLL_A : PCH_DPLL_B;
--	int pipeconf_reg = (pipe == 0) ? PIPEACONF : PIPEBCONF;
--	int dspcntr_reg = (plane == 0) ? DSPACNTR : DSPBCNTR;
--	int dspbase_reg = (plane == 0) ? DSPAADDR : DSPBADDR;
--	int fdi_tx_reg = (pipe == 0) ? FDI_TXA_CTL : FDI_TXB_CTL;
--	int fdi_rx_reg = (pipe == 0) ? FDI_RXA_CTL : FDI_RXB_CTL;
--	int transconf_reg = (pipe == 0) ? TRANSACONF : TRANSBCONF;
--	int cpu_htot_reg = (pipe == 0) ? HTOTAL_A : HTOTAL_B;
--	int cpu_hblank_reg = (pipe == 0) ? HBLANK_A : HBLANK_B;
--	int cpu_hsync_reg = (pipe == 0) ? HSYNC_A : HSYNC_B;
--	int cpu_vtot_reg = (pipe == 0) ? VTOTAL_A : VTOTAL_B;
--	int cpu_vblank_reg = (pipe == 0) ? VBLANK_A : VBLANK_B;
--	int cpu_vsync_reg = (pipe == 0) ? VSYNC_A : VSYNC_B;
--	int trans_htot_reg = (pipe == 0) ? TRANS_HTOTAL_A : TRANS_HTOTAL_B;
--	int trans_hblank_reg = (pipe == 0) ? TRANS_HBLANK_A : TRANS_HBLANK_B;
--	int trans_hsync_reg = (pipe == 0) ? TRANS_HSYNC_A : TRANS_HSYNC_B;
--	int trans_vtot_reg = (pipe == 0) ? TRANS_VTOTAL_A : TRANS_VTOTAL_B;
--	int trans_vblank_reg = (pipe == 0) ? TRANS_VBLANK_A : TRANS_VBLANK_B;
--	int trans_vsync_reg = (pipe == 0) ? TRANS_VSYNC_A : TRANS_VSYNC_B;
--	int trans_dpll_sel = (pipe == 0) ? 0 : 1;
--	u32 temp;
--	u32 pipe_bpc;
--
--	temp = I915_READ(pipeconf_reg);
--	pipe_bpc = temp & PIPE_BPC_MASK;
-+	u32 reg, temp;
- 
--	/* XXX: When our outputs are all unaware of DPMS modes other than off
--	 * and on, we should map those modes to DRM_MODE_DPMS_OFF in the CRTC.
--	 */
--	switch (mode) {
--	case DRM_MODE_DPMS_ON:
--	case DRM_MODE_DPMS_STANDBY:
--	case DRM_MODE_DPMS_SUSPEND:
--		DRM_DEBUG_KMS("crtc %d/%d dpms on\n", pipe, plane);
-+	/* Write the TU size bits so error detection works */
-+	I915_WRITE(FDI_RX_TUSIZE1(pipe),
-+		   I915_READ(PIPE_DATA_M1(pipe)) & TU_SIZE_MASK);
- 
--		if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) {
--			temp = I915_READ(PCH_LVDS);
--			if ((temp & LVDS_PORT_EN) == 0) {
--				I915_WRITE(PCH_LVDS, temp | LVDS_PORT_EN);
--				POSTING_READ(PCH_LVDS);
--			}
--		}
-+	/* enable PCH FDI RX PLL, wait warmup plus DMI latency */
-+	reg = FDI_RX_CTL(pipe);
-+	temp = I915_READ(reg);
-+	temp &= ~((0x7 << 19) | (0x7 << 16));
-+	temp |= (intel_crtc->fdi_lanes - 1) << 19;
-+	temp |= (I915_READ(PIPECONF(pipe)) & PIPE_BPC_MASK) << 11;
-+	I915_WRITE(reg, temp | FDI_RX_PLL_ENABLE);
- 
--		if (!HAS_eDP) {
-+	POSTING_READ(reg);
-+	udelay(200);
- 
--			/* enable PCH FDI RX PLL, wait warmup plus DMI latency */
--			temp = I915_READ(fdi_rx_reg);
--			/*
--			 * make the BPC in FDI Rx be consistent with that in
--			 * pipeconf reg.
--			 */
--			temp &= ~(0x7 << 16);
--			temp |= (pipe_bpc << 11);
--			temp &= ~(7 << 19);
--			temp |= (intel_crtc->fdi_lanes - 1) << 19;
--			I915_WRITE(fdi_rx_reg, temp | FDI_RX_PLL_ENABLE);
--			I915_READ(fdi_rx_reg);
--			udelay(200);
-+	/* Switch from Rawclk to PCDclk */
-+	temp = I915_READ(reg);
-+	I915_WRITE(reg, temp | FDI_PCDCLK);
- 
--			/* Switch from Rawclk to PCDclk */
--			temp = I915_READ(fdi_rx_reg);
--			I915_WRITE(fdi_rx_reg, temp | FDI_SEL_PCDCLK);
--			I915_READ(fdi_rx_reg);
--			udelay(200);
-+	POSTING_READ(reg);
-+	udelay(200);
- 
--			/* Enable CPU FDI TX PLL, always on for Ironlake */
--			temp = I915_READ(fdi_tx_reg);
--			if ((temp & FDI_TX_PLL_ENABLE) == 0) {
--				I915_WRITE(fdi_tx_reg, temp | FDI_TX_PLL_ENABLE);
--				I915_READ(fdi_tx_reg);
--				udelay(100);
--			}
--		}
-+	/* Enable CPU FDI TX PLL, always on for Ironlake */
-+	reg = FDI_TX_CTL(pipe);
-+	temp = I915_READ(reg);
-+	if ((temp & FDI_TX_PLL_ENABLE) == 0) {
-+		I915_WRITE(reg, temp | FDI_TX_PLL_ENABLE);
- 
--		/* Enable panel fitting for LVDS */
--		if (dev_priv->pch_pf_size &&
--		    (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)
--		    || HAS_eDP || intel_pch_has_edp(crtc))) {
--			/* Force use of hard-coded filter coefficients
--			 * as some pre-programmed values are broken,
--			 * e.g. x201.
--			 */
--			I915_WRITE(pipe ? PFB_CTL_1 : PFA_CTL_1,
--				   PF_ENABLE | PF_FILTER_MED_3x3);
--			I915_WRITE(pipe ? PFB_WIN_POS : PFA_WIN_POS,
--				   dev_priv->pch_pf_pos);
--			I915_WRITE(pipe ? PFB_WIN_SZ : PFA_WIN_SZ,
--				   dev_priv->pch_pf_size);
--		}
-+		POSTING_READ(reg);
-+		udelay(100);
-+	}
-+}
- 
--		/* Enable CPU pipe */
--		temp = I915_READ(pipeconf_reg);
--		if ((temp & PIPEACONF_ENABLE) == 0) {
--			I915_WRITE(pipeconf_reg, temp | PIPEACONF_ENABLE);
--			I915_READ(pipeconf_reg);
--			udelay(100);
--		}
-+static void intel_flush_display_plane(struct drm_device *dev,
-+				      int plane)
-+{
-+	struct drm_i915_private *dev_priv = dev->dev_private;
-+	u32 reg = DSPADDR(plane);
-+	I915_WRITE(reg, I915_READ(reg));
-+}
- 
--		/* configure and enable CPU plane */
--		temp = I915_READ(dspcntr_reg);
--		if ((temp & DISPLAY_PLANE_ENABLE) == 0) {
--			I915_WRITE(dspcntr_reg, temp | DISPLAY_PLANE_ENABLE);
--			/* Flush the plane changes */
--			I915_WRITE(dspbase_reg, I915_READ(dspbase_reg));
--		}
-+/*
-+ * When we disable a pipe, we need to clear any pending scanline wait events
-+ * to avoid hanging the ring, which we assume we are waiting on.
-+ */
-+static void intel_clear_scanline_wait(struct drm_device *dev)
-+{
-+	struct drm_i915_private *dev_priv = dev->dev_private;
-+	u32 tmp;
- 
--		if (!HAS_eDP) {
--			/* For PCH output, training FDI link */
--			if (IS_GEN6(dev))
--				gen6_fdi_link_train(crtc);
--			else
--				ironlake_fdi_link_train(crtc);
-+	if (IS_GEN2(dev))
-+		/* Can't break the hang on i8xx */
-+		return;
- 
--			/* enable PCH DPLL */
--			temp = I915_READ(pch_dpll_reg);
--			if ((temp & DPLL_VCO_ENABLE) == 0) {
--				I915_WRITE(pch_dpll_reg, temp | DPLL_VCO_ENABLE);
--				I915_READ(pch_dpll_reg);
--			}
--			udelay(200);
-+	tmp = I915_READ(PRB0_CTL);
-+	if (tmp & RING_WAIT) {
-+		I915_WRITE(PRB0_CTL, tmp);
-+		POSTING_READ(PRB0_CTL);
-+	}
-+}
- 
--			if (HAS_PCH_CPT(dev)) {
--				/* Be sure PCH DPLL SEL is set */
--				temp = I915_READ(PCH_DPLL_SEL);
--				if (trans_dpll_sel == 0 &&
--						(temp & TRANSA_DPLL_ENABLE) == 0)
--					temp |= (TRANSA_DPLL_ENABLE | TRANSA_DPLLA_SEL);
--				else if (trans_dpll_sel == 1 &&
--						(temp & TRANSB_DPLL_ENABLE) == 0)
--					temp |= (TRANSB_DPLL_ENABLE | TRANSB_DPLLB_SEL);
--				I915_WRITE(PCH_DPLL_SEL, temp);
--				I915_READ(PCH_DPLL_SEL);
--			}
-+static void intel_crtc_wait_for_pending_flips(struct drm_crtc *crtc)
-+{
-+	struct drm_i915_gem_object *obj_priv;
-+	struct drm_i915_private *dev_priv;
- 
--			/* set transcoder timing */
--			I915_WRITE(trans_htot_reg, I915_READ(cpu_htot_reg));
--			I915_WRITE(trans_hblank_reg, I915_READ(cpu_hblank_reg));
--			I915_WRITE(trans_hsync_reg, I915_READ(cpu_hsync_reg));
--
--			I915_WRITE(trans_vtot_reg, I915_READ(cpu_vtot_reg));
--			I915_WRITE(trans_vblank_reg, I915_READ(cpu_vblank_reg));
--			I915_WRITE(trans_vsync_reg, I915_READ(cpu_vsync_reg));
--
--			/* enable normal train */
--			temp = I915_READ(fdi_tx_reg);
--			temp &= ~FDI_LINK_TRAIN_NONE;
--			I915_WRITE(fdi_tx_reg, temp | FDI_LINK_TRAIN_NONE |
--					FDI_TX_ENHANCE_FRAME_ENABLE);
--			I915_READ(fdi_tx_reg);
--
--			temp = I915_READ(fdi_rx_reg);
--			if (HAS_PCH_CPT(dev)) {
--				temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
--				temp |= FDI_LINK_TRAIN_NORMAL_CPT;
--			} else {
--				temp &= ~FDI_LINK_TRAIN_NONE;
--				temp |= FDI_LINK_TRAIN_NONE;
--			}
--			I915_WRITE(fdi_rx_reg, temp | FDI_RX_ENHANCE_FRAME_ENABLE);
--			I915_READ(fdi_rx_reg);
-+	if (crtc->fb == NULL)
-+		return;
- 
--			/* wait one idle pattern time */
--			udelay(100);
-+	obj_priv = to_intel_bo(to_intel_framebuffer(crtc->fb)->obj);
-+	dev_priv = crtc->dev->dev_private;
-+	wait_event(dev_priv->pending_flip_queue,
-+		   atomic_read(&obj_priv->pending_flip) == 0);
-+}
- 
--			/* For PCH DP, enable TRANS_DP_CTL */
--			if (HAS_PCH_CPT(dev) &&
--			    intel_pipe_has_type(crtc, INTEL_OUTPUT_DISPLAYPORT)) {
--				int trans_dp_ctl = (pipe == 0) ? TRANS_DP_CTL_A : TRANS_DP_CTL_B;
--				int reg;
--
--				reg = I915_READ(trans_dp_ctl);
--				reg &= ~(TRANS_DP_PORT_SEL_MASK |
--					 TRANS_DP_SYNC_MASK);
--				reg |= (TRANS_DP_OUTPUT_ENABLE |
--					TRANS_DP_ENH_FRAMING);
--
--				if (crtc->mode.flags & DRM_MODE_FLAG_PHSYNC)
--				      reg |= TRANS_DP_HSYNC_ACTIVE_HIGH;
--				if (crtc->mode.flags & DRM_MODE_FLAG_PVSYNC)
--				      reg |= TRANS_DP_VSYNC_ACTIVE_HIGH;
--
--				switch (intel_trans_dp_port_sel(crtc)) {
--				case PCH_DP_B:
--					reg |= TRANS_DP_PORT_SEL_B;
--					break;
--				case PCH_DP_C:
--					reg |= TRANS_DP_PORT_SEL_C;
--					break;
--				case PCH_DP_D:
--					reg |= TRANS_DP_PORT_SEL_D;
--					break;
--				default:
--					DRM_DEBUG_KMS("Wrong PCH DP port return. Guess port B\n");
--					reg |= TRANS_DP_PORT_SEL_B;
--					break;
--				}
-+static void ironlake_crtc_enable(struct drm_crtc *crtc)
-+{
-+	struct drm_device *dev = crtc->dev;
-+	struct drm_i915_private *dev_priv = dev->dev_private;
-+	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
-+	int pipe = intel_crtc->pipe;
-+	int plane = intel_crtc->plane;
-+	u32 reg, temp;
- 
--				I915_WRITE(trans_dp_ctl, reg);
--				POSTING_READ(trans_dp_ctl);
--			}
-+	if (intel_crtc->active)
-+		return;
- 
--			/* enable PCH transcoder */
--			temp = I915_READ(transconf_reg);
--			/*
--			 * make the BPC in transcoder be consistent with
--			 * that in pipeconf reg.
--			 */
--			temp &= ~PIPE_BPC_MASK;
--			temp |= pipe_bpc;
--			I915_WRITE(transconf_reg, temp | TRANS_ENABLE);
--			I915_READ(transconf_reg);
-+	intel_crtc->active = true;
-+	intel_update_watermarks(dev);
- 
--			if (wait_for(I915_READ(transconf_reg) & TRANS_STATE_ENABLE, 100, 1))
--				DRM_ERROR("failed to enable transcoder\n");
--		}
-+	if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) {
-+		temp = I915_READ(PCH_LVDS);
-+		if ((temp & LVDS_PORT_EN) == 0)
-+			I915_WRITE(PCH_LVDS, temp | LVDS_PORT_EN);
-+	}
- 
--		intel_crtc_load_lut(crtc);
-+	ironlake_fdi_enable(crtc);
- 
--		intel_update_fbc(crtc, &crtc->mode);
--		break;
-+	/* Enable panel fitting for LVDS */
-+	if (dev_priv->pch_pf_size &&
-+	    (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS) || HAS_eDP)) {
-+		/* Force use of hard-coded filter coefficients
-+		 * as some pre-programmed values are broken,
-+		 * e.g. x201.
-+		 */
-+		I915_WRITE(pipe ? PFB_CTL_1 : PFA_CTL_1,
-+			   PF_ENABLE | PF_FILTER_MED_3x3);
-+		I915_WRITE(pipe ? PFB_WIN_POS : PFA_WIN_POS,
-+			   dev_priv->pch_pf_pos);
-+		I915_WRITE(pipe ? PFB_WIN_SZ : PFA_WIN_SZ,
-+			   dev_priv->pch_pf_size);
-+	}
-+
-+	/* Enable CPU pipe */
-+	reg = PIPECONF(pipe);
-+	temp = I915_READ(reg);
-+	if ((temp & PIPECONF_ENABLE) == 0) {
-+		I915_WRITE(reg, temp | PIPECONF_ENABLE);
-+		POSTING_READ(reg);
-+		intel_wait_for_vblank(dev, intel_crtc->pipe);
-+	}
-+
-+	/* configure and enable CPU plane */
-+	reg = DSPCNTR(plane);
-+	temp = I915_READ(reg);
-+	if ((temp & DISPLAY_PLANE_ENABLE) == 0) {
-+		I915_WRITE(reg, temp | DISPLAY_PLANE_ENABLE);
-+		intel_flush_display_plane(dev, plane);
-+	}
-+
-+	/* For PCH output, training FDI link */
-+	if (IS_GEN6(dev))
-+		gen6_fdi_link_train(crtc);
-+	else
-+		ironlake_fdi_link_train(crtc);
-+
-+	/* enable PCH DPLL */
-+	reg = PCH_DPLL(pipe);
-+	temp = I915_READ(reg);
-+	if ((temp & DPLL_VCO_ENABLE) == 0) {
-+		I915_WRITE(reg, temp | DPLL_VCO_ENABLE);
-+		POSTING_READ(reg);
-+		udelay(200);
-+	}
- 
--	case DRM_MODE_DPMS_OFF:
--		DRM_DEBUG_KMS("crtc %d/%d dpms off\n", pipe, plane);
-+	if (HAS_PCH_CPT(dev)) {
-+		/* Be sure PCH DPLL SEL is set */
-+		temp = I915_READ(PCH_DPLL_SEL);
-+		if (pipe == 0 && (temp & TRANSA_DPLL_ENABLE) == 0)
-+			temp |= (TRANSA_DPLL_ENABLE | TRANSA_DPLLA_SEL);
-+		else if (pipe == 1 && (temp & TRANSB_DPLL_ENABLE) == 0)
-+			temp |= (TRANSB_DPLL_ENABLE | TRANSB_DPLLB_SEL);
-+		I915_WRITE(PCH_DPLL_SEL, temp);
-+	}
-+
-+	/* set transcoder timing */
-+	I915_WRITE(TRANS_HTOTAL(pipe), I915_READ(HTOTAL(pipe)));
-+	I915_WRITE(TRANS_HBLANK(pipe), I915_READ(HBLANK(pipe)));
-+	I915_WRITE(TRANS_HSYNC(pipe),  I915_READ(HSYNC(pipe)));
-+
-+	I915_WRITE(TRANS_VTOTAL(pipe), I915_READ(VTOTAL(pipe)));
-+	I915_WRITE(TRANS_VBLANK(pipe), I915_READ(VBLANK(pipe)));
-+	I915_WRITE(TRANS_VSYNC(pipe),  I915_READ(VSYNC(pipe)));
-+
-+	intel_fdi_normal_train(crtc);
-+
-+	/* For PCH DP, enable TRANS_DP_CTL */
-+	if (HAS_PCH_CPT(dev) &&
-+	    intel_pipe_has_type(crtc, INTEL_OUTPUT_DISPLAYPORT)) {
-+		reg = TRANS_DP_CTL(pipe);
-+		temp = I915_READ(reg);
-+		temp &= ~(TRANS_DP_PORT_SEL_MASK |
-+			  TRANS_DP_SYNC_MASK);
-+		temp |= (TRANS_DP_OUTPUT_ENABLE |
-+			 TRANS_DP_ENH_FRAMING);
- 
--		drm_vblank_off(dev, pipe);
--		/* Disable display plane */
--		temp = I915_READ(dspcntr_reg);
--		if ((temp & DISPLAY_PLANE_ENABLE) != 0) {
--			I915_WRITE(dspcntr_reg, temp & ~DISPLAY_PLANE_ENABLE);
--			/* Flush the plane changes */
--			I915_WRITE(dspbase_reg, I915_READ(dspbase_reg));
--			I915_READ(dspbase_reg);
-+		if (crtc->mode.flags & DRM_MODE_FLAG_PHSYNC)
-+			temp |= TRANS_DP_HSYNC_ACTIVE_HIGH;
-+		if (crtc->mode.flags & DRM_MODE_FLAG_PVSYNC)
-+			temp |= TRANS_DP_VSYNC_ACTIVE_HIGH;
-+
-+		switch (intel_trans_dp_port_sel(crtc)) {
-+		case PCH_DP_B:
-+			temp |= TRANS_DP_PORT_SEL_B;
-+			break;
-+		case PCH_DP_C:
-+			temp |= TRANS_DP_PORT_SEL_C;
-+			break;
-+		case PCH_DP_D:
-+			temp |= TRANS_DP_PORT_SEL_D;
-+			break;
-+		default:
-+			DRM_DEBUG_KMS("Wrong PCH DP port return. Guess port B\n");
-+			temp |= TRANS_DP_PORT_SEL_B;
-+			break;
- 		}
- 
--		if (dev_priv->cfb_plane == plane &&
--		    dev_priv->display.disable_fbc)
--			dev_priv->display.disable_fbc(dev);
-+		I915_WRITE(reg, temp);
-+	}
- 
--		/* disable cpu pipe, disable after all planes disabled */
--		temp = I915_READ(pipeconf_reg);
--		if ((temp & PIPEACONF_ENABLE) != 0) {
--			I915_WRITE(pipeconf_reg, temp & ~PIPEACONF_ENABLE);
-+	/* enable PCH transcoder */
-+	reg = TRANSCONF(pipe);
-+	temp = I915_READ(reg);
-+	/*
-+	 * make the BPC in the transcoder consistent with
-+	 * that in the pipeconf reg.
-+	 */
-+	temp &= ~PIPE_BPC_MASK;
-+	temp |= I915_READ(PIPECONF(pipe)) & PIPE_BPC_MASK;
-+	I915_WRITE(reg, temp | TRANS_ENABLE);
-+	if (wait_for(I915_READ(reg) & TRANS_STATE_ENABLE, 100))
-+		DRM_ERROR("failed to enable transcoder %d\n", pipe);
- 
--			/* wait for cpu pipe off, pipe state */
--			if (wait_for((I915_READ(pipeconf_reg) & I965_PIPECONF_ACTIVE) == 0, 50, 1))
--				DRM_ERROR("failed to turn off cpu pipe\n");
--		} else
--			DRM_DEBUG_KMS("crtc %d is disabled\n", pipe);
-+	intel_crtc_load_lut(crtc);
-+	intel_update_fbc(dev);
-+	intel_crtc_update_cursor(crtc, true);
-+}
- 
--		udelay(100);
-+static void ironlake_crtc_disable(struct drm_crtc *crtc)
-+{
-+	struct drm_device *dev = crtc->dev;
-+	struct drm_i915_private *dev_priv = dev->dev_private;
-+	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
-+	int pipe = intel_crtc->pipe;
-+	int plane = intel_crtc->plane;
-+	u32 reg, temp;
- 
--		/* Disable PF */
--		I915_WRITE(pipe ? PFB_CTL_1 : PFA_CTL_1, 0);
--		I915_WRITE(pipe ? PFB_WIN_SZ : PFA_WIN_SZ, 0);
-+	if (!intel_crtc->active)
-+		return;
- 
--		/* disable CPU FDI tx and PCH FDI rx */
--		temp = I915_READ(fdi_tx_reg);
--		I915_WRITE(fdi_tx_reg, temp & ~FDI_TX_ENABLE);
--		I915_READ(fdi_tx_reg);
-+	intel_crtc_wait_for_pending_flips(crtc);
-+	drm_vblank_off(dev, pipe);
-+	intel_crtc_update_cursor(crtc, false);
- 
--		temp = I915_READ(fdi_rx_reg);
--		/* BPC in FDI rx is consistent with that in pipeconf */
--		temp &= ~(0x07 << 16);
--		temp |= (pipe_bpc << 11);
--		I915_WRITE(fdi_rx_reg, temp & ~FDI_RX_ENABLE);
--		I915_READ(fdi_rx_reg);
-+	/* Disable display plane */
-+	reg = DSPCNTR(plane);
-+	temp = I915_READ(reg);
-+	if (temp & DISPLAY_PLANE_ENABLE) {
-+		I915_WRITE(reg, temp & ~DISPLAY_PLANE_ENABLE);
-+		intel_flush_display_plane(dev, plane);
-+	}
- 
--		udelay(100);
-+	if (dev_priv->cfb_plane == plane &&
-+	    dev_priv->display.disable_fbc)
-+		dev_priv->display.disable_fbc(dev);
- 
--		/* still set train pattern 1 */
--		temp = I915_READ(fdi_tx_reg);
-+	/* disable cpu pipe, disable after all planes disabled */
-+	reg = PIPECONF(pipe);
-+	temp = I915_READ(reg);
-+	if (temp & PIPECONF_ENABLE) {
-+		I915_WRITE(reg, temp & ~PIPECONF_ENABLE);
-+		POSTING_READ(reg);
-+		/* wait for cpu pipe off, pipe state */
-+		intel_wait_for_pipe_off(dev, intel_crtc->pipe);
-+	}
-+
-+	/* Disable PF */
-+	I915_WRITE(pipe ? PFB_CTL_1 : PFA_CTL_1, 0);
-+	I915_WRITE(pipe ? PFB_WIN_SZ : PFA_WIN_SZ, 0);
-+
-+	/* disable CPU FDI tx and PCH FDI rx */
-+	reg = FDI_TX_CTL(pipe);
-+	temp = I915_READ(reg);
-+	I915_WRITE(reg, temp & ~FDI_TX_ENABLE);
-+	POSTING_READ(reg);
-+
-+	reg = FDI_RX_CTL(pipe);
-+	temp = I915_READ(reg);
-+	temp &= ~(0x7 << 16);
-+	temp |= (I915_READ(PIPECONF(pipe)) & PIPE_BPC_MASK) << 11;
-+	I915_WRITE(reg, temp & ~FDI_RX_ENABLE);
-+
-+	POSTING_READ(reg);
-+	udelay(100);
-+
-+	/* Ironlake workaround, disable clock pointer after downing FDI */
-+	if (HAS_PCH_IBX(dev))
-+		I915_WRITE(FDI_RX_CHICKEN(pipe),
-+			   I915_READ(FDI_RX_CHICKEN(pipe)) &
-+			   ~FDI_RX_PHASE_SYNC_POINTER_ENABLE);
-+
-+	/* still set train pattern 1 */
-+	reg = FDI_TX_CTL(pipe);
-+	temp = I915_READ(reg);
-+	temp &= ~FDI_LINK_TRAIN_NONE;
-+	temp |= FDI_LINK_TRAIN_PATTERN_1;
-+	I915_WRITE(reg, temp);
-+
-+	reg = FDI_RX_CTL(pipe);
-+	temp = I915_READ(reg);
-+	if (HAS_PCH_CPT(dev)) {
-+		temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
-+		temp |= FDI_LINK_TRAIN_PATTERN_1_CPT;
-+	} else {
- 		temp &= ~FDI_LINK_TRAIN_NONE;
- 		temp |= FDI_LINK_TRAIN_PATTERN_1;
--		I915_WRITE(fdi_tx_reg, temp);
--		POSTING_READ(fdi_tx_reg);
--
--		temp = I915_READ(fdi_rx_reg);
--		if (HAS_PCH_CPT(dev)) {
--			temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
--			temp |= FDI_LINK_TRAIN_PATTERN_1_CPT;
--		} else {
--			temp &= ~FDI_LINK_TRAIN_NONE;
--			temp |= FDI_LINK_TRAIN_PATTERN_1;
--		}
--		I915_WRITE(fdi_rx_reg, temp);
--		POSTING_READ(fdi_rx_reg);
-+	}
-+	/* BPC in FDI rx is consistent with that in PIPECONF */
-+	temp &= ~(0x07 << 16);
-+	temp |= (I915_READ(PIPECONF(pipe)) & PIPE_BPC_MASK) << 11;
-+	I915_WRITE(reg, temp);
- 
--		udelay(100);
-+	POSTING_READ(reg);
-+	udelay(100);
- 
--		if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) {
--			temp = I915_READ(PCH_LVDS);
-+	if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) {
-+		temp = I915_READ(PCH_LVDS);
-+		if (temp & LVDS_PORT_EN) {
- 			I915_WRITE(PCH_LVDS, temp & ~LVDS_PORT_EN);
--			I915_READ(PCH_LVDS);
-+			POSTING_READ(PCH_LVDS);
- 			udelay(100);
- 		}
-+	}
- 
--		/* disable PCH transcoder */
--		temp = I915_READ(transconf_reg);
--		if ((temp & TRANS_ENABLE) != 0) {
--			I915_WRITE(transconf_reg, temp & ~TRANS_ENABLE);
-+	/* disable PCH transcoder */
-+	reg = TRANSCONF(pipe);
-+	temp = I915_READ(reg);
-+	if (temp & TRANS_ENABLE) {
-+		I915_WRITE(reg, temp & ~TRANS_ENABLE);
-+		/* wait for PCH transcoder off, transcoder state */
-+		if (wait_for((I915_READ(reg) & TRANS_STATE_ENABLE) == 0, 50))
-+			DRM_ERROR("failed to disable transcoder\n");
-+	}
- 
--			/* wait for PCH transcoder off, transcoder state */
--			if (wait_for((I915_READ(transconf_reg) & TRANS_STATE_ENABLE) == 0, 50, 1))
--				DRM_ERROR("failed to disable transcoder\n");
--		}
-+	if (HAS_PCH_CPT(dev)) {
-+		/* disable TRANS_DP_CTL */
-+		reg = TRANS_DP_CTL(pipe);
-+		temp = I915_READ(reg);
-+		temp &= ~(TRANS_DP_OUTPUT_ENABLE | TRANS_DP_PORT_SEL_MASK);
-+		I915_WRITE(reg, temp);
- 
--		temp = I915_READ(transconf_reg);
--		/* BPC in transcoder is consistent with that in pipeconf */
--		temp &= ~PIPE_BPC_MASK;
--		temp |= pipe_bpc;
--		I915_WRITE(transconf_reg, temp);
--		I915_READ(transconf_reg);
--		udelay(100);
-+		/* disable DPLL_SEL */
-+		temp = I915_READ(PCH_DPLL_SEL);
-+		if (pipe == 0)
-+			temp &= ~(TRANSA_DPLL_ENABLE | TRANSA_DPLLB_SEL);
-+		else
-+			temp &= ~(TRANSB_DPLL_ENABLE | TRANSB_DPLLB_SEL);
-+		I915_WRITE(PCH_DPLL_SEL, temp);
-+	}
- 
--		if (HAS_PCH_CPT(dev)) {
--			/* disable TRANS_DP_CTL */
--			int trans_dp_ctl = (pipe == 0) ? TRANS_DP_CTL_A : TRANS_DP_CTL_B;
--			int reg;
-+	/* disable PCH DPLL */
-+	reg = PCH_DPLL(pipe);
-+	temp = I915_READ(reg);
-+	I915_WRITE(reg, temp & ~DPLL_VCO_ENABLE);
- 
--			reg = I915_READ(trans_dp_ctl);
--			reg &= ~(TRANS_DP_OUTPUT_ENABLE | TRANS_DP_PORT_SEL_MASK);
--			I915_WRITE(trans_dp_ctl, reg);
--			POSTING_READ(trans_dp_ctl);
-+	/* Switch from PCDclk to Rawclk */
-+	reg = FDI_RX_CTL(pipe);
-+	temp = I915_READ(reg);
-+	I915_WRITE(reg, temp & ~FDI_PCDCLK);
- 
--			/* disable DPLL_SEL */
--			temp = I915_READ(PCH_DPLL_SEL);
--			if (trans_dpll_sel == 0)
--				temp &= ~(TRANSA_DPLL_ENABLE | TRANSA_DPLLB_SEL);
--			else
--				temp &= ~(TRANSB_DPLL_ENABLE | TRANSB_DPLLB_SEL);
--			I915_WRITE(PCH_DPLL_SEL, temp);
--			I915_READ(PCH_DPLL_SEL);
-+	/* Disable CPU FDI TX PLL */
-+	reg = FDI_TX_CTL(pipe);
-+	temp = I915_READ(reg);
-+	I915_WRITE(reg, temp & ~FDI_TX_PLL_ENABLE);
- 
--		}
-+	POSTING_READ(reg);
-+	udelay(100);
- 
--		/* disable PCH DPLL */
--		temp = I915_READ(pch_dpll_reg);
--		I915_WRITE(pch_dpll_reg, temp & ~DPLL_VCO_ENABLE);
--		I915_READ(pch_dpll_reg);
--
--		/* Switch from PCDclk to Rawclk */
--		temp = I915_READ(fdi_rx_reg);
--		temp &= ~FDI_SEL_PCDCLK;
--		I915_WRITE(fdi_rx_reg, temp);
--		I915_READ(fdi_rx_reg);
--
--		/* Disable CPU FDI TX PLL */
--		temp = I915_READ(fdi_tx_reg);
--		I915_WRITE(fdi_tx_reg, temp & ~FDI_TX_PLL_ENABLE);
--		I915_READ(fdi_tx_reg);
--		udelay(100);
-+	reg = FDI_RX_CTL(pipe);
-+	temp = I915_READ(reg);
-+	I915_WRITE(reg, temp & ~FDI_RX_PLL_ENABLE);
- 
--		temp = I915_READ(fdi_rx_reg);
--		temp &= ~FDI_RX_PLL_ENABLE;
--		I915_WRITE(fdi_rx_reg, temp);
--		I915_READ(fdi_rx_reg);
-+	/* Wait for the clocks to turn off. */
-+	POSTING_READ(reg);
-+	udelay(100);
- 
--		/* Wait for the clocks to turn off. */
--		udelay(100);
-+	intel_crtc->active = false;
-+	intel_update_watermarks(dev);
-+	intel_update_fbc(dev);
-+	intel_clear_scanline_wait(dev);
-+}
-+
-+static void ironlake_crtc_dpms(struct drm_crtc *crtc, int mode)
-+{
-+	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
-+	int pipe = intel_crtc->pipe;
-+	int plane = intel_crtc->plane;
-+
-+	/* XXX: When our outputs are all unaware of DPMS modes other than off
-+	 * and on, we should map those modes to DRM_MODE_DPMS_OFF in the CRTC.
-+	 */
-+	switch (mode) {
-+	case DRM_MODE_DPMS_ON:
-+	case DRM_MODE_DPMS_STANDBY:
-+	case DRM_MODE_DPMS_SUSPEND:
-+		DRM_DEBUG_KMS("crtc %d/%d dpms on\n", pipe, plane);
-+		ironlake_crtc_enable(crtc);
-+		break;
-+
-+	case DRM_MODE_DPMS_OFF:
-+		DRM_DEBUG_KMS("crtc %d/%d dpms off\n", pipe, plane);
-+		ironlake_crtc_disable(crtc);
- 		break;
- 	}
- }
- 
- static void intel_crtc_dpms_overlay(struct intel_crtc *intel_crtc, bool enable)
- {
--	struct intel_overlay *overlay;
--	int ret;
--
- 	if (!enable && intel_crtc->overlay) {
--		overlay = intel_crtc->overlay;
--		mutex_lock(&overlay->dev->struct_mutex);
--		for (;;) {
--			ret = intel_overlay_switch_off(overlay);
--			if (ret == 0)
--				break;
-+		struct drm_device *dev = intel_crtc->base.dev;
- 
--			ret = intel_overlay_recover_from_interrupt(overlay, 0);
--			if (ret != 0) {
--				/* overlay doesn't react anymore. Usually
--				 * results in a black screen and an unkillable
--				 * X server. */
--				BUG();
--				overlay->hw_wedged = HW_WEDGED;
--				break;
--			}
--		}
--		mutex_unlock(&overlay->dev->struct_mutex);
-+		mutex_lock(&dev->struct_mutex);
-+		(void) intel_overlay_switch_off(intel_crtc->overlay, false);
-+		mutex_unlock(&dev->struct_mutex);
- 	}
--	/* Let userspace switch the overlay on again. In most cases userspace
--	 * has to recompute where to put it anyway. */
- 
--	return;
-+	/* Let userspace switch the overlay on again. In most cases userspace
-+	 * has to recompute where to put it anyway.
-+	 */
- }
- 
--static void i9xx_crtc_dpms(struct drm_crtc *crtc, int mode)
-+static void i9xx_crtc_enable(struct drm_crtc *crtc)
- {
- 	struct drm_device *dev = crtc->dev;
- 	struct drm_i915_private *dev_priv = dev->dev_private;
- 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
- 	int pipe = intel_crtc->pipe;
- 	int plane = intel_crtc->plane;
--	int dpll_reg = (pipe == 0) ? DPLL_A : DPLL_B;
--	int dspcntr_reg = (plane == 0) ? DSPACNTR : DSPBCNTR;
--	int dspbase_reg = (plane == 0) ? DSPAADDR : DSPBADDR;
--	int pipeconf_reg = (pipe == 0) ? PIPEACONF : PIPEBCONF;
--	u32 temp;
-+	u32 reg, temp;
- 
--	/* XXX: When our outputs are all unaware of DPMS modes other than off
--	 * and on, we should map those modes to DRM_MODE_DPMS_OFF in the CRTC.
--	 */
--	switch (mode) {
--	case DRM_MODE_DPMS_ON:
--	case DRM_MODE_DPMS_STANDBY:
--	case DRM_MODE_DPMS_SUSPEND:
--		/* Enable the DPLL */
--		temp = I915_READ(dpll_reg);
--		if ((temp & DPLL_VCO_ENABLE) == 0) {
--			I915_WRITE(dpll_reg, temp);
--			I915_READ(dpll_reg);
--			/* Wait for the clocks to stabilize. */
--			udelay(150);
--			I915_WRITE(dpll_reg, temp | DPLL_VCO_ENABLE);
--			I915_READ(dpll_reg);
--			/* Wait for the clocks to stabilize. */
--			udelay(150);
--			I915_WRITE(dpll_reg, temp | DPLL_VCO_ENABLE);
--			I915_READ(dpll_reg);
--			/* Wait for the clocks to stabilize. */
--			udelay(150);
--		}
-+	if (intel_crtc->active)
-+		return;
- 
--		/* Enable the pipe */
--		temp = I915_READ(pipeconf_reg);
--		if ((temp & PIPEACONF_ENABLE) == 0)
--			I915_WRITE(pipeconf_reg, temp | PIPEACONF_ENABLE);
--
--		/* Enable the plane */
--		temp = I915_READ(dspcntr_reg);
--		if ((temp & DISPLAY_PLANE_ENABLE) == 0) {
--			I915_WRITE(dspcntr_reg, temp | DISPLAY_PLANE_ENABLE);
--			/* Flush the plane changes */
--			I915_WRITE(dspbase_reg, I915_READ(dspbase_reg));
--		}
-+	intel_crtc->active = true;
-+	intel_update_watermarks(dev);
- 
--		intel_crtc_load_lut(crtc);
-+	/* Enable the DPLL */
-+	reg = DPLL(pipe);
-+	temp = I915_READ(reg);
-+	if ((temp & DPLL_VCO_ENABLE) == 0) {
-+		I915_WRITE(reg, temp);
- 
--		if ((IS_I965G(dev) || plane == 0))
--			intel_update_fbc(crtc, &crtc->mode);
-+		/* Wait for the clocks to stabilize. */
-+		POSTING_READ(reg);
-+		udelay(150);
- 
--		/* Give the overlay scaler a chance to enable if it's on this pipe */
--		intel_crtc_dpms_overlay(intel_crtc, true);
--	break;
--	case DRM_MODE_DPMS_OFF:
--		/* Give the overlay scaler a chance to disable if it's on this pipe */
--		intel_crtc_dpms_overlay(intel_crtc, false);
--		drm_vblank_off(dev, pipe);
--
--		if (dev_priv->cfb_plane == plane &&
--		    dev_priv->display.disable_fbc)
--			dev_priv->display.disable_fbc(dev);
--
--		/* Disable display plane */
--		temp = I915_READ(dspcntr_reg);
--		if ((temp & DISPLAY_PLANE_ENABLE) != 0) {
--			I915_WRITE(dspcntr_reg, temp & ~DISPLAY_PLANE_ENABLE);
--			/* Flush the plane changes */
--			I915_WRITE(dspbase_reg, I915_READ(dspbase_reg));
--			I915_READ(dspbase_reg);
--		}
-+		I915_WRITE(reg, temp | DPLL_VCO_ENABLE);
-+
-+		/* Wait for the clocks to stabilize. */
-+		POSTING_READ(reg);
-+		udelay(150);
-+
-+		I915_WRITE(reg, temp | DPLL_VCO_ENABLE);
- 
--		/* Don't disable pipe A or pipe A PLLs if needed */
--		if (pipeconf_reg == PIPEACONF &&
--		    (dev_priv->quirks & QUIRK_PIPEA_FORCE)) {
--			/* Wait for vblank for the disable to take effect */
-+		/* Wait for the clocks to stabilize. */
-+		POSTING_READ(reg);
-+		udelay(150);
-+	}
-+
-+	/* Enable the pipe */
-+	reg = PIPECONF(pipe);
-+	temp = I915_READ(reg);
-+	if ((temp & PIPECONF_ENABLE) == 0)
-+		I915_WRITE(reg, temp | PIPECONF_ENABLE);
-+
-+	/* Enable the plane */
-+	reg = DSPCNTR(plane);
-+	temp = I915_READ(reg);
-+	if ((temp & DISPLAY_PLANE_ENABLE) == 0) {
-+		I915_WRITE(reg, temp | DISPLAY_PLANE_ENABLE);
-+		intel_flush_display_plane(dev, plane);
-+	}
-+
-+	intel_crtc_load_lut(crtc);
-+	intel_update_fbc(dev);
-+
-+	/* Give the overlay scaler a chance to enable if it's on this pipe */
-+	intel_crtc_dpms_overlay(intel_crtc, true);
-+	intel_crtc_update_cursor(crtc, true);
-+}
-+
-+static void i9xx_crtc_disable(struct drm_crtc *crtc)
-+{
-+	struct drm_device *dev = crtc->dev;
-+	struct drm_i915_private *dev_priv = dev->dev_private;
-+	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
-+	int pipe = intel_crtc->pipe;
-+	int plane = intel_crtc->plane;
-+	u32 reg, temp;
-+
-+	if (!intel_crtc->active)
-+		return;
-+
-+	/* Give the overlay scaler a chance to disable if it's on this pipe */
-+	intel_crtc_wait_for_pending_flips(crtc);
-+	drm_vblank_off(dev, pipe);
-+	intel_crtc_dpms_overlay(intel_crtc, false);
-+	intel_crtc_update_cursor(crtc, false);
-+
-+	if (dev_priv->cfb_plane == plane &&
-+	    dev_priv->display.disable_fbc)
-+		dev_priv->display.disable_fbc(dev);
-+
-+	/* Disable display plane */
-+	reg = DSPCNTR(plane);
-+	temp = I915_READ(reg);
-+	if (temp & DISPLAY_PLANE_ENABLE) {
-+		I915_WRITE(reg, temp & ~DISPLAY_PLANE_ENABLE);
-+		/* Flush the plane changes */
-+		intel_flush_display_plane(dev, plane);
-+
-+		/* Wait for vblank for the disable to take effect */
-+		if (IS_GEN2(dev))
- 			intel_wait_for_vblank(dev, pipe);
--			goto skip_pipe_off;
--		}
-+	}
- 
--		/* Next, disable display pipes */
--		temp = I915_READ(pipeconf_reg);
--		if ((temp & PIPEACONF_ENABLE) != 0) {
--			I915_WRITE(pipeconf_reg, temp & ~PIPEACONF_ENABLE);
--			I915_READ(pipeconf_reg);
--		}
-+	/* Don't disable pipe A or pipe A PLLs if needed */
-+	if (pipe == 0 && (dev_priv->quirks & QUIRK_PIPEA_FORCE))
-+		goto done;
-+
-+	/* Next, disable display pipes */
-+	reg = PIPECONF(pipe);
-+	temp = I915_READ(reg);
-+	if (temp & PIPECONF_ENABLE) {
-+		I915_WRITE(reg, temp & ~PIPECONF_ENABLE);
- 
- 		/* Wait for the pipe to turn off */
-+		POSTING_READ(reg);
- 		intel_wait_for_pipe_off(dev, pipe);
-+	}
-+
-+	reg = DPLL(pipe);
-+	temp = I915_READ(reg);
-+	if (temp & DPLL_VCO_ENABLE) {
-+		I915_WRITE(reg, temp & ~DPLL_VCO_ENABLE);
- 
--		temp = I915_READ(dpll_reg);
--		if ((temp & DPLL_VCO_ENABLE) != 0) {
--			I915_WRITE(dpll_reg, temp & ~DPLL_VCO_ENABLE);
--			I915_READ(dpll_reg);
--		}
--	skip_pipe_off:
- 		/* Wait for the clocks to turn off. */
-+		POSTING_READ(reg);
- 		udelay(150);
-+	}
-+
-+done:
-+	intel_crtc->active = false;
-+	intel_update_fbc(dev);
-+	intel_update_watermarks(dev);
-+	intel_clear_scanline_wait(dev);
-+}
-+
-+static void i9xx_crtc_dpms(struct drm_crtc *crtc, int mode)
-+{
-+	/* XXX: When our outputs are all unaware of DPMS modes other than off
-+	 * and on, we should map those modes to DRM_MODE_DPMS_OFF in the CRTC.
-+	 */
-+	switch (mode) {
-+	case DRM_MODE_DPMS_ON:
-+	case DRM_MODE_DPMS_STANDBY:
-+	case DRM_MODE_DPMS_SUSPEND:
-+		i9xx_crtc_enable(crtc);
-+		break;
-+	case DRM_MODE_DPMS_OFF:
-+		i9xx_crtc_disable(crtc);
- 		break;
- 	}
- }
-@@ -2388,26 +2506,9 @@ static void intel_crtc_dpms(struct drm_crtc *crtc, int mode)
- 		return;
- 
- 	intel_crtc->dpms_mode = mode;
--	intel_crtc->cursor_on = mode == DRM_MODE_DPMS_ON;
--
--	/* When switching on the display, ensure that SR is disabled
--	 * with multiple pipes prior to enabling to new pipe.
--	 *
--	 * When switching off the display, make sure the cursor is
--	 * properly hidden prior to disabling the pipe.
--	 */
--	if (mode == DRM_MODE_DPMS_ON)
--		intel_update_watermarks(dev);
--	else
--		intel_crtc_update_cursor(crtc);
- 
- 	dev_priv->display.dpms(crtc, mode);
- 
--	if (mode == DRM_MODE_DPMS_ON)
--		intel_crtc_update_cursor(crtc);
--	else
--		intel_update_watermarks(dev);
--
- 	if (!dev->primary->master)
- 		return;
- 
-@@ -2432,16 +2533,46 @@ static void intel_crtc_dpms(struct drm_crtc *crtc, int mode)
- 	}
- }
- 
--static void intel_crtc_prepare (struct drm_crtc *crtc)
-+static void intel_crtc_disable(struct drm_crtc *crtc)
- {
- 	struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private;
-+	struct drm_device *dev = crtc->dev;
-+
- 	crtc_funcs->dpms(crtc, DRM_MODE_DPMS_OFF);
-+
-+	if (crtc->fb) {
-+		mutex_lock(&dev->struct_mutex);
-+		i915_gem_object_unpin(to_intel_framebuffer(crtc->fb)->obj);
-+		mutex_unlock(&dev->struct_mutex);
-+	}
-+}
-+
-+/* Prepare for a mode set.
-+ *
-+ * Note we could be a lot smarter here.  We need to figure out which outputs
-+ * will be enabled, which disabled (in short, how the config will change)
-+ * and perform the minimum necessary steps to accomplish that, e.g. updating
-+ * watermarks, FBC configuration, making sure PLLs are programmed correctly,
-+ * panel fitting is in the proper state, etc.
-+ */
-+static void i9xx_crtc_prepare(struct drm_crtc *crtc)
-+{
-+	i9xx_crtc_disable(crtc);
- }
- 
--static void intel_crtc_commit (struct drm_crtc *crtc)
-+static void i9xx_crtc_commit(struct drm_crtc *crtc)
- {
--	struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private;
--	crtc_funcs->dpms(crtc, DRM_MODE_DPMS_ON);
-+	i9xx_crtc_enable(crtc);
-+}
-+
-+static void ironlake_crtc_prepare(struct drm_crtc *crtc)
-+{
-+	ironlake_crtc_disable(crtc);
-+}
-+
-+static void ironlake_crtc_commit(struct drm_crtc *crtc)
-+{
-+	ironlake_crtc_enable(crtc);
- }
- 
- void intel_encoder_prepare (struct drm_encoder *encoder)
-@@ -2460,13 +2591,7 @@ void intel_encoder_commit (struct drm_encoder *encoder)
- 
- void intel_encoder_destroy(struct drm_encoder *encoder)
- {
--	struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder);
--
--	if (intel_encoder->ddc_bus)
--		intel_i2c_destroy(intel_encoder->ddc_bus);
--
--	if (intel_encoder->i2c_bus)
--		intel_i2c_destroy(intel_encoder->i2c_bus);
-+	struct intel_encoder *intel_encoder = to_intel_encoder(encoder);
- 
- 	drm_encoder_cleanup(encoder);
- 	kfree(intel_encoder);
-@@ -2557,33 +2682,6 @@ static int i830_get_display_clock_speed(struct drm_device *dev)
- 	return 133000;
- }
- 
--/**
-- * Return the pipe currently connected to the panel fitter,
-- * or -1 if the panel fitter is not present or not in use
-- */
--int intel_panel_fitter_pipe (struct drm_device *dev)
--{
--	struct drm_i915_private *dev_priv = dev->dev_private;
--	u32  pfit_control;
--
--	/* i830 doesn't have a panel fitter */
--	if (IS_I830(dev))
--		return -1;
--
--	pfit_control = I915_READ(PFIT_CONTROL);
--
--	/* See if the panel fitter is in use */
--	if ((pfit_control & PFIT_ENABLE) == 0)
--		return -1;
--
--	/* 965 can place panel fitter on either pipe */
--	if (IS_I965G(dev))
--		return (pfit_control >> 29) & 0x3;
--
--	/* older chips can only use pipe 1 */
--	return 1;
--}
--
- struct fdi_m_n {
- 	u32        tu;
- 	u32        gmch_m;
-@@ -2902,7 +3000,7 @@ static int i9xx_get_fifo_size(struct drm_device *dev, int plane)
- 		size = ((dsparb >> DSPARB_CSTART_SHIFT) & 0x7f) - size;
- 
- 	DRM_DEBUG_KMS("FIFO size - (0x%08x) %s: %d\n", dsparb,
--			plane ? "B" : "A", size);
-+		      plane ? "B" : "A", size);
- 
- 	return size;
- }
-@@ -2919,7 +3017,7 @@ static int i85x_get_fifo_size(struct drm_device *dev, int plane)
- 	size >>= 1; /* Convert to cachelines */
- 
- 	DRM_DEBUG_KMS("FIFO size - (0x%08x) %s: %d\n", dsparb,
--			plane ? "B" : "A", size);
-+		      plane ? "B" : "A", size);
- 
- 	return size;
- }
-@@ -2934,8 +3032,8 @@ static int i845_get_fifo_size(struct drm_device *dev, int plane)
- 	size >>= 2; /* Convert to cachelines */
- 
- 	DRM_DEBUG_KMS("FIFO size - (0x%08x) %s: %d\n", dsparb,
--			plane ? "B" : "A",
--		  size);
-+		      plane ? "B" : "A",
-+		      size);
- 
- 	return size;
- }
-@@ -2950,14 +3048,14 @@ static int i830_get_fifo_size(struct drm_device *dev, int plane)
- 	size >>= 1; /* Convert to cachelines */
- 
- 	DRM_DEBUG_KMS("FIFO size - (0x%08x) %s: %d\n", dsparb,
--			plane ? "B" : "A", size);
-+		      plane ? "B" : "A", size);
- 
- 	return size;
- }
- 
- static void pineview_update_wm(struct drm_device *dev,  int planea_clock,
--			  int planeb_clock, int sr_hdisplay, int unused,
--			  int pixel_size)
-+			       int planeb_clock, int sr_hdisplay, int unused,
-+			       int pixel_size)
- {
- 	struct drm_i915_private *dev_priv = dev->dev_private;
- 	const struct cxsr_latency *latency;
-@@ -3069,13 +3167,13 @@ static void g4x_update_wm(struct drm_device *dev,  int planea_clock,
- 
- 		/* Use ns/us then divide to preserve precision */
- 		sr_entries = (((sr_latency_ns / line_time_us) + 1000) / 1000) *
--			      pixel_size * sr_hdisplay;
-+			pixel_size * sr_hdisplay;
- 		sr_entries = DIV_ROUND_UP(sr_entries, cacheline_size);
- 
- 		entries_required = (((sr_latency_ns / line_time_us) +
- 				     1000) / 1000) * pixel_size * 64;
- 		entries_required = DIV_ROUND_UP(entries_required,
--					   g4x_cursor_wm_info.cacheline_size);
-+						g4x_cursor_wm_info.cacheline_size);
- 		cursor_sr = entries_required + g4x_cursor_wm_info.guard_size;
- 
- 		if (cursor_sr > g4x_cursor_wm_info.max_wm)
-@@ -3087,7 +3185,7 @@ static void g4x_update_wm(struct drm_device *dev,  int planea_clock,
- 	} else {
- 		/* Turn off self refresh if both pipes are enabled */
- 		I915_WRITE(FW_BLC_SELF, I915_READ(FW_BLC_SELF)
--					& ~FW_BLC_SELF_EN);
-+			   & ~FW_BLC_SELF_EN);
- 	}
- 
- 	DRM_DEBUG("Setting FIFO watermarks - A: %d, B: %d, SR %d\n",
-@@ -3125,7 +3223,7 @@ static void i965_update_wm(struct drm_device *dev, int planea_clock,
- 
- 		/* Use ns/us then divide to preserve precision */
- 		sr_entries = (((sr_latency_ns / line_time_us) + 1000) / 1000) *
--			      pixel_size * sr_hdisplay;
-+			pixel_size * sr_hdisplay;
- 		sr_entries = DIV_ROUND_UP(sr_entries, I915_FIFO_LINE_SIZE);
- 		DRM_DEBUG("self-refresh entries: %d\n", sr_entries);
- 		srwm = I965_FIFO_SIZE - sr_entries;
-@@ -3134,11 +3232,11 @@ static void i965_update_wm(struct drm_device *dev, int planea_clock,
- 		srwm &= 0x1ff;
- 
- 		sr_entries = (((sr_latency_ns / line_time_us) + 1000) / 1000) *
--			     pixel_size * 64;
-+			pixel_size * 64;
- 		sr_entries = DIV_ROUND_UP(sr_entries,
- 					  i965_cursor_wm_info.cacheline_size);
- 		cursor_sr = i965_cursor_wm_info.fifo_size -
--			    (sr_entries + i965_cursor_wm_info.guard_size);
-+			(sr_entries + i965_cursor_wm_info.guard_size);
- 
- 		if (cursor_sr > i965_cursor_wm_info.max_wm)
- 			cursor_sr = i965_cursor_wm_info.max_wm;
-@@ -3146,11 +3244,11 @@ static void i965_update_wm(struct drm_device *dev, int planea_clock,
- 		DRM_DEBUG_KMS("self-refresh watermark: display plane %d "
- 			      "cursor %d\n", srwm, cursor_sr);
- 
--		if (IS_I965GM(dev))
-+		if (IS_CRESTLINE(dev))
- 			I915_WRITE(FW_BLC_SELF, FW_BLC_SELF_EN);
- 	} else {
- 		/* Turn off self refresh if both pipes are enabled */
--		if (IS_I965GM(dev))
-+		if (IS_CRESTLINE(dev))
- 			I915_WRITE(FW_BLC_SELF, I915_READ(FW_BLC_SELF)
- 				   & ~FW_BLC_SELF_EN);
- 	}
-@@ -3180,9 +3278,9 @@ static void i9xx_update_wm(struct drm_device *dev, int planea_clock,
- 	int sr_clock, sr_entries = 0;
- 
- 	/* Create copies of the base settings for each pipe */
--	if (IS_I965GM(dev) || IS_I945GM(dev))
-+	if (IS_CRESTLINE(dev) || IS_I945GM(dev))
- 		planea_params = planeb_params = i945_wm_info;
--	else if (IS_I9XX(dev))
-+	else if (!IS_GEN2(dev))
- 		planea_params = planeb_params = i915_wm_info;
- 	else
- 		planea_params = planeb_params = i855_wm_info;
-@@ -3217,7 +3315,7 @@ static void i9xx_update_wm(struct drm_device *dev, int planea_clock,
- 
- 		/* Use ns/us then divide to preserve precision */
- 		sr_entries = (((sr_latency_ns / line_time_us) + 1000) / 1000) *
--			      pixel_size * sr_hdisplay;
-+			pixel_size * sr_hdisplay;
- 		sr_entries = DIV_ROUND_UP(sr_entries, cacheline_size);
- 		DRM_DEBUG_KMS("self-refresh entries: %d\n", sr_entries);
- 		srwm = total_size - sr_entries;
-@@ -3242,7 +3340,7 @@ static void i9xx_update_wm(struct drm_device *dev, int planea_clock,
- 	}
- 
- 	DRM_DEBUG_KMS("Setting FIFO watermarks - A: %d, B: %d, C: %d, SR %d\n",
--		  planea_wm, planeb_wm, cwm, srwm);
-+		      planea_wm, planeb_wm, cwm, srwm);
- 
- 	fwater_lo = ((planeb_wm & 0x3f) << 16) | (planea_wm & 0x3f);
- 	fwater_hi = (cwm & 0x1f);
-@@ -3276,146 +3374,130 @@ static void i830_update_wm(struct drm_device *dev, int planea_clock, int unused,
- #define ILK_LP0_PLANE_LATENCY		700
- #define ILK_LP0_CURSOR_LATENCY		1300
- 
--static void ironlake_update_wm(struct drm_device *dev,  int planea_clock,
--		       int planeb_clock, int sr_hdisplay, int sr_htotal,
--		       int pixel_size)
-+static bool ironlake_compute_wm0(struct drm_device *dev,
-+				 int pipe,
-+				 int *plane_wm,
-+				 int *cursor_wm)
- {
--	struct drm_i915_private *dev_priv = dev->dev_private;
--	int planea_wm, planeb_wm, cursora_wm, cursorb_wm;
--	int sr_wm, cursor_wm;
--	unsigned long line_time_us;
--	int sr_clock, entries_required;
--	u32 reg_value;
--	int line_count;
--	int planea_htotal = 0, planeb_htotal = 0;
- 	struct drm_crtc *crtc;
-+	int htotal, hdisplay, clock, pixel_size = 0;
-+	int line_time_us, line_count, entries;
- 
--	/* Need htotal for all active display plane */
--	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
--		struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
--		if (intel_crtc->dpms_mode == DRM_MODE_DPMS_ON) {
--			if (intel_crtc->plane == 0)
--				planea_htotal = crtc->mode.htotal;
--			else
--				planeb_htotal = crtc->mode.htotal;
--		}
--	}
--
--	/* Calculate and update the watermark for plane A */
--	if (planea_clock) {
--		entries_required = ((planea_clock / 1000) * pixel_size *
--				     ILK_LP0_PLANE_LATENCY) / 1000;
--		entries_required = DIV_ROUND_UP(entries_required,
--						ironlake_display_wm_info.cacheline_size);
--		planea_wm = entries_required +
--			    ironlake_display_wm_info.guard_size;
--
--		if (planea_wm > (int)ironlake_display_wm_info.max_wm)
--			planea_wm = ironlake_display_wm_info.max_wm;
--
--		/* Use the large buffer method to calculate cursor watermark */
--		line_time_us = (planea_htotal * 1000) / planea_clock;
--
--		/* Use ns/us then divide to preserve precision */
--		line_count = (ILK_LP0_CURSOR_LATENCY / line_time_us + 1000) / 1000;
--
--		/* calculate the cursor watermark for cursor A */
--		entries_required = line_count * 64 * pixel_size;
--		entries_required = DIV_ROUND_UP(entries_required,
--						ironlake_cursor_wm_info.cacheline_size);
--		cursora_wm = entries_required + ironlake_cursor_wm_info.guard_size;
--		if (cursora_wm > ironlake_cursor_wm_info.max_wm)
--			cursora_wm = ironlake_cursor_wm_info.max_wm;
--
--		reg_value = I915_READ(WM0_PIPEA_ILK);
--		reg_value &= ~(WM0_PIPE_PLANE_MASK | WM0_PIPE_CURSOR_MASK);
--		reg_value |= (planea_wm << WM0_PIPE_PLANE_SHIFT) |
--			     (cursora_wm & WM0_PIPE_CURSOR_MASK);
--		I915_WRITE(WM0_PIPEA_ILK, reg_value);
--		DRM_DEBUG_KMS("FIFO watermarks For pipe A - plane %d, "
--				"cursor: %d\n", planea_wm, cursora_wm);
--	}
--	/* Calculate and update the watermark for plane B */
--	if (planeb_clock) {
--		entries_required = ((planeb_clock / 1000) * pixel_size *
--				     ILK_LP0_PLANE_LATENCY) / 1000;
--		entries_required = DIV_ROUND_UP(entries_required,
--						ironlake_display_wm_info.cacheline_size);
--		planeb_wm = entries_required +
--			    ironlake_display_wm_info.guard_size;
--
--		if (planeb_wm > (int)ironlake_display_wm_info.max_wm)
--			planeb_wm = ironlake_display_wm_info.max_wm;
-+	crtc = intel_get_crtc_for_pipe(dev, pipe);
-+	if (crtc->fb == NULL || !crtc->enabled)
-+		return false;
- 
--		/* Use the large buffer method to calculate cursor watermark */
--		line_time_us = (planeb_htotal * 1000) / planeb_clock;
-+	htotal = crtc->mode.htotal;
-+	hdisplay = crtc->mode.hdisplay;
-+	clock = crtc->mode.clock;
-+	pixel_size = crtc->fb->bits_per_pixel / 8;
-+
-+	/* Use the small buffer method to calculate plane watermark */
-+	entries = ((clock * pixel_size / 1000) * ILK_LP0_PLANE_LATENCY) / 1000;
-+	entries = DIV_ROUND_UP(entries,
-+			       ironlake_display_wm_info.cacheline_size);
-+	*plane_wm = entries + ironlake_display_wm_info.guard_size;
-+	if (*plane_wm > (int)ironlake_display_wm_info.max_wm)
-+		*plane_wm = ironlake_display_wm_info.max_wm;
-+
-+	/* Use the large buffer method to calculate cursor watermark */
-+	line_time_us = ((htotal * 1000) / clock);
-+	line_count = (ILK_LP0_CURSOR_LATENCY / line_time_us + 1000) / 1000;
-+	entries = line_count * 64 * pixel_size;
-+	entries = DIV_ROUND_UP(entries,
-+			       ironlake_cursor_wm_info.cacheline_size);
-+	*cursor_wm = entries + ironlake_cursor_wm_info.guard_size;
-+	if (*cursor_wm > ironlake_cursor_wm_info.max_wm)
-+		*cursor_wm = ironlake_cursor_wm_info.max_wm;
- 
--		/* Use ns/us then divide to preserve precision */
--		line_count = (ILK_LP0_CURSOR_LATENCY / line_time_us + 1000) / 1000;
-+	return true;
-+}
- 
--		/* calculate the cursor watermark for cursor B */
--		entries_required = line_count * 64 * pixel_size;
--		entries_required = DIV_ROUND_UP(entries_required,
--						ironlake_cursor_wm_info.cacheline_size);
--		cursorb_wm = entries_required + ironlake_cursor_wm_info.guard_size;
--		if (cursorb_wm > ironlake_cursor_wm_info.max_wm)
--			cursorb_wm = ironlake_cursor_wm_info.max_wm;
-+static void ironlake_update_wm(struct drm_device *dev,
-+			       int planea_clock, int planeb_clock,
-+			       int sr_hdisplay, int sr_htotal,
-+			       int pixel_size)
-+{
-+	struct drm_i915_private *dev_priv = dev->dev_private;
-+	int plane_wm, cursor_wm, enabled;
-+	int tmp;
-+
-+	enabled = 0;
-+	if (ironlake_compute_wm0(dev, 0, &plane_wm, &cursor_wm)) {
-+		I915_WRITE(WM0_PIPEA_ILK,
-+			   (plane_wm << WM0_PIPE_PLANE_SHIFT) | cursor_wm);
-+		DRM_DEBUG_KMS("FIFO watermarks For pipe A -"
-+			      " plane %d, " "cursor: %d\n",
-+			      plane_wm, cursor_wm);
-+		enabled++;
-+	}
- 
--		reg_value = I915_READ(WM0_PIPEB_ILK);
--		reg_value &= ~(WM0_PIPE_PLANE_MASK | WM0_PIPE_CURSOR_MASK);
--		reg_value |= (planeb_wm << WM0_PIPE_PLANE_SHIFT) |
--			     (cursorb_wm & WM0_PIPE_CURSOR_MASK);
--		I915_WRITE(WM0_PIPEB_ILK, reg_value);
--		DRM_DEBUG_KMS("FIFO watermarks For pipe B - plane %d, "
--				"cursor: %d\n", planeb_wm, cursorb_wm);
-+	if (ironlake_compute_wm0(dev, 1, &plane_wm, &cursor_wm)) {
-+		I915_WRITE(WM0_PIPEB_ILK,
-+			   (plane_wm << WM0_PIPE_PLANE_SHIFT) | cursor_wm);
-+		DRM_DEBUG_KMS("FIFO watermarks For pipe B -"
-+			      " plane %d, cursor: %d\n",
-+			      plane_wm, cursor_wm);
-+		enabled++;
- 	}
- 
- 	/*
- 	 * Calculate and update the self-refresh watermark only when one
- 	 * display plane is used.
- 	 */
--	if (!planea_clock || !planeb_clock) {
--
-+	tmp = 0;
-+	if (enabled == 1 && /* XXX disabled due to buggy implmentation? */ 0) {
-+		unsigned long line_time_us;
-+		int small, large, plane_fbc;
-+		int sr_clock, entries;
-+		int line_count, line_size;
- 		/* Read the self-refresh latency. The unit is 0.5us */
- 		int ilk_sr_latency = I915_READ(MLTR_ILK) & ILK_SRLT_MASK;
- 
- 		sr_clock = planea_clock ? planea_clock : planeb_clock;
--		line_time_us = ((sr_htotal * 1000) / sr_clock);
-+		line_time_us = (sr_htotal * 1000) / sr_clock;
- 
- 		/* Use ns/us then divide to preserve precision */
- 		line_count = ((ilk_sr_latency * 500) / line_time_us + 1000)
--			       / 1000;
-+			/ 1000;
-+		line_size = sr_hdisplay * pixel_size;
- 
--		/* calculate the self-refresh watermark for display plane */
--		entries_required = line_count * sr_hdisplay * pixel_size;
--		entries_required = DIV_ROUND_UP(entries_required,
--						ironlake_display_srwm_info.cacheline_size);
--		sr_wm = entries_required +
--			ironlake_display_srwm_info.guard_size;
-+		/* Use the minimum of the small and large buffer method for primary */
-+		small = ((sr_clock * pixel_size / 1000) * (ilk_sr_latency * 500)) / 1000;
-+		large = line_count * line_size;
- 
--		/* calculate the self-refresh watermark for display cursor */
--		entries_required = line_count * pixel_size * 64;
--		entries_required = DIV_ROUND_UP(entries_required,
--						ironlake_cursor_srwm_info.cacheline_size);
--		cursor_wm = entries_required +
--			    ironlake_cursor_srwm_info.guard_size;
-+		entries = DIV_ROUND_UP(min(small, large),
-+				       ironlake_display_srwm_info.cacheline_size);
- 
--		/* configure watermark and enable self-refresh */
--		reg_value = I915_READ(WM1_LP_ILK);
--		reg_value &= ~(WM1_LP_LATENCY_MASK | WM1_LP_SR_MASK |
--			       WM1_LP_CURSOR_MASK);
--		reg_value |= (ilk_sr_latency << WM1_LP_LATENCY_SHIFT) |
--			     (sr_wm << WM1_LP_SR_SHIFT) | cursor_wm;
-+		plane_fbc = entries * 64;
-+		plane_fbc = DIV_ROUND_UP(plane_fbc, line_size);
- 
--		I915_WRITE(WM1_LP_ILK, reg_value);
--		DRM_DEBUG_KMS("self-refresh watermark: display plane %d "
--				"cursor %d\n", sr_wm, cursor_wm);
-+		plane_wm = entries + ironlake_display_srwm_info.guard_size;
-+		if (plane_wm > (int)ironlake_display_srwm_info.max_wm)
-+			plane_wm = ironlake_display_srwm_info.max_wm;
- 
--	} else {
--		/* Turn off self refresh if both pipes are enabled */
--		I915_WRITE(WM1_LP_ILK, I915_READ(WM1_LP_ILK) & ~WM1_LP_SR_EN);
--	}
-+		/* calculate the self-refresh watermark for display cursor */
-+		entries = line_count * pixel_size * 64;
-+		entries = DIV_ROUND_UP(entries,
-+				       ironlake_cursor_srwm_info.cacheline_size);
-+
-+		cursor_wm = entries + ironlake_cursor_srwm_info.guard_size;
-+		if (cursor_wm > (int)ironlake_cursor_srwm_info.max_wm)
-+			cursor_wm = ironlake_cursor_srwm_info.max_wm;
-+
-+		/* configure watermark and enable self-refresh */
-+		tmp = (WM1_LP_SR_EN |
-+		       (ilk_sr_latency << WM1_LP_LATENCY_SHIFT) |
-+		       (plane_fbc << WM1_LP_FBC_SHIFT) |
-+		       (plane_wm << WM1_LP_SR_SHIFT) |
-+		       cursor_wm);
-+		DRM_DEBUG_KMS("self-refresh watermark: display plane %d, fbc lines %d,"
-+			      " cursor %d\n", plane_wm, plane_fbc, cursor_wm);
-+	}
-+	I915_WRITE(WM1_LP_ILK, tmp);
-+	/* XXX setup WM2 and WM3 */
- }
-+
- /**
-  * intel_update_watermarks - update FIFO watermark values based on current modes
-  *
-@@ -3447,7 +3529,7 @@ static void ironlake_update_wm(struct drm_device *dev,  int planea_clock,
-  *
-  * We don't use the sprite, so we can ignore that.  And on Crestline we have
-  * to set the non-SR watermarks to 8.
--  */
-+ */
- static void intel_update_watermarks(struct drm_device *dev)
- {
- 	struct drm_i915_private *dev_priv = dev->dev_private;
-@@ -3463,15 +3545,15 @@ static void intel_update_watermarks(struct drm_device *dev)
- 	/* Get the clock config from both planes */
- 	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
- 		struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
--		if (intel_crtc->dpms_mode == DRM_MODE_DPMS_ON) {
-+		if (intel_crtc->active) {
- 			enabled++;
- 			if (intel_crtc->plane == 0) {
- 				DRM_DEBUG_KMS("plane A (pipe %d) clock: %d\n",
--					  intel_crtc->pipe, crtc->mode.clock);
-+					      intel_crtc->pipe, crtc->mode.clock);
- 				planea_clock = crtc->mode.clock;
- 			} else {
- 				DRM_DEBUG_KMS("plane B (pipe %d) clock: %d\n",
--					  intel_crtc->pipe, crtc->mode.clock);
-+					      intel_crtc->pipe, crtc->mode.clock);
- 				planeb_clock = crtc->mode.clock;
- 			}
- 			sr_hdisplay = crtc->mode.hdisplay;
-@@ -3502,62 +3584,35 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
- 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
- 	int pipe = intel_crtc->pipe;
- 	int plane = intel_crtc->plane;
--	int fp_reg = (pipe == 0) ? FPA0 : FPB0;
--	int dpll_reg = (pipe == 0) ? DPLL_A : DPLL_B;
--	int dpll_md_reg = (intel_crtc->pipe == 0) ? DPLL_A_MD : DPLL_B_MD;
--	int dspcntr_reg = (plane == 0) ? DSPACNTR : DSPBCNTR;
--	int pipeconf_reg = (pipe == 0) ? PIPEACONF : PIPEBCONF;
--	int htot_reg = (pipe == 0) ? HTOTAL_A : HTOTAL_B;
--	int hblank_reg = (pipe == 0) ? HBLANK_A : HBLANK_B;
--	int hsync_reg = (pipe == 0) ? HSYNC_A : HSYNC_B;
--	int vtot_reg = (pipe == 0) ? VTOTAL_A : VTOTAL_B;
--	int vblank_reg = (pipe == 0) ? VBLANK_A : VBLANK_B;
--	int vsync_reg = (pipe == 0) ? VSYNC_A : VSYNC_B;
--	int dspsize_reg = (plane == 0) ? DSPASIZE : DSPBSIZE;
--	int dsppos_reg = (plane == 0) ? DSPAPOS : DSPBPOS;
--	int pipesrc_reg = (pipe == 0) ? PIPEASRC : PIPEBSRC;
-+	u32 fp_reg, dpll_reg;
- 	int refclk, num_connectors = 0;
- 	intel_clock_t clock, reduced_clock;
--	u32 dpll = 0, fp = 0, fp2 = 0, dspcntr, pipeconf;
-+	u32 dpll, fp = 0, fp2 = 0, dspcntr, pipeconf;
- 	bool ok, has_reduced_clock = false, is_sdvo = false, is_dvo = false;
- 	bool is_crt = false, is_lvds = false, is_tv = false, is_dp = false;
- 	struct intel_encoder *has_edp_encoder = NULL;
- 	struct drm_mode_config *mode_config = &dev->mode_config;
--	struct drm_encoder *encoder;
-+	struct intel_encoder *encoder;
- 	const intel_limit_t *limit;
- 	int ret;
- 	struct fdi_m_n m_n = {0};
--	int data_m1_reg = (pipe == 0) ? PIPEA_DATA_M1 : PIPEB_DATA_M1;
--	int data_n1_reg = (pipe == 0) ? PIPEA_DATA_N1 : PIPEB_DATA_N1;
--	int link_m1_reg = (pipe == 0) ? PIPEA_LINK_M1 : PIPEB_LINK_M1;
--	int link_n1_reg = (pipe == 0) ? PIPEA_LINK_N1 : PIPEB_LINK_N1;
--	int pch_fp_reg = (pipe == 0) ? PCH_FPA0 : PCH_FPB0;
--	int pch_dpll_reg = (pipe == 0) ? PCH_DPLL_A : PCH_DPLL_B;
--	int fdi_rx_reg = (pipe == 0) ? FDI_RXA_CTL : FDI_RXB_CTL;
--	int fdi_tx_reg = (pipe == 0) ? FDI_TXA_CTL : FDI_TXB_CTL;
--	int trans_dpll_sel = (pipe == 0) ? 0 : 1;
--	int lvds_reg = LVDS;
--	u32 temp;
--	int sdvo_pixel_multiply;
-+	u32 reg, temp;
- 	int target_clock;
- 
- 	drm_vblank_pre_modeset(dev, pipe);
- 
--	list_for_each_entry(encoder, &mode_config->encoder_list, head) {
--		struct intel_encoder *intel_encoder;
--
--		if (encoder->crtc != crtc)
-+	list_for_each_entry(encoder, &mode_config->encoder_list, base.head) {
-+		if (encoder->base.crtc != crtc)
- 			continue;
- 
--		intel_encoder = enc_to_intel_encoder(encoder);
--		switch (intel_encoder->type) {
-+		switch (encoder->type) {
- 		case INTEL_OUTPUT_LVDS:
- 			is_lvds = true;
- 			break;
- 		case INTEL_OUTPUT_SDVO:
- 		case INTEL_OUTPUT_HDMI:
- 			is_sdvo = true;
--			if (intel_encoder->needs_tv_clock)
-+			if (encoder->needs_tv_clock)
- 				is_tv = true;
- 			break;
- 		case INTEL_OUTPUT_DVO:
-@@ -3573,7 +3628,7 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
- 			is_dp = true;
- 			break;
- 		case INTEL_OUTPUT_EDP:
--			has_edp_encoder = intel_encoder;
-+			has_edp_encoder = encoder;
- 			break;
- 		}
- 
-@@ -3583,15 +3638,15 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
- 	if (is_lvds && dev_priv->lvds_use_ssc && num_connectors < 2) {
- 		refclk = dev_priv->lvds_ssc_freq * 1000;
- 		DRM_DEBUG_KMS("using SSC reference clock of %d MHz\n",
--					refclk / 1000);
--	} else if (IS_I9XX(dev)) {
-+			      refclk / 1000);
-+	} else if (!IS_GEN2(dev)) {
- 		refclk = 96000;
--		if (HAS_PCH_SPLIT(dev))
-+		if (HAS_PCH_SPLIT(dev) &&
-+		    (!has_edp_encoder || intel_encoder_is_pch_edp(&has_edp_encoder->base)))
- 			refclk = 120000; /* 120Mhz refclk */
- 	} else {
- 		refclk = 48000;
- 	}
--	
- 
- 	/*
- 	 * Returns a set of divisors for the desired target clock with the given
-@@ -3607,13 +3662,13 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
- 	}
- 
- 	/* Ensure that the cursor is valid for the new mode before changing... */
--	intel_crtc_update_cursor(crtc);
-+	intel_crtc_update_cursor(crtc, true);
- 
- 	if (is_lvds && dev_priv->lvds_downclock_avail) {
- 		has_reduced_clock = limit->find_pll(limit, crtc,
--							    dev_priv->lvds_downclock,
--							    refclk,
--							    &reduced_clock);
-+						    dev_priv->lvds_downclock,
-+						    refclk,
-+						    &reduced_clock);
- 		if (has_reduced_clock && (clock.p != reduced_clock.p)) {
- 			/*
- 			 * If the different P is found, it means that we can't
-@@ -3622,7 +3677,7 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
- 			 * feature.
- 			 */
- 			DRM_DEBUG_KMS("Different P is found for "
--						"LVDS clock/downclock\n");
-+				      "LVDS clock/downclock\n");
- 			has_reduced_clock = 0;
- 		}
- 	}
-@@ -3630,14 +3685,14 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
- 	   this mirrors vbios setting. */
- 	if (is_sdvo && is_tv) {
- 		if (adjusted_mode->clock >= 100000
--				&& adjusted_mode->clock < 140500) {
-+		    && adjusted_mode->clock < 140500) {
- 			clock.p1 = 2;
- 			clock.p2 = 10;
- 			clock.n = 3;
- 			clock.m1 = 16;
- 			clock.m2 = 8;
- 		} else if (adjusted_mode->clock >= 140500
--				&& adjusted_mode->clock <= 200000) {
-+			   && adjusted_mode->clock <= 200000) {
- 			clock.p1 = 1;
- 			clock.p2 = 10;
- 			clock.n = 6;
-@@ -3649,34 +3704,41 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
- 	/* FDI link */
- 	if (HAS_PCH_SPLIT(dev)) {
- 		int lane = 0, link_bw, bpp;
--		/* eDP doesn't require FDI link, so just set DP M/N
-+		/* CPU eDP doesn't require FDI link, so just set DP M/N
- 		   according to current link config */
--		if (has_edp_encoder) {
-+		if (has_edp_encoder && !intel_encoder_is_pch_edp(&encoder->base)) {
- 			target_clock = mode->clock;
- 			intel_edp_link_config(has_edp_encoder,
- 					      &lane, &link_bw);
- 		} else {
--			/* DP over FDI requires target mode clock
-+			/* [e]DP over FDI requires target mode clock
- 			   instead of link clock */
--			if (is_dp)
-+			if (is_dp || intel_encoder_is_pch_edp(&has_edp_encoder->base))
- 				target_clock = mode->clock;
- 			else
- 				target_clock = adjusted_mode->clock;
--			link_bw = 270000;
-+
-+			/* FDI is a binary signal running at ~2.7GHz, encoding
-+			 * each output octet as 10 bits. The actual frequency
-+			 * is stored as a divider into a 100MHz clock, and the
-+			 * mode pixel clock is stored in units of 1KHz.
-+			 * Hence the bw of each lane in terms of the mode signal
-+			 * is:
-+			 */
-+			link_bw = intel_fdi_link_freq(dev) * MHz(100)/KHz(1)/10;
- 		}
- 
- 		/* determine panel color depth */
--		temp = I915_READ(pipeconf_reg);
-+		temp = I915_READ(PIPECONF(pipe));
- 		temp &= ~PIPE_BPC_MASK;
- 		if (is_lvds) {
--			int lvds_reg = I915_READ(PCH_LVDS);
- 			/* the BPC will be 6 if it is 18-bit LVDS panel */
--			if ((lvds_reg & LVDS_A3_POWER_MASK) == LVDS_A3_POWER_UP)
-+			if ((I915_READ(PCH_LVDS) & LVDS_A3_POWER_MASK) == LVDS_A3_POWER_UP)
- 				temp |= PIPE_8BPC;
- 			else
- 				temp |= PIPE_6BPC;
--		} else if (has_edp_encoder || (is_dp && intel_pch_has_edp(crtc))) {
--			switch (dev_priv->edp_bpp/3) {
-+		} else if (has_edp_encoder) {
-+			switch (dev_priv->edp.bpp/3) {
- 			case 8:
- 				temp |= PIPE_8BPC;
- 				break;
-@@ -3692,8 +3754,7 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
- 			}
- 		} else
- 			temp |= PIPE_8BPC;
--		I915_WRITE(pipeconf_reg, temp);
--		I915_READ(pipeconf_reg);
-+		I915_WRITE(PIPECONF(pipe), temp);
- 
- 		switch (temp & PIPE_BPC_MASK) {
- 		case PIPE_8BPC:
-@@ -3738,33 +3799,39 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
- 		/* Always enable nonspread source */
- 		temp &= ~DREF_NONSPREAD_SOURCE_MASK;
- 		temp |= DREF_NONSPREAD_SOURCE_ENABLE;
--		I915_WRITE(PCH_DREF_CONTROL, temp);
--		POSTING_READ(PCH_DREF_CONTROL);
--
- 		temp &= ~DREF_SSC_SOURCE_MASK;
- 		temp |= DREF_SSC_SOURCE_ENABLE;
- 		I915_WRITE(PCH_DREF_CONTROL, temp);
--		POSTING_READ(PCH_DREF_CONTROL);
- 
-+		POSTING_READ(PCH_DREF_CONTROL);
- 		udelay(200);
- 
- 		if (has_edp_encoder) {
- 			if (dev_priv->lvds_use_ssc) {
- 				temp |= DREF_SSC1_ENABLE;
- 				I915_WRITE(PCH_DREF_CONTROL, temp);
--				POSTING_READ(PCH_DREF_CONTROL);
--
--				udelay(200);
- 
--				temp &= ~DREF_CPU_SOURCE_OUTPUT_MASK;
--				temp |= DREF_CPU_SOURCE_OUTPUT_DOWNSPREAD;
--				I915_WRITE(PCH_DREF_CONTROL, temp);
- 				POSTING_READ(PCH_DREF_CONTROL);
-+				udelay(200);
-+			}
-+			temp &= ~DREF_CPU_SOURCE_OUTPUT_MASK;
-+
-+			/* Enable CPU source on CPU attached eDP */
-+			if (!intel_encoder_is_pch_edp(&has_edp_encoder->base)) {
-+				if (dev_priv->lvds_use_ssc)
-+					temp |= DREF_CPU_SOURCE_OUTPUT_DOWNSPREAD;
-+				else
-+					temp |= DREF_CPU_SOURCE_OUTPUT_NONSPREAD;
- 			} else {
--				temp |= DREF_CPU_SOURCE_OUTPUT_NONSPREAD;
--				I915_WRITE(PCH_DREF_CONTROL, temp);
--				POSTING_READ(PCH_DREF_CONTROL);
-+				/* Enable SSC on PCH eDP if needed */
-+				if (dev_priv->lvds_use_ssc) {
-+					DRM_ERROR("enabling SSC on PCH\n");
-+					temp |= DREF_SUPERSPREAD_SOURCE_ENABLE;
-+				}
- 			}
-+			I915_WRITE(PCH_DREF_CONTROL, temp);
-+			POSTING_READ(PCH_DREF_CONTROL);
-+			udelay(200);
- 		}
- 	}
- 
-@@ -3780,23 +3847,26 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
- 				reduced_clock.m2;
- 	}
- 
-+	dpll = 0;
- 	if (!HAS_PCH_SPLIT(dev))
- 		dpll = DPLL_VGA_MODE_DIS;
- 
--	if (IS_I9XX(dev)) {
-+	if (!IS_GEN2(dev)) {
- 		if (is_lvds)
- 			dpll |= DPLLB_MODE_LVDS;
- 		else
- 			dpll |= DPLLB_MODE_DAC_SERIAL;
- 		if (is_sdvo) {
-+			int pixel_multiplier = intel_mode_get_pixel_multiplier(adjusted_mode);
-+			if (pixel_multiplier > 1) {
-+				if (IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev))
-+					dpll |= (pixel_multiplier - 1) << SDVO_MULTIPLIER_SHIFT_HIRES;
-+				else if (HAS_PCH_SPLIT(dev))
-+					dpll |= (pixel_multiplier - 1) << PLL_REF_SDVO_HDMI_MULTIPLIER_SHIFT;
-+			}
- 			dpll |= DPLL_DVO_HIGH_SPEED;
--			sdvo_pixel_multiply = adjusted_mode->clock / mode->clock;
--			if (IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev))
--				dpll |= (sdvo_pixel_multiply - 1) << SDVO_MULTIPLIER_SHIFT_HIRES;
--			else if (HAS_PCH_SPLIT(dev))
--				dpll |= (sdvo_pixel_multiply - 1) << PLL_REF_SDVO_HDMI_MULTIPLIER_SHIFT;
- 		}
--		if (is_dp)
-+		if (is_dp || intel_encoder_is_pch_edp(&has_edp_encoder->base))
- 			dpll |= DPLL_DVO_HIGH_SPEED;
- 
- 		/* compute bitmask from p1 value */
-@@ -3824,7 +3894,7 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
- 			dpll |= DPLLB_LVDS_P2_CLOCK_DIV_14;
- 			break;
- 		}
--		if (IS_I965G(dev) && !HAS_PCH_SPLIT(dev))
-+		if (INTEL_INFO(dev)->gen >= 4 && !HAS_PCH_SPLIT(dev))
- 			dpll |= (6 << PLL_LOAD_PULSE_PHASE_SHIFT);
- 	} else {
- 		if (is_lvds) {
-@@ -3851,7 +3921,7 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
- 		dpll |= PLL_REF_INPUT_DREFCLK;
- 
- 	/* setup pipeconf */
--	pipeconf = I915_READ(pipeconf_reg);
-+	pipeconf = I915_READ(PIPECONF(pipe));
- 
- 	/* Set up the display plane register */
- 	dspcntr = DISPPLANE_GAMMA_ENABLE;
-@@ -3865,7 +3935,7 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
- 			dspcntr |= DISPPLANE_SEL_PIPE_B;
- 	}
- 
--	if (pipe == 0 && !IS_I965G(dev)) {
-+	if (pipe == 0 && INTEL_INFO(dev)->gen < 4) {
- 		/* Enable pixel doubling when the dot clock is > 90% of the (display)
- 		 * core speed.
- 		 *
-@@ -3874,51 +3944,47 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
- 		 */
- 		if (mode->clock >
- 		    dev_priv->display.get_display_clock_speed(dev) * 9 / 10)
--			pipeconf |= PIPEACONF_DOUBLE_WIDE;
-+			pipeconf |= PIPECONF_DOUBLE_WIDE;
- 		else
--			pipeconf &= ~PIPEACONF_DOUBLE_WIDE;
-+			pipeconf &= ~PIPECONF_DOUBLE_WIDE;
- 	}
- 
- 	dspcntr |= DISPLAY_PLANE_ENABLE;
--	pipeconf |= PIPEACONF_ENABLE;
-+	pipeconf |= PIPECONF_ENABLE;
- 	dpll |= DPLL_VCO_ENABLE;
- 
--
--	/* Disable the panel fitter if it was on our pipe */
--	if (!HAS_PCH_SPLIT(dev) && intel_panel_fitter_pipe(dev) == pipe)
--		I915_WRITE(PFIT_CONTROL, 0);
--
- 	DRM_DEBUG_KMS("Mode for pipe %c:\n", pipe == 0 ? 'A' : 'B');
- 	drm_mode_debug_printmodeline(mode);
- 
- 	/* assign to Ironlake registers */
- 	if (HAS_PCH_SPLIT(dev)) {
--		fp_reg = pch_fp_reg;
--		dpll_reg = pch_dpll_reg;
-+		fp_reg = PCH_FP0(pipe);
-+		dpll_reg = PCH_DPLL(pipe);
-+	} else {
-+		fp_reg = FP0(pipe);
-+		dpll_reg = DPLL(pipe);
- 	}
- 
--	if (!has_edp_encoder) {
-+	/* PCH eDP needs FDI, but CPU eDP does not */
-+	if (!has_edp_encoder || intel_encoder_is_pch_edp(&has_edp_encoder->base)) {
- 		I915_WRITE(fp_reg, fp);
- 		I915_WRITE(dpll_reg, dpll & ~DPLL_VCO_ENABLE);
--		I915_READ(dpll_reg);
-+
-+		POSTING_READ(dpll_reg);
- 		udelay(150);
- 	}
- 
- 	/* enable transcoder DPLL */
- 	if (HAS_PCH_CPT(dev)) {
- 		temp = I915_READ(PCH_DPLL_SEL);
--		if (trans_dpll_sel == 0)
--			temp |= (TRANSA_DPLL_ENABLE | TRANSA_DPLLA_SEL);
-+		if (pipe == 0)
-+			temp |= TRANSA_DPLL_ENABLE | TRANSA_DPLLA_SEL;
- 		else
--			temp |=	(TRANSB_DPLL_ENABLE | TRANSB_DPLLB_SEL);
-+			temp |=	TRANSB_DPLL_ENABLE | TRANSB_DPLLB_SEL;
- 		I915_WRITE(PCH_DPLL_SEL, temp);
--		I915_READ(PCH_DPLL_SEL);
--		udelay(150);
--	}
- 
--	if (HAS_PCH_SPLIT(dev)) {
--		pipeconf &= ~PIPE_ENABLE_DITHER;
--		pipeconf &= ~PIPE_DITHER_TYPE_MASK;
-+		POSTING_READ(PCH_DPLL_SEL);
-+		udelay(150);
- 	}
- 
- 	/* The LVDS pin pair needs to be on before the DPLLs are enabled.
-@@ -3926,58 +3992,60 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
- 	 * things on.
- 	 */
- 	if (is_lvds) {
--		u32 lvds;
--
-+		reg = LVDS;
- 		if (HAS_PCH_SPLIT(dev))
--			lvds_reg = PCH_LVDS;
-+			reg = PCH_LVDS;
- 
--		lvds = I915_READ(lvds_reg);
--		lvds |= LVDS_PORT_EN | LVDS_A0A2_CLKA_POWER_UP;
-+		temp = I915_READ(reg);
-+		temp |= LVDS_PORT_EN | LVDS_A0A2_CLKA_POWER_UP;
- 		if (pipe == 1) {
- 			if (HAS_PCH_CPT(dev))
--				lvds |= PORT_TRANS_B_SEL_CPT;
-+				temp |= PORT_TRANS_B_SEL_CPT;
- 			else
--				lvds |= LVDS_PIPEB_SELECT;
-+				temp |= LVDS_PIPEB_SELECT;
- 		} else {
- 			if (HAS_PCH_CPT(dev))
--				lvds &= ~PORT_TRANS_SEL_MASK;
-+				temp &= ~PORT_TRANS_SEL_MASK;
- 			else
--				lvds &= ~LVDS_PIPEB_SELECT;
-+				temp &= ~LVDS_PIPEB_SELECT;
- 		}
- 		/* set the corresponsding LVDS_BORDER bit */
--		lvds |= dev_priv->lvds_border_bits;
-+		temp |= dev_priv->lvds_border_bits;
- 		/* Set the B0-B3 data pairs corresponding to whether we're going to
- 		 * set the DPLLs for dual-channel mode or not.
- 		 */
- 		if (clock.p2 == 7)
--			lvds |= LVDS_B0B3_POWER_UP | LVDS_CLKB_POWER_UP;
-+			temp |= LVDS_B0B3_POWER_UP | LVDS_CLKB_POWER_UP;
- 		else
--			lvds &= ~(LVDS_B0B3_POWER_UP | LVDS_CLKB_POWER_UP);
-+			temp &= ~(LVDS_B0B3_POWER_UP | LVDS_CLKB_POWER_UP);
- 
- 		/* It would be nice to set 24 vs 18-bit mode (LVDS_A3_POWER_UP)
- 		 * appropriately here, but we need to look more thoroughly into how
- 		 * panels behave in the two modes.
- 		 */
--		/* set the dithering flag */
--		if (IS_I965G(dev)) {
--			if (dev_priv->lvds_dither) {
--				if (HAS_PCH_SPLIT(dev)) {
--					pipeconf |= PIPE_ENABLE_DITHER;
--					pipeconf |= PIPE_DITHER_TYPE_ST01;
--				} else
--					lvds |= LVDS_ENABLE_DITHER;
--			} else {
--				if (!HAS_PCH_SPLIT(dev)) {
--					lvds &= ~LVDS_ENABLE_DITHER;
--				}
--			}
-+		/* set the dithering flag on non-PCH LVDS as needed */
-+		if (INTEL_INFO(dev)->gen >= 4 && !HAS_PCH_SPLIT(dev)) {
-+			if (dev_priv->lvds_dither)
-+				temp |= LVDS_ENABLE_DITHER;
-+			else
-+				temp &= ~LVDS_ENABLE_DITHER;
-+		}
-+		I915_WRITE(reg, temp);
-+	}
-+
-+	/* set the dithering flag and clear for anything other than a panel. */
-+	if (HAS_PCH_SPLIT(dev)) {
-+		pipeconf &= ~PIPECONF_DITHER_EN;
-+		pipeconf &= ~PIPECONF_DITHER_TYPE_MASK;
-+		if (dev_priv->lvds_dither && (is_lvds || has_edp_encoder)) {
-+			pipeconf |= PIPECONF_DITHER_EN;
-+			pipeconf |= PIPECONF_DITHER_TYPE_ST1;
- 		}
--		I915_WRITE(lvds_reg, lvds);
--		I915_READ(lvds_reg);
- 	}
--	if (is_dp)
-+
-+	if (is_dp || intel_encoder_is_pch_edp(&has_edp_encoder->base)) {
- 		intel_dp_set_m_n(crtc, mode, adjusted_mode);
--	else if (HAS_PCH_SPLIT(dev)) {
-+	} else if (HAS_PCH_SPLIT(dev)) {
- 		/* For non-DP output, clear any trans DP clock recovery setting.*/
- 		if (pipe == 0) {
- 			I915_WRITE(TRANSA_DATA_M1, 0);
-@@ -3992,29 +4060,35 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
- 		}
- 	}
- 
--	if (!has_edp_encoder) {
-+	if (!has_edp_encoder || intel_encoder_is_pch_edp(&has_edp_encoder->base)) {
- 		I915_WRITE(fp_reg, fp);
- 		I915_WRITE(dpll_reg, dpll);
--		I915_READ(dpll_reg);
-+
- 		/* Wait for the clocks to stabilize. */
-+		POSTING_READ(dpll_reg);
- 		udelay(150);
- 
--		if (IS_I965G(dev) && !HAS_PCH_SPLIT(dev)) {
-+		if (INTEL_INFO(dev)->gen >= 4 && !HAS_PCH_SPLIT(dev)) {
-+			temp = 0;
- 			if (is_sdvo) {
--				sdvo_pixel_multiply = adjusted_mode->clock / mode->clock;
--				I915_WRITE(dpll_md_reg, (0 << DPLL_MD_UDI_DIVIDER_SHIFT) |
--					((sdvo_pixel_multiply - 1) << DPLL_MD_UDI_MULTIPLIER_SHIFT));
--			} else
--				I915_WRITE(dpll_md_reg, 0);
-+				temp = intel_mode_get_pixel_multiplier(adjusted_mode);
-+				if (temp > 1)
-+					temp = (temp - 1) << DPLL_MD_UDI_MULTIPLIER_SHIFT;
-+				else
-+					temp = 0;
-+			}
-+			I915_WRITE(DPLL_MD(pipe), temp);
- 		} else {
- 			/* write it again -- the BIOS does, after all */
- 			I915_WRITE(dpll_reg, dpll);
- 		}
--		I915_READ(dpll_reg);
-+
- 		/* Wait for the clocks to stabilize. */
-+		POSTING_READ(dpll_reg);
- 		udelay(150);
- 	}
- 
-+	intel_crtc->lowfreq_avail = false;
- 	if (is_lvds && has_reduced_clock && i915_powersave) {
- 		I915_WRITE(fp_reg + 4, fp2);
- 		intel_crtc->lowfreq_avail = true;
-@@ -4024,7 +4098,6 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
- 		}
- 	} else {
- 		I915_WRITE(fp_reg + 4, fp);
--		intel_crtc->lowfreq_avail = false;
- 		if (HAS_PIPE_CXSR(dev)) {
- 			DRM_DEBUG_KMS("disabling CxSR downclocking\n");
- 			pipeconf &= ~PIPECONF_CXSR_DOWNCLOCK;
-@@ -4043,70 +4116,62 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
- 	} else
- 		pipeconf &= ~PIPECONF_INTERLACE_W_FIELD_INDICATION; /* progressive */
- 
--	I915_WRITE(htot_reg, (adjusted_mode->crtc_hdisplay - 1) |
-+	I915_WRITE(HTOTAL(pipe),
-+		   (adjusted_mode->crtc_hdisplay - 1) |
- 		   ((adjusted_mode->crtc_htotal - 1) << 16));
--	I915_WRITE(hblank_reg, (adjusted_mode->crtc_hblank_start - 1) |
-+	I915_WRITE(HBLANK(pipe),
-+		   (adjusted_mode->crtc_hblank_start - 1) |
- 		   ((adjusted_mode->crtc_hblank_end - 1) << 16));
--	I915_WRITE(hsync_reg, (adjusted_mode->crtc_hsync_start - 1) |
-+	I915_WRITE(HSYNC(pipe),
-+		   (adjusted_mode->crtc_hsync_start - 1) |
- 		   ((adjusted_mode->crtc_hsync_end - 1) << 16));
--	I915_WRITE(vtot_reg, (adjusted_mode->crtc_vdisplay - 1) |
-+
-+	I915_WRITE(VTOTAL(pipe),
-+		   (adjusted_mode->crtc_vdisplay - 1) |
- 		   ((adjusted_mode->crtc_vtotal - 1) << 16));
--	I915_WRITE(vblank_reg, (adjusted_mode->crtc_vblank_start - 1) |
-+	I915_WRITE(VBLANK(pipe),
-+		   (adjusted_mode->crtc_vblank_start - 1) |
- 		   ((adjusted_mode->crtc_vblank_end - 1) << 16));
--	I915_WRITE(vsync_reg, (adjusted_mode->crtc_vsync_start - 1) |
-+	I915_WRITE(VSYNC(pipe),
-+		   (adjusted_mode->crtc_vsync_start - 1) |
- 		   ((adjusted_mode->crtc_vsync_end - 1) << 16));
--	/* pipesrc and dspsize control the size that is scaled from, which should
--	 * always be the user's requested size.
-+
-+	/* pipesrc and dspsize control the size that is scaled from,
-+	 * which should always be the user's requested size.
- 	 */
- 	if (!HAS_PCH_SPLIT(dev)) {
--		I915_WRITE(dspsize_reg, ((mode->vdisplay - 1) << 16) |
--				(mode->hdisplay - 1));
--		I915_WRITE(dsppos_reg, 0);
-+		I915_WRITE(DSPSIZE(plane),
-+			   ((mode->vdisplay - 1) << 16) |
-+			   (mode->hdisplay - 1));
-+		I915_WRITE(DSPPOS(plane), 0);
- 	}
--	I915_WRITE(pipesrc_reg, ((mode->hdisplay - 1) << 16) | (mode->vdisplay - 1));
-+	I915_WRITE(PIPESRC(pipe),
-+		   ((mode->hdisplay - 1) << 16) | (mode->vdisplay - 1));
- 
- 	if (HAS_PCH_SPLIT(dev)) {
--		I915_WRITE(data_m1_reg, TU_SIZE(m_n.tu) | m_n.gmch_m);
--		I915_WRITE(data_n1_reg, TU_SIZE(m_n.tu) | m_n.gmch_n);
--		I915_WRITE(link_m1_reg, m_n.link_m);
--		I915_WRITE(link_n1_reg, m_n.link_n);
-+		I915_WRITE(PIPE_DATA_M1(pipe), TU_SIZE(m_n.tu) | m_n.gmch_m);
-+		I915_WRITE(PIPE_DATA_N1(pipe), m_n.gmch_n);
-+		I915_WRITE(PIPE_LINK_M1(pipe), m_n.link_m);
-+		I915_WRITE(PIPE_LINK_N1(pipe), m_n.link_n);
- 
--		if (has_edp_encoder) {
-+		if (has_edp_encoder && !intel_encoder_is_pch_edp(&has_edp_encoder->base)) {
- 			ironlake_set_pll_edp(crtc, adjusted_mode->clock);
--		} else {
--			/* enable FDI RX PLL too */
--			temp = I915_READ(fdi_rx_reg);
--			I915_WRITE(fdi_rx_reg, temp | FDI_RX_PLL_ENABLE);
--			I915_READ(fdi_rx_reg);
--			udelay(200);
--
--			/* enable FDI TX PLL too */
--			temp = I915_READ(fdi_tx_reg);
--			I915_WRITE(fdi_tx_reg, temp | FDI_TX_PLL_ENABLE);
--			I915_READ(fdi_tx_reg);
--
--			/* enable FDI RX PCDCLK */
--			temp = I915_READ(fdi_rx_reg);
--			I915_WRITE(fdi_rx_reg, temp | FDI_SEL_PCDCLK);
--			I915_READ(fdi_rx_reg);
--			udelay(200);
- 		}
- 	}
- 
--	I915_WRITE(pipeconf_reg, pipeconf);
--	I915_READ(pipeconf_reg);
-+	I915_WRITE(PIPECONF(pipe), pipeconf);
-+	POSTING_READ(PIPECONF(pipe));
- 
- 	intel_wait_for_vblank(dev, pipe);
- 
--	if (IS_IRONLAKE(dev)) {
-+	if (IS_GEN5(dev)) {
- 		/* enable address swizzle for tiling buffer */
- 		temp = I915_READ(DISP_ARB_CTL);
- 		I915_WRITE(DISP_ARB_CTL, temp | DISP_TILE_SURFACE_SWIZZLING);
- 	}
- 
--	I915_WRITE(dspcntr_reg, dspcntr);
-+	I915_WRITE(DSPCNTR(plane), dspcntr);
- 
--	/* Flush the plane changes */
- 	ret = intel_pipe_set_base(crtc, x, y, old_fb);
- 
- 	intel_update_watermarks(dev);
-@@ -4199,7 +4264,8 @@ static void i9xx_update_cursor(struct drm_crtc *crtc, u32 base)
- }
- 
- /* If no-part of the cursor is visible on the framebuffer, then the GPU may hang... */
--static void intel_crtc_update_cursor(struct drm_crtc *crtc)
-+static void intel_crtc_update_cursor(struct drm_crtc *crtc,
-+				     bool on)
- {
- 	struct drm_device *dev = crtc->dev;
- 	struct drm_i915_private *dev_priv = dev->dev_private;
-@@ -4212,7 +4278,7 @@ static void intel_crtc_update_cursor(struct drm_crtc *crtc)
- 
- 	pos = 0;
- 
--	if (intel_crtc->cursor_on && crtc->fb) {
-+	if (on && crtc->enabled && crtc->fb) {
- 		base = intel_crtc->cursor_addr;
- 		if (x > (int) crtc->fb->width)
- 			base = 0;
-@@ -4324,7 +4390,7 @@ static int intel_crtc_cursor_set(struct drm_crtc *crtc,
- 		addr = obj_priv->phys_obj->handle->busaddr;
- 	}
- 
--	if (!IS_I9XX(dev))
-+	if (IS_GEN2(dev))
- 		I915_WRITE(CURSIZE, (height << 12) | width);
- 
-  finish:
-@@ -4344,7 +4410,7 @@ static int intel_crtc_cursor_set(struct drm_crtc *crtc,
- 	intel_crtc->cursor_width = width;
- 	intel_crtc->cursor_height = height;
- 
--	intel_crtc_update_cursor(crtc);
-+	intel_crtc_update_cursor(crtc, true);
- 
- 	return 0;
- fail_unpin:
-@@ -4363,7 +4429,7 @@ static int intel_crtc_cursor_move(struct drm_crtc *crtc, int x, int y)
- 	intel_crtc->cursor_x = x;
- 	intel_crtc->cursor_y = y;
- 
--	intel_crtc_update_cursor(crtc);
-+	intel_crtc_update_cursor(crtc, true);
- 
- 	return 0;
- }
-@@ -4432,7 +4498,7 @@ struct drm_crtc *intel_get_load_detect_pipe(struct intel_encoder *intel_encoder,
- 	struct intel_crtc *intel_crtc;
- 	struct drm_crtc *possible_crtc;
- 	struct drm_crtc *supported_crtc =NULL;
--	struct drm_encoder *encoder = &intel_encoder->enc;
-+	struct drm_encoder *encoder = &intel_encoder->base;
- 	struct drm_crtc *crtc = NULL;
- 	struct drm_device *dev = encoder->dev;
- 	struct drm_encoder_helper_funcs *encoder_funcs = encoder->helper_private;
-@@ -4513,7 +4579,7 @@ struct drm_crtc *intel_get_load_detect_pipe(struct intel_encoder *intel_encoder,
- void intel_release_load_detect_pipe(struct intel_encoder *intel_encoder,
- 				    struct drm_connector *connector, int dpms_mode)
- {
--	struct drm_encoder *encoder = &intel_encoder->enc;
-+	struct drm_encoder *encoder = &intel_encoder->base;
- 	struct drm_device *dev = encoder->dev;
- 	struct drm_crtc *crtc = encoder->crtc;
- 	struct drm_encoder_helper_funcs *encoder_funcs = encoder->helper_private;
-@@ -4559,7 +4625,7 @@ static int intel_crtc_clock_get(struct drm_device *dev, struct drm_crtc *crtc)
- 		clock.m2 = (fp & FP_M2_DIV_MASK) >> FP_M2_DIV_SHIFT;
- 	}
- 
--	if (IS_I9XX(dev)) {
-+	if (!IS_GEN2(dev)) {
- 		if (IS_PINEVIEW(dev))
- 			clock.p1 = ffs((dpll & DPLL_FPA01_P1_POST_DIV_MASK_PINEVIEW) >>
- 				DPLL_FPA01_P1_POST_DIV_SHIFT_PINEVIEW);
-@@ -4663,8 +4729,6 @@ static void intel_gpu_idle_timer(unsigned long arg)
- 	struct drm_device *dev = (struct drm_device *)arg;
- 	drm_i915_private_t *dev_priv = dev->dev_private;
- 
--	DRM_DEBUG_DRIVER("idle timer fired, downclocking\n");
--
- 	dev_priv->busy = false;
- 
- 	queue_work(dev_priv->wq, &dev_priv->idle_work);
-@@ -4678,14 +4742,12 @@ static void intel_crtc_idle_timer(unsigned long arg)
- 	struct drm_crtc *crtc = &intel_crtc->base;
- 	drm_i915_private_t *dev_priv = crtc->dev->dev_private;
- 
--	DRM_DEBUG_DRIVER("idle timer fired, downclocking\n");
--
- 	intel_crtc->busy = false;
- 
- 	queue_work(dev_priv->wq, &dev_priv->idle_work);
- }
- 
--static void intel_increase_pllclock(struct drm_crtc *crtc, bool schedule)
-+static void intel_increase_pllclock(struct drm_crtc *crtc)
- {
- 	struct drm_device *dev = crtc->dev;
- 	drm_i915_private_t *dev_priv = dev->dev_private;
-@@ -4720,9 +4782,8 @@ static void intel_increase_pllclock(struct drm_crtc *crtc, bool schedule)
- 	}
- 
- 	/* Schedule downclock */
--	if (schedule)
--		mod_timer(&intel_crtc->idle_timer, jiffies +
--			  msecs_to_jiffies(CRTC_IDLE_TIMEOUT));
-+	mod_timer(&intel_crtc->idle_timer, jiffies +
-+		  msecs_to_jiffies(CRTC_IDLE_TIMEOUT));
- }
- 
- static void intel_decrease_pllclock(struct drm_crtc *crtc)
-@@ -4858,7 +4919,7 @@ void intel_mark_busy(struct drm_device *dev, struct drm_gem_object *obj)
- 					I915_WRITE(FW_BLC_SELF, fw_blc_self | FW_BLC_SELF_EN_MASK);
- 				}
- 				/* Non-busy -> busy, upclock */
--				intel_increase_pllclock(crtc, true);
-+				intel_increase_pllclock(crtc);
- 				intel_crtc->busy = true;
- 			} else {
- 				/* Busy -> busy, put off timer */
-@@ -4872,8 +4933,22 @@ void intel_mark_busy(struct drm_device *dev, struct drm_gem_object *obj)
- static void intel_crtc_destroy(struct drm_crtc *crtc)
- {
- 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
-+	struct drm_device *dev = crtc->dev;
-+	struct intel_unpin_work *work;
-+	unsigned long flags;
-+
-+	spin_lock_irqsave(&dev->event_lock, flags);
-+	work = intel_crtc->unpin_work;
-+	intel_crtc->unpin_work = NULL;
-+	spin_unlock_irqrestore(&dev->event_lock, flags);
-+
-+	if (work) {
-+		cancel_work_sync(&work->work);
-+		kfree(work);
-+	}
- 
- 	drm_crtc_cleanup(crtc);
-+
- 	kfree(intel_crtc);
- }
- 
-@@ -4928,12 +5003,11 @@ static void do_intel_finish_page_flip(struct drm_device *dev,
- 
- 	spin_unlock_irqrestore(&dev->event_lock, flags);
- 
--	obj_priv = to_intel_bo(work->pending_flip_obj);
--
--	/* Initial scanout buffer will have a 0 pending flip count */
--	if ((atomic_read(&obj_priv->pending_flip) == 0) ||
--	    atomic_dec_and_test(&obj_priv->pending_flip))
--		DRM_WAKEUP(&dev_priv->pending_flip_queue);
-+	obj_priv = to_intel_bo(work->old_fb_obj);
-+	atomic_clear_mask(1 << intel_crtc->plane,
-+			  &obj_priv->pending_flip.counter);
-+	if (atomic_read(&obj_priv->pending_flip) == 0)
-+		wake_up(&dev_priv->pending_flip_queue);
- 	schedule_work(&work->work);
- 
- 	trace_i915_flip_complete(intel_crtc->plane, work->pending_flip_obj);
-@@ -5014,7 +5088,7 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
- 	obj = intel_fb->obj;
- 
- 	mutex_lock(&dev->struct_mutex);
--	ret = intel_pin_and_fence_fb_obj(dev, obj);
-+	ret = intel_pin_and_fence_fb_obj(dev, obj, true);
- 	if (ret)
- 		goto cleanup_work;
- 
-@@ -5023,29 +5097,33 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
- 	drm_gem_object_reference(obj);
- 
- 	crtc->fb = fb;
--	ret = i915_gem_object_flush_write_domain(obj);
--	if (ret)
--		goto cleanup_objs;
- 
- 	ret = drm_vblank_get(dev, intel_crtc->pipe);
- 	if (ret)
- 		goto cleanup_objs;
- 
--	obj_priv = to_intel_bo(obj);
--	atomic_inc(&obj_priv->pending_flip);
-+	/* Block clients from rendering to the new back buffer until
-+	 * the flip occurs and the object is no longer visible.
-+	 */
-+	atomic_add(1 << intel_crtc->plane,
-+		   &to_intel_bo(work->old_fb_obj)->pending_flip);
-+
- 	work->pending_flip_obj = obj;
-+	obj_priv = to_intel_bo(obj);
- 
- 	if (IS_GEN3(dev) || IS_GEN2(dev)) {
- 		u32 flip_mask;
- 
-+		/* Can't queue multiple flips, so wait for the previous
-+		 * one to finish before executing the next.
-+		 */
-+		BEGIN_LP_RING(2);
- 		if (intel_crtc->plane)
- 			flip_mask = MI_WAIT_FOR_PLANE_B_FLIP;
- 		else
- 			flip_mask = MI_WAIT_FOR_PLANE_A_FLIP;
--
--		BEGIN_LP_RING(2);
- 		OUT_RING(MI_WAIT_FOR_EVENT | flip_mask);
--		OUT_RING(0);
-+		OUT_RING(MI_NOOP);
- 		ADVANCE_LP_RING();
- 	}
- 
-@@ -5126,15 +5204,14 @@ cleanup_work:
- 	return ret;
- }
- 
--static const struct drm_crtc_helper_funcs intel_helper_funcs = {
-+static struct drm_crtc_helper_funcs intel_helper_funcs = {
- 	.dpms = intel_crtc_dpms,
- 	.mode_fixup = intel_crtc_mode_fixup,
- 	.mode_set = intel_crtc_mode_set,
- 	.mode_set_base = intel_pipe_set_base,
- 	.mode_set_base_atomic = intel_pipe_set_base_atomic,
--	.prepare = intel_crtc_prepare,
--	.commit = intel_crtc_commit,
- 	.load_lut = intel_crtc_load_lut,
-+	.disable = intel_crtc_disable,
- };
- 
- static const struct drm_crtc_funcs intel_crtc_funcs = {
-@@ -5160,8 +5237,6 @@ static void intel_crtc_init(struct drm_device *dev, int pipe)
- 	drm_crtc_init(dev, &intel_crtc->base, &intel_crtc_funcs);
- 
- 	drm_mode_crtc_set_gamma_size(&intel_crtc->base, 256);
--	intel_crtc->pipe = pipe;
--	intel_crtc->plane = pipe;
- 	for (i = 0; i < 256; i++) {
- 		intel_crtc->lut_r[i] = i;
- 		intel_crtc->lut_g[i] = i;
-@@ -5171,9 +5246,9 @@ static void intel_crtc_init(struct drm_device *dev, int pipe)
- 	/* Swap pipes & planes for FBC on pre-965 */
- 	intel_crtc->pipe = pipe;
- 	intel_crtc->plane = pipe;
--	if (IS_MOBILE(dev) && (IS_I9XX(dev) && !IS_I965G(dev))) {
-+	if (IS_MOBILE(dev) && IS_GEN3(dev)) {
- 		DRM_DEBUG_KMS("swapping pipes & planes for FBC\n");
--		intel_crtc->plane = ((pipe == 0) ? 1 : 0);
-+		intel_crtc->plane = !pipe;
- 	}
- 
- 	BUG_ON(pipe >= ARRAY_SIZE(dev_priv->plane_to_crtc_mapping) ||
-@@ -5183,6 +5258,16 @@ static void intel_crtc_init(struct drm_device *dev, int pipe)
- 
- 	intel_crtc->cursor_addr = 0;
- 	intel_crtc->dpms_mode = -1;
-+	intel_crtc->active = true; /* force the pipe off on setup_init_config */
-+
-+	if (HAS_PCH_SPLIT(dev)) {
-+		intel_helper_funcs.prepare = ironlake_crtc_prepare;
-+		intel_helper_funcs.commit = ironlake_crtc_commit;
-+	} else {
-+		intel_helper_funcs.prepare = i9xx_crtc_prepare;
-+		intel_helper_funcs.commit = i9xx_crtc_commit;
-+	}
-+
- 	drm_crtc_helper_add(&intel_crtc->base, &intel_helper_funcs);
- 
- 	intel_crtc->busy = false;
-@@ -5218,38 +5303,25 @@ int intel_get_pipe_from_crtc_id(struct drm_device *dev, void *data,
- 	return 0;
- }
- 
--struct drm_crtc *intel_get_crtc_from_pipe(struct drm_device *dev, int pipe)
--{
--	struct drm_crtc *crtc = NULL;
--
--	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
--		struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
--		if (intel_crtc->pipe == pipe)
--			break;
--	}
--	return crtc;
--}
--
- static int intel_encoder_clones(struct drm_device *dev, int type_mask)
- {
-+	struct intel_encoder *encoder;
- 	int index_mask = 0;
--	struct drm_encoder *encoder;
- 	int entry = 0;
- 
--        list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
--		struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder);
--		if (type_mask & intel_encoder->clone_mask)
-+	list_for_each_entry(encoder, &dev->mode_config.encoder_list, base.head) {
-+		if (type_mask & encoder->clone_mask)
- 			index_mask |= (1 << entry);
- 		entry++;
- 	}
-+
- 	return index_mask;
- }
- 
--
- static void intel_setup_outputs(struct drm_device *dev)
- {
- 	struct drm_i915_private *dev_priv = dev->dev_private;
--	struct drm_encoder *encoder;
-+	struct intel_encoder *encoder;
- 	bool dpd_is_edp = false;
- 
- 	if (IS_MOBILE(dev) && !IS_I830(dev))
-@@ -5338,12 +5410,10 @@ static void intel_setup_outputs(struct drm_device *dev)
- 	if (SUPPORTS_TV(dev))
- 		intel_tv_init(dev);
- 
--	list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
--		struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder);
--
--		encoder->possible_crtcs = intel_encoder->crtc_mask;
--		encoder->possible_clones = intel_encoder_clones(dev,
--						intel_encoder->clone_mask);
-+	list_for_each_entry(encoder, &dev->mode_config.encoder_list, base.head) {
-+		encoder->base.possible_crtcs = encoder->crtc_mask;
-+		encoder->base.possible_clones =
-+			intel_encoder_clones(dev, encoder->clone_mask);
- 	}
- }
- 
-@@ -5377,8 +5447,25 @@ int intel_framebuffer_init(struct drm_device *dev,
- 			   struct drm_mode_fb_cmd *mode_cmd,
- 			   struct drm_gem_object *obj)
- {
-+	struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
- 	int ret;
- 
-+	if (obj_priv->tiling_mode == I915_TILING_Y)
-+		return -EINVAL;
-+
-+	if (mode_cmd->pitch & 63)
-+		return -EINVAL;
-+
-+	switch (mode_cmd->bpp) {
-+	case 8:
-+	case 16:
-+	case 24:
-+	case 32:
-+		break;
-+	default:
-+		return -EINVAL;
-+	}
-+
- 	ret = drm_framebuffer_init(dev, &intel_fb->base, &intel_fb_funcs);
- 	if (ret) {
- 		DRM_ERROR("framebuffer init failed %d\n", ret);
-@@ -5487,6 +5574,10 @@ void ironlake_enable_drps(struct drm_device *dev)
- 	u32 rgvmodectl = I915_READ(MEMMODECTL);
- 	u8 fmax, fmin, fstart, vstart;
- 
-+	/* Enable temp reporting */
-+	I915_WRITE16(PMMISC, I915_READ(PMMISC) | MCPPCE_EN);
-+	I915_WRITE16(TSC1, I915_READ(TSC1) | TSE);
-+
- 	/* 100ms RC evaluation intervals */
- 	I915_WRITE(RCUPEI, 100000);
- 	I915_WRITE(RCDNEI, 100000);
-@@ -5502,20 +5593,19 @@ void ironlake_enable_drps(struct drm_device *dev)
- 	fmin = (rgvmodectl & MEMMODE_FMIN_MASK);
- 	fstart = (rgvmodectl & MEMMODE_FSTART_MASK) >>
- 		MEMMODE_FSTART_SHIFT;
--	fstart = fmax;
- 
- 	vstart = (I915_READ(PXVFREQ_BASE + (fstart * 4)) & PXVFREQ_PX_MASK) >>
- 		PXVFREQ_PX_SHIFT;
- 
--	dev_priv->fmax = fstart; /* IPS callback will increase this */
-+	dev_priv->fmax = fmax; /* IPS callback will increase this */
- 	dev_priv->fstart = fstart;
- 
--	dev_priv->max_delay = fmax;
-+	dev_priv->max_delay = fstart;
- 	dev_priv->min_delay = fmin;
- 	dev_priv->cur_delay = fstart;
- 
--	DRM_DEBUG_DRIVER("fmax: %d, fmin: %d, fstart: %d\n", fmax, fmin,
--			 fstart);
-+	DRM_DEBUG_DRIVER("fmax: %d, fmin: %d, fstart: %d\n",
-+			 fmax, fmin, fstart);
- 
- 	I915_WRITE(MEMINTREN, MEMINT_CX_SUPR_EN | MEMINT_EVAL_CHG_EN);
- 
-@@ -5529,7 +5619,7 @@ void ironlake_enable_drps(struct drm_device *dev)
- 	rgvmodectl |= MEMMODE_SWMODE_EN;
- 	I915_WRITE(MEMMODECTL, rgvmodectl);
- 
--	if (wait_for((I915_READ(MEMSWCTL) & MEMCTL_CMD_STS) == 0, 1, 0))
-+	if (wait_for((I915_READ(MEMSWCTL) & MEMCTL_CMD_STS) == 0, 10))
- 		DRM_ERROR("stuck trying to change perf mode\n");
- 	msleep(1);
- 
-@@ -5660,7 +5750,7 @@ void intel_init_clock_gating(struct drm_device *dev)
- 	if (HAS_PCH_SPLIT(dev)) {
- 		uint32_t dspclk_gate = VRHUNIT_CLOCK_GATE_DISABLE;
- 
--		if (IS_IRONLAKE(dev)) {
-+		if (IS_GEN5(dev)) {
- 			/* Required for FBC */
- 			dspclk_gate |= DPFDUNIT_CLOCK_GATE_DISABLE;
- 			/* Required for CxSR */
-@@ -5674,13 +5764,20 @@ void intel_init_clock_gating(struct drm_device *dev)
- 		I915_WRITE(PCH_DSPCLK_GATE_D, dspclk_gate);
- 
- 		/*
-+		 * On Ibex Peak and Cougar Point, we need to disable clock
-+		 * gating for the panel power sequencer or it will fail to
-+		 * start up when no ports are active.
-+		 */
-+		I915_WRITE(SOUTH_DSPCLK_GATE_D, PCH_DPLSUNIT_CLOCK_GATE_DISABLE);
-+
-+		/*
- 		 * According to the spec the following bits should be set in
- 		 * order to enable memory self-refresh
- 		 * The bit 22/21 of 0x42004
- 		 * The bit 5 of 0x42020
- 		 * The bit 15 of 0x45000
- 		 */
--		if (IS_IRONLAKE(dev)) {
-+		if (IS_GEN5(dev)) {
- 			I915_WRITE(ILK_DISPLAY_CHICKEN2,
- 					(I915_READ(ILK_DISPLAY_CHICKEN2) |
- 					ILK_DPARB_GATE | ILK_VSDPFD_FULL));
-@@ -5728,20 +5825,20 @@ void intel_init_clock_gating(struct drm_device *dev)
- 		if (IS_GM45(dev))
- 			dspclk_gate |= DSSUNIT_CLOCK_GATE_DISABLE;
- 		I915_WRITE(DSPCLK_GATE_D, dspclk_gate);
--	} else if (IS_I965GM(dev)) {
-+	} else if (IS_CRESTLINE(dev)) {
- 		I915_WRITE(RENCLK_GATE_D1, I965_RCC_CLOCK_GATE_DISABLE);
- 		I915_WRITE(RENCLK_GATE_D2, 0);
- 		I915_WRITE(DSPCLK_GATE_D, 0);
- 		I915_WRITE(RAMCLK_GATE_D, 0);
- 		I915_WRITE16(DEUC, 0);
--	} else if (IS_I965G(dev)) {
-+	} else if (IS_BROADWATER(dev)) {
- 		I915_WRITE(RENCLK_GATE_D1, I965_RCZ_CLOCK_GATE_DISABLE |
- 		       I965_RCC_CLOCK_GATE_DISABLE |
- 		       I965_RCPB_CLOCK_GATE_DISABLE |
- 		       I965_ISC_CLOCK_GATE_DISABLE |
- 		       I965_FBC_CLOCK_GATE_DISABLE);
- 		I915_WRITE(RENCLK_GATE_D2, 0);
--	} else if (IS_I9XX(dev)) {
-+	} else if (IS_GEN3(dev)) {
- 		u32 dstate = I915_READ(D_STATE);
- 
- 		dstate |= DSTATE_PLL_D3_OFF | DSTATE_GFX_CLOCK_GATING |
-@@ -5823,7 +5920,7 @@ static void intel_init_display(struct drm_device *dev)
- 			dev_priv->display.fbc_enabled = g4x_fbc_enabled;
- 			dev_priv->display.enable_fbc = g4x_enable_fbc;
- 			dev_priv->display.disable_fbc = g4x_disable_fbc;
--		} else if (IS_I965GM(dev)) {
-+		} else if (IS_CRESTLINE(dev)) {
- 			dev_priv->display.fbc_enabled = i8xx_fbc_enabled;
- 			dev_priv->display.enable_fbc = i8xx_enable_fbc;
- 			dev_priv->display.disable_fbc = i8xx_disable_fbc;
-@@ -5856,7 +5953,7 @@ static void intel_init_display(struct drm_device *dev)
- 
- 	/* For FIFO watermark updates */
- 	if (HAS_PCH_SPLIT(dev)) {
--		if (IS_IRONLAKE(dev)) {
-+		if (IS_GEN5(dev)) {
- 			if (I915_READ(MLTR_ILK) & ILK_SRLT_MASK)
- 				dev_priv->display.update_wm = ironlake_update_wm;
- 			else {
-@@ -5883,9 +5980,9 @@ static void intel_init_display(struct drm_device *dev)
- 			dev_priv->display.update_wm = pineview_update_wm;
- 	} else if (IS_G4X(dev))
- 		dev_priv->display.update_wm = g4x_update_wm;
--	else if (IS_I965G(dev))
-+	else if (IS_GEN4(dev))
- 		dev_priv->display.update_wm = i965_update_wm;
--	else if (IS_I9XX(dev)) {
-+	else if (IS_GEN3(dev)) {
- 		dev_priv->display.update_wm = i9xx_update_wm;
- 		dev_priv->display.get_fifo_size = i9xx_get_fifo_size;
- 	} else if (IS_I85X(dev)) {
-@@ -5999,24 +6096,24 @@ void intel_modeset_init(struct drm_device *dev)
- 
- 	intel_init_display(dev);
- 
--	if (IS_I965G(dev)) {
--		dev->mode_config.max_width = 8192;
--		dev->mode_config.max_height = 8192;
--	} else if (IS_I9XX(dev)) {
-+	if (IS_GEN2(dev)) {
-+		dev->mode_config.max_width = 2048;
-+		dev->mode_config.max_height = 2048;
-+	} else if (IS_GEN3(dev)) {
- 		dev->mode_config.max_width = 4096;
- 		dev->mode_config.max_height = 4096;
- 	} else {
--		dev->mode_config.max_width = 2048;
--		dev->mode_config.max_height = 2048;
-+		dev->mode_config.max_width = 8192;
-+		dev->mode_config.max_height = 8192;
- 	}
- 
- 	/* set memory base */
--	if (IS_I9XX(dev))
--		dev->mode_config.fb_base = pci_resource_start(dev->pdev, 2);
--	else
-+	if (IS_GEN2(dev))
- 		dev->mode_config.fb_base = pci_resource_start(dev->pdev, 0);
-+	else
-+		dev->mode_config.fb_base = pci_resource_start(dev->pdev, 2);
- 
--	if (IS_MOBILE(dev) || IS_I9XX(dev))
-+	if (IS_MOBILE(dev) || !IS_GEN2(dev))
- 		dev_priv->num_pipe = 2;
- 	else
- 		dev_priv->num_pipe = 1;
-@@ -6052,10 +6149,11 @@ void intel_modeset_cleanup(struct drm_device *dev)
- 	struct drm_crtc *crtc;
- 	struct intel_crtc *intel_crtc;
- 
-+	drm_kms_helper_poll_fini(dev);
- 	mutex_lock(&dev->struct_mutex);
- 
--	drm_kms_helper_poll_fini(dev);
--	intel_fbdev_fini(dev);
-+	intel_unregister_dsm_handler();
-+
- 
- 	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
- 		/* Skip inactive CRTCs */
-@@ -6063,12 +6161,9 @@ void intel_modeset_cleanup(struct drm_device *dev)
- 			continue;
- 
- 		intel_crtc = to_intel_crtc(crtc);
--		intel_increase_pllclock(crtc, false);
--		del_timer_sync(&intel_crtc->idle_timer);
-+		intel_increase_pllclock(crtc);
- 	}
- 
--	del_timer_sync(&dev_priv->idle_timer);
--
- 	if (dev_priv->display.disable_fbc)
- 		dev_priv->display.disable_fbc(dev);
- 
-@@ -6097,33 +6192,36 @@ void intel_modeset_cleanup(struct drm_device *dev)
- 
- 	mutex_unlock(&dev->struct_mutex);
- 
-+	/* Disable the irq before mode object teardown, for the irq might
-+	 * enqueue unpin/hotplug work. */
-+	drm_irq_uninstall(dev);
-+	cancel_work_sync(&dev_priv->hotplug_work);
-+
-+	/* Shut off idle work before the crtcs get freed. */
-+	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
-+		intel_crtc = to_intel_crtc(crtc);
-+		del_timer_sync(&intel_crtc->idle_timer);
-+	}
-+	del_timer_sync(&dev_priv->idle_timer);
-+	cancel_work_sync(&dev_priv->idle_work);
-+
- 	drm_mode_config_cleanup(dev);
- }
- 
--
- /*
-  * Return which encoder is currently attached for connector.
-  */
--struct drm_encoder *intel_attached_encoder (struct drm_connector *connector)
-+struct drm_encoder *intel_best_encoder(struct drm_connector *connector)
- {
--	struct drm_mode_object *obj;
--	struct drm_encoder *encoder;
--	int i;
--
--	for (i = 0; i < DRM_CONNECTOR_MAX_ENCODER; i++) {
--		if (connector->encoder_ids[i] == 0)
--			break;
--
--		obj = drm_mode_object_find(connector->dev,
--                                           connector->encoder_ids[i],
--                                           DRM_MODE_OBJECT_ENCODER);
--		if (!obj)
--			continue;
-+	return &intel_attached_encoder(connector)->base;
-+}
- 
--		encoder = obj_to_encoder(obj);
--		return encoder;
--	}
--	return NULL;
-+void intel_connector_attach_encoder(struct intel_connector *connector,
-+				    struct intel_encoder *encoder)
-+{
-+	connector->encoder = encoder;
-+	drm_mode_connector_attach_encoder(&connector->base,
-+					  &encoder->base);
- }
- 
- /*
-diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c
-index 9ab8708..2d3dee9 100644
---- a/drivers/gpu/drm/i915/intel_dp.c
-+++ b/drivers/gpu/drm/i915/intel_dp.c
-@@ -42,15 +42,13 @@
- 
- #define DP_LINK_CONFIGURATION_SIZE	9
- 
--#define IS_eDP(i) ((i)->base.type == INTEL_OUTPUT_EDP)
--#define IS_PCH_eDP(i) ((i)->is_pch_edp)
--
- struct intel_dp {
- 	struct intel_encoder base;
- 	uint32_t output_reg;
- 	uint32_t DP;
- 	uint8_t  link_configuration[DP_LINK_CONFIGURATION_SIZE];
- 	bool has_audio;
-+	int force_audio;
- 	int dpms_mode;
- 	uint8_t link_bw;
- 	uint8_t lane_count;
-@@ -58,14 +56,69 @@ struct intel_dp {
- 	struct i2c_adapter adapter;
- 	struct i2c_algo_dp_aux_data algo;
- 	bool is_pch_edp;
-+	uint8_t	train_set[4];
-+	uint8_t link_status[DP_LINK_STATUS_SIZE];
-+
-+	struct drm_property *force_audio_property;
- };
- 
-+/**
-+ * is_edp - is the given port attached to an eDP panel (either CPU or PCH)
-+ * @intel_dp: DP struct
-+ *
-+ * If a CPU or PCH DP output is attached to an eDP panel, this function
-+ * will return true, and false otherwise.
-+ */
-+static bool is_edp(struct intel_dp *intel_dp)
-+{
-+	return intel_dp->base.type == INTEL_OUTPUT_EDP;
-+}
-+
-+/**
-+ * is_pch_edp - is the port on the PCH and attached to an eDP panel?
-+ * @intel_dp: DP struct
-+ *
-+ * Returns true if the given DP struct corresponds to a PCH DP port attached
-+ * to an eDP panel, false otherwise.  Helpful for determining whether we
-+ * may need FDI resources for a given DP output or not.
-+ */
-+static bool is_pch_edp(struct intel_dp *intel_dp)
-+{
-+	return intel_dp->is_pch_edp;
-+}
-+
- static struct intel_dp *enc_to_intel_dp(struct drm_encoder *encoder)
- {
--	return container_of(enc_to_intel_encoder(encoder), struct intel_dp, base);
-+	return container_of(encoder, struct intel_dp, base.base);
-+}
-+
-+static struct intel_dp *intel_attached_dp(struct drm_connector *connector)
-+{
-+	return container_of(intel_attached_encoder(connector),
-+			    struct intel_dp, base);
-+}
-+
-+/**
-+ * intel_encoder_is_pch_edp - is the given encoder a PCH attached eDP?
-+ * @encoder: DRM encoder
-+ *
-+ * Return true if @encoder corresponds to a PCH attached eDP panel.  Needed
-+ * by intel_display.c.
-+ */
-+bool intel_encoder_is_pch_edp(struct drm_encoder *encoder)
-+{
-+	struct intel_dp *intel_dp;
-+
-+	if (!encoder)
-+		return false;
-+
-+	intel_dp = enc_to_intel_dp(encoder);
-+
-+	return is_pch_edp(intel_dp);
- }
- 
--static void intel_dp_link_train(struct intel_dp *intel_dp);
-+static void intel_dp_start_link_train(struct intel_dp *intel_dp);
-+static void intel_dp_complete_link_train(struct intel_dp *intel_dp);
- static void intel_dp_link_down(struct intel_dp *intel_dp);
- 
- void
-@@ -129,8 +182,8 @@ intel_dp_link_required(struct drm_device *dev, struct intel_dp *intel_dp, int pi
- {
- 	struct drm_i915_private *dev_priv = dev->dev_private;
- 
--	if (IS_eDP(intel_dp) || IS_PCH_eDP(intel_dp))
--		return (pixel_clock * dev_priv->edp_bpp) / 8;
-+	if (is_edp(intel_dp))
-+		return (pixel_clock * dev_priv->edp.bpp + 7) / 8;
- 	else
- 		return pixel_clock * 3;
- }
-@@ -145,15 +198,13 @@ static int
- intel_dp_mode_valid(struct drm_connector *connector,
- 		    struct drm_display_mode *mode)
- {
--	struct drm_encoder *encoder = intel_attached_encoder(connector);
--	struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
-+	struct intel_dp *intel_dp = intel_attached_dp(connector);
- 	struct drm_device *dev = connector->dev;
- 	struct drm_i915_private *dev_priv = dev->dev_private;
- 	int max_link_clock = intel_dp_link_clock(intel_dp_max_link_bw(intel_dp));
- 	int max_lanes = intel_dp_max_lane_count(intel_dp);
- 
--	if ((IS_eDP(intel_dp) || IS_PCH_eDP(intel_dp)) &&
--	    dev_priv->panel_fixed_mode) {
-+	if (is_edp(intel_dp) && dev_priv->panel_fixed_mode) {
- 		if (mode->hdisplay > dev_priv->panel_fixed_mode->hdisplay)
- 			return MODE_PANEL;
- 
-@@ -163,7 +214,7 @@ intel_dp_mode_valid(struct drm_connector *connector,
- 
- 	/* only refuse the mode on non eDP since we have seen some weird eDP panels
- 	   which are outside spec tolerances but somehow work by magic */
--	if (!IS_eDP(intel_dp) &&
-+	if (!is_edp(intel_dp) &&
- 	    (intel_dp_link_required(connector->dev, intel_dp, mode->clock)
- 	     > intel_dp_max_data_rate(max_link_clock, max_lanes)))
- 		return MODE_CLOCK_HIGH;
-@@ -233,7 +284,7 @@ intel_dp_aux_ch(struct intel_dp *intel_dp,
- 		uint8_t *recv, int recv_size)
- {
- 	uint32_t output_reg = intel_dp->output_reg;
--	struct drm_device *dev = intel_dp->base.enc.dev;
-+	struct drm_device *dev = intel_dp->base.base.dev;
- 	struct drm_i915_private *dev_priv = dev->dev_private;
- 	uint32_t ch_ctl = output_reg + 0x10;
- 	uint32_t ch_data = ch_ctl + 4;
-@@ -246,8 +297,11 @@ intel_dp_aux_ch(struct intel_dp *intel_dp,
- 	/* The clock divider is based off the hrawclk,
- 	 * and would like to run at 2MHz. So, take the
- 	 * hrawclk value and divide by 2 and use that
-+	 *
-+	 * Note that PCH attached eDP panels should use a 125MHz input
-+	 * clock divider.
- 	 */
--	if (IS_eDP(intel_dp)) {
-+	if (is_edp(intel_dp) && !is_pch_edp(intel_dp)) {
- 		if (IS_GEN6(dev))
- 			aux_clock_divider = 200; /* SNB eDP input clock at 400Mhz */
- 		else
-@@ -519,8 +573,7 @@ intel_dp_mode_fixup(struct drm_encoder *encoder, struct drm_display_mode *mode,
- 	int max_clock = intel_dp_max_link_bw(intel_dp) == DP_LINK_BW_2_7 ? 1 : 0;
- 	static int bws[2] = { DP_LINK_BW_1_62, DP_LINK_BW_2_7 };
- 
--	if ((IS_eDP(intel_dp) || IS_PCH_eDP(intel_dp)) &&
--	    dev_priv->panel_fixed_mode) {
-+	if (is_edp(intel_dp) && dev_priv->panel_fixed_mode) {
- 		intel_fixed_panel_mode(dev_priv->panel_fixed_mode, adjusted_mode);
- 		intel_pch_panel_fitting(dev, DRM_MODE_SCALE_FULLSCREEN,
- 					mode, adjusted_mode);
-@@ -531,6 +584,17 @@ intel_dp_mode_fixup(struct drm_encoder *encoder, struct drm_display_mode *mode,
- 		mode->clock = dev_priv->panel_fixed_mode->clock;
- 	}
- 
-+	/* Just use VBT values for eDP */
-+	if (is_edp(intel_dp)) {
-+		intel_dp->lane_count = dev_priv->edp.lanes;
-+		intel_dp->link_bw = dev_priv->edp.rate;
-+		adjusted_mode->clock = intel_dp_link_clock(intel_dp->link_bw);
-+		DRM_DEBUG_KMS("eDP link bw %02x lane count %d clock %d\n",
-+			      intel_dp->link_bw, intel_dp->lane_count,
-+			      adjusted_mode->clock);
-+		return true;
-+	}
-+
- 	for (lane_count = 1; lane_count <= max_lane_count; lane_count <<= 1) {
- 		for (clock = 0; clock <= max_clock; clock++) {
- 			int link_avail = intel_dp_max_data_rate(intel_dp_link_clock(bws[clock]), lane_count);
-@@ -549,19 +613,6 @@ intel_dp_mode_fixup(struct drm_encoder *encoder, struct drm_display_mode *mode,
- 		}
- 	}
- 
--	if (IS_eDP(intel_dp) || IS_PCH_eDP(intel_dp)) {
--		/* okay we failed just pick the highest */
--		intel_dp->lane_count = max_lane_count;
--		intel_dp->link_bw = bws[max_clock];
--		adjusted_mode->clock = intel_dp_link_clock(intel_dp->link_bw);
--		DRM_DEBUG_KMS("Force picking display port link bw %02x lane "
--			      "count %d clock %d\n",
--			      intel_dp->link_bw, intel_dp->lane_count,
--			      adjusted_mode->clock);
--
--		return true;
--	}
--
- 	return false;
- }
- 
-@@ -598,25 +649,6 @@ intel_dp_compute_m_n(int bpp,
- 	intel_reduce_ratio(&m_n->link_m, &m_n->link_n);
- }
- 
--bool intel_pch_has_edp(struct drm_crtc *crtc)
--{
--	struct drm_device *dev = crtc->dev;
--	struct drm_mode_config *mode_config = &dev->mode_config;
--	struct drm_encoder *encoder;
--
--	list_for_each_entry(encoder, &mode_config->encoder_list, head) {
--		struct intel_dp *intel_dp;
--
--		if (encoder->crtc != crtc)
--			continue;
--
--		intel_dp = enc_to_intel_dp(encoder);
--		if (intel_dp->base.type == INTEL_OUTPUT_DISPLAYPORT)
--			return intel_dp->is_pch_edp;
--	}
--	return false;
--}
--
- void
- intel_dp_set_m_n(struct drm_crtc *crtc, struct drm_display_mode *mode,
- 		 struct drm_display_mode *adjusted_mode)
-@@ -641,8 +673,10 @@ intel_dp_set_m_n(struct drm_crtc *crtc, struct drm_display_mode *mode,
- 		intel_dp = enc_to_intel_dp(encoder);
- 		if (intel_dp->base.type == INTEL_OUTPUT_DISPLAYPORT) {
- 			lane_count = intel_dp->lane_count;
--			if (IS_PCH_eDP(intel_dp))
--				bpp = dev_priv->edp_bpp;
-+			break;
-+		} else if (is_edp(intel_dp)) {
-+			lane_count = dev_priv->edp.lanes;
-+			bpp = dev_priv->edp.bpp;
- 			break;
- 		}
- 	}
-@@ -698,7 +732,7 @@ intel_dp_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode,
- {
- 	struct drm_device *dev = encoder->dev;
- 	struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
--	struct drm_crtc *crtc = intel_dp->base.enc.crtc;
-+	struct drm_crtc *crtc = intel_dp->base.base.crtc;
- 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
- 
- 	intel_dp->DP = (DP_VOLTAGE_0_4 |
-@@ -709,7 +743,7 @@ intel_dp_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode,
- 	if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC)
- 		intel_dp->DP |= DP_SYNC_VS_HIGH;
- 
--	if (HAS_PCH_CPT(dev) && !IS_eDP(intel_dp))
-+	if (HAS_PCH_CPT(dev) && !is_edp(intel_dp))
- 		intel_dp->DP |= DP_LINK_TRAIN_OFF_CPT;
- 	else
- 		intel_dp->DP |= DP_LINK_TRAIN_OFF;
-@@ -744,7 +778,7 @@ intel_dp_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode,
- 	if (intel_crtc->pipe == 1 && !HAS_PCH_CPT(dev))
- 		intel_dp->DP |= DP_PIPEB_SELECT;
- 
--	if (IS_eDP(intel_dp)) {
-+	if (is_edp(intel_dp) && !is_pch_edp(intel_dp)) {
- 		/* don't miss out required setting for eDP */
- 		intel_dp->DP |= DP_PLL_ENABLE;
- 		if (adjusted_mode->clock < 200000)
-@@ -754,13 +788,16 @@ intel_dp_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode,
- 	}
- }
- 
--static void ironlake_edp_panel_on (struct drm_device *dev)
-+/* Returns true if the panel was already on when called */
-+static bool ironlake_edp_panel_on (struct intel_dp *intel_dp)
- {
-+	struct drm_device *dev = intel_dp->base.base.dev;
- 	struct drm_i915_private *dev_priv = dev->dev_private;
--	u32 pp;
-+	u32 pp, idle_on = PP_ON | PP_SEQUENCE_STATE_ON_IDLE;
-+	u32 idle_on_mask = PP_ON | PP_SEQUENCE_STATE_MASK;
- 
- 	if (I915_READ(PCH_PP_STATUS) & PP_ON)
--		return;
-+		return true;
- 
- 	pp = I915_READ(PCH_PP_CONTROL);
- 
-@@ -771,21 +808,30 @@ static void ironlake_edp_panel_on (struct drm_device *dev)
- 
- 	pp |= PANEL_UNLOCK_REGS | POWER_TARGET_ON;
- 	I915_WRITE(PCH_PP_CONTROL, pp);
-+	POSTING_READ(PCH_PP_CONTROL);
- 
--	if (wait_for(I915_READ(PCH_PP_STATUS) & PP_ON, 5000, 10))
-+	/* Ouch. We need to wait here for some panels, like Dell e6510
-+	 * https://bugs.freedesktop.org/show_bug.cgi?id=29278i
-+	 */
-+	msleep(300);
-+
-+	if (wait_for((I915_READ(PCH_PP_STATUS) & idle_on_mask) == idle_on,
-+		     5000))
- 		DRM_ERROR("panel on wait timed out: 0x%08x\n",
- 			  I915_READ(PCH_PP_STATUS));
- 
--	pp &= ~(PANEL_UNLOCK_REGS | EDP_FORCE_VDD);
- 	pp |= PANEL_POWER_RESET; /* restore panel reset bit */
- 	I915_WRITE(PCH_PP_CONTROL, pp);
- 	POSTING_READ(PCH_PP_CONTROL);
-+
-+	return false;
- }
- 
- static void ironlake_edp_panel_off (struct drm_device *dev)
- {
- 	struct drm_i915_private *dev_priv = dev->dev_private;
--	u32 pp;
-+	u32 pp, idle_off_mask = PP_ON | PP_SEQUENCE_MASK |
-+		PP_CYCLE_DELAY_ACTIVE | PP_SEQUENCE_STATE_MASK;
- 
- 	pp = I915_READ(PCH_PP_CONTROL);
- 
-@@ -796,15 +842,20 @@ static void ironlake_edp_panel_off (struct drm_device *dev)
- 
- 	pp &= ~POWER_TARGET_ON;
- 	I915_WRITE(PCH_PP_CONTROL, pp);
-+	POSTING_READ(PCH_PP_CONTROL);
- 
--	if (wait_for((I915_READ(PCH_PP_STATUS) & PP_ON) == 0, 5000, 10))
-+	if (wait_for((I915_READ(PCH_PP_STATUS) & idle_off_mask) == 0, 5000))
- 		DRM_ERROR("panel off wait timed out: 0x%08x\n",
- 			  I915_READ(PCH_PP_STATUS));
- 
--	/* Make sure VDD is enabled so DP AUX will work */
--	pp |= EDP_FORCE_VDD | PANEL_POWER_RESET; /* restore panel reset bit */
-+	pp |= PANEL_POWER_RESET; /* restore panel reset bit */
- 	I915_WRITE(PCH_PP_CONTROL, pp);
- 	POSTING_READ(PCH_PP_CONTROL);
-+
-+	/* Ouch. We need to wait here for some panels, like Dell e6510
-+	 * https://bugs.freedesktop.org/show_bug.cgi?id=29278i
-+	 */
-+	msleep(300);
- }
- 
- static void ironlake_edp_backlight_on (struct drm_device *dev)
-@@ -813,6 +864,13 @@ static void ironlake_edp_backlight_on (struct drm_device *dev)
- 	u32 pp;
- 
- 	DRM_DEBUG_KMS("\n");
-+	/*
-+	 * If we enable the backlight right away following a panel power
-+	 * on, we may see slight flicker as the panel syncs with the eDP
-+	 * link.  So delay a bit to make sure the image is solid before
-+	 * allowing it to appear.
-+	 */
-+	msleep(300);
- 	pp = I915_READ(PCH_PP_CONTROL);
- 	pp |= EDP_BLC_ENABLE;
- 	I915_WRITE(PCH_PP_CONTROL, pp);
-@@ -837,8 +895,10 @@ static void ironlake_edp_pll_on(struct drm_encoder *encoder)
- 
- 	DRM_DEBUG_KMS("\n");
- 	dpa_ctl = I915_READ(DP_A);
--	dpa_ctl &= ~DP_PLL_ENABLE;
-+	dpa_ctl |= DP_PLL_ENABLE;
- 	I915_WRITE(DP_A, dpa_ctl);
-+	POSTING_READ(DP_A);
-+	udelay(200);
- }
- 
- static void ironlake_edp_pll_off(struct drm_encoder *encoder)
-@@ -848,8 +908,9 @@ static void ironlake_edp_pll_off(struct drm_encoder *encoder)
- 	u32 dpa_ctl;
- 
- 	dpa_ctl = I915_READ(DP_A);
--	dpa_ctl |= DP_PLL_ENABLE;
-+	dpa_ctl &= ~DP_PLL_ENABLE;
- 	I915_WRITE(DP_A, dpa_ctl);
-+	POSTING_READ(DP_A);
- 	udelay(200);
- }
- 
-@@ -857,29 +918,32 @@ static void intel_dp_prepare(struct drm_encoder *encoder)
- {
- 	struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
- 	struct drm_device *dev = encoder->dev;
--	struct drm_i915_private *dev_priv = dev->dev_private;
--	uint32_t dp_reg = I915_READ(intel_dp->output_reg);
- 
--	if (IS_eDP(intel_dp)) {
-+	if (is_edp(intel_dp)) {
- 		ironlake_edp_backlight_off(dev);
--		ironlake_edp_panel_on(dev);
--		ironlake_edp_pll_on(encoder);
-+		ironlake_edp_panel_off(dev);
-+		ironlake_edp_panel_on(intel_dp);
-+		if (!is_pch_edp(intel_dp))
-+			ironlake_edp_pll_on(encoder);
-+		else
-+			ironlake_edp_pll_off(encoder);
- 	}
--	if (dp_reg & DP_PORT_EN)
--		intel_dp_link_down(intel_dp);
-+	intel_dp_link_down(intel_dp);
- }
- 
- static void intel_dp_commit(struct drm_encoder *encoder)
- {
- 	struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
- 	struct drm_device *dev = encoder->dev;
--	struct drm_i915_private *dev_priv = dev->dev_private;
--	uint32_t dp_reg = I915_READ(intel_dp->output_reg);
- 
--	if (!(dp_reg & DP_PORT_EN)) {
--		intel_dp_link_train(intel_dp);
--	}
--	if (IS_eDP(intel_dp) || IS_PCH_eDP(intel_dp))
-+	intel_dp_start_link_train(intel_dp);
-+
-+	if (is_edp(intel_dp))
-+		ironlake_edp_panel_on(intel_dp);
-+
-+	intel_dp_complete_link_train(intel_dp);
-+
-+	if (is_edp(intel_dp))
- 		ironlake_edp_backlight_on(dev);
- }
- 
-@@ -892,22 +956,22 @@ intel_dp_dpms(struct drm_encoder *encoder, int mode)
- 	uint32_t dp_reg = I915_READ(intel_dp->output_reg);
- 
- 	if (mode != DRM_MODE_DPMS_ON) {
--		if (IS_eDP(intel_dp) || IS_PCH_eDP(intel_dp)) {
-+		if (is_edp(intel_dp))
- 			ironlake_edp_backlight_off(dev);
-+		intel_dp_link_down(intel_dp);
-+		if (is_edp(intel_dp))
- 			ironlake_edp_panel_off(dev);
--		}
--		if (dp_reg & DP_PORT_EN)
--			intel_dp_link_down(intel_dp);
--		if (IS_eDP(intel_dp) || IS_PCH_eDP(intel_dp))
-+		if (is_edp(intel_dp) && !is_pch_edp(intel_dp))
- 			ironlake_edp_pll_off(encoder);
- 	} else {
-+		if (is_edp(intel_dp))
-+			ironlake_edp_panel_on(intel_dp);
- 		if (!(dp_reg & DP_PORT_EN)) {
--			if (IS_eDP(intel_dp) || IS_PCH_eDP(intel_dp))
--				ironlake_edp_panel_on(dev);
--			intel_dp_link_train(intel_dp);
--			if (IS_eDP(intel_dp) || IS_PCH_eDP(intel_dp))
--				ironlake_edp_backlight_on(dev);
-+			intel_dp_start_link_train(intel_dp);
-+			intel_dp_complete_link_train(intel_dp);
- 		}
-+		if (is_edp(intel_dp))
-+			ironlake_edp_backlight_on(dev);
- 	}
- 	intel_dp->dpms_mode = mode;
- }
-@@ -917,14 +981,13 @@ intel_dp_dpms(struct drm_encoder *encoder, int mode)
-  * link status information
-  */
- static bool
--intel_dp_get_link_status(struct intel_dp *intel_dp,
--			 uint8_t link_status[DP_LINK_STATUS_SIZE])
-+intel_dp_get_link_status(struct intel_dp *intel_dp)
- {
- 	int ret;
- 
- 	ret = intel_dp_aux_native_read(intel_dp,
- 				       DP_LANE0_1_STATUS,
--				       link_status, DP_LINK_STATUS_SIZE);
-+				       intel_dp->link_status, DP_LINK_STATUS_SIZE);
- 	if (ret != DP_LINK_STATUS_SIZE)
- 		return false;
- 	return true;
-@@ -999,18 +1062,15 @@ intel_dp_pre_emphasis_max(uint8_t voltage_swing)
- }
- 
- static void
--intel_get_adjust_train(struct intel_dp *intel_dp,
--		       uint8_t link_status[DP_LINK_STATUS_SIZE],
--		       int lane_count,
--		       uint8_t train_set[4])
-+intel_get_adjust_train(struct intel_dp *intel_dp)
- {
- 	uint8_t v = 0;
- 	uint8_t p = 0;
- 	int lane;
- 
--	for (lane = 0; lane < lane_count; lane++) {
--		uint8_t this_v = intel_get_adjust_request_voltage(link_status, lane);
--		uint8_t this_p = intel_get_adjust_request_pre_emphasis(link_status, lane);
-+	for (lane = 0; lane < intel_dp->lane_count; lane++) {
-+		uint8_t this_v = intel_get_adjust_request_voltage(intel_dp->link_status, lane);
-+		uint8_t this_p = intel_get_adjust_request_pre_emphasis(intel_dp->link_status, lane);
- 
- 		if (this_v > v)
- 			v = this_v;
-@@ -1025,15 +1085,25 @@ intel_get_adjust_train(struct intel_dp *intel_dp,
- 		p = intel_dp_pre_emphasis_max(v) | DP_TRAIN_MAX_PRE_EMPHASIS_REACHED;
- 
- 	for (lane = 0; lane < 4; lane++)
--		train_set[lane] = v | p;
-+		intel_dp->train_set[lane] = v | p;
- }
- 
- static uint32_t
--intel_dp_signal_levels(uint8_t train_set, int lane_count)
-+intel_dp_signal_levels(struct intel_dp *intel_dp)
- {
--	uint32_t	signal_levels = 0;
-+	struct drm_device *dev = intel_dp->base.base.dev;
-+	struct drm_i915_private *dev_priv = dev->dev_private;
-+	uint32_t signal_levels = 0;
-+	u8 train_set = intel_dp->train_set[0];
-+	u32 vswing = train_set & DP_TRAIN_VOLTAGE_SWING_MASK;
-+	u32 preemphasis = train_set & DP_TRAIN_PRE_EMPHASIS_MASK;
-+
-+	if (is_edp(intel_dp)) {
-+		vswing = dev_priv->edp.vswing;
-+		preemphasis = dev_priv->edp.preemphasis;
-+	}
- 
--	switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
-+	switch (vswing) {
- 	case DP_TRAIN_VOLTAGE_SWING_400:
- 	default:
- 		signal_levels |= DP_VOLTAGE_0_4;
-@@ -1048,7 +1118,7 @@ intel_dp_signal_levels(uint8_t train_set, int lane_count)
- 		signal_levels |= DP_VOLTAGE_1_2;
- 		break;
- 	}
--	switch (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) {
-+	switch (preemphasis) {
- 	case DP_TRAIN_PRE_EMPHASIS_0:
- 	default:
- 		signal_levels |= DP_PRE_EMPHASIS_0;
-@@ -1116,18 +1186,18 @@ intel_clock_recovery_ok(uint8_t link_status[DP_LINK_STATUS_SIZE], int lane_count
- 			 DP_LANE_CHANNEL_EQ_DONE|\
- 			 DP_LANE_SYMBOL_LOCKED)
- static bool
--intel_channel_eq_ok(uint8_t link_status[DP_LINK_STATUS_SIZE], int lane_count)
-+intel_channel_eq_ok(struct intel_dp *intel_dp)
- {
- 	uint8_t lane_align;
- 	uint8_t lane_status;
- 	int lane;
- 
--	lane_align = intel_dp_link_status(link_status,
-+	lane_align = intel_dp_link_status(intel_dp->link_status,
- 					  DP_LANE_ALIGN_STATUS_UPDATED);
- 	if ((lane_align & DP_INTERLANE_ALIGN_DONE) == 0)
- 		return false;
--	for (lane = 0; lane < lane_count; lane++) {
--		lane_status = intel_get_lane_status(link_status, lane);
-+	for (lane = 0; lane < intel_dp->lane_count; lane++) {
-+		lane_status = intel_get_lane_status(intel_dp->link_status, lane);
- 		if ((lane_status & CHANNEL_EQ_BITS) != CHANNEL_EQ_BITS)
- 			return false;
- 	}
-@@ -1135,159 +1205,194 @@ intel_channel_eq_ok(uint8_t link_status[DP_LINK_STATUS_SIZE], int lane_count)
- }
- 
- static bool
-+intel_dp_aux_handshake_required(struct intel_dp *intel_dp)
-+{
-+	struct drm_device *dev = intel_dp->base.base.dev;
-+	struct drm_i915_private *dev_priv = dev->dev_private;
-+
-+	if (is_edp(intel_dp) && dev_priv->no_aux_handshake)
-+		return false;
-+
-+	return true;
-+}
-+
-+static bool
- intel_dp_set_link_train(struct intel_dp *intel_dp,
- 			uint32_t dp_reg_value,
--			uint8_t dp_train_pat,
--			uint8_t train_set[4])
-+			uint8_t dp_train_pat)
- {
--	struct drm_device *dev = intel_dp->base.enc.dev;
-+	struct drm_device *dev = intel_dp->base.base.dev;
- 	struct drm_i915_private *dev_priv = dev->dev_private;
- 	int ret;
- 
- 	I915_WRITE(intel_dp->output_reg, dp_reg_value);
- 	POSTING_READ(intel_dp->output_reg);
- 
-+	if (!intel_dp_aux_handshake_required(intel_dp))
-+		return true;
-+
- 	intel_dp_aux_native_write_1(intel_dp,
- 				    DP_TRAINING_PATTERN_SET,
- 				    dp_train_pat);
- 
- 	ret = intel_dp_aux_native_write(intel_dp,
--					DP_TRAINING_LANE0_SET, train_set, 4);
-+					DP_TRAINING_LANE0_SET,
-+					intel_dp->train_set, 4);
- 	if (ret != 4)
- 		return false;
- 
- 	return true;
- }
- 
-+/* Enable corresponding port and start training pattern 1 */
- static void
--intel_dp_link_train(struct intel_dp *intel_dp)
-+intel_dp_start_link_train(struct intel_dp *intel_dp)
- {
--	struct drm_device *dev = intel_dp->base.enc.dev;
-+	struct drm_device *dev = intel_dp->base.base.dev;
- 	struct drm_i915_private *dev_priv = dev->dev_private;
--	uint8_t	train_set[4];
--	uint8_t link_status[DP_LINK_STATUS_SIZE];
-+	struct intel_crtc *intel_crtc = to_intel_crtc(intel_dp->base.base.crtc);
- 	int i;
- 	uint8_t voltage;
- 	bool clock_recovery = false;
--	bool channel_eq = false;
- 	int tries;
- 	u32 reg;
- 	uint32_t DP = intel_dp->DP;
--	struct intel_crtc *intel_crtc = to_intel_crtc(intel_dp->base.enc.crtc);
- 
- 	/* Enable output, wait for it to become active */
- 	I915_WRITE(intel_dp->output_reg, intel_dp->DP);
- 	POSTING_READ(intel_dp->output_reg);
- 	intel_wait_for_vblank(dev, intel_crtc->pipe);
- 
--	/* Write the link configuration data */
--	intel_dp_aux_native_write(intel_dp, DP_LINK_BW_SET,
--				  intel_dp->link_configuration,
--				  DP_LINK_CONFIGURATION_SIZE);
-+	if (intel_dp_aux_handshake_required(intel_dp))
-+		/* Write the link configuration data */
-+		intel_dp_aux_native_write(intel_dp, DP_LINK_BW_SET,
-+					  intel_dp->link_configuration,
-+					  DP_LINK_CONFIGURATION_SIZE);
- 
- 	DP |= DP_PORT_EN;
--	if (HAS_PCH_CPT(dev) && !IS_eDP(intel_dp))
-+	if (HAS_PCH_CPT(dev) && !is_edp(intel_dp))
- 		DP &= ~DP_LINK_TRAIN_MASK_CPT;
- 	else
- 		DP &= ~DP_LINK_TRAIN_MASK;
--	memset(train_set, 0, 4);
-+	memset(intel_dp->train_set, 0, 4);
- 	voltage = 0xff;
- 	tries = 0;
- 	clock_recovery = false;
- 	for (;;) {
--		/* Use train_set[0] to set the voltage and pre emphasis values */
-+		/* Use intel_dp->train_set[0] to set the voltage and pre emphasis values */
- 		uint32_t    signal_levels;
--		if (IS_GEN6(dev) && IS_eDP(intel_dp)) {
--			signal_levels = intel_gen6_edp_signal_levels(train_set[0]);
-+		if (IS_GEN6(dev) && is_edp(intel_dp)) {
-+			signal_levels = intel_gen6_edp_signal_levels(intel_dp->train_set[0]);
- 			DP = (DP & ~EDP_LINK_TRAIN_VOL_EMP_MASK_SNB) | signal_levels;
- 		} else {
--			signal_levels = intel_dp_signal_levels(train_set[0], intel_dp->lane_count);
-+			signal_levels = intel_dp_signal_levels(intel_dp);
- 			DP = (DP & ~(DP_VOLTAGE_MASK|DP_PRE_EMPHASIS_MASK)) | signal_levels;
- 		}
- 
--		if (HAS_PCH_CPT(dev) && !IS_eDP(intel_dp))
-+		if (HAS_PCH_CPT(dev) && !is_edp(intel_dp))
- 			reg = DP | DP_LINK_TRAIN_PAT_1_CPT;
- 		else
- 			reg = DP | DP_LINK_TRAIN_PAT_1;
- 
- 		if (!intel_dp_set_link_train(intel_dp, reg,
--					     DP_TRAINING_PATTERN_1, train_set))
-+					     DP_TRAINING_PATTERN_1))
- 			break;
- 		/* Set training pattern 1 */
- 
--		udelay(100);
--		if (!intel_dp_get_link_status(intel_dp, link_status))
-+		udelay(500);
-+		if (intel_dp_aux_handshake_required(intel_dp)) {
- 			break;
-+		} else {
-+			if (!intel_dp_get_link_status(intel_dp))
-+				break;
- 
--		if (intel_clock_recovery_ok(link_status, intel_dp->lane_count)) {
--			clock_recovery = true;
--			break;
--		}
--
--		/* Check to see if we've tried the max voltage */
--		for (i = 0; i < intel_dp->lane_count; i++)
--			if ((train_set[i] & DP_TRAIN_MAX_SWING_REACHED) == 0)
-+			if (intel_clock_recovery_ok(intel_dp->link_status, intel_dp->lane_count)) {
-+				clock_recovery = true;
- 				break;
--		if (i == intel_dp->lane_count)
--			break;
-+			}
- 
--		/* Check to see if we've tried the same voltage 5 times */
--		if ((train_set[0] & DP_TRAIN_VOLTAGE_SWING_MASK) == voltage) {
--			++tries;
--			if (tries == 5)
-+			/* Check to see if we've tried the max voltage */
-+			for (i = 0; i < intel_dp->lane_count; i++)
-+				if ((intel_dp->train_set[i] & DP_TRAIN_MAX_SWING_REACHED) == 0)
-+					break;
-+			if (i == intel_dp->lane_count)
- 				break;
--		} else
--			tries = 0;
--		voltage = train_set[0] & DP_TRAIN_VOLTAGE_SWING_MASK;
- 
--		/* Compute new train_set as requested by target */
--		intel_get_adjust_train(intel_dp, link_status, intel_dp->lane_count, train_set);
-+			/* Check to see if we've tried the same voltage 5 times */
-+			if ((intel_dp->train_set[0] & DP_TRAIN_VOLTAGE_SWING_MASK) == voltage) {
-+				++tries;
-+				if (tries == 5)
-+					break;
-+			} else
-+				tries = 0;
-+			voltage = intel_dp->train_set[0] & DP_TRAIN_VOLTAGE_SWING_MASK;
-+
-+			/* Compute new intel_dp->train_set as requested by target */
-+			intel_get_adjust_train(intel_dp);
-+		}
- 	}
- 
-+	intel_dp->DP = DP;
-+}
-+
-+static void
-+intel_dp_complete_link_train(struct intel_dp *intel_dp)
-+{
-+	struct drm_device *dev = intel_dp->base.base.dev;
-+	struct drm_i915_private *dev_priv = dev->dev_private;
-+	bool channel_eq = false;
-+	int tries;
-+	u32 reg;
-+	uint32_t DP = intel_dp->DP;
-+
- 	/* channel equalization */
- 	tries = 0;
- 	channel_eq = false;
- 	for (;;) {
--		/* Use train_set[0] to set the voltage and pre emphasis values */
-+		/* Use intel_dp->train_set[0] to set the voltage and pre emphasis values */
- 		uint32_t    signal_levels;
- 
--		if (IS_GEN6(dev) && IS_eDP(intel_dp)) {
--			signal_levels = intel_gen6_edp_signal_levels(train_set[0]);
-+		if (IS_GEN6(dev) && is_edp(intel_dp)) {
-+			signal_levels = intel_gen6_edp_signal_levels(intel_dp->train_set[0]);
- 			DP = (DP & ~EDP_LINK_TRAIN_VOL_EMP_MASK_SNB) | signal_levels;
- 		} else {
--			signal_levels = intel_dp_signal_levels(train_set[0], intel_dp->lane_count);
-+			signal_levels = intel_dp_signal_levels(intel_dp);
- 			DP = (DP & ~(DP_VOLTAGE_MASK|DP_PRE_EMPHASIS_MASK)) | signal_levels;
- 		}
- 
--		if (HAS_PCH_CPT(dev) && !IS_eDP(intel_dp))
-+		if (HAS_PCH_CPT(dev) && !is_edp(intel_dp))
- 			reg = DP | DP_LINK_TRAIN_PAT_2_CPT;
- 		else
- 			reg = DP | DP_LINK_TRAIN_PAT_2;
- 
- 		/* channel eq pattern */
- 		if (!intel_dp_set_link_train(intel_dp, reg,
--					     DP_TRAINING_PATTERN_2, train_set))
-+					     DP_TRAINING_PATTERN_2))
- 			break;
- 
--		udelay(400);
--		if (!intel_dp_get_link_status(intel_dp, link_status))
--			break;
-+		udelay(500);
- 
--		if (intel_channel_eq_ok(link_status, intel_dp->lane_count)) {
--			channel_eq = true;
-+		if (!intel_dp_aux_handshake_required(intel_dp)) {
- 			break;
--		}
-+		} else {
-+			if (!intel_dp_get_link_status(intel_dp))
-+				break;
- 
--		/* Try 5 times */
--		if (tries > 5)
--			break;
-+			if (intel_channel_eq_ok(intel_dp)) {
-+				channel_eq = true;
-+				break;
-+			}
- 
--		/* Compute new train_set as requested by target */
--		intel_get_adjust_train(intel_dp, link_status, intel_dp->lane_count, train_set);
--		++tries;
--	}
-+			/* Try 5 times */
-+			if (tries > 5)
-+				break;
- 
--	if (HAS_PCH_CPT(dev) && !IS_eDP(intel_dp))
-+			/* Compute new intel_dp->train_set as requested by target */
-+			intel_get_adjust_train(intel_dp);
-+			++tries;
-+		}
-+	}
-+	if (HAS_PCH_CPT(dev) && !is_edp(intel_dp))
- 		reg = DP | DP_LINK_TRAIN_OFF_CPT;
- 	else
- 		reg = DP | DP_LINK_TRAIN_OFF;
-@@ -1301,32 +1406,31 @@ intel_dp_link_train(struct intel_dp *intel_dp)
- static void
- intel_dp_link_down(struct intel_dp *intel_dp)
- {
--	struct drm_device *dev = intel_dp->base.enc.dev;
-+	struct drm_device *dev = intel_dp->base.base.dev;
- 	struct drm_i915_private *dev_priv = dev->dev_private;
- 	uint32_t DP = intel_dp->DP;
- 
- 	DRM_DEBUG_KMS("\n");
- 
--	if (IS_eDP(intel_dp)) {
-+	if (is_edp(intel_dp)) {
- 		DP &= ~DP_PLL_ENABLE;
- 		I915_WRITE(intel_dp->output_reg, DP);
- 		POSTING_READ(intel_dp->output_reg);
- 		udelay(100);
- 	}
- 
--	if (HAS_PCH_CPT(dev) && !IS_eDP(intel_dp)) {
-+	if (HAS_PCH_CPT(dev) && !is_edp(intel_dp)) {
- 		DP &= ~DP_LINK_TRAIN_MASK_CPT;
- 		I915_WRITE(intel_dp->output_reg, DP | DP_LINK_TRAIN_PAT_IDLE_CPT);
--		POSTING_READ(intel_dp->output_reg);
- 	} else {
- 		DP &= ~DP_LINK_TRAIN_MASK;
- 		I915_WRITE(intel_dp->output_reg, DP | DP_LINK_TRAIN_PAT_IDLE);
--		POSTING_READ(intel_dp->output_reg);
- 	}
-+	POSTING_READ(intel_dp->output_reg);
- 
--	udelay(17000);
-+	msleep(17);
- 
--	if (IS_eDP(intel_dp))
-+	if (is_edp(intel_dp))
- 		DP |= DP_LINK_TRAIN_OFF;
- 	I915_WRITE(intel_dp->output_reg, DP & ~DP_PORT_EN);
- 	POSTING_READ(intel_dp->output_reg);
-@@ -1344,32 +1448,34 @@ intel_dp_link_down(struct intel_dp *intel_dp)
- static void
- intel_dp_check_link_status(struct intel_dp *intel_dp)
- {
--	uint8_t link_status[DP_LINK_STATUS_SIZE];
--
--	if (!intel_dp->base.enc.crtc)
-+	if (!intel_dp->base.base.crtc)
- 		return;
- 
--	if (!intel_dp_get_link_status(intel_dp, link_status)) {
-+	if (!intel_dp_get_link_status(intel_dp)) {
- 		intel_dp_link_down(intel_dp);
- 		return;
- 	}
- 
--	if (!intel_channel_eq_ok(link_status, intel_dp->lane_count))
--		intel_dp_link_train(intel_dp);
-+	if (!intel_channel_eq_ok(intel_dp)) {
-+		intel_dp_start_link_train(intel_dp);
-+		intel_dp_complete_link_train(intel_dp);
-+	}
- }
- 
- static enum drm_connector_status
--ironlake_dp_detect(struct drm_connector *connector)
-+ironlake_dp_detect(struct intel_dp *intel_dp)
- {
--	struct drm_encoder *encoder = intel_attached_encoder(connector);
--	struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
- 	enum drm_connector_status status;
- 
-+	/* Can't disconnect eDP */
-+	if (is_edp(intel_dp))
-+		return connector_status_connected;
-+
- 	status = connector_status_disconnected;
- 	if (intel_dp_aux_native_read(intel_dp,
- 				     0x000, intel_dp->dpcd,
--				     sizeof (intel_dp->dpcd)) == sizeof (intel_dp->dpcd))
--	{
-+				     sizeof (intel_dp->dpcd))
-+	    == sizeof(intel_dp->dpcd)) {
- 		if (intel_dp->dpcd[0] != 0)
- 			status = connector_status_connected;
- 	}
-@@ -1378,26 +1484,13 @@ ironlake_dp_detect(struct drm_connector *connector)
- 	return status;
- }
- 
--/**
-- * Uses CRT_HOTPLUG_EN and CRT_HOTPLUG_STAT to detect DP connection.
-- *
-- * \return true if DP port is connected.
-- * \return false if DP port is disconnected.
-- */
- static enum drm_connector_status
--intel_dp_detect(struct drm_connector *connector, bool force)
-+g4x_dp_detect(struct intel_dp *intel_dp)
- {
--	struct drm_encoder *encoder = intel_attached_encoder(connector);
--	struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
--	struct drm_device *dev = intel_dp->base.enc.dev;
-+	struct drm_device *dev = intel_dp->base.base.dev;
- 	struct drm_i915_private *dev_priv = dev->dev_private;
--	uint32_t temp, bit;
- 	enum drm_connector_status status;
--
--	intel_dp->has_audio = false;
--
--	if (HAS_PCH_SPLIT(dev))
--		return ironlake_dp_detect(connector);
-+	uint32_t temp, bit;
- 
- 	switch (intel_dp->output_reg) {
- 	case DP_B:
-@@ -1419,31 +1512,66 @@ intel_dp_detect(struct drm_connector *connector, bool force)
- 		return connector_status_disconnected;
- 
- 	status = connector_status_disconnected;
--	if (intel_dp_aux_native_read(intel_dp,
--				     0x000, intel_dp->dpcd,
-+	if (intel_dp_aux_native_read(intel_dp, 0x000, intel_dp->dpcd,
- 				     sizeof (intel_dp->dpcd)) == sizeof (intel_dp->dpcd))
- 	{
- 		if (intel_dp->dpcd[0] != 0)
- 			status = connector_status_connected;
- 	}
-+
- 	return status;
- }
- 
-+/**
-+ * Uses CRT_HOTPLUG_EN and CRT_HOTPLUG_STAT to detect DP connection.
-+ *
-+ * \return true if DP port is connected.
-+ * \return false if DP port is disconnected.
-+ */
-+static enum drm_connector_status
-+intel_dp_detect(struct drm_connector *connector, bool force)
-+{
-+	struct intel_dp *intel_dp = intel_attached_dp(connector);
-+	struct drm_device *dev = intel_dp->base.base.dev;
-+	enum drm_connector_status status;
-+	struct edid *edid = NULL;
-+
-+	intel_dp->has_audio = false;
-+
-+	if (HAS_PCH_SPLIT(dev))
-+		status = ironlake_dp_detect(intel_dp);
-+	else
-+		status = g4x_dp_detect(intel_dp);
-+	if (status != connector_status_connected)
-+		return status;
-+
-+	if (intel_dp->force_audio) {
-+		intel_dp->has_audio = intel_dp->force_audio > 0;
-+	} else {
-+		edid = drm_get_edid(connector, &intel_dp->adapter);
-+		if (edid) {
-+			intel_dp->has_audio = drm_detect_monitor_audio(edid);
-+			connector->display_info.raw_edid = NULL;
-+			kfree(edid);
-+		}
-+	}
-+
-+	return connector_status_connected;
-+}
-+
- static int intel_dp_get_modes(struct drm_connector *connector)
- {
--	struct drm_encoder *encoder = intel_attached_encoder(connector);
--	struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
--	struct drm_device *dev = intel_dp->base.enc.dev;
-+	struct intel_dp *intel_dp = intel_attached_dp(connector);
-+	struct drm_device *dev = intel_dp->base.base.dev;
- 	struct drm_i915_private *dev_priv = dev->dev_private;
- 	int ret;
- 
- 	/* We should parse the EDID data and find out if it has an audio sink
- 	 */
- 
--	ret = intel_ddc_get_modes(connector, intel_dp->base.ddc_bus);
-+	ret = intel_ddc_get_modes(connector, &intel_dp->adapter);
- 	if (ret) {
--		if ((IS_eDP(intel_dp) || IS_PCH_eDP(intel_dp)) &&
--		    !dev_priv->panel_fixed_mode) {
-+		if (is_edp(intel_dp) && !dev_priv->panel_fixed_mode) {
- 			struct drm_display_mode *newmode;
- 			list_for_each_entry(newmode, &connector->probed_modes,
- 					    head) {
-@@ -1459,7 +1587,7 @@ static int intel_dp_get_modes(struct drm_connector *connector)
- 	}
- 
- 	/* if eDP has no EDID, try to use fixed panel mode from VBT */
--	if (IS_eDP(intel_dp) || IS_PCH_eDP(intel_dp)) {
-+	if (is_edp(intel_dp)) {
- 		if (dev_priv->panel_fixed_mode != NULL) {
- 			struct drm_display_mode *mode;
- 			mode = drm_mode_duplicate(dev, dev_priv->panel_fixed_mode);
-@@ -1470,6 +1598,46 @@ static int intel_dp_get_modes(struct drm_connector *connector)
- 	return 0;
- }
- 
-+static int
-+intel_dp_set_property(struct drm_connector *connector,
-+		      struct drm_property *property,
-+		      uint64_t val)
-+{
-+	struct intel_dp *intel_dp = intel_attached_dp(connector);
-+	int ret;
-+
-+	ret = drm_connector_property_set_value(connector, property, val);
-+	if (ret)
-+		return ret;
-+
-+	if (property == intel_dp->force_audio_property) {
-+		if (val == intel_dp->force_audio)
-+			return 0;
-+
-+		intel_dp->force_audio = val;
-+
-+		if (val > 0 && intel_dp->has_audio)
-+			return 0;
-+		if (val < 0 && !intel_dp->has_audio)
-+			return 0;
-+
-+		intel_dp->has_audio = val > 0;
-+		goto done;
-+	}
-+
-+	return -EINVAL;
-+
-+done:
-+	if (intel_dp->base.base.crtc) {
-+		struct drm_crtc *crtc = intel_dp->base.base.crtc;
-+		drm_crtc_helper_set_mode(crtc, &crtc->mode,
-+					 crtc->x, crtc->y,
-+					 crtc->fb);
-+	}
-+
-+	return 0;
-+}
-+
- static void
- intel_dp_destroy (struct drm_connector *connector)
- {
-@@ -1478,6 +1646,15 @@ intel_dp_destroy (struct drm_connector *connector)
- 	kfree(connector);
- }
- 
-+static void intel_dp_encoder_destroy(struct drm_encoder *encoder)
-+{
-+	struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
-+
-+	i2c_del_adapter(&intel_dp->adapter);
-+	drm_encoder_cleanup(encoder);
-+	kfree(intel_dp);
-+}
-+
- static const struct drm_encoder_helper_funcs intel_dp_helper_funcs = {
- 	.dpms = intel_dp_dpms,
- 	.mode_fixup = intel_dp_mode_fixup,
-@@ -1490,20 +1667,21 @@ static const struct drm_connector_funcs intel_dp_connector_funcs = {
- 	.dpms = drm_helper_connector_dpms,
- 	.detect = intel_dp_detect,
- 	.fill_modes = drm_helper_probe_single_connector_modes,
-+	.set_property = intel_dp_set_property,
- 	.destroy = intel_dp_destroy,
- };
- 
- static const struct drm_connector_helper_funcs intel_dp_connector_helper_funcs = {
- 	.get_modes = intel_dp_get_modes,
- 	.mode_valid = intel_dp_mode_valid,
--	.best_encoder = intel_attached_encoder,
-+	.best_encoder = intel_best_encoder,
- };
- 
- static const struct drm_encoder_funcs intel_dp_enc_funcs = {
--	.destroy = intel_encoder_destroy,
-+	.destroy = intel_dp_encoder_destroy,
- };
- 
--void
-+static void
- intel_dp_hot_plug(struct intel_encoder *intel_encoder)
- {
- 	struct intel_dp *intel_dp = container_of(intel_encoder, struct intel_dp, base);
-@@ -1554,6 +1732,20 @@ bool intel_dpd_is_edp(struct drm_device *dev)
- 	return false;
- }
- 
-+static void
-+intel_dp_add_properties(struct intel_dp *intel_dp, struct drm_connector *connector)
-+{
-+	struct drm_device *dev = connector->dev;
-+
-+	intel_dp->force_audio_property =
-+		drm_property_create(dev, DRM_MODE_PROP_RANGE, "force_audio", 2);
-+	if (intel_dp->force_audio_property) {
-+		intel_dp->force_audio_property->values[0] = -1;
-+		intel_dp->force_audio_property->values[1] = 1;
-+		drm_connector_attach_property(connector, intel_dp->force_audio_property, 0);
-+	}
-+}
-+
- void
- intel_dp_init(struct drm_device *dev, int output_reg)
- {
-@@ -1580,7 +1772,7 @@ intel_dp_init(struct drm_device *dev, int output_reg)
- 		if (intel_dpd_is_edp(dev))
- 			intel_dp->is_pch_edp = true;
- 
--	if (output_reg == DP_A || IS_PCH_eDP(intel_dp)) {
-+	if (output_reg == DP_A || is_pch_edp(intel_dp)) {
- 		type = DRM_MODE_CONNECTOR_eDP;
- 		intel_encoder->type = INTEL_OUTPUT_EDP;
- 	} else {
-@@ -1601,7 +1793,7 @@ intel_dp_init(struct drm_device *dev, int output_reg)
- 	else if (output_reg == DP_D || output_reg == PCH_DP_D)
- 		intel_encoder->clone_mask = (1 << INTEL_DP_D_CLONE_BIT);
- 
--	if (IS_eDP(intel_dp))
-+	if (is_edp(intel_dp))
- 		intel_encoder->clone_mask = (1 << INTEL_EDP_CLONE_BIT);
- 
- 	intel_encoder->crtc_mask = (1 << 0) | (1 << 1);
-@@ -1612,12 +1804,11 @@ intel_dp_init(struct drm_device *dev, int output_reg)
- 	intel_dp->has_audio = false;
- 	intel_dp->dpms_mode = DRM_MODE_DPMS_ON;
- 
--	drm_encoder_init(dev, &intel_encoder->enc, &intel_dp_enc_funcs,
-+	drm_encoder_init(dev, &intel_encoder->base, &intel_dp_enc_funcs,
- 			 DRM_MODE_ENCODER_TMDS);
--	drm_encoder_helper_add(&intel_encoder->enc, &intel_dp_helper_funcs);
-+	drm_encoder_helper_add(&intel_encoder->base, &intel_dp_helper_funcs);
- 
--	drm_mode_connector_attach_encoder(&intel_connector->base,
--					  &intel_encoder->enc);
-+	intel_connector_attach_encoder(intel_connector, intel_encoder);
- 	drm_sysfs_connector_add(connector);
- 
- 	/* Set up the DDC bus. */
-@@ -1647,10 +1838,29 @@ intel_dp_init(struct drm_device *dev, int output_reg)
- 
- 	intel_dp_i2c_init(intel_dp, intel_connector, name);
- 
--	intel_encoder->ddc_bus = &intel_dp->adapter;
-+	/* Cache some DPCD data in the eDP case */
-+	if (is_edp(intel_dp)) {
-+		int ret;
-+		bool was_on;
-+
-+		was_on = ironlake_edp_panel_on(intel_dp);
-+		ret = intel_dp_aux_native_read(intel_dp, DP_DPCD_REV,
-+					       intel_dp->dpcd,
-+					       sizeof(intel_dp->dpcd));
-+		if (ret == sizeof(intel_dp->dpcd)) {
-+			if (intel_dp->dpcd[0] >= 0x11)
-+				dev_priv->no_aux_handshake = intel_dp->dpcd[3] &
-+					DP_NO_AUX_HANDSHAKE_LINK_TRAINING;
-+		} else {
-+			DRM_ERROR("failed to retrieve link info\n");
-+		}
-+		if (!was_on)
-+			ironlake_edp_panel_off(dev);
-+	}
-+
- 	intel_encoder->hot_plug = intel_dp_hot_plug;
- 
--	if (output_reg == DP_A || IS_PCH_eDP(intel_dp)) {
-+	if (is_edp(intel_dp)) {
- 		/* initialize panel mode from VBT if available for eDP */
- 		if (dev_priv->lfp_lvds_vbt_mode) {
- 			dev_priv->panel_fixed_mode =
-@@ -1662,6 +1872,8 @@ intel_dp_init(struct drm_device *dev, int output_reg)
- 		}
- 	}
- 
-+	intel_dp_add_properties(intel_dp, connector);
-+
- 	/* For G4X desktop chip, PEG_BAND_GAP_DATA 3:0 must first be written
- 	 * 0xd.  Failure to do so will result in spurious interrupts being
- 	 * generated on the port when a cable is not attached.
-diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h
-index 8828b3a..21551fe 100644
---- a/drivers/gpu/drm/i915/intel_drv.h
-+++ b/drivers/gpu/drm/i915/intel_drv.h
-@@ -26,14 +26,12 @@
- #define __INTEL_DRV_H__
- 
- #include <linux/i2c.h>
--#include <linux/i2c-id.h>
--#include <linux/i2c-algo-bit.h>
- #include "i915_drv.h"
- #include "drm_crtc.h"
--
- #include "drm_crtc_helper.h"
-+#include "drm_fb_helper.h"
- 
--#define wait_for(COND, MS, W) ({ \
-+#define _wait_for(COND, MS, W) ({ \
- 	unsigned long timeout__ = jiffies + msecs_to_jiffies(MS);	\
- 	int ret__ = 0;							\
- 	while (! (COND)) {						\
-@@ -41,11 +39,24 @@
- 			ret__ = -ETIMEDOUT;				\
- 			break;						\
- 		}							\
--		if (W) msleep(W);					\
-+		if (W && !in_dbg_master()) msleep(W);			\
- 	}								\
- 	ret__;								\
- })
- 
-+#define wait_for(COND, MS) _wait_for(COND, MS, 1)
-+#define wait_for_atomic(COND, MS) _wait_for(COND, MS, 0)
-+
-+#define MSLEEP(x) do { \
-+	if (in_dbg_master()) \
-+	       	mdelay(x); \
-+	else \
-+		msleep(x); \
-+} while(0)
-+
-+#define KHz(x) (1000*x)
-+#define MHz(x) KHz(1000*x)
-+
- /*
-  * Display related stuff
-  */
-@@ -96,24 +107,39 @@
- #define INTEL_DVO_CHIP_TMDS 2
- #define INTEL_DVO_CHIP_TVOUT 4
- 
--struct intel_i2c_chan {
--	struct drm_device *drm_dev; /* for getting at dev. private (mmio etc.) */
--	u32 reg; /* GPIO reg */
--	struct i2c_adapter adapter;
--	struct i2c_algo_bit_data algo;
--};
-+/* drm_display_mode->private_flags */
-+#define INTEL_MODE_PIXEL_MULTIPLIER_SHIFT (0x0)
-+#define INTEL_MODE_PIXEL_MULTIPLIER_MASK (0xf << INTEL_MODE_PIXEL_MULTIPLIER_SHIFT)
-+
-+static inline void
-+intel_mode_set_pixel_multiplier(struct drm_display_mode *mode,
-+				int multiplier)
-+{
-+	mode->clock *= multiplier;
-+	mode->private_flags |= multiplier;
-+}
-+
-+static inline int
-+intel_mode_get_pixel_multiplier(const struct drm_display_mode *mode)
-+{
-+	return (mode->private_flags & INTEL_MODE_PIXEL_MULTIPLIER_MASK) >> INTEL_MODE_PIXEL_MULTIPLIER_SHIFT;
-+}
- 
- struct intel_framebuffer {
- 	struct drm_framebuffer base;
- 	struct drm_gem_object *obj;
- };
- 
-+struct intel_fbdev {
-+	struct drm_fb_helper helper;
-+	struct intel_framebuffer ifb;
-+	struct list_head fbdev_list;
-+	struct drm_display_mode *our_mode;
-+};
- 
- struct intel_encoder {
--	struct drm_encoder enc;
-+	struct drm_encoder base;
- 	int type;
--	struct i2c_adapter *i2c_bus;
--	struct i2c_adapter *ddc_bus;
- 	bool load_detect_temp;
- 	bool needs_tv_clock;
- 	void (*hot_plug)(struct intel_encoder *);
-@@ -123,32 +149,7 @@ struct intel_encoder {
- 
- struct intel_connector {
- 	struct drm_connector base;
--};
--
--struct intel_crtc;
--struct intel_overlay {
--	struct drm_device *dev;
--	struct intel_crtc *crtc;
--	struct drm_i915_gem_object *vid_bo;
--	struct drm_i915_gem_object *old_vid_bo;
--	int active;
--	int pfit_active;
--	u32 pfit_vscale_ratio; /* shifted-point number, (1<<12) == 1.0 */
--	u32 color_key;
--	u32 brightness, contrast, saturation;
--	u32 old_xscale, old_yscale;
--	/* register access */
--	u32 flip_addr;
--	struct drm_i915_gem_object *reg_bo;
--	void *virt_addr;
--	/* flip handling */
--	uint32_t last_flip_req;
--	int hw_wedged;
--#define HW_WEDGED		1
--#define NEEDS_WAIT_FOR_FLIP	2
--#define RELEASE_OLD_VID		3
--#define SWITCH_OFF_STAGE_1	4
--#define SWITCH_OFF_STAGE_2	5
-+	struct intel_encoder *encoder;
- };
- 
- struct intel_crtc {
-@@ -157,6 +158,7 @@ struct intel_crtc {
- 	enum plane plane;
- 	u8 lut_r[256], lut_g[256], lut_b[256];
- 	int dpms_mode;
-+	bool active; /* is the crtc on? independent of the dpms mode */
- 	bool busy; /* is scanout buffer being updated frequently? */
- 	struct timer_list idle_timer;
- 	bool lowfreq_avail;
-@@ -168,14 +170,53 @@ struct intel_crtc {
- 	uint32_t cursor_addr;
- 	int16_t cursor_x, cursor_y;
- 	int16_t cursor_width, cursor_height;
--	bool cursor_visible, cursor_on;
-+	bool cursor_visible;
- };
- 
- #define to_intel_crtc(x) container_of(x, struct intel_crtc, base)
- #define to_intel_connector(x) container_of(x, struct intel_connector, base)
--#define enc_to_intel_encoder(x) container_of(x, struct intel_encoder, enc)
-+#define to_intel_encoder(x) container_of(x, struct intel_encoder, base)
- #define to_intel_framebuffer(x) container_of(x, struct intel_framebuffer, base)
- 
-+#define DIP_TYPE_AVI    0x82
-+#define DIP_VERSION_AVI 0x2
-+#define DIP_LEN_AVI     13
-+
-+struct dip_infoframe {
-+	uint8_t type;		/* HB0 */
-+	uint8_t ver;		/* HB1 */
-+	uint8_t len;		/* HB2 - body len, not including checksum */
-+	uint8_t ecc;		/* Header ECC */
-+	uint8_t checksum;	/* PB0 */
-+	union {
-+		struct {
-+			/* PB1 - Y 6:5, A 4:4, B 3:2, S 1:0 */
-+			uint8_t Y_A_B_S;
-+			/* PB2 - C 7:6, M 5:4, R 3:0 */
-+			uint8_t C_M_R;
-+			/* PB3 - ITC 7:7, EC 6:4, Q 3:2, SC 1:0 */
-+			uint8_t ITC_EC_Q_SC;
-+			/* PB4 - VIC 6:0 */
-+			uint8_t VIC;
-+			/* PB5 - PR 3:0 */
-+			uint8_t PR;
-+			/* PB6 to PB13 */
-+			uint16_t top_bar_end;
-+			uint16_t bottom_bar_start;
-+			uint16_t left_bar_end;
-+			uint16_t right_bar_start;
-+		} avi;
-+		uint8_t payload[27];
-+	} __attribute__ ((packed)) body;
-+} __attribute__((packed));
-+
-+static inline struct drm_crtc *
-+intel_get_crtc_for_pipe(struct drm_device *dev, int pipe)
-+{
-+	struct drm_i915_private *dev_priv = dev->dev_private;
-+	return dev_priv->pipe_to_crtc_mapping[pipe];
-+}
-+
- struct intel_unpin_work {
- 	struct work_struct work;
- 	struct drm_device *dev;
-@@ -186,16 +227,12 @@ struct intel_unpin_work {
- 	bool enable_stall_check;
- };
- 
--struct i2c_adapter *intel_i2c_create(struct drm_device *dev, const u32 reg,
--				     const char *name);
--void intel_i2c_destroy(struct i2c_adapter *adapter);
- int intel_ddc_get_modes(struct drm_connector *c, struct i2c_adapter *adapter);
--extern bool intel_ddc_probe(struct intel_encoder *intel_encoder);
--void intel_i2c_quirk_set(struct drm_device *dev, bool enable);
--void intel_i2c_reset_gmbus(struct drm_device *dev);
-+extern bool intel_ddc_probe(struct intel_encoder *intel_encoder, int ddc_bus);
- 
- extern void intel_crt_init(struct drm_device *dev);
- extern void intel_hdmi_init(struct drm_device *dev, int sdvox_reg);
-+void intel_dip_infoframe_csum(struct dip_infoframe *avi_if);
- extern bool intel_sdvo_init(struct drm_device *dev, int output_device);
- extern void intel_dvo_init(struct drm_device *dev);
- extern void intel_tv_init(struct drm_device *dev);
-@@ -205,32 +242,41 @@ extern void intel_dp_init(struct drm_device *dev, int dp_reg);
- void
- intel_dp_set_m_n(struct drm_crtc *crtc, struct drm_display_mode *mode,
- 		 struct drm_display_mode *adjusted_mode);
--extern bool intel_pch_has_edp(struct drm_crtc *crtc);
- extern bool intel_dpd_is_edp(struct drm_device *dev);
- extern void intel_edp_link_config (struct intel_encoder *, int *, int *);
-+extern bool intel_encoder_is_pch_edp(struct drm_encoder *encoder);
- 
--
-+/* intel_panel.c */
- extern void intel_fixed_panel_mode(struct drm_display_mode *fixed_mode,
- 				   struct drm_display_mode *adjusted_mode);
- extern void intel_pch_panel_fitting(struct drm_device *dev,
- 				    int fitting_mode,
- 				    struct drm_display_mode *mode,
- 				    struct drm_display_mode *adjusted_mode);
-+extern u32 intel_panel_get_max_backlight(struct drm_device *dev);
-+extern u32 intel_panel_get_backlight(struct drm_device *dev);
-+extern void intel_panel_set_backlight(struct drm_device *dev, u32 level);
- 
--extern int intel_panel_fitter_pipe (struct drm_device *dev);
- extern void intel_crtc_load_lut(struct drm_crtc *crtc);
- extern void intel_encoder_prepare (struct drm_encoder *encoder);
- extern void intel_encoder_commit (struct drm_encoder *encoder);
- extern void intel_encoder_destroy(struct drm_encoder *encoder);
- 
--extern struct drm_encoder *intel_attached_encoder(struct drm_connector *connector);
-+static inline struct intel_encoder *intel_attached_encoder(struct drm_connector *connector)
-+{
-+	return to_intel_connector(connector)->encoder;
-+}
-+
-+extern void intel_connector_attach_encoder(struct intel_connector *connector,
-+					   struct intel_encoder *encoder);
-+extern struct drm_encoder *intel_best_encoder(struct drm_connector *connector);
- 
- extern struct drm_display_mode *intel_crtc_mode_get(struct drm_device *dev,
- 						    struct drm_crtc *crtc);
- int intel_get_pipe_from_crtc_id(struct drm_device *dev, void *data,
- 				struct drm_file *file_priv);
- extern void intel_wait_for_vblank(struct drm_device *dev, int pipe);
--extern struct drm_crtc *intel_get_crtc_from_pipe(struct drm_device *dev, int pipe);
-+extern void intel_wait_for_pipe_off(struct drm_device *dev, int pipe);
- extern struct drm_crtc *intel_get_load_detect_pipe(struct intel_encoder *intel_encoder,
- 						   struct drm_connector *connector,
- 						   struct drm_display_mode *mode,
-@@ -250,9 +296,11 @@ extern void intel_crtc_fb_gamma_get(struct drm_crtc *crtc, u16 *red, u16 *green,
- extern void intel_init_clock_gating(struct drm_device *dev);
- extern void ironlake_enable_drps(struct drm_device *dev);
- extern void ironlake_disable_drps(struct drm_device *dev);
-+extern void intel_init_emon(struct drm_device *dev);
- 
- extern int intel_pin_and_fence_fb_obj(struct drm_device *dev,
--				      struct drm_gem_object *obj);
-+				      struct drm_gem_object *obj,
-+				      bool pipelined);
- 
- extern int intel_framebuffer_init(struct drm_device *dev,
- 				  struct intel_framebuffer *ifb,
-@@ -267,9 +315,8 @@ extern void intel_finish_page_flip_plane(struct drm_device *dev, int plane);
- 
- extern void intel_setup_overlay(struct drm_device *dev);
- extern void intel_cleanup_overlay(struct drm_device *dev);
--extern int intel_overlay_switch_off(struct intel_overlay *overlay);
--extern int intel_overlay_recover_from_interrupt(struct intel_overlay *overlay,
--						int interruptible);
-+extern int intel_overlay_switch_off(struct intel_overlay *overlay,
-+				    bool interruptible);
- extern int intel_overlay_put_image(struct drm_device *dev, void *data,
- 				   struct drm_file *file_priv);
- extern int intel_overlay_attrs(struct drm_device *dev, void *data,
-diff --git a/drivers/gpu/drm/i915/intel_dvo.c b/drivers/gpu/drm/i915/intel_dvo.c
-index 7c9ec14..ea37328 100644
---- a/drivers/gpu/drm/i915/intel_dvo.c
-+++ b/drivers/gpu/drm/i915/intel_dvo.c
-@@ -72,7 +72,7 @@ static const struct intel_dvo_device intel_dvo_devices[] = {
- 		.name = "ch7017",
- 		.dvo_reg = DVOC,
- 		.slave_addr = 0x75,
--		.gpio = GPIOE,
-+		.gpio = GMBUS_PORT_DPB,
- 		.dev_ops = &ch7017_ops,
- 	}
- };
-@@ -88,7 +88,13 @@ struct intel_dvo {
- 
- static struct intel_dvo *enc_to_intel_dvo(struct drm_encoder *encoder)
- {
--	return container_of(enc_to_intel_encoder(encoder), struct intel_dvo, base);
-+	return container_of(encoder, struct intel_dvo, base.base);
-+}
-+
-+static struct intel_dvo *intel_attached_dvo(struct drm_connector *connector)
-+{
-+	return container_of(intel_attached_encoder(connector),
-+			    struct intel_dvo, base);
- }
- 
- static void intel_dvo_dpms(struct drm_encoder *encoder, int mode)
-@@ -112,8 +118,7 @@ static void intel_dvo_dpms(struct drm_encoder *encoder, int mode)
- static int intel_dvo_mode_valid(struct drm_connector *connector,
- 				struct drm_display_mode *mode)
- {
--	struct drm_encoder *encoder = intel_attached_encoder(connector);
--	struct intel_dvo *intel_dvo = enc_to_intel_dvo(encoder);
-+	struct intel_dvo *intel_dvo = intel_attached_dvo(connector);
- 
- 	if (mode->flags & DRM_MODE_FLAG_DBLSCAN)
- 		return MODE_NO_DBLESCAN;
-@@ -224,23 +229,22 @@ static void intel_dvo_mode_set(struct drm_encoder *encoder,
- static enum drm_connector_status
- intel_dvo_detect(struct drm_connector *connector, bool force)
- {
--	struct drm_encoder *encoder = intel_attached_encoder(connector);
--	struct intel_dvo *intel_dvo = enc_to_intel_dvo(encoder);
--
-+	struct intel_dvo *intel_dvo = intel_attached_dvo(connector);
- 	return intel_dvo->dev.dev_ops->detect(&intel_dvo->dev);
- }
- 
- static int intel_dvo_get_modes(struct drm_connector *connector)
- {
--	struct drm_encoder *encoder = intel_attached_encoder(connector);
--	struct intel_dvo *intel_dvo = enc_to_intel_dvo(encoder);
-+	struct intel_dvo *intel_dvo = intel_attached_dvo(connector);
-+	struct drm_i915_private *dev_priv = connector->dev->dev_private;
- 
- 	/* We should probably have an i2c driver get_modes function for those
- 	 * devices which will have a fixed set of modes determined by the chip
- 	 * (TV-out, for example), but for now with just TMDS and LVDS,
- 	 * that's not the case.
- 	 */
--	intel_ddc_get_modes(connector, intel_dvo->base.ddc_bus);
-+	intel_ddc_get_modes(connector,
-+			    &dev_priv->gmbus[GMBUS_PORT_DPC].adapter);
- 	if (!list_empty(&connector->probed_modes))
- 		return 1;
- 
-@@ -281,7 +285,7 @@ static const struct drm_connector_funcs intel_dvo_connector_funcs = {
- static const struct drm_connector_helper_funcs intel_dvo_connector_helper_funcs = {
- 	.mode_valid = intel_dvo_mode_valid,
- 	.get_modes = intel_dvo_get_modes,
--	.best_encoder = intel_attached_encoder,
-+	.best_encoder = intel_best_encoder,
- };
- 
- static void intel_dvo_enc_destroy(struct drm_encoder *encoder)
-@@ -311,8 +315,7 @@ intel_dvo_get_current_mode(struct drm_connector *connector)
- {
- 	struct drm_device *dev = connector->dev;
- 	struct drm_i915_private *dev_priv = dev->dev_private;
--	struct drm_encoder *encoder = intel_attached_encoder(connector);
--	struct intel_dvo *intel_dvo = enc_to_intel_dvo(encoder);
-+	struct intel_dvo *intel_dvo = intel_attached_dvo(connector);
- 	uint32_t dvo_val = I915_READ(intel_dvo->dev.dvo_reg);
- 	struct drm_display_mode *mode = NULL;
- 
-@@ -323,7 +326,7 @@ intel_dvo_get_current_mode(struct drm_connector *connector)
- 		struct drm_crtc *crtc;
- 		int pipe = (dvo_val & DVO_PIPE_B_SELECT) ? 1 : 0;
- 
--		crtc = intel_get_crtc_from_pipe(dev, pipe);
-+		crtc = intel_get_crtc_for_pipe(dev, pipe);
- 		if (crtc) {
- 			mode = intel_crtc_mode_get(dev, crtc);
- 			if (mode) {
-@@ -341,11 +344,10 @@ intel_dvo_get_current_mode(struct drm_connector *connector)
- 
- void intel_dvo_init(struct drm_device *dev)
- {
-+	struct drm_i915_private *dev_priv = dev->dev_private;
- 	struct intel_encoder *intel_encoder;
- 	struct intel_dvo *intel_dvo;
- 	struct intel_connector *intel_connector;
--	struct i2c_adapter *i2cbus = NULL;
--	int ret = 0;
- 	int i;
- 	int encoder_type = DRM_MODE_ENCODER_NONE;
- 
-@@ -360,16 +362,14 @@ void intel_dvo_init(struct drm_device *dev)
- 	}
- 
- 	intel_encoder = &intel_dvo->base;
--
--	/* Set up the DDC bus */
--	intel_encoder->ddc_bus = intel_i2c_create(dev, GPIOD, "DVODDC_D");
--	if (!intel_encoder->ddc_bus)
--		goto free_intel;
-+	drm_encoder_init(dev, &intel_encoder->base,
-+			 &intel_dvo_enc_funcs, encoder_type);
- 
- 	/* Now, try to find a controller */
- 	for (i = 0; i < ARRAY_SIZE(intel_dvo_devices); i++) {
- 		struct drm_connector *connector = &intel_connector->base;
- 		const struct intel_dvo_device *dvo = &intel_dvo_devices[i];
-+		struct i2c_adapter *i2c;
- 		int gpio;
- 
- 		/* Allow the I2C driver info to specify the GPIO to be used in
-@@ -379,24 +379,18 @@ void intel_dvo_init(struct drm_device *dev)
- 		if (dvo->gpio != 0)
- 			gpio = dvo->gpio;
- 		else if (dvo->type == INTEL_DVO_CHIP_LVDS)
--			gpio = GPIOB;
-+			gpio = GMBUS_PORT_SSC;
- 		else
--			gpio = GPIOE;
-+			gpio = GMBUS_PORT_DPB;
- 
- 		/* Set up the I2C bus necessary for the chip we're probing.
- 		 * It appears that everything is on GPIOE except for panels
- 		 * on i830 laptops, which are on GPIOB (DVOA).
- 		 */
--		if (i2cbus != NULL)
--			intel_i2c_destroy(i2cbus);
--		if (!(i2cbus = intel_i2c_create(dev, gpio,
--			gpio == GPIOB ? "DVOI2C_B" : "DVOI2C_E"))) {
--			continue;
--		}
-+		i2c = &dev_priv->gmbus[gpio].adapter;
- 
- 		intel_dvo->dev = *dvo;
--		ret = dvo->dev_ops->init(&intel_dvo->dev, i2cbus);
--		if (!ret)
-+		if (!dvo->dev_ops->init(&intel_dvo->dev, i2c))
- 			continue;
- 
- 		intel_encoder->type = INTEL_OUTPUT_DVO;
-@@ -427,13 +421,10 @@ void intel_dvo_init(struct drm_device *dev)
- 		connector->interlace_allowed = false;
- 		connector->doublescan_allowed = false;
- 
--		drm_encoder_init(dev, &intel_encoder->enc,
--				 &intel_dvo_enc_funcs, encoder_type);
--		drm_encoder_helper_add(&intel_encoder->enc,
-+		drm_encoder_helper_add(&intel_encoder->base,
- 				       &intel_dvo_helper_funcs);
- 
--		drm_mode_connector_attach_encoder(&intel_connector->base,
--						  &intel_encoder->enc);
-+		intel_connector_attach_encoder(intel_connector, intel_encoder);
- 		if (dvo->type == INTEL_DVO_CHIP_LVDS) {
- 			/* For our LVDS chipsets, we should hopefully be able
- 			 * to dig the fixed panel mode out of the BIOS data.
-@@ -451,11 +442,7 @@ void intel_dvo_init(struct drm_device *dev)
- 		return;
- 	}
- 
--	intel_i2c_destroy(intel_encoder->ddc_bus);
--	/* Didn't find a chip, so tear down. */
--	if (i2cbus != NULL)
--		intel_i2c_destroy(i2cbus);
--free_intel:
-+	drm_encoder_cleanup(&intel_encoder->base);
- 	kfree(intel_dvo);
- 	kfree(intel_connector);
- }
-diff --git a/drivers/gpu/drm/i915/intel_fb.c b/drivers/gpu/drm/i915/intel_fb.c
-index b61966c..af2a1dd 100644
---- a/drivers/gpu/drm/i915/intel_fb.c
-+++ b/drivers/gpu/drm/i915/intel_fb.c
-@@ -44,13 +44,6 @@
- #include "i915_drm.h"
- #include "i915_drv.h"
- 
--struct intel_fbdev {
--	struct drm_fb_helper helper;
--	struct intel_framebuffer ifb;
--	struct list_head fbdev_list;
--	struct drm_display_mode *our_mode;
--};
--
- static struct fb_ops intelfb_ops = {
- 	.owner = THIS_MODULE,
- 	.fb_check_var = drm_fb_helper_check_var,
-@@ -75,7 +68,7 @@ static int intelfb_create(struct intel_fbdev *ifbdev,
- 	struct drm_gem_object *fbo = NULL;
- 	struct drm_i915_gem_object *obj_priv;
- 	struct device *device = &dev->pdev->dev;
--	int size, ret, mmio_bar = IS_I9XX(dev) ? 0 : 1;
-+	int size, ret, mmio_bar = IS_GEN2(dev) ? 1 : 0;
- 
- 	/* we don't do packed 24bpp */
- 	if (sizes->surface_bpp == 24)
-@@ -100,19 +93,13 @@ static int intelfb_create(struct intel_fbdev *ifbdev,
- 
- 	mutex_lock(&dev->struct_mutex);
- 
--	ret = intel_pin_and_fence_fb_obj(dev, fbo);
-+	/* Flush everything out, we'll be doing GTT only from now on */
-+	ret = intel_pin_and_fence_fb_obj(dev, fbo, false);
- 	if (ret) {
- 		DRM_ERROR("failed to pin fb: %d\n", ret);
- 		goto out_unref;
- 	}
- 
--	/* Flush everything out, we'll be doing GTT only from now on */
--	ret = i915_gem_object_set_to_gtt_domain(fbo, 1);
--	if (ret) {
--		DRM_ERROR("failed to bind fb: %d.\n", ret);
--		goto out_unpin;
--	}
--
- 	info = framebuffer_alloc(0, device);
- 	if (!info) {
- 		ret = -ENOMEM;
-@@ -142,7 +129,7 @@ static int intelfb_create(struct intel_fbdev *ifbdev,
- 		goto out_unpin;
- 	}
- 	info->apertures->ranges[0].base = dev->mode_config.fb_base;
--	if (IS_I9XX(dev))
-+	if (!IS_GEN2(dev))
- 		info->apertures->ranges[0].size = pci_resource_len(dev->pdev, 2);
- 	else
- 		info->apertures->ranges[0].size = pci_resource_len(dev->pdev, 0);
-@@ -219,8 +206,8 @@ static struct drm_fb_helper_funcs intel_fb_helper_funcs = {
- 	.fb_probe = intel_fb_find_or_create_single,
- };
- 
--int intel_fbdev_destroy(struct drm_device *dev,
--			struct intel_fbdev *ifbdev)
-+static void intel_fbdev_destroy(struct drm_device *dev,
-+				struct intel_fbdev *ifbdev)
- {
- 	struct fb_info *info;
- 	struct intel_framebuffer *ifb = &ifbdev->ifb;
-@@ -238,11 +225,9 @@ int intel_fbdev_destroy(struct drm_device *dev,
- 
- 	drm_framebuffer_cleanup(&ifb->base);
- 	if (ifb->obj) {
--		drm_gem_object_unreference(ifb->obj);
-+		drm_gem_object_unreference_unlocked(ifb->obj);
- 		ifb->obj = NULL;
- 	}
--
--	return 0;
- }
- 
- int intel_fbdev_init(struct drm_device *dev)
-diff --git a/drivers/gpu/drm/i915/intel_hdmi.c b/drivers/gpu/drm/i915/intel_hdmi.c
-index 926934a..0d0273e 100644
---- a/drivers/gpu/drm/i915/intel_hdmi.c
-+++ b/drivers/gpu/drm/i915/intel_hdmi.c
-@@ -40,12 +40,76 @@
- struct intel_hdmi {
- 	struct intel_encoder base;
- 	u32 sdvox_reg;
-+	int ddc_bus;
- 	bool has_hdmi_sink;
-+	bool has_audio;
-+	int force_audio;
-+	struct drm_property *force_audio_property;
- };
- 
- static struct intel_hdmi *enc_to_intel_hdmi(struct drm_encoder *encoder)
- {
--	return container_of(enc_to_intel_encoder(encoder), struct intel_hdmi, base);
-+	return container_of(encoder, struct intel_hdmi, base.base);
-+}
-+
-+static struct intel_hdmi *intel_attached_hdmi(struct drm_connector *connector)
-+{
-+	return container_of(intel_attached_encoder(connector),
-+			    struct intel_hdmi, base);
-+}
-+
-+void intel_dip_infoframe_csum(struct dip_infoframe *avi_if)
-+{
-+	uint8_t *data = (uint8_t *)avi_if;
-+	uint8_t sum = 0;
-+	unsigned i;
-+
-+	avi_if->checksum = 0;
-+	avi_if->ecc = 0;
-+
-+	for (i = 0; i < sizeof(*avi_if); i++)
-+		sum += data[i];
-+
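-+	/* Pick the checksum so that all infoframe bytes, including it, sum to zero (mod 256). */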
-+	avi_if->checksum = 0x100 - sum;
-+}
-+
-+static void intel_hdmi_set_avi_infoframe(struct drm_encoder *encoder)
-+{
-+	struct dip_infoframe avi_if = {
-+		.type = DIP_TYPE_AVI,
-+		.ver = DIP_VERSION_AVI,
-+		.len = DIP_LEN_AVI,
-+	};
-+	uint32_t *data = (uint32_t *)&avi_if;
-+	struct drm_device *dev = encoder->dev;
-+	struct drm_i915_private *dev_priv = dev->dev_private;
-+	struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder);
-+	u32 port;
-+	unsigned i;
-+
-+	if (!intel_hdmi->has_hdmi_sink)
-+		return;
-+
-+	/* XXX first guess at handling video port, is this correct? */
-+	if (intel_hdmi->sdvox_reg == SDVOB)
-+		port = VIDEO_DIP_PORT_B;
-+	else if (intel_hdmi->sdvox_reg == SDVOC)
-+		port = VIDEO_DIP_PORT_C;
-+	else
-+		return;
-+
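-+	/* Select and load the AVI DIP buffer first, then enable AVI transmission. */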
-+	I915_WRITE(VIDEO_DIP_CTL, VIDEO_DIP_ENABLE | port |
-+		   VIDEO_DIP_SELECT_AVI | VIDEO_DIP_FREQ_VSYNC);
-+
-+	intel_dip_infoframe_csum(&avi_if);
-+	for (i = 0; i < sizeof(avi_if); i += 4) {
-+		I915_WRITE(VIDEO_DIP_DATA, *data);
-+		data++;
-+	}
-+
-+	I915_WRITE(VIDEO_DIP_CTL, VIDEO_DIP_ENABLE | port |
-+		   VIDEO_DIP_SELECT_AVI | VIDEO_DIP_FREQ_VSYNC |
-+		   VIDEO_DIP_ENABLE_AVI);
- }
- 
- static void intel_hdmi_mode_set(struct drm_encoder *encoder,
-@@ -65,10 +129,13 @@ static void intel_hdmi_mode_set(struct drm_encoder *encoder,
- 	if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC)
- 		sdvox |= SDVO_HSYNC_ACTIVE_HIGH;
- 
--	if (intel_hdmi->has_hdmi_sink) {
-+	/* Required on CPT */
-+	if (intel_hdmi->has_hdmi_sink && HAS_PCH_CPT(dev))
-+		sdvox |= HDMI_MODE_SELECT;
-+
-+	if (intel_hdmi->has_audio) {
- 		sdvox |= SDVO_AUDIO_ENABLE;
--		if (HAS_PCH_CPT(dev))
--			sdvox |= HDMI_MODE_SELECT;
-+		sdvox |= SDVO_NULL_PACKETS_DURING_VSYNC;
- 	}
- 
- 	if (intel_crtc->pipe == 1) {
-@@ -80,6 +147,8 @@ static void intel_hdmi_mode_set(struct drm_encoder *encoder,
- 
- 	I915_WRITE(intel_hdmi->sdvox_reg, sdvox);
- 	POSTING_READ(intel_hdmi->sdvox_reg);
-+
-+	intel_hdmi_set_avi_infoframe(encoder);
- }
- 
- static void intel_hdmi_dpms(struct drm_encoder *encoder, int mode)
-@@ -141,36 +210,85 @@ static bool intel_hdmi_mode_fixup(struct drm_encoder *encoder,
- static enum drm_connector_status
- intel_hdmi_detect(struct drm_connector *connector, bool force)
- {
--	struct drm_encoder *encoder = intel_attached_encoder(connector);
--	struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder);
--	struct edid *edid = NULL;
-+	struct intel_hdmi *intel_hdmi = intel_attached_hdmi(connector);
-+	struct drm_i915_private *dev_priv = connector->dev->dev_private;
-+	struct edid *edid;
- 	enum drm_connector_status status = connector_status_disconnected;
- 
- 	intel_hdmi->has_hdmi_sink = false;
--	edid = drm_get_edid(connector, intel_hdmi->base.ddc_bus);
-+	intel_hdmi->has_audio = false;
-+	edid = drm_get_edid(connector,
-+			    &dev_priv->gmbus[intel_hdmi->ddc_bus].adapter);
- 
- 	if (edid) {
- 		if (edid->input & DRM_EDID_INPUT_DIGITAL) {
- 			status = connector_status_connected;
- 			intel_hdmi->has_hdmi_sink = drm_detect_hdmi_monitor(edid);
-+			intel_hdmi->has_audio = drm_detect_monitor_audio(edid);
- 		}
- 		connector->display_info.raw_edid = NULL;
- 		kfree(edid);
- 	}
- 
-+	if (status == connector_status_connected) {
-+		if (intel_hdmi->force_audio)
-+			intel_hdmi->has_audio = intel_hdmi->force_audio > 0;
-+	}
-+
- 	return status;
- }
- 
- static int intel_hdmi_get_modes(struct drm_connector *connector)
- {
--	struct drm_encoder *encoder = intel_attached_encoder(connector);
--	struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder);
-+	struct intel_hdmi *intel_hdmi = intel_attached_hdmi(connector);
-+	struct drm_i915_private *dev_priv = connector->dev->dev_private;
- 
- 	/* We should parse the EDID data and find out if it's an HDMI sink so
- 	 * we can send audio to it.
- 	 */
- 
--	return intel_ddc_get_modes(connector, intel_hdmi->base.ddc_bus);
-+	return intel_ddc_get_modes(connector,
-+				   &dev_priv->gmbus[intel_hdmi->ddc_bus].adapter);
-+}
-+
-+static int
-+intel_hdmi_set_property(struct drm_connector *connector,
-+		      struct drm_property *property,
-+		      uint64_t val)
-+{
-+	struct intel_hdmi *intel_hdmi = intel_attached_hdmi(connector);
-+	int ret;
-+
-+	ret = drm_connector_property_set_value(connector, property, val);
-+	if (ret)
-+		return ret;
-+
-+	if (property == intel_hdmi->force_audio_property) {
-+		if (val == intel_hdmi->force_audio)
-+			return 0;
-+
-+		intel_hdmi->force_audio = val;
-+
-+		if (val > 0 && intel_hdmi->has_audio)
-+			return 0;
-+		if (val < 0 && !intel_hdmi->has_audio)
-+			return 0;
-+
-+		intel_hdmi->has_audio = val > 0;
-+		goto done;
-+	}
-+
-+	return -EINVAL;
-+
-+done:
-+	if (intel_hdmi->base.base.crtc) {
-+		struct drm_crtc *crtc = intel_hdmi->base.base.crtc;
-+		drm_crtc_helper_set_mode(crtc, &crtc->mode,
-+					 crtc->x, crtc->y,
-+					 crtc->fb);
-+	}
-+
-+	return 0;
- }
- 
- static void intel_hdmi_destroy(struct drm_connector *connector)
-@@ -192,19 +310,34 @@ static const struct drm_connector_funcs intel_hdmi_connector_funcs = {
- 	.dpms = drm_helper_connector_dpms,
- 	.detect = intel_hdmi_detect,
- 	.fill_modes = drm_helper_probe_single_connector_modes,
-+	.set_property = intel_hdmi_set_property,
- 	.destroy = intel_hdmi_destroy,
- };
- 
- static const struct drm_connector_helper_funcs intel_hdmi_connector_helper_funcs = {
- 	.get_modes = intel_hdmi_get_modes,
- 	.mode_valid = intel_hdmi_mode_valid,
--	.best_encoder = intel_attached_encoder,
-+	.best_encoder = intel_best_encoder,
- };
- 
- static const struct drm_encoder_funcs intel_hdmi_enc_funcs = {
- 	.destroy = intel_encoder_destroy,
- };
- 
-+static void
-+intel_hdmi_add_properties(struct intel_hdmi *intel_hdmi, struct drm_connector *connector)
-+{
-+	struct drm_device *dev = connector->dev;
-+
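-+	/* "force_audio" is a range property: -1 forces audio off, 0 follows EDID detection, 1 forces it on. */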
-+	intel_hdmi->force_audio_property =
-+		drm_property_create(dev, DRM_MODE_PROP_RANGE, "force_audio", 2);
-+	if (intel_hdmi->force_audio_property) {
-+		intel_hdmi->force_audio_property->values[0] = -1;
-+		intel_hdmi->force_audio_property->values[1] = 1;
-+		drm_connector_attach_property(connector, intel_hdmi->force_audio_property, 0);
-+	}
-+}
-+
- void intel_hdmi_init(struct drm_device *dev, int sdvox_reg)
- {
- 	struct drm_i915_private *dev_priv = dev->dev_private;
-@@ -224,6 +357,9 @@ void intel_hdmi_init(struct drm_device *dev, int sdvox_reg)
- 	}
- 
- 	intel_encoder = &intel_hdmi->base;
-+	drm_encoder_init(dev, &intel_encoder->base, &intel_hdmi_enc_funcs,
-+			 DRM_MODE_ENCODER_TMDS);
-+
- 	connector = &intel_connector->base;
- 	drm_connector_init(dev, connector, &intel_hdmi_connector_funcs,
- 			   DRM_MODE_CONNECTOR_HDMIA);
-@@ -239,39 +375,33 @@ void intel_hdmi_init(struct drm_device *dev, int sdvox_reg)
- 	/* Set up the DDC bus. */
- 	if (sdvox_reg == SDVOB) {
- 		intel_encoder->clone_mask = (1 << INTEL_HDMIB_CLONE_BIT);
--		intel_encoder->ddc_bus = intel_i2c_create(dev, GPIOE, "HDMIB");
-+		intel_hdmi->ddc_bus = GMBUS_PORT_DPB;
- 		dev_priv->hotplug_supported_mask |= HDMIB_HOTPLUG_INT_STATUS;
- 	} else if (sdvox_reg == SDVOC) {
- 		intel_encoder->clone_mask = (1 << INTEL_HDMIC_CLONE_BIT);
--		intel_encoder->ddc_bus = intel_i2c_create(dev, GPIOD, "HDMIC");
-+		intel_hdmi->ddc_bus = GMBUS_PORT_DPC;
- 		dev_priv->hotplug_supported_mask |= HDMIC_HOTPLUG_INT_STATUS;
- 	} else if (sdvox_reg == HDMIB) {
- 		intel_encoder->clone_mask = (1 << INTEL_HDMID_CLONE_BIT);
--		intel_encoder->ddc_bus = intel_i2c_create(dev, PCH_GPIOE,
--								"HDMIB");
-+		intel_hdmi->ddc_bus = GMBUS_PORT_DPB;
- 		dev_priv->hotplug_supported_mask |= HDMIB_HOTPLUG_INT_STATUS;
- 	} else if (sdvox_reg == HDMIC) {
- 		intel_encoder->clone_mask = (1 << INTEL_HDMIE_CLONE_BIT);
--		intel_encoder->ddc_bus = intel_i2c_create(dev, PCH_GPIOD,
--								"HDMIC");
-+		intel_hdmi->ddc_bus = GMBUS_PORT_DPC;
- 		dev_priv->hotplug_supported_mask |= HDMIC_HOTPLUG_INT_STATUS;
- 	} else if (sdvox_reg == HDMID) {
- 		intel_encoder->clone_mask = (1 << INTEL_HDMIF_CLONE_BIT);
--		intel_encoder->ddc_bus = intel_i2c_create(dev, PCH_GPIOF,
--								"HDMID");
-+		intel_hdmi->ddc_bus = GMBUS_PORT_DPD;
- 		dev_priv->hotplug_supported_mask |= HDMID_HOTPLUG_INT_STATUS;
- 	}
--	if (!intel_encoder->ddc_bus)
--		goto err_connector;
- 
- 	intel_hdmi->sdvox_reg = sdvox_reg;
- 
--	drm_encoder_init(dev, &intel_encoder->enc, &intel_hdmi_enc_funcs,
--			 DRM_MODE_ENCODER_TMDS);
--	drm_encoder_helper_add(&intel_encoder->enc, &intel_hdmi_helper_funcs);
-+	drm_encoder_helper_add(&intel_encoder->base, &intel_hdmi_helper_funcs);
-+
-+	intel_hdmi_add_properties(intel_hdmi, connector);
- 
--	drm_mode_connector_attach_encoder(&intel_connector->base,
--					  &intel_encoder->enc);
-+	intel_connector_attach_encoder(intel_connector, intel_encoder);
- 	drm_sysfs_connector_add(connector);
- 
- 	/* For G4X desktop chip, PEG_BAND_GAP_DATA 3:0 must first be written
-@@ -282,13 +412,4 @@ void intel_hdmi_init(struct drm_device *dev, int sdvox_reg)
- 		u32 temp = I915_READ(PEG_BAND_GAP_DATA);
- 		I915_WRITE(PEG_BAND_GAP_DATA, (temp & ~0xf) | 0xd);
- 	}
--
--	return;
--
--err_connector:
--	drm_connector_cleanup(connector);
--	kfree(intel_hdmi);
--	kfree(intel_connector);
--
--	return;
- }
-diff --git a/drivers/gpu/drm/i915/intel_i2c.c b/drivers/gpu/drm/i915/intel_i2c.c
-index c2649c7..2be4f72 100644
---- a/drivers/gpu/drm/i915/intel_i2c.c
-+++ b/drivers/gpu/drm/i915/intel_i2c.c
-@@ -1,6 +1,6 @@
- /*
-  * Copyright (c) 2006 Dave Airlie <airlied@linux.ie>
-- * Copyright © 2006-2008 Intel Corporation
-+ * Copyright © 2006-2008,2010 Intel Corporation
-  *   Jesse Barnes <jesse.barnes@intel.com>
-  *
-  * Permission is hereby granted, free of charge, to any person obtaining a
-@@ -24,10 +24,9 @@
-  *
-  * Authors:
-  *	Eric Anholt <eric@anholt.net>
-+ *	Chris Wilson <chris@chris-wilson.co.uk>
-  */
- #include <linux/i2c.h>
--#include <linux/slab.h>
--#include <linux/i2c-id.h>
- #include <linux/i2c-algo-bit.h>
- #include "drmP.h"
- #include "drm.h"
-@@ -35,79 +34,106 @@
- #include "i915_drm.h"
- #include "i915_drv.h"
- 
--void intel_i2c_quirk_set(struct drm_device *dev, bool enable)
-+/* Intel GPIO access functions */
-+
-+#define I2C_RISEFALL_TIME 20
-+
-+static inline struct intel_gmbus *
-+to_intel_gmbus(struct i2c_adapter *i2c)
-+{
-+	return container_of(i2c, struct intel_gmbus, adapter);
-+}
-+
-+struct intel_gpio {
-+	struct i2c_adapter adapter;
-+	struct i2c_algo_bit_data algo;
-+	struct drm_i915_private *dev_priv;
-+	u32 reg;
-+};
-+
-+void
-+intel_i2c_reset(struct drm_device *dev)
- {
- 	struct drm_i915_private *dev_priv = dev->dev_private;
-+	if (HAS_PCH_SPLIT(dev))
-+		I915_WRITE(PCH_GMBUS0, 0);
-+	else
-+		I915_WRITE(GMBUS0, 0);
-+}
-+
-+static void intel_i2c_quirk_set(struct drm_i915_private *dev_priv, bool enable)
-+{
-+	u32 val;
- 
- 	/* When using bit bashing for I2C, this bit needs to be set to 1 */
--	if (!IS_PINEVIEW(dev))
-+	if (!IS_PINEVIEW(dev_priv->dev))
- 		return;
-+
-+	val = I915_READ(DSPCLK_GATE_D);
- 	if (enable)
--		I915_WRITE(DSPCLK_GATE_D,
--			I915_READ(DSPCLK_GATE_D) | DPCUNIT_CLOCK_GATE_DISABLE);
-+		val |= DPCUNIT_CLOCK_GATE_DISABLE;
- 	else
--		I915_WRITE(DSPCLK_GATE_D,
--			I915_READ(DSPCLK_GATE_D) & (~DPCUNIT_CLOCK_GATE_DISABLE));
-+		val &= ~DPCUNIT_CLOCK_GATE_DISABLE;
-+	I915_WRITE(DSPCLK_GATE_D, val);
- }
- 
--/*
-- * Intel GPIO access functions
-- */
-+static u32 get_reserved(struct intel_gpio *gpio)
-+{
-+	struct drm_i915_private *dev_priv = gpio->dev_priv;
-+	struct drm_device *dev = dev_priv->dev;
-+	u32 reserved = 0;
- 
--#define I2C_RISEFALL_TIME 20
-+	/* On most chips, these bits must be preserved in software. */
-+	if (!IS_I830(dev) && !IS_845G(dev))
-+		reserved = I915_READ(gpio->reg) & (GPIO_DATA_PULLUP_DISABLE |
-+						   GPIO_CLOCK_PULLUP_DISABLE);
-+
-+	return reserved;
-+}
- 
- static int get_clock(void *data)
- {
--	struct intel_i2c_chan *chan = data;
--	struct drm_i915_private *dev_priv = chan->drm_dev->dev_private;
--	u32 val;
--
--	val = I915_READ(chan->reg);
--	return ((val & GPIO_CLOCK_VAL_IN) != 0);
-+	struct intel_gpio *gpio = data;
-+	struct drm_i915_private *dev_priv = gpio->dev_priv;
-+	u32 reserved = get_reserved(gpio);
-+	I915_WRITE(gpio->reg, reserved | GPIO_CLOCK_DIR_MASK);
-+	I915_WRITE(gpio->reg, reserved);
-+	return (I915_READ(gpio->reg) & GPIO_CLOCK_VAL_IN) != 0;
- }
- 
- static int get_data(void *data)
- {
--	struct intel_i2c_chan *chan = data;
--	struct drm_i915_private *dev_priv = chan->drm_dev->dev_private;
--	u32 val;
--
--	val = I915_READ(chan->reg);
--	return ((val & GPIO_DATA_VAL_IN) != 0);
-+	struct intel_gpio *gpio = data;
-+	struct drm_i915_private *dev_priv = gpio->dev_priv;
-+	u32 reserved = get_reserved(gpio);
-+	I915_WRITE(gpio->reg, reserved | GPIO_DATA_DIR_MASK);
-+	I915_WRITE(gpio->reg, reserved);
-+	return (I915_READ(gpio->reg) & GPIO_DATA_VAL_IN) != 0;
- }
- 
- static void set_clock(void *data, int state_high)
- {
--	struct intel_i2c_chan *chan = data;
--	struct drm_device *dev = chan->drm_dev;
--	struct drm_i915_private *dev_priv = chan->drm_dev->dev_private;
--	u32 reserved = 0, clock_bits;
--
--	/* On most chips, these bits must be preserved in software. */
--	if (!IS_I830(dev) && !IS_845G(dev))
--		reserved = I915_READ(chan->reg) & (GPIO_DATA_PULLUP_DISABLE |
--						   GPIO_CLOCK_PULLUP_DISABLE);
-+	struct intel_gpio *gpio = data;
-+	struct drm_i915_private *dev_priv = gpio->dev_priv;
-+	u32 reserved = get_reserved(gpio);
-+	u32 clock_bits;
- 
- 	if (state_high)
- 		clock_bits = GPIO_CLOCK_DIR_IN | GPIO_CLOCK_DIR_MASK;
- 	else
- 		clock_bits = GPIO_CLOCK_DIR_OUT | GPIO_CLOCK_DIR_MASK |
- 			GPIO_CLOCK_VAL_MASK;
--	I915_WRITE(chan->reg, reserved | clock_bits);
--	udelay(I2C_RISEFALL_TIME); /* wait for the line to change state */
-+
-+	I915_WRITE(gpio->reg, reserved | clock_bits);
-+	POSTING_READ(gpio->reg);
- }
- 
- static void set_data(void *data, int state_high)
- {
--	struct intel_i2c_chan *chan = data;
--	struct drm_device *dev = chan->drm_dev;
--	struct drm_i915_private *dev_priv = chan->drm_dev->dev_private;
--	u32 reserved = 0, data_bits;
--
--	/* On most chips, these bits must be preserved in software. */
--	if (!IS_I830(dev) && !IS_845G(dev))
--		reserved = I915_READ(chan->reg) & (GPIO_DATA_PULLUP_DISABLE |
--						   GPIO_CLOCK_PULLUP_DISABLE);
-+	struct intel_gpio *gpio = data;
-+	struct drm_i915_private *dev_priv = gpio->dev_priv;
-+	u32 reserved = get_reserved(gpio);
-+	u32 data_bits;
- 
- 	if (state_high)
- 		data_bits = GPIO_DATA_DIR_IN | GPIO_DATA_DIR_MASK;
-@@ -115,109 +141,313 @@ static void set_data(void *data, int state_high)
- 		data_bits = GPIO_DATA_DIR_OUT | GPIO_DATA_DIR_MASK |
- 			GPIO_DATA_VAL_MASK;
- 
--	I915_WRITE(chan->reg, reserved | data_bits);
--	udelay(I2C_RISEFALL_TIME); /* wait for the line to change state */
-+	I915_WRITE(gpio->reg, reserved | data_bits);
-+	POSTING_READ(gpio->reg);
- }
- 
--/* Clears the GMBUS setup.  Our driver doesn't make use of the GMBUS I2C
-- * engine, but if the BIOS leaves it enabled, then that can break our use
-- * of the bit-banging I2C interfaces.  This is notably the case with the
-- * Mac Mini in EFI mode.
-- */
--void
--intel_i2c_reset_gmbus(struct drm_device *dev)
-+static struct i2c_adapter *
-+intel_gpio_create(struct drm_i915_private *dev_priv, u32 pin)
- {
--	struct drm_i915_private *dev_priv = dev->dev_private;
-+	static const int map_pin_to_reg[] = {
-+		0,
-+		GPIOB,
-+		GPIOA,
-+		GPIOC,
-+		GPIOD,
-+		GPIOE,
-+		0,
-+		GPIOF,
-+	};
-+	struct intel_gpio *gpio;
- 
--	if (HAS_PCH_SPLIT(dev)) {
--		I915_WRITE(PCH_GMBUS0, 0);
--	} else {
--		I915_WRITE(GMBUS0, 0);
-+	if (pin < 1 || pin > 7)
-+		return NULL;
-+
-+	gpio = kzalloc(sizeof(struct intel_gpio), GFP_KERNEL);
-+	if (gpio == NULL)
-+		return NULL;
-+
-+	gpio->reg = map_pin_to_reg[pin];
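-+	/* On PCH platforms the GPIO registers live at a constant offset from their pre-PCH locations. */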
-+	if (HAS_PCH_SPLIT(dev_priv->dev))
-+		gpio->reg += PCH_GPIOA - GPIOA;
-+	gpio->dev_priv = dev_priv;
-+
-+	snprintf(gpio->adapter.name, I2C_NAME_SIZE, "GPIO%c", "?BACDEF?"[pin]);
-+	gpio->adapter.owner = THIS_MODULE;
-+	gpio->adapter.algo_data	= &gpio->algo;
-+	gpio->adapter.dev.parent = &dev_priv->dev->pdev->dev;
-+	gpio->algo.setsda = set_data;
-+	gpio->algo.setscl = set_clock;
-+	gpio->algo.getsda = get_data;
-+	gpio->algo.getscl = get_clock;
-+	gpio->algo.udelay = I2C_RISEFALL_TIME;
-+	gpio->algo.timeout = usecs_to_jiffies(2200);
-+	gpio->algo.data = gpio;
-+
-+	if (i2c_bit_add_bus(&gpio->adapter))
-+		goto out_free;
-+
-+	return &gpio->adapter;
-+
-+out_free:
-+	kfree(gpio);
-+	return NULL;
-+}
-+
-+static int
-+intel_i2c_quirk_xfer(struct drm_i915_private *dev_priv,
-+		     struct i2c_adapter *adapter,
-+		     struct i2c_msg *msgs,
-+		     int num)
-+{
-+	struct intel_gpio *gpio = container_of(adapter,
-+					       struct intel_gpio,
-+					       adapter);
-+	int ret;
-+
-+	intel_i2c_reset(dev_priv->dev);
-+
-+	intel_i2c_quirk_set(dev_priv, true);
-+	set_data(gpio, 1);
-+	set_clock(gpio, 1);
-+	udelay(I2C_RISEFALL_TIME);
-+
-+	ret = adapter->algo->master_xfer(adapter, msgs, num);
-+
-+	set_data(gpio, 1);
-+	set_clock(gpio, 1);
-+	intel_i2c_quirk_set(dev_priv, false);
-+
-+	return ret;
-+}
-+
-+static int
-+gmbus_xfer(struct i2c_adapter *adapter,
-+	   struct i2c_msg *msgs,
-+	   int num)
-+{
-+	struct intel_gmbus *bus = container_of(adapter,
-+					       struct intel_gmbus,
-+					       adapter);
-+	struct drm_i915_private *dev_priv = adapter->algo_data;
-+	int i, reg_offset;
-+
-+	if (bus->force_bit)
-+		return intel_i2c_quirk_xfer(dev_priv,
-+					    bus->force_bit, msgs, num);
-+
-+	reg_offset = HAS_PCH_SPLIT(dev_priv->dev) ? PCH_GMBUS0 - GMBUS0 : 0;
-+
-+	I915_WRITE(GMBUS0 + reg_offset, bus->reg0);
-+
-+	for (i = 0; i < num; i++) {
-+		u16 len = msgs[i].len;
-+		u8 *buf = msgs[i].buf;
-+
-+		if (msgs[i].flags & I2C_M_RD) {
-+			I915_WRITE(GMBUS1 + reg_offset,
-+				   GMBUS_CYCLE_WAIT | (i + 1 == num ? GMBUS_CYCLE_STOP : 0) |
-+				   (len << GMBUS_BYTE_COUNT_SHIFT) |
-+				   (msgs[i].addr << GMBUS_SLAVE_ADDR_SHIFT) |
-+				   GMBUS_SLAVE_READ | GMBUS_SW_RDY);
-+			POSTING_READ(GMBUS2+reg_offset);
-+			do {
-+				u32 val, loop = 0;
-+
-+				if (wait_for(I915_READ(GMBUS2 + reg_offset) & (GMBUS_SATOER | GMBUS_HW_RDY), 50))
-+					goto timeout;
-+				if (I915_READ(GMBUS2 + reg_offset) & GMBUS_SATOER)
-+					return 0;
-+
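-+				/* GMBUS3 yields up to four bytes per ready cycle; the earliest byte is in the low bits. */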
-+				val = I915_READ(GMBUS3 + reg_offset);
-+				do {
-+					*buf++ = val & 0xff;
-+					val >>= 8;
-+				} while (--len && ++loop < 4);
-+			} while (len);
-+		} else {
-+			u32 val, loop;
-+
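-+			/* Pack up to four bytes into GMBUS3 before starting the write cycle. */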
-+			val = loop = 0;
-+			do {
-+				val |= *buf++ << (8 * loop);
-+			} while (--len && ++loop < 4);
-+
-+			I915_WRITE(GMBUS3 + reg_offset, val);
-+			I915_WRITE(GMBUS1 + reg_offset,
-+				   (i + 1 == num ? GMBUS_CYCLE_STOP : GMBUS_CYCLE_WAIT) |
-+				   (msgs[i].len << GMBUS_BYTE_COUNT_SHIFT) |
-+				   (msgs[i].addr << GMBUS_SLAVE_ADDR_SHIFT) |
-+				   GMBUS_SLAVE_WRITE | GMBUS_SW_RDY);
-+			POSTING_READ(GMBUS2+reg_offset);
-+
-+			while (len) {
-+				if (wait_for(I915_READ(GMBUS2 + reg_offset) & (GMBUS_SATOER | GMBUS_HW_RDY), 50))
-+					goto timeout;
-+				if (I915_READ(GMBUS2 + reg_offset) & GMBUS_SATOER)
-+					return 0;
-+
-+				val = loop = 0;
-+				do {
-+					val |= *buf++ << (8 * loop);
-+				} while (--len && ++loop < 4);
-+
-+				I915_WRITE(GMBUS3 + reg_offset, val);
-+				POSTING_READ(GMBUS2+reg_offset);
-+			}
-+		}
-+
-+		if (i + 1 < num && wait_for(I915_READ(GMBUS2 + reg_offset) & (GMBUS_SATOER | GMBUS_HW_WAIT_PHASE), 50))
-+			goto timeout;
-+		if (I915_READ(GMBUS2 + reg_offset) & GMBUS_SATOER)
-+			return 0;
- 	}
-+
-+	return num;
-+
-+timeout:
-+	DRM_INFO("GMBUS timed out, falling back to bit banging on pin %d [%s]\n",
-+		 bus->reg0 & 0xff, bus->adapter.name);
-+	/* Hardware may not support GMBUS over these pins? Try GPIO bitbanging instead. */
-+	bus->force_bit = intel_gpio_create(dev_priv, bus->reg0 & 0xff);
-+	if (!bus->force_bit)
-+		return -ENOMEM;
-+
-+	return intel_i2c_quirk_xfer(dev_priv, bus->force_bit, msgs, num);
- }
- 
-+static u32 gmbus_func(struct i2c_adapter *adapter)
-+{
-+	struct intel_gmbus *bus = container_of(adapter,
-+					       struct intel_gmbus,
-+					       adapter);
-+
-+	if (bus->force_bit)
-+		bus->force_bit->algo->functionality(bus->force_bit);
-+
-+	return (I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL |
-+		/* I2C_FUNC_10BIT_ADDR | */
-+		I2C_FUNC_SMBUS_READ_BLOCK_DATA |
-+		I2C_FUNC_SMBUS_BLOCK_PROC_CALL);
-+}
-+
-+static const struct i2c_algorithm gmbus_algorithm = {
-+	.master_xfer	= gmbus_xfer,
-+	.functionality	= gmbus_func
-+};
-+
- /**
-- * intel_i2c_create - instantiate an Intel i2c bus using the specified GPIO reg
-+ * intel_gmbus_setup - instantiate all Intel i2c GMBuses
-  * @dev: DRM device
-- * @output: driver specific output device
-- * @reg: GPIO reg to use
-- * @name: name for this bus
-- * @slave_addr: slave address (if fixed)
-- *
-- * Creates and registers a new i2c bus with the Linux i2c layer, for use
-- * in output probing and control (e.g. DDC or SDVO control functions).
-- *
-- * Possible values for @reg include:
-- *   %GPIOA
-- *   %GPIOB
-- *   %GPIOC
-- *   %GPIOD
-- *   %GPIOE
-- *   %GPIOF
-- *   %GPIOG
-- *   %GPIOH
-- * see PRM for details on how these different busses are used.
-  */
--struct i2c_adapter *intel_i2c_create(struct drm_device *dev, const u32 reg,
--				     const char *name)
-+int intel_setup_gmbus(struct drm_device *dev)
- {
--	struct intel_i2c_chan *chan;
-+	static const char *names[GMBUS_NUM_PORTS] = {
-+		"disabled",
-+		"ssc",
-+		"vga",
-+		"panel",
-+		"dpc",
-+		"dpb",
-+		"reserved",
-+		"dpd",
-+	};
-+	struct drm_i915_private *dev_priv = dev->dev_private;
-+	int ret, i;
- 
--	chan = kzalloc(sizeof(struct intel_i2c_chan), GFP_KERNEL);
--	if (!chan)
--		goto out_free;
-+	dev_priv->gmbus = kcalloc(sizeof(struct intel_gmbus), GMBUS_NUM_PORTS,
-+				  GFP_KERNEL);
-+	if (dev_priv->gmbus == NULL)
-+		return -ENOMEM;
- 
--	chan->drm_dev = dev;
--	chan->reg = reg;
--	snprintf(chan->adapter.name, I2C_NAME_SIZE, "intel drm %s", name);
--	chan->adapter.owner = THIS_MODULE;
--	chan->adapter.algo_data	= &chan->algo;
--	chan->adapter.dev.parent = &dev->pdev->dev;
--	chan->algo.setsda = set_data;
--	chan->algo.setscl = set_clock;
--	chan->algo.getsda = get_data;
--	chan->algo.getscl = get_clock;
--	chan->algo.udelay = 20;
--	chan->algo.timeout = usecs_to_jiffies(2200);
--	chan->algo.data = chan;
--
--	i2c_set_adapdata(&chan->adapter, chan);
--
--	if(i2c_bit_add_bus(&chan->adapter))
--		goto out_free;
-+	for (i = 0; i < GMBUS_NUM_PORTS; i++) {
-+		struct intel_gmbus *bus = &dev_priv->gmbus[i];
- 
--	intel_i2c_reset_gmbus(dev);
-+		bus->adapter.owner = THIS_MODULE;
-+		bus->adapter.class = I2C_CLASS_DDC;
-+		snprintf(bus->adapter.name,
-+			 I2C_NAME_SIZE,
-+			 "gmbus %s",
-+			 names[i]);
- 
--	/* JJJ:  raise SCL and SDA? */
--	intel_i2c_quirk_set(dev, true);
--	set_data(chan, 1);
--	set_clock(chan, 1);
--	intel_i2c_quirk_set(dev, false);
--	udelay(20);
-+		bus->adapter.dev.parent = &dev->pdev->dev;
-+		bus->adapter.algo_data	= dev_priv;
- 
--	return &chan->adapter;
-+		bus->adapter.algo = &gmbus_algorithm;
-+		ret = i2c_add_adapter(&bus->adapter);
-+		if (ret)
-+			goto err;
- 
--out_free:
--	kfree(chan);
--	return NULL;
-+		/* By default use a conservative clock rate */
-+		bus->reg0 = i | GMBUS_RATE_100KHZ;
-+
-+		/* XXX force bit banging until GMBUS is fully debugged */
-+		bus->force_bit = intel_gpio_create(dev_priv, i);
-+	}
-+
-+	intel_i2c_reset(dev_priv->dev);
-+
-+	return 0;
-+
-+err:
-+	while (--i) {
-+		struct intel_gmbus *bus = &dev_priv->gmbus[i];
-+		i2c_del_adapter(&bus->adapter);
-+	}
-+	kfree(dev_priv->gmbus);
-+	dev_priv->gmbus = NULL;
-+	return ret;
- }
- 
--/**
-- * intel_i2c_destroy - unregister and free i2c bus resources
-- * @output: channel to free
-- *
-- * Unregister the adapter from the i2c layer, then free the structure.
-- */
--void intel_i2c_destroy(struct i2c_adapter *adapter)
-+void intel_gmbus_set_speed(struct i2c_adapter *adapter, int speed)
-+{
-+	struct intel_gmbus *bus = to_intel_gmbus(adapter);
-+
-+	/* speed:
-+	 * 0x0 = 100 KHz
-+	 * 0x1 = 50 KHz
-+	 * 0x2 = 400 KHz
-+	 * 0x3 = 1000 KHz
-+	 */
-+	bus->reg0 = (bus->reg0 & ~(0x3 << 8)) | (speed << 8);
-+}
-+
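-+/* Switch a GMBUS port between the hardware engine and GPIO bit-banging. */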
-+void intel_gmbus_force_bit(struct i2c_adapter *adapter, bool force_bit)
-+{
-+	struct intel_gmbus *bus = to_intel_gmbus(adapter);
-+
-+	if (force_bit) {
-+		if (bus->force_bit == NULL) {
-+			struct drm_i915_private *dev_priv = adapter->algo_data;
-+			bus->force_bit = intel_gpio_create(dev_priv,
-+							   bus->reg0 & 0xff);
-+		}
-+	} else {
-+		if (bus->force_bit) {
-+			i2c_del_adapter(bus->force_bit);
-+			kfree(bus->force_bit);
-+			bus->force_bit = NULL;
-+		}
-+	}
-+}
-+
-+void intel_teardown_gmbus(struct drm_device *dev)
- {
--	struct intel_i2c_chan *chan;
-+	struct drm_i915_private *dev_priv = dev->dev_private;
-+	int i;
- 
--	if (!adapter)
-+	if (dev_priv->gmbus == NULL)
- 		return;
- 
--	chan = container_of(adapter,
--			    struct intel_i2c_chan,
--			    adapter);
--	i2c_del_adapter(&chan->adapter);
--	kfree(chan);
-+	for (i = 0; i < GMBUS_NUM_PORTS; i++) {
-+		struct intel_gmbus *bus = &dev_priv->gmbus[i];
-+		if (bus->force_bit) {
-+			i2c_del_adapter(bus->force_bit);
-+			kfree(bus->force_bit);
-+		}
-+		i2c_del_adapter(&bus->adapter);
-+	}
-+
-+	kfree(dev_priv->gmbus);
-+	dev_priv->gmbus = NULL;
- }
-diff --git a/drivers/gpu/drm/i915/intel_lvds.c b/drivers/gpu/drm/i915/intel_lvds.c
-index 6ec39a8..4324a32 100644
---- a/drivers/gpu/drm/i915/intel_lvds.c
-+++ b/drivers/gpu/drm/i915/intel_lvds.c
-@@ -43,102 +43,76 @@
- /* Private structure for the integrated LVDS support */
- struct intel_lvds {
- 	struct intel_encoder base;
-+
-+	struct edid *edid;
-+
- 	int fitting_mode;
- 	u32 pfit_control;
- 	u32 pfit_pgm_ratios;
-+	bool pfit_dirty;
-+
-+	struct drm_display_mode *fixed_mode;
- };
- 
--static struct intel_lvds *enc_to_intel_lvds(struct drm_encoder *encoder)
-+static struct intel_lvds *to_intel_lvds(struct drm_encoder *encoder)
- {
--	return container_of(enc_to_intel_encoder(encoder), struct intel_lvds, base);
-+	return container_of(encoder, struct intel_lvds, base.base);
- }
- 
--/**
-- * Sets the backlight level.
-- *
-- * \param level backlight level, from 0 to intel_lvds_get_max_backlight().
-- */
--static void intel_lvds_set_backlight(struct drm_device *dev, int level)
-+static struct intel_lvds *intel_attached_lvds(struct drm_connector *connector)
- {
--	struct drm_i915_private *dev_priv = dev->dev_private;
--	u32 blc_pwm_ctl, reg;
--
--	if (HAS_PCH_SPLIT(dev))
--		reg = BLC_PWM_CPU_CTL;
--	else
--		reg = BLC_PWM_CTL;
--
--	blc_pwm_ctl = I915_READ(reg) & ~BACKLIGHT_DUTY_CYCLE_MASK;
--	I915_WRITE(reg, (blc_pwm_ctl |
--				 (level << BACKLIGHT_DUTY_CYCLE_SHIFT)));
--}
--
--/**
-- * Returns the maximum level of the backlight duty cycle field.
-- */
--static u32 intel_lvds_get_max_backlight(struct drm_device *dev)
--{
--	struct drm_i915_private *dev_priv = dev->dev_private;
--	u32 reg;
--
--	if (HAS_PCH_SPLIT(dev))
--		reg = BLC_PWM_PCH_CTL2;
--	else
--		reg = BLC_PWM_CTL;
--
--	return ((I915_READ(reg) & BACKLIGHT_MODULATION_FREQ_MASK) >>
--		BACKLIGHT_MODULATION_FREQ_SHIFT) * 2;
-+	return container_of(intel_attached_encoder(connector),
-+			    struct intel_lvds, base);
- }
- 
- /**
-  * Sets the power state for the panel.
-  */
--static void intel_lvds_set_power(struct drm_device *dev, bool on)
-+static void intel_lvds_set_power(struct intel_lvds *intel_lvds, bool on)
- {
-+	struct drm_device *dev = intel_lvds->base.base.dev;
- 	struct drm_i915_private *dev_priv = dev->dev_private;
--	u32 ctl_reg, status_reg, lvds_reg;
-+	u32 ctl_reg, lvds_reg;
- 
- 	if (HAS_PCH_SPLIT(dev)) {
- 		ctl_reg = PCH_PP_CONTROL;
--		status_reg = PCH_PP_STATUS;
- 		lvds_reg = PCH_LVDS;
- 	} else {
- 		ctl_reg = PP_CONTROL;
--		status_reg = PP_STATUS;
- 		lvds_reg = LVDS;
- 	}
- 
- 	if (on) {
- 		I915_WRITE(lvds_reg, I915_READ(lvds_reg) | LVDS_PORT_EN);
--		POSTING_READ(lvds_reg);
--
--		I915_WRITE(ctl_reg, I915_READ(ctl_reg) |
--			   POWER_TARGET_ON);
--		if (wait_for(I915_READ(status_reg) & PP_ON, 1000, 0))
--			DRM_ERROR("timed out waiting to enable LVDS pipe");
--
--		intel_lvds_set_backlight(dev, dev_priv->backlight_duty_cycle);
-+		I915_WRITE(ctl_reg, I915_READ(ctl_reg) | POWER_TARGET_ON);
-+		intel_panel_set_backlight(dev, dev_priv->backlight_level);
- 	} else {
--		intel_lvds_set_backlight(dev, 0);
-+		dev_priv->backlight_level = intel_panel_get_backlight(dev);
- 
--		I915_WRITE(ctl_reg, I915_READ(ctl_reg) &
--			   ~POWER_TARGET_ON);
--		if (wait_for((I915_READ(status_reg) & PP_ON) == 0, 1000, 0))
--			DRM_ERROR("timed out waiting for LVDS pipe to turn off");
-+		intel_panel_set_backlight(dev, 0);
-+		I915_WRITE(ctl_reg, I915_READ(ctl_reg) & ~POWER_TARGET_ON);
-+
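-+		/* The panel fitter can only be reprogrammed while the panel is off, so wait for power-down before clearing it. */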
-+		if (intel_lvds->pfit_control) {
-+			if (wait_for((I915_READ(PP_STATUS) & PP_ON) == 0, 1000))
-+				DRM_ERROR("timed out waiting for panel to power off\n");
-+			I915_WRITE(PFIT_CONTROL, 0);
-+			intel_lvds->pfit_control = 0;
-+			intel_lvds->pfit_dirty = false;
-+		}
- 
- 		I915_WRITE(lvds_reg, I915_READ(lvds_reg) & ~LVDS_PORT_EN);
--		POSTING_READ(lvds_reg);
- 	}
-+	POSTING_READ(lvds_reg);
- }
- 
- static void intel_lvds_dpms(struct drm_encoder *encoder, int mode)
- {
--	struct drm_device *dev = encoder->dev;
-+	struct intel_lvds *intel_lvds = to_intel_lvds(encoder);
- 
- 	if (mode == DRM_MODE_DPMS_ON)
--		intel_lvds_set_power(dev, true);
-+		intel_lvds_set_power(intel_lvds, true);
- 	else
--		intel_lvds_set_power(dev, false);
-+		intel_lvds_set_power(intel_lvds, false);
- 
- 	/* XXX: We never power down the LVDS pairs. */
- }
-@@ -146,16 +120,13 @@ static void intel_lvds_dpms(struct drm_encoder *encoder, int mode)
- static int intel_lvds_mode_valid(struct drm_connector *connector,
- 				 struct drm_display_mode *mode)
- {
--	struct drm_device *dev = connector->dev;
--	struct drm_i915_private *dev_priv = dev->dev_private;
--	struct drm_display_mode *fixed_mode = dev_priv->panel_fixed_mode;
-+	struct intel_lvds *intel_lvds = intel_attached_lvds(connector);
-+	struct drm_display_mode *fixed_mode = intel_lvds->fixed_mode;
- 
--	if (fixed_mode)	{
--		if (mode->hdisplay > fixed_mode->hdisplay)
--			return MODE_PANEL;
--		if (mode->vdisplay > fixed_mode->vdisplay)
--			return MODE_PANEL;
--	}
-+	if (mode->hdisplay > fixed_mode->hdisplay)
-+		return MODE_PANEL;
-+	if (mode->vdisplay > fixed_mode->vdisplay)
-+		return MODE_PANEL;
- 
- 	return MODE_OK;
- }
-@@ -223,12 +194,12 @@ static bool intel_lvds_mode_fixup(struct drm_encoder *encoder,
- 	struct drm_device *dev = encoder->dev;
- 	struct drm_i915_private *dev_priv = dev->dev_private;
- 	struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc);
--	struct intel_lvds *intel_lvds = enc_to_intel_lvds(encoder);
-+	struct intel_lvds *intel_lvds = to_intel_lvds(encoder);
- 	struct drm_encoder *tmp_encoder;
- 	u32 pfit_control = 0, pfit_pgm_ratios = 0, border = 0;
- 
- 	/* Should never happen!! */
--	if (!IS_I965G(dev) && intel_crtc->pipe == 0) {
-+	if (INTEL_INFO(dev)->gen < 4 && intel_crtc->pipe == 0) {
- 		DRM_ERROR("Can't support LVDS on pipe A\n");
- 		return false;
- 	}
-@@ -241,9 +212,6 @@ static bool intel_lvds_mode_fixup(struct drm_encoder *encoder,
- 			return false;
- 		}
- 	}
--	/* If we don't have a panel mode, there is nothing we can do */
--	if (dev_priv->panel_fixed_mode == NULL)
--		return true;
- 
- 	/*
- 	 * We have timings from the BIOS for the panel, put them in
-@@ -251,7 +219,7 @@ static bool intel_lvds_mode_fixup(struct drm_encoder *encoder,
- 	 * with the panel scaling set up to source from the H/VDisplay
- 	 * of the original mode.
- 	 */
--	intel_fixed_panel_mode(dev_priv->panel_fixed_mode, adjusted_mode);
-+	intel_fixed_panel_mode(intel_lvds->fixed_mode, adjusted_mode);
- 
- 	if (HAS_PCH_SPLIT(dev)) {
- 		intel_pch_panel_fitting(dev, intel_lvds->fitting_mode,
-@@ -260,8 +228,8 @@ static bool intel_lvds_mode_fixup(struct drm_encoder *encoder,
- 	}
- 
- 	/* Make sure pre-965s set dither correctly */
--	if (!IS_I965G(dev)) {
--		if (dev_priv->panel_wants_dither || dev_priv->lvds_dither)
-+	if (INTEL_INFO(dev)->gen < 4) {
-+		if (dev_priv->lvds_dither)
- 			pfit_control |= PANEL_8TO6_DITHER_ENABLE;
- 	}
- 
-@@ -271,7 +239,7 @@ static bool intel_lvds_mode_fixup(struct drm_encoder *encoder,
- 		goto out;
- 
- 	/* 965+ wants fuzzy fitting */
--	if (IS_I965G(dev))
-+	if (INTEL_INFO(dev)->gen >= 4)
- 		pfit_control |= ((intel_crtc->pipe << PFIT_PIPE_SHIFT) |
- 				 PFIT_FILTER_FUZZY);
- 
-@@ -297,7 +265,7 @@ static bool intel_lvds_mode_fixup(struct drm_encoder *encoder,
- 
- 	case DRM_MODE_SCALE_ASPECT:
- 		/* Scale but preserve the aspect ratio */
--		if (IS_I965G(dev)) {
-+		if (INTEL_INFO(dev)->gen >= 4) {
- 			u32 scaled_width = adjusted_mode->hdisplay * mode->vdisplay;
- 			u32 scaled_height = mode->hdisplay * adjusted_mode->vdisplay;
- 
-@@ -356,7 +324,7 @@ static bool intel_lvds_mode_fixup(struct drm_encoder *encoder,
- 		 * Fortunately this is all done for us in hw.
- 		 */
- 		pfit_control |= PFIT_ENABLE;
--		if (IS_I965G(dev))
-+		if (INTEL_INFO(dev)->gen >= 4)
- 			pfit_control |= PFIT_SCALING_AUTO;
- 		else
- 			pfit_control |= (VERT_AUTO_SCALE | HORIZ_AUTO_SCALE |
-@@ -369,8 +337,12 @@ static bool intel_lvds_mode_fixup(struct drm_encoder *encoder,
- 	}
- 
- out:
--	intel_lvds->pfit_control = pfit_control;
--	intel_lvds->pfit_pgm_ratios = pfit_pgm_ratios;
-+	if (pfit_control != intel_lvds->pfit_control ||
-+	    pfit_pgm_ratios != intel_lvds->pfit_pgm_ratios) {
-+		intel_lvds->pfit_control = pfit_control;
-+		intel_lvds->pfit_pgm_ratios = pfit_pgm_ratios;
-+		intel_lvds->pfit_dirty = true;
-+	}
- 	dev_priv->lvds_border_bits = border;
- 
- 	/*
-@@ -386,30 +358,60 @@ static void intel_lvds_prepare(struct drm_encoder *encoder)
- {
- 	struct drm_device *dev = encoder->dev;
- 	struct drm_i915_private *dev_priv = dev->dev_private;
--	u32 reg;
--
--	if (HAS_PCH_SPLIT(dev))
--		reg = BLC_PWM_CPU_CTL;
--	else
--		reg = BLC_PWM_CTL;
--
--	dev_priv->saveBLC_PWM_CTL = I915_READ(reg);
--	dev_priv->backlight_duty_cycle = (dev_priv->saveBLC_PWM_CTL &
--				       BACKLIGHT_DUTY_CYCLE_MASK);
-+	struct intel_lvds *intel_lvds = to_intel_lvds(encoder);
-+
-+	dev_priv->backlight_level = intel_panel_get_backlight(dev);
-+
-+	/* We try to do the minimum that is necessary in order to unlock
-+	 * the registers for mode setting.
-+	 *
-+	 * On Ironlake, this is quite simple as we just set the unlock key
-+	 * and ignore all subtleties. (This may cause some issues...)
-+	 *
-+	 * Prior to Ironlake, we must disable the pipe if we want to adjust
-+	 * the panel fitter. However at all other times we can just reset
-+	 * the registers regardless.
-+	 */
- 
--	intel_lvds_set_power(dev, false);
-+	if (HAS_PCH_SPLIT(dev)) {
-+		I915_WRITE(PCH_PP_CONTROL,
-+			   I915_READ(PCH_PP_CONTROL) | PANEL_UNLOCK_REGS);
-+	} else if (intel_lvds->pfit_dirty) {
-+		I915_WRITE(PP_CONTROL,
-+			   (I915_READ(PP_CONTROL) | PANEL_UNLOCK_REGS)
-+			   & ~POWER_TARGET_ON);
-+	} else {
-+		I915_WRITE(PP_CONTROL,
-+			   I915_READ(PP_CONTROL) | PANEL_UNLOCK_REGS);
-+	}
- }
- 
--static void intel_lvds_commit( struct drm_encoder *encoder)
-+static void intel_lvds_commit(struct drm_encoder *encoder)
- {
- 	struct drm_device *dev = encoder->dev;
- 	struct drm_i915_private *dev_priv = dev->dev_private;
-+	struct intel_lvds *intel_lvds = to_intel_lvds(encoder);
- 
--	if (dev_priv->backlight_duty_cycle == 0)
--		dev_priv->backlight_duty_cycle =
--			intel_lvds_get_max_backlight(dev);
-+	if (dev_priv->backlight_level == 0)
-+		dev_priv->backlight_level = intel_panel_get_max_backlight(dev);
- 
--	intel_lvds_set_power(dev, true);
-+	/* Undo any unlocking done in prepare to prevent accidental
-+	 * adjustment of the registers.
-+	 */
-+	if (HAS_PCH_SPLIT(dev)) {
-+		u32 val = I915_READ(PCH_PP_CONTROL);
-+		if ((val & PANEL_UNLOCK_REGS) == PANEL_UNLOCK_REGS)
-+			I915_WRITE(PCH_PP_CONTROL, val & 0x3);
-+	} else {
-+		u32 val = I915_READ(PP_CONTROL);
-+		if ((val & PANEL_UNLOCK_REGS) == PANEL_UNLOCK_REGS)
-+			I915_WRITE(PP_CONTROL, val & 0x3);
-+	}
-+
-+	/* Always do a full power on as we do not know what state
-+	 * we were left in.
-+	 */
-+	intel_lvds_set_power(intel_lvds, true);
- }
- 
- static void intel_lvds_mode_set(struct drm_encoder *encoder,
-@@ -418,7 +420,7 @@ static void intel_lvds_mode_set(struct drm_encoder *encoder,
- {
- 	struct drm_device *dev = encoder->dev;
- 	struct drm_i915_private *dev_priv = dev->dev_private;
--	struct intel_lvds *intel_lvds = enc_to_intel_lvds(encoder);
-+	struct intel_lvds *intel_lvds = to_intel_lvds(encoder);
- 
- 	/*
- 	 * The LVDS pin pair will already have been turned on in the
-@@ -429,13 +431,23 @@ static void intel_lvds_mode_set(struct drm_encoder *encoder,
- 	if (HAS_PCH_SPLIT(dev))
- 		return;
- 
-+	if (!intel_lvds->pfit_dirty)
-+		return;
-+
- 	/*
- 	 * Enable automatic panel scaling so that non-native modes fill the
- 	 * screen.  Should be enabled before the pipe is enabled, according to
- 	 * register description and PRM.
- 	 */
-+	DRM_DEBUG_KMS("applying panel-fitter: %x, %x\n",
-+		      intel_lvds->pfit_control,
-+		      intel_lvds->pfit_pgm_ratios);
-+	if (wait_for((I915_READ(PP_STATUS) & PP_ON) == 0, 1000))
-+		DRM_ERROR("timed out waiting for panel to power off\n");
-+
- 	I915_WRITE(PFIT_PGM_RATIOS, intel_lvds->pfit_pgm_ratios);
- 	I915_WRITE(PFIT_CONTROL, intel_lvds->pfit_control);
-+	intel_lvds->pfit_dirty = false;
- }
- 
- /**
-@@ -465,38 +477,19 @@ intel_lvds_detect(struct drm_connector *connector, bool force)
-  */
- static int intel_lvds_get_modes(struct drm_connector *connector)
- {
-+	struct intel_lvds *intel_lvds = intel_attached_lvds(connector);
- 	struct drm_device *dev = connector->dev;
--	struct drm_encoder *encoder = intel_attached_encoder(connector);
--	struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder);
--	struct drm_i915_private *dev_priv = dev->dev_private;
--	int ret = 0;
--
--	if (dev_priv->lvds_edid_good) {
--		ret = intel_ddc_get_modes(connector, intel_encoder->ddc_bus);
--
--		if (ret)
--			return ret;
--	}
-+	struct drm_display_mode *mode;
- 
--	/* Didn't get an EDID, so
--	 * Set wide sync ranges so we get all modes
--	 * handed to valid_mode for checking
--	 */
--	connector->display_info.min_vfreq = 0;
--	connector->display_info.max_vfreq = 200;
--	connector->display_info.min_hfreq = 0;
--	connector->display_info.max_hfreq = 200;
--
--	if (dev_priv->panel_fixed_mode != NULL) {
--		struct drm_display_mode *mode;
--
--		mode = drm_mode_duplicate(dev, dev_priv->panel_fixed_mode);
--		drm_mode_probed_add(connector, mode);
-+	if (intel_lvds->edid)
-+		return drm_add_edid_modes(connector, intel_lvds->edid);
- 
--		return 1;
--	}
-+	mode = drm_mode_duplicate(dev, intel_lvds->fixed_mode);
-+	if (mode == 0)
-+		return 0;
- 
--	return 0;
-+	drm_mode_probed_add(connector, mode);
-+	return 1;
- }
- 
- static int intel_no_modeset_on_lid_dmi_callback(const struct dmi_system_id *id)
-@@ -587,18 +580,17 @@ static int intel_lvds_set_property(struct drm_connector *connector,
- 				   struct drm_property *property,
- 				   uint64_t value)
- {
-+	struct intel_lvds *intel_lvds = intel_attached_lvds(connector);
- 	struct drm_device *dev = connector->dev;
- 
--	if (property == dev->mode_config.scaling_mode_property &&
--				connector->encoder) {
--		struct drm_crtc *crtc = connector->encoder->crtc;
--		struct drm_encoder *encoder = connector->encoder;
--		struct intel_lvds *intel_lvds = enc_to_intel_lvds(encoder);
-+	if (property == dev->mode_config.scaling_mode_property) {
-+		struct drm_crtc *crtc = intel_lvds->base.base.crtc;
- 
- 		if (value == DRM_MODE_SCALE_NONE) {
- 			DRM_DEBUG_KMS("no scaling not supported\n");
--			return 0;
-+			return -EINVAL;
- 		}
-+
- 		if (intel_lvds->fitting_mode == value) {
- 			/* the LVDS scaling property is not changed */
- 			return 0;
-@@ -628,7 +620,7 @@ static const struct drm_encoder_helper_funcs intel_lvds_helper_funcs = {
- static const struct drm_connector_helper_funcs intel_lvds_connector_helper_funcs = {
- 	.get_modes = intel_lvds_get_modes,
- 	.mode_valid = intel_lvds_mode_valid,
--	.best_encoder = intel_attached_encoder,
-+	.best_encoder = intel_best_encoder,
- };
- 
- static const struct drm_connector_funcs intel_lvds_connector_funcs = {
-@@ -726,16 +718,14 @@ static const struct dmi_system_id intel_no_lvds[] = {
-  * Find the reduced downclock for LVDS in EDID.
-  */
- static void intel_find_lvds_downclock(struct drm_device *dev,
--				struct drm_connector *connector)
-+				      struct drm_display_mode *fixed_mode,
-+				      struct drm_connector *connector)
- {
- 	struct drm_i915_private *dev_priv = dev->dev_private;
--	struct drm_display_mode *scan, *panel_fixed_mode;
-+	struct drm_display_mode *scan;
- 	int temp_downclock;
- 
--	panel_fixed_mode = dev_priv->panel_fixed_mode;
--	temp_downclock = panel_fixed_mode->clock;
--
--	mutex_lock(&dev->mode_config.mutex);
-+	temp_downclock = fixed_mode->clock;
- 	list_for_each_entry(scan, &connector->probed_modes, head) {
- 		/*
- 		 * If one mode has the same resolution with the fixed_panel
-@@ -744,14 +734,14 @@ static void intel_find_lvds_downclock(struct drm_device *dev,
- 		 * case we can set the different FPx0/1 to dynamically select
- 		 * between low and high frequency.
- 		 */
--		if (scan->hdisplay == panel_fixed_mode->hdisplay &&
--			scan->hsync_start == panel_fixed_mode->hsync_start &&
--			scan->hsync_end == panel_fixed_mode->hsync_end &&
--			scan->htotal == panel_fixed_mode->htotal &&
--			scan->vdisplay == panel_fixed_mode->vdisplay &&
--			scan->vsync_start == panel_fixed_mode->vsync_start &&
--			scan->vsync_end == panel_fixed_mode->vsync_end &&
--			scan->vtotal == panel_fixed_mode->vtotal) {
-+		if (scan->hdisplay == fixed_mode->hdisplay &&
-+		    scan->hsync_start == fixed_mode->hsync_start &&
-+		    scan->hsync_end == fixed_mode->hsync_end &&
-+		    scan->htotal == fixed_mode->htotal &&
-+		    scan->vdisplay == fixed_mode->vdisplay &&
-+		    scan->vsync_start == fixed_mode->vsync_start &&
-+		    scan->vsync_end == fixed_mode->vsync_end &&
-+		    scan->vtotal == fixed_mode->vtotal) {
- 			if (scan->clock < temp_downclock) {
- 				/*
- 				 * The downclock is already found. But we
-@@ -761,17 +751,14 @@ static void intel_find_lvds_downclock(struct drm_device *dev,
- 			}
- 		}
- 	}
--	mutex_unlock(&dev->mode_config.mutex);
--	if (temp_downclock < panel_fixed_mode->clock &&
--	    i915_lvds_downclock) {
-+	if (temp_downclock < fixed_mode->clock && i915_lvds_downclock) {
- 		/* We found the downclock for LVDS. */
- 		dev_priv->lvds_downclock_avail = 1;
- 		dev_priv->lvds_downclock = temp_downclock;
- 		DRM_DEBUG_KMS("LVDS downclock is found in EDID. "
--				"Normal clock %dKhz, downclock %dKhz\n",
--				panel_fixed_mode->clock, temp_downclock);
-+			      "Normal clock %dKhz, downclock %dKhz\n",
-+			      fixed_mode->clock, temp_downclock);
- 	}
--	return;
- }
- 
- /*
-@@ -780,38 +767,67 @@ static void intel_find_lvds_downclock(struct drm_device *dev,
-  * If it is present, return 1.
-  * If it is not present, return false.
-  * If no child dev is parsed from VBT, it assumes that the LVDS is present.
-- * Note: The addin_offset should also be checked for LVDS panel.
-- * Only when it is non-zero, it is assumed that it is present.
-  */
--static int lvds_is_present_in_vbt(struct drm_device *dev)
-+static bool lvds_is_present_in_vbt(struct drm_device *dev,
-+				   u8 *i2c_pin)
- {
- 	struct drm_i915_private *dev_priv = dev->dev_private;
--	struct child_device_config *p_child;
--	int i, ret;
-+	int i;
- 
- 	if (!dev_priv->child_dev_num)
--		return 1;
-+		return true;
- 
--	ret = 0;
- 	for (i = 0; i < dev_priv->child_dev_num; i++) {
--		p_child = dev_priv->child_dev + i;
--		/*
--		 * If the device type is not LFP, continue.
--		 * If the device type is 0x22, it is also regarded as LFP.
-+		struct child_device_config *child = dev_priv->child_dev + i;
-+
-+		/* If the device type is not LFP, continue.
-+		 * We have to check both the new identifiers as well as the
-+		 * old for compatibility with some BIOSes.
- 		 */
--		if (p_child->device_type != DEVICE_TYPE_INT_LFP &&
--			p_child->device_type != DEVICE_TYPE_LFP)
-+		if (child->device_type != DEVICE_TYPE_INT_LFP &&
-+		    child->device_type != DEVICE_TYPE_LFP)
- 			continue;
- 
--		/* The addin_offset should be checked. Only when it is
--		 * non-zero, it is regarded as present.
-+		if (child->i2c_pin)
-+		    *i2c_pin = child->i2c_pin;
-+
-+		/* However, we cannot trust the BIOS writers to populate
-+		 * the VBT correctly.  Since LVDS requires additional
-+		 * information from AIM blocks, a non-zero addin offset is
-+		 * a good indicator that the LVDS is actually present.
- 		 */
--		if (p_child->addin_offset) {
--			ret = 1;
--			break;
--		}
-+		if (child->addin_offset)
-+			return true;
-+
-+		/* But even then some BIOS writers perform some black magic
-+		 * and instantiate the device without reference to any
-+		 * additional data.  Trust that if the VBT was written into
-+		 * the OpRegion then they have validated the LVDS's existence.
-+		 */
-+		if (dev_priv->opregion.vbt)
-+			return true;
- 	}
--	return ret;
-+
-+	return false;
-+}
-+
-+static bool intel_lvds_ddc_probe(struct drm_device *dev, u8 pin)
-+{
-+	struct drm_i915_private *dev_priv = dev->dev_private;
-+	u8 buf = 0;
-+	struct i2c_msg msgs[] = {
-+		{
-+			.addr = 0xA0,
-+			.flags = 0,
-+			.len = 1,
-+			.buf = &buf,
-+		},
-+	};
-+	struct i2c_adapter *i2c = &dev_priv->gmbus[pin].adapter;
-+	/* XXX this only appears to work when using GMBUS */
-+	if (intel_gmbus_is_forced_bit(i2c))
-+		return true;
-+	return i2c_transfer(i2c, msgs, 1) == 1;
- }
- 
- /**
-@@ -832,13 +848,15 @@ void intel_lvds_init(struct drm_device *dev)
- 	struct drm_display_mode *scan; /* *modes, *bios_mode; */
- 	struct drm_crtc *crtc;
- 	u32 lvds;
--	int pipe, gpio = GPIOC;
-+	int pipe;
-+	u8 pin;
- 
- 	/* Skip init on machines we know falsely report LVDS */
- 	if (dmi_check_system(intel_no_lvds))
- 		return;
- 
--	if (!lvds_is_present_in_vbt(dev)) {
-+	pin = GMBUS_PORT_PANEL;
-+	if (!lvds_is_present_in_vbt(dev, &pin)) {
- 		DRM_DEBUG_KMS("LVDS is not present in VBT\n");
- 		return;
- 	}
-@@ -846,11 +864,15 @@ void intel_lvds_init(struct drm_device *dev)
- 	if (HAS_PCH_SPLIT(dev)) {
- 		if ((I915_READ(PCH_LVDS) & LVDS_DETECTED) == 0)
- 			return;
--		if (dev_priv->edp_support) {
-+		if (dev_priv->edp.support) {
- 			DRM_DEBUG_KMS("disable LVDS for eDP support\n");
- 			return;
- 		}
--		gpio = PCH_GPIOC;
-+	}
-+
-+	if (!intel_lvds_ddc_probe(dev, pin)) {
-+		DRM_DEBUG_KMS("LVDS did not respond to DDC probe\n");
-+		return;
- 	}
- 
- 	intel_lvds = kzalloc(sizeof(struct intel_lvds), GFP_KERNEL);
-@@ -864,16 +886,20 @@ void intel_lvds_init(struct drm_device *dev)
- 		return;
- 	}
- 
-+	if (!HAS_PCH_SPLIT(dev)) {
-+		intel_lvds->pfit_control = I915_READ(PFIT_CONTROL);
-+	}
-+
- 	intel_encoder = &intel_lvds->base;
--	encoder = &intel_encoder->enc;
-+	encoder = &intel_encoder->base;
- 	connector = &intel_connector->base;
- 	drm_connector_init(dev, &intel_connector->base, &intel_lvds_connector_funcs,
- 			   DRM_MODE_CONNECTOR_LVDS);
- 
--	drm_encoder_init(dev, &intel_encoder->enc, &intel_lvds_enc_funcs,
-+	drm_encoder_init(dev, &intel_encoder->base, &intel_lvds_enc_funcs,
- 			 DRM_MODE_ENCODER_LVDS);
- 
--	drm_mode_connector_attach_encoder(&intel_connector->base, &intel_encoder->enc);
-+	intel_connector_attach_encoder(intel_connector, intel_encoder);
- 	intel_encoder->type = INTEL_OUTPUT_LVDS;
- 
- 	intel_encoder->clone_mask = (1 << INTEL_LVDS_CLONE_BIT);
-@@ -904,43 +930,50 @@ void intel_lvds_init(struct drm_device *dev)
- 	 *    if closed, act like it's not there for now
- 	 */
- 
--	/* Set up the DDC bus. */
--	intel_encoder->ddc_bus = intel_i2c_create(dev, gpio, "LVDSDDC_C");
--	if (!intel_encoder->ddc_bus) {
--		dev_printk(KERN_ERR, &dev->pdev->dev, "DDC bus registration "
--			   "failed.\n");
--		goto failed;
--	}
--
- 	/*
- 	 * Attempt to get the fixed panel mode from DDC.  Assume that the
- 	 * preferred mode is the right one.
- 	 */
--	dev_priv->lvds_edid_good = true;
--
--	if (!intel_ddc_get_modes(connector, intel_encoder->ddc_bus))
--		dev_priv->lvds_edid_good = false;
-+	intel_lvds->edid = drm_get_edid(connector,
-+					&dev_priv->gmbus[pin].adapter);
-+	if (intel_lvds->edid) {
-+		if (drm_add_edid_modes(connector,
-+				       intel_lvds->edid)) {
-+			drm_mode_connector_update_edid_property(connector,
-+								intel_lvds->edid);
-+		} else {
-+			kfree(intel_lvds->edid);
-+			intel_lvds->edid = NULL;
-+		}
-+	}
-+	if (!intel_lvds->edid) {
-+		/* Didn't get an EDID, so
-+		 * Set wide sync ranges so we get all modes
-+		 * handed to valid_mode for checking
-+		 */
-+		connector->display_info.min_vfreq = 0;
-+		connector->display_info.max_vfreq = 200;
-+		connector->display_info.min_hfreq = 0;
-+		connector->display_info.max_hfreq = 200;
-+	}
- 
- 	list_for_each_entry(scan, &connector->probed_modes, head) {
--		mutex_lock(&dev->mode_config.mutex);
- 		if (scan->type & DRM_MODE_TYPE_PREFERRED) {
--			dev_priv->panel_fixed_mode =
-+			intel_lvds->fixed_mode =
- 				drm_mode_duplicate(dev, scan);
--			mutex_unlock(&dev->mode_config.mutex);
--			intel_find_lvds_downclock(dev, connector);
-+			intel_find_lvds_downclock(dev,
-+						  intel_lvds->fixed_mode,
-+						  connector);
- 			goto out;
- 		}
--		mutex_unlock(&dev->mode_config.mutex);
- 	}
- 
- 	/* Failed to get EDID, what about VBT? */
- 	if (dev_priv->lfp_lvds_vbt_mode) {
--		mutex_lock(&dev->mode_config.mutex);
--		dev_priv->panel_fixed_mode =
-+		intel_lvds->fixed_mode =
- 			drm_mode_duplicate(dev, dev_priv->lfp_lvds_vbt_mode);
--		mutex_unlock(&dev->mode_config.mutex);
--		if (dev_priv->panel_fixed_mode) {
--			dev_priv->panel_fixed_mode->type |=
-+		if (intel_lvds->fixed_mode) {
-+			intel_lvds->fixed_mode->type |=
- 				DRM_MODE_TYPE_PREFERRED;
- 			goto out;
- 		}
-@@ -958,19 +991,19 @@ void intel_lvds_init(struct drm_device *dev)
- 
- 	lvds = I915_READ(LVDS);
- 	pipe = (lvds & LVDS_PIPEB_SELECT) ? 1 : 0;
--	crtc = intel_get_crtc_from_pipe(dev, pipe);
-+	crtc = intel_get_crtc_for_pipe(dev, pipe);
- 
- 	if (crtc && (lvds & LVDS_PORT_EN)) {
--		dev_priv->panel_fixed_mode = intel_crtc_mode_get(dev, crtc);
--		if (dev_priv->panel_fixed_mode) {
--			dev_priv->panel_fixed_mode->type |=
-+		intel_lvds->fixed_mode = intel_crtc_mode_get(dev, crtc);
-+		if (intel_lvds->fixed_mode) {
-+			intel_lvds->fixed_mode->type |=
- 				DRM_MODE_TYPE_PREFERRED;
- 			goto out;
- 		}
- 	}
- 
- 	/* If we still don't have a mode after all that, give up. */
--	if (!dev_priv->panel_fixed_mode)
-+	if (!intel_lvds->fixed_mode)
- 		goto failed;
- 
- out:
-@@ -997,8 +1030,6 @@ out:
- 
- failed:
- 	DRM_DEBUG_KMS("No LVDS modes found, disabling.\n");
--	if (intel_encoder->ddc_bus)
--		intel_i2c_destroy(intel_encoder->ddc_bus);
- 	drm_connector_cleanup(connector);
- 	drm_encoder_cleanup(encoder);
- 	kfree(intel_lvds);
-diff --git a/drivers/gpu/drm/i915/intel_modes.c b/drivers/gpu/drm/i915/intel_modes.c
-index 4b1fd3d..f70b7cf 100644
---- a/drivers/gpu/drm/i915/intel_modes.c
-+++ b/drivers/gpu/drm/i915/intel_modes.c
-@@ -1,6 +1,6 @@
- /*
-  * Copyright (c) 2007 Dave Airlie <airlied@linux.ie>
-- * Copyright (c) 2007 Intel Corporation
-+ * Copyright (c) 2007, 2010 Intel Corporation
-  *   Jesse Barnes <jesse.barnes@intel.com>
-  *
-  * Permission is hereby granted, free of charge, to any person obtaining a
-@@ -34,11 +34,11 @@
-  * intel_ddc_probe
-  *
-  */
--bool intel_ddc_probe(struct intel_encoder *intel_encoder)
-+bool intel_ddc_probe(struct intel_encoder *intel_encoder, int ddc_bus)
- {
-+	struct drm_i915_private *dev_priv = intel_encoder->base.dev->dev_private;
- 	u8 out_buf[] = { 0x0, 0x0};
- 	u8 buf[2];
--	int ret;
- 	struct i2c_msg msgs[] = {
- 		{
- 			.addr = 0x50,
-@@ -54,13 +54,7 @@ bool intel_ddc_probe(struct intel_encoder *intel_encoder)
- 		}
- 	};
- 
--	intel_i2c_quirk_set(intel_encoder->enc.dev, true);
--	ret = i2c_transfer(intel_encoder->ddc_bus, msgs, 2);
--	intel_i2c_quirk_set(intel_encoder->enc.dev, false);
--	if (ret == 2)
--		return true;
--
--	return false;
-+	return i2c_transfer(&dev_priv->gmbus[ddc_bus].adapter, msgs, 2) == 2;
- }
- 
- /**
-@@ -76,9 +70,7 @@ int intel_ddc_get_modes(struct drm_connector *connector,
- 	struct edid *edid;
- 	int ret = 0;
- 
--	intel_i2c_quirk_set(connector->dev, true);
- 	edid = drm_get_edid(connector, adapter);
--	intel_i2c_quirk_set(connector->dev, false);
- 	if (edid) {
- 		drm_mode_connector_update_edid_property(connector, edid);
- 		ret = drm_add_edid_modes(connector, edid);
-diff --git a/drivers/gpu/drm/i915/intel_opregion.c b/drivers/gpu/drm/i915/intel_opregion.c
-new file mode 100644
-index 0000000..9b0d9a8
---- /dev/null
-+++ b/drivers/gpu/drm/i915/intel_opregion.c
-@@ -0,0 +1,517 @@
-+/*
-+ * Copyright 2008 Intel Corporation <hong.liu@intel.com>
-+ * Copyright 2008 Red Hat <mjg@redhat.com>
-+ *
-+ * Permission is hereby granted, free of charge, to any person obtaining
-+ * a copy of this software and associated documentation files (the
-+ * "Software"), to deal in the Software without restriction, including
-+ * without limitation the rights to use, copy, modify, merge, publish,
-+ * distribute, sub license, and/or sell copies of the Software, and to
-+ * permit persons to whom the Software is furnished to do so, subject to
-+ * the following conditions:
-+ *
-+ * The above copyright notice and this permission notice (including the
-+ * next paragraph) shall be included in all copies or substantial
-+ * portions of the Software.
-+ *
-+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
-+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
-+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
-+ * NON-INFRINGEMENT.  IN NO EVENT SHALL INTEL AND/OR ITS SUPPLIERS BE
-+ * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
-+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
-+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
-+ * SOFTWARE.
-+ *
-+ */
-+
-+#include <linux/acpi.h>
-+#include <acpi/video.h>
-+
-+#include "drmP.h"
-+#include "i915_drm.h"
-+#include "i915_drv.h"
-+#include "intel_drv.h"
-+
-+#define PCI_ASLE 0xe4
-+#define PCI_ASLS 0xfc
-+
-+#define OPREGION_HEADER_OFFSET 0
-+#define OPREGION_ACPI_OFFSET   0x100
-+#define OPREGION_SWSCI_OFFSET  0x200
-+#define OPREGION_ASLE_OFFSET   0x300
-+#define OPREGION_VBT_OFFSET    0x400
-+
-+#define OPREGION_SIGNATURE "IntelGraphicsMem"
-+#define MBOX_ACPI      (1<<0)
-+#define MBOX_SWSCI     (1<<1)
-+#define MBOX_ASLE      (1<<2)
-+
-+struct opregion_header {
-+       u8 signature[16];
-+       u32 size;
-+       u32 opregion_ver;
-+       u8 bios_ver[32];
-+       u8 vbios_ver[16];
-+       u8 driver_ver[16];
-+       u32 mboxes;
-+       u8 reserved[164];
-+} __attribute__((packed));
-+
-+/* OpRegion mailbox #1: public ACPI methods */
-+struct opregion_acpi {
-+       u32 drdy;       /* driver readiness */
-+       u32 csts;       /* notification status */
-+       u32 cevt;       /* current event */
-+       u8 rsvd1[20];
-+       u32 didl[8];    /* supported display devices ID list */
-+       u32 cpdl[8];    /* currently presented display list */
-+       u32 cadl[8];    /* currently active display list */
-+       u32 nadl[8];    /* next active devices list */
-+       u32 aslp;       /* ASL sleep time-out */
-+       u32 tidx;       /* toggle table index */
-+       u32 chpd;       /* current hotplug enable indicator */
-+       u32 clid;       /* current lid state*/
-+       u32 cdck;       /* current docking state */
-+       u32 sxsw;       /* Sx state resume */
-+       u32 evts;       /* ASL supported events */
-+       u32 cnot;       /* current OS notification */
-+       u32 nrdy;       /* driver status */
-+       u8 rsvd2[60];
-+} __attribute__((packed));
-+
-+/* OpRegion mailbox #2: SWSCI */
-+struct opregion_swsci {
-+       u32 scic;       /* SWSCI command|status|data */
-+       u32 parm;       /* command parameters */
-+       u32 dslp;       /* driver sleep time-out */
-+       u8 rsvd[244];
-+} __attribute__((packed));
-+
-+/* OpRegion mailbox #3: ASLE */
-+struct opregion_asle {
-+       u32 ardy;       /* driver readiness */
-+       u32 aslc;       /* ASLE interrupt command */
-+       u32 tche;       /* technology enabled indicator */
-+       u32 alsi;       /* current ALS illuminance reading */
-+       u32 bclp;       /* backlight brightness to set */
-+       u32 pfit;       /* panel fitting state */
-+       u32 cblv;       /* current brightness level */
-+       u16 bclm[20];   /* backlight level duty cycle mapping table */
-+       u32 cpfm;       /* current panel fitting mode */
-+       u32 epfm;       /* enabled panel fitting modes */
-+       u8 plut[74];    /* panel LUT and identifier */
-+       u32 pfmb;       /* PWM freq and min brightness */
-+       u8 rsvd[102];
-+} __attribute__((packed));
-+
-+/* ASLE irq request bits */
-+#define ASLE_SET_ALS_ILLUM     (1 << 0)
-+#define ASLE_SET_BACKLIGHT     (1 << 1)
-+#define ASLE_SET_PFIT          (1 << 2)
-+#define ASLE_SET_PWM_FREQ      (1 << 3)
-+#define ASLE_REQ_MSK           0xf
-+
-+/* response bits of ASLE irq request */
-+#define ASLE_ALS_ILLUM_FAILED	(1<<10)
-+#define ASLE_BACKLIGHT_FAILED	(1<<12)
-+#define ASLE_PFIT_FAILED	(1<<14)
-+#define ASLE_PWM_FREQ_FAILED	(1<<16)
-+
-+/* ASLE backlight brightness to set */
-+#define ASLE_BCLP_VALID                (1<<31)
-+#define ASLE_BCLP_MSK          (~(1<<31))
-+
-+/* ASLE panel fitting request */
-+#define ASLE_PFIT_VALID         (1<<31)
-+#define ASLE_PFIT_CENTER (1<<0)
-+#define ASLE_PFIT_STRETCH_TEXT (1<<1)
-+#define ASLE_PFIT_STRETCH_GFX (1<<2)
-+
-+/* PWM frequency and minimum brightness */
-+#define ASLE_PFMB_BRIGHTNESS_MASK (0xff)
-+#define ASLE_PFMB_BRIGHTNESS_VALID (1<<8)
-+#define ASLE_PFMB_PWM_MASK (0x7ffffe00)
-+#define ASLE_PFMB_PWM_VALID (1<<31)
-+
-+#define ASLE_CBLV_VALID         (1<<31)
-+
-+#define ACPI_OTHER_OUTPUT (0<<8)
-+#define ACPI_VGA_OUTPUT (1<<8)
-+#define ACPI_TV_OUTPUT (2<<8)
-+#define ACPI_DIGITAL_OUTPUT (3<<8)
-+#define ACPI_LVDS_OUTPUT (4<<8)
-+
-+#ifdef CONFIG_ACPI
-+static u32 asle_set_backlight(struct drm_device *dev, u32 bclp)
-+{
-+	struct drm_i915_private *dev_priv = dev->dev_private;
-+	struct opregion_asle *asle = dev_priv->opregion.asle;
-+	u32 max;
-+
-+	if (!(bclp & ASLE_BCLP_VALID))
-+		return ASLE_BACKLIGHT_FAILED;
-+
-+	bclp &= ASLE_BCLP_MSK;
-+	if (bclp > 255)
-+		return ASLE_BACKLIGHT_FAILED;
-+
-+	max = intel_panel_get_max_backlight(dev);
-+	intel_panel_set_backlight(dev, bclp * max / 255);
-+	asle->cblv = (bclp*0x64)/0xff | ASLE_CBLV_VALID;
-+
-+	return 0;
-+}
-+
-+static u32 asle_set_als_illum(struct drm_device *dev, u32 alsi)
-+{
-+	/* alsi is the current ALS reading in lux. 0 indicates below sensor
-+	   range, 0xffff indicates above sensor range. 1-0xfffe are valid */
-+	return 0;
-+}
-+
-+static u32 asle_set_pwm_freq(struct drm_device *dev, u32 pfmb)
-+{
-+	struct drm_i915_private *dev_priv = dev->dev_private;
-+	if (pfmb & ASLE_PFMB_PWM_VALID) {
-+		u32 blc_pwm_ctl = I915_READ(BLC_PWM_CTL);
-+		u32 pwm = pfmb & ASLE_PFMB_PWM_MASK;
-+		blc_pwm_ctl &= BACKLIGHT_DUTY_CYCLE_MASK;
-+		pwm = pwm >> 9;
-+		/* FIXME - what do we do with the PWM? */
-+	}
-+	return 0;
-+}
-+
-+static u32 asle_set_pfit(struct drm_device *dev, u32 pfit)
-+{
-+	/* Panel fitting is currently controlled by the X code, so this is a
-+	   noop until modesetting support works fully */
-+	if (!(pfit & ASLE_PFIT_VALID))
-+		return ASLE_PFIT_FAILED;
-+	return 0;
-+}
-+
-+void intel_opregion_asle_intr(struct drm_device *dev)
-+{
-+	struct drm_i915_private *dev_priv = dev->dev_private;
-+	struct opregion_asle *asle = dev_priv->opregion.asle;
-+	u32 asle_stat = 0;
-+	u32 asle_req;
-+
-+	if (!asle)
-+		return;
-+
-+	asle_req = asle->aslc & ASLE_REQ_MSK;
-+
-+	if (!asle_req) {
-+		DRM_DEBUG_DRIVER("non asle set request??\n");
-+		return;
-+	}
-+
-+	if (asle_req & ASLE_SET_ALS_ILLUM)
-+		asle_stat |= asle_set_als_illum(dev, asle->alsi);
-+
-+	if (asle_req & ASLE_SET_BACKLIGHT)
-+		asle_stat |= asle_set_backlight(dev, asle->bclp);
-+
-+	if (asle_req & ASLE_SET_PFIT)
-+		asle_stat |= asle_set_pfit(dev, asle->pfit);
-+
-+	if (asle_req & ASLE_SET_PWM_FREQ)
-+		asle_stat |= asle_set_pwm_freq(dev, asle->pfmb);
-+
-+	asle->aslc = asle_stat;
-+}
-+
-+/* Only present on Ironlake+ */
-+void intel_opregion_gse_intr(struct drm_device *dev)
-+{
-+	struct drm_i915_private *dev_priv = dev->dev_private;
-+	struct opregion_asle *asle = dev_priv->opregion.asle;
-+	u32 asle_stat = 0;
-+	u32 asle_req;
-+
-+	if (!asle)
-+		return;
-+
-+	asle_req = asle->aslc & ASLE_REQ_MSK;
-+
-+	if (!asle_req) {
-+		DRM_DEBUG_DRIVER("non asle set request??\n");
-+		return;
-+	}
-+
-+	if (asle_req & ASLE_SET_ALS_ILLUM) {
-+		DRM_DEBUG_DRIVER("Illum is not supported\n");
-+		asle_stat |= ASLE_ALS_ILLUM_FAILED;
-+	}
-+
-+	if (asle_req & ASLE_SET_BACKLIGHT)
-+		asle_stat |= asle_set_backlight(dev, asle->bclp);
-+
-+	if (asle_req & ASLE_SET_PFIT) {
-+		DRM_DEBUG_DRIVER("Pfit is not supported\n");
-+		asle_stat |= ASLE_PFIT_FAILED;
-+	}
-+
-+	if (asle_req & ASLE_SET_PWM_FREQ) {
-+		DRM_DEBUG_DRIVER("PWM freq is not supported\n");
-+		asle_stat |= ASLE_PWM_FREQ_FAILED;
-+	}
-+
-+	asle->aslc = asle_stat;
-+}
-+#define ASLE_ALS_EN    (1<<0)
-+#define ASLE_BLC_EN    (1<<1)
-+#define ASLE_PFIT_EN   (1<<2)
-+#define ASLE_PFMB_EN   (1<<3)
-+
-+void intel_opregion_enable_asle(struct drm_device *dev)
-+{
-+	struct drm_i915_private *dev_priv = dev->dev_private;
-+	struct opregion_asle *asle = dev_priv->opregion.asle;
-+
-+	if (asle) {
-+		if (IS_MOBILE(dev)) {
-+			unsigned long irqflags;
-+
-+			spin_lock_irqsave(&dev_priv->user_irq_lock, irqflags);
-+			intel_enable_asle(dev);
-+			spin_unlock_irqrestore(&dev_priv->user_irq_lock,
-+					       irqflags);
-+		}
-+
-+		asle->tche = ASLE_ALS_EN | ASLE_BLC_EN | ASLE_PFIT_EN |
-+			ASLE_PFMB_EN;
-+		asle->ardy = 1;
-+	}
-+}
-+
-+#define ACPI_EV_DISPLAY_SWITCH (1<<0)
-+#define ACPI_EV_LID            (1<<1)
-+#define ACPI_EV_DOCK           (1<<2)
-+
-+static struct intel_opregion *system_opregion;
-+
-+static int intel_opregion_video_event(struct notifier_block *nb,
-+				      unsigned long val, void *data)
-+{
-+	/* The only video events relevant to opregion are 0x80. These indicate
-+	   either a docking event, lid switch or display switch request. In
-+	   Linux, these are handled by the dock, button and video drivers.
-+	   We might want to fix the video driver to be opregion-aware in
-+	   future, but right now we just indicate to the firmware that the
-+	   request has been handled */
-+
-+	struct opregion_acpi *acpi;
-+
-+	if (!system_opregion)
-+		return NOTIFY_DONE;
-+
-+	acpi = system_opregion->acpi;
-+	acpi->csts = 0;
-+
-+	return NOTIFY_OK;
-+}
-+
-+static struct notifier_block intel_opregion_notifier = {
-+	.notifier_call = intel_opregion_video_event,
-+};
-+
-+/*
-+ * Initialise the DIDL field in opregion. This passes a list of devices to
-+ * the firmware. Values are defined by section B.4.2 of the ACPI specification
-+ * (version 3)
-+ */
-+
-+static void intel_didl_outputs(struct drm_device *dev)
-+{
-+	struct drm_i915_private *dev_priv = dev->dev_private;
-+	struct intel_opregion *opregion = &dev_priv->opregion;
-+	struct drm_connector *connector;
-+	acpi_handle handle;
-+	struct acpi_device *acpi_dev, *acpi_cdev, *acpi_video_bus = NULL;
-+	unsigned long long device_id;
-+	acpi_status status;
-+	int i = 0;
-+
-+	handle = DEVICE_ACPI_HANDLE(&dev->pdev->dev);
-+	if (!handle || ACPI_FAILURE(acpi_bus_get_device(handle, &acpi_dev)))
-+		return;
-+
-+	if (acpi_is_video_device(acpi_dev))
-+		acpi_video_bus = acpi_dev;
-+	else {
-+		list_for_each_entry(acpi_cdev, &acpi_dev->children, node) {
-+			if (acpi_is_video_device(acpi_cdev)) {
-+				acpi_video_bus = acpi_cdev;
-+				break;
-+			}
-+		}
-+	}
-+
-+	if (!acpi_video_bus) {
-+		printk(KERN_WARNING "No ACPI video bus found\n");
-+		return;
-+	}
-+
-+	list_for_each_entry(acpi_cdev, &acpi_video_bus->children, node) {
-+		if (i >= 8) {
-+			dev_printk (KERN_ERR, &dev->pdev->dev,
-+				    "More than 8 outputs detected\n");
-+			return;
-+		}
-+		status =
-+			acpi_evaluate_integer(acpi_cdev->handle, "_ADR",
-+						NULL, &device_id);
-+		if (ACPI_SUCCESS(status)) {
-+			if (!device_id)
-+				goto blind_set;
-+			opregion->acpi->didl[i] = (u32)(device_id & 0x0f0f);
-+			i++;
-+		}
-+	}
-+
-+end:
-+	/* If fewer than 8 outputs, the list must be null terminated */
-+	if (i < 8)
-+		opregion->acpi->didl[i] = 0;
-+	return;
-+
-+blind_set:
-+	i = 0;
-+	list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
-+		int output_type = ACPI_OTHER_OUTPUT;
-+		if (i >= 8) {
-+			dev_printk (KERN_ERR, &dev->pdev->dev,
-+				    "More than 8 outputs detected\n");
-+			return;
-+		}
-+		switch (connector->connector_type) {
-+		case DRM_MODE_CONNECTOR_VGA:
-+		case DRM_MODE_CONNECTOR_DVIA:
-+			output_type = ACPI_VGA_OUTPUT;
-+			break;
-+		case DRM_MODE_CONNECTOR_Composite:
-+		case DRM_MODE_CONNECTOR_SVIDEO:
-+		case DRM_MODE_CONNECTOR_Component:
-+		case DRM_MODE_CONNECTOR_9PinDIN:
-+			output_type = ACPI_TV_OUTPUT;
-+			break;
-+		case DRM_MODE_CONNECTOR_DVII:
-+		case DRM_MODE_CONNECTOR_DVID:
-+		case DRM_MODE_CONNECTOR_DisplayPort:
-+		case DRM_MODE_CONNECTOR_HDMIA:
-+		case DRM_MODE_CONNECTOR_HDMIB:
-+			output_type = ACPI_DIGITAL_OUTPUT;
-+			break;
-+		case DRM_MODE_CONNECTOR_LVDS:
-+			output_type = ACPI_LVDS_OUTPUT;
-+			break;
-+		}
-+		opregion->acpi->didl[i] |= (1<<31) | output_type | i;
-+		i++;
-+	}
-+	goto end;
-+}
-+
-+void intel_opregion_init(struct drm_device *dev)
-+{
-+	struct drm_i915_private *dev_priv = dev->dev_private;
-+	struct intel_opregion *opregion = &dev_priv->opregion;
-+
-+	if (!opregion->header)
-+		return;
-+
-+	if (opregion->acpi) {
-+		if (drm_core_check_feature(dev, DRIVER_MODESET))
-+			intel_didl_outputs(dev);
-+
-+		/* Notify BIOS we are ready to handle ACPI video ext notifs.
-+		 * Right now, all the events are handled by the ACPI video module.
-+		 * We don't actually need to do anything with them. */
-+		opregion->acpi->csts = 0;
-+		opregion->acpi->drdy = 1;
-+
-+		system_opregion = opregion;
-+		register_acpi_notifier(&intel_opregion_notifier);
-+	}
-+
-+	if (opregion->asle)
-+		intel_opregion_enable_asle(dev);
-+}
-+
-+void intel_opregion_fini(struct drm_device *dev)
-+{
-+	struct drm_i915_private *dev_priv = dev->dev_private;
-+	struct intel_opregion *opregion = &dev_priv->opregion;
-+
-+	if (!opregion->header)
-+		return;
-+
-+	if (opregion->acpi) {
-+		opregion->acpi->drdy = 0;
-+
-+		system_opregion = NULL;
-+		unregister_acpi_notifier(&intel_opregion_notifier);
-+	}
-+
-+	/* just clear all opregion memory pointers now */
-+	iounmap(opregion->header);
-+	opregion->header = NULL;
-+	opregion->acpi = NULL;
-+	opregion->swsci = NULL;
-+	opregion->asle = NULL;
-+	opregion->vbt = NULL;
-+}
-+#endif
-+
-+int intel_opregion_setup(struct drm_device *dev)
-+{
-+	struct drm_i915_private *dev_priv = dev->dev_private;
-+	struct intel_opregion *opregion = &dev_priv->opregion;
-+	void *base;
-+	u32 asls, mboxes;
-+	int err = 0;
-+
-+	pci_read_config_dword(dev->pdev, PCI_ASLS, &asls);
-+	DRM_DEBUG_DRIVER("graphic opregion physical addr: 0x%x\n", asls);
-+	if (asls == 0) {
-+		DRM_DEBUG_DRIVER("ACPI OpRegion not supported!\n");
-+		return -ENOTSUPP;
-+	}
-+
-+	base = ioremap(asls, OPREGION_SIZE);
-+	if (!base)
-+		return -ENOMEM;
-+
-+	if (memcmp(base, OPREGION_SIGNATURE, 16)) {
-+		DRM_DEBUG_DRIVER("opregion signature mismatch\n");
-+		err = -EINVAL;
-+		goto err_out;
-+	}
-+	opregion->header = base;
-+	opregion->vbt = base + OPREGION_VBT_OFFSET;
-+
-+	mboxes = opregion->header->mboxes;
-+	if (mboxes & MBOX_ACPI) {
-+		DRM_DEBUG_DRIVER("Public ACPI methods supported\n");
-+		opregion->acpi = base + OPREGION_ACPI_OFFSET;
-+	}
-+
-+	if (mboxes & MBOX_SWSCI) {
-+		DRM_DEBUG_DRIVER("SWSCI supported\n");
-+		opregion->swsci = base + OPREGION_SWSCI_OFFSET;
-+	}
-+	if (mboxes & MBOX_ASLE) {
-+		DRM_DEBUG_DRIVER("ASLE supported\n");
-+		opregion->asle = base + OPREGION_ASLE_OFFSET;
-+	}
-+
-+	return 0;
-+
-+err_out:
-+	iounmap(base);
-+	return err;
-+}
-diff --git a/drivers/gpu/drm/i915/intel_overlay.c b/drivers/gpu/drm/i915/intel_overlay.c
-index 1d306a4..5b513ea 100644
---- a/drivers/gpu/drm/i915/intel_overlay.c
-+++ b/drivers/gpu/drm/i915/intel_overlay.c
-@@ -170,57 +170,143 @@ struct overlay_registers {
-     u16 RESERVEDG[0x100 / 2 - N_HORIZ_UV_TAPS * N_PHASES];
- };
- 
--/* overlay flip addr flag */
--#define OFC_UPDATE		0x1
--
--#define OVERLAY_NONPHYSICAL(dev) (IS_G33(dev) || IS_I965G(dev))
--#define OVERLAY_EXISTS(dev) (!IS_G4X(dev) && !IS_IRONLAKE(dev) && !IS_GEN6(dev))
--
-+struct intel_overlay {
-+	struct drm_device *dev;
-+	struct intel_crtc *crtc;
-+	struct drm_i915_gem_object *vid_bo;
-+	struct drm_i915_gem_object *old_vid_bo;
-+	int active;
-+	int pfit_active;
-+	u32 pfit_vscale_ratio; /* shifted-point number, (1<<12) == 1.0 */
-+	u32 color_key;
-+	u32 brightness, contrast, saturation;
-+	u32 old_xscale, old_yscale;
-+	/* register access */
-+	u32 flip_addr;
-+	struct drm_i915_gem_object *reg_bo;
-+	/* flip handling */
-+	uint32_t last_flip_req;
-+	void (*flip_tail)(struct intel_overlay *);
-+};
- 
--static struct overlay_registers *intel_overlay_map_regs_atomic(struct intel_overlay *overlay)
-+static struct overlay_registers *
-+intel_overlay_map_regs(struct intel_overlay *overlay)
- {
-         drm_i915_private_t *dev_priv = overlay->dev->dev_private;
- 	struct overlay_registers *regs;
- 
--	/* no recursive mappings */
--	BUG_ON(overlay->virt_addr);
-+	if (OVERLAY_NEEDS_PHYSICAL(overlay->dev))
-+		regs = overlay->reg_bo->phys_obj->handle->vaddr;
-+	else
-+		regs = io_mapping_map_wc(dev_priv->mm.gtt_mapping,
-+					 overlay->reg_bo->gtt_offset);
- 
--	if (OVERLAY_NONPHYSICAL(overlay->dev)) {
--		regs = io_mapping_map_atomic_wc(dev_priv->mm.gtt_mapping,
--						overlay->reg_bo->gtt_offset,
--						KM_USER0);
-+	return regs;
-+}
- 
--		if (!regs) {
--			DRM_ERROR("failed to map overlay regs in GTT\n");
--			return NULL;
--		}
--	} else
--		regs = overlay->reg_bo->phys_obj->handle->vaddr;
-+static void intel_overlay_unmap_regs(struct intel_overlay *overlay,
-+				     struct overlay_registers *regs)
-+{
-+	if (!OVERLAY_NEEDS_PHYSICAL(overlay->dev))
-+		io_mapping_unmap(regs);
-+}
- 
--	return overlay->virt_addr = regs;
-+static int intel_overlay_do_wait_request(struct intel_overlay *overlay,
-+					 struct drm_i915_gem_request *request,
-+					 bool interruptible,
-+					 void (*tail)(struct intel_overlay *))
-+{
-+	struct drm_device *dev = overlay->dev;
-+	drm_i915_private_t *dev_priv = dev->dev_private;
-+	int ret;
-+
-+	BUG_ON(overlay->last_flip_req);
-+	overlay->last_flip_req =
-+		i915_add_request(dev, NULL, request, &dev_priv->render_ring);
-+	if (overlay->last_flip_req == 0)
-+		return -ENOMEM;
-+
-+	overlay->flip_tail = tail;
-+	ret = i915_do_wait_request(dev,
-+				   overlay->last_flip_req, true,
-+				   &dev_priv->render_ring);
-+	if (ret)
-+		return ret;
-+
-+	overlay->last_flip_req = 0;
-+	return 0;
- }
- 
--static void intel_overlay_unmap_regs_atomic(struct intel_overlay *overlay)
-+/* Workaround for i830 bug where pipe a must be enabled to change control regs */
-+static int
-+i830_activate_pipe_a(struct drm_device *dev)
- {
--	if (OVERLAY_NONPHYSICAL(overlay->dev))
--		io_mapping_unmap_atomic(overlay->virt_addr, KM_USER0);
-+	drm_i915_private_t *dev_priv = dev->dev_private;
-+	struct intel_crtc *crtc;
-+	struct drm_crtc_helper_funcs *crtc_funcs;
-+	struct drm_display_mode vesa_640x480 = {
-+		DRM_MODE("640x480", DRM_MODE_TYPE_DRIVER, 25175, 640, 656,
-+			 752, 800, 0, 480, 489, 492, 525, 0,
-+			 DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC)
-+	}, *mode;
-+
-+	crtc = to_intel_crtc(dev_priv->pipe_to_crtc_mapping[0]);
-+	if (crtc->dpms_mode == DRM_MODE_DPMS_ON)
-+		return 0;
- 
--	overlay->virt_addr = NULL;
-+	/* most i8xx have pipe a forced on, so don't trust dpms mode */
-+	if (I915_READ(PIPEACONF) & PIPECONF_ENABLE)
-+		return 0;
- 
--	return;
-+	crtc_funcs = crtc->base.helper_private;
-+	if (crtc_funcs->dpms == NULL)
-+		return 0;
-+
-+	DRM_DEBUG_DRIVER("Enabling pipe A in order to enable overlay\n");
-+
-+	mode = drm_mode_duplicate(dev, &vesa_640x480);
-+	drm_mode_set_crtcinfo(mode, CRTC_INTERLACE_HALVE_V);
-+	if(!drm_crtc_helper_set_mode(&crtc->base, mode,
-+				       crtc->base.x, crtc->base.y,
-+				       crtc->base.fb))
-+		return 0;
-+
-+	crtc_funcs->dpms(&crtc->base, DRM_MODE_DPMS_ON);
-+	return 1;
-+}
-+
-+static void
-+i830_deactivate_pipe_a(struct drm_device *dev)
-+{
-+	drm_i915_private_t *dev_priv = dev->dev_private;
-+	struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[0];
-+	struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private;
-+
-+	crtc_funcs->dpms(crtc, DRM_MODE_DPMS_OFF);
- }
- 
- /* overlay needs to be disabled in OCMD reg */
- static int intel_overlay_on(struct intel_overlay *overlay)
- {
- 	struct drm_device *dev = overlay->dev;
-+	struct drm_i915_gem_request *request;
-+	int pipe_a_quirk = 0;
- 	int ret;
--	drm_i915_private_t *dev_priv = dev->dev_private;
- 
- 	BUG_ON(overlay->active);
--
- 	overlay->active = 1;
--	overlay->hw_wedged = NEEDS_WAIT_FOR_FLIP;
-+
-+	if (IS_I830(dev)) {
-+		pipe_a_quirk = i830_activate_pipe_a(dev);
-+		if (pipe_a_quirk < 0)
-+			return pipe_a_quirk;
-+	}
-+
-+	request = kzalloc(sizeof(*request), GFP_KERNEL);
-+	if (request == NULL) {
-+		ret = -ENOMEM;
-+		goto out;
-+	}
- 
- 	BEGIN_LP_RING(4);
- 	OUT_RING(MI_OVERLAY_FLIP | MI_OVERLAY_ON);
-@@ -229,32 +315,30 @@ static int intel_overlay_on(struct intel_overlay *overlay)
- 	OUT_RING(MI_NOOP);
- 	ADVANCE_LP_RING();
- 
--	overlay->last_flip_req =
--		i915_add_request(dev, NULL, 0, &dev_priv->render_ring);
--	if (overlay->last_flip_req == 0)
--		return -ENOMEM;
--
--	ret = i915_do_wait_request(dev,
--			overlay->last_flip_req, 1, &dev_priv->render_ring);
--	if (ret != 0)
--		return ret;
-+	ret = intel_overlay_do_wait_request(overlay, request, true, NULL);
-+out:
-+	if (pipe_a_quirk)
-+		i830_deactivate_pipe_a(dev);
- 
--	overlay->hw_wedged = 0;
--	overlay->last_flip_req = 0;
--	return 0;
-+	return ret;
- }
- 
- /* overlay needs to be enabled in OCMD reg */
--static void intel_overlay_continue(struct intel_overlay *overlay,
--			    bool load_polyphase_filter)
-+static int intel_overlay_continue(struct intel_overlay *overlay,
-+				  bool load_polyphase_filter)
- {
- 	struct drm_device *dev = overlay->dev;
-         drm_i915_private_t *dev_priv = dev->dev_private;
-+	struct drm_i915_gem_request *request;
- 	u32 flip_addr = overlay->flip_addr;
- 	u32 tmp;
- 
- 	BUG_ON(!overlay->active);
- 
-+	request = kzalloc(sizeof(*request), GFP_KERNEL);
-+	if (request == NULL)
-+		return -ENOMEM;
-+
- 	if (load_polyphase_filter)
- 		flip_addr |= OFC_UPDATE;
- 
-@@ -269,220 +353,132 @@ static void intel_overlay_continue(struct intel_overlay *overlay,
-         ADVANCE_LP_RING();
- 
- 	overlay->last_flip_req =
--		i915_add_request(dev, NULL, 0, &dev_priv->render_ring);
-+		i915_add_request(dev, NULL, request, &dev_priv->render_ring);
-+	return 0;
- }
- 
--static int intel_overlay_wait_flip(struct intel_overlay *overlay)
-+static void intel_overlay_release_old_vid_tail(struct intel_overlay *overlay)
- {
--	struct drm_device *dev = overlay->dev;
--        drm_i915_private_t *dev_priv = dev->dev_private;
--	int ret;
--	u32 tmp;
--
--	if (overlay->last_flip_req != 0) {
--		ret = i915_do_wait_request(dev, overlay->last_flip_req,
--				1, &dev_priv->render_ring);
--		if (ret == 0) {
--			overlay->last_flip_req = 0;
--
--			tmp = I915_READ(ISR);
-+	struct drm_gem_object *obj = &overlay->old_vid_bo->base;
- 
--			if (!(tmp & I915_OVERLAY_PLANE_FLIP_PENDING_INTERRUPT))
--				return 0;
--		}
--	}
-+	i915_gem_object_unpin(obj);
-+	drm_gem_object_unreference(obj);
- 
--	/* synchronous slowpath */
--	overlay->hw_wedged = RELEASE_OLD_VID;
-+	overlay->old_vid_bo = NULL;
-+}
- 
--	BEGIN_LP_RING(2);
--        OUT_RING(MI_WAIT_FOR_EVENT | MI_WAIT_FOR_OVERLAY_FLIP);
--        OUT_RING(MI_NOOP);
--        ADVANCE_LP_RING();
-+static void intel_overlay_off_tail(struct intel_overlay *overlay)
-+{
-+	struct drm_gem_object *obj;
- 
--	overlay->last_flip_req =
--		i915_add_request(dev, NULL, 0, &dev_priv->render_ring);
--	if (overlay->last_flip_req == 0)
--		return -ENOMEM;
-+	/* never have the overlay hw on without showing a frame */
-+	BUG_ON(!overlay->vid_bo);
-+	obj = &overlay->vid_bo->base;
- 
--	ret = i915_do_wait_request(dev, overlay->last_flip_req,
--			1, &dev_priv->render_ring);
--	if (ret != 0)
--		return ret;
-+	i915_gem_object_unpin(obj);
-+	drm_gem_object_unreference(obj);
-+	overlay->vid_bo = NULL;
- 
--	overlay->hw_wedged = 0;
--	overlay->last_flip_req = 0;
--	return 0;
-+	overlay->crtc->overlay = NULL;
-+	overlay->crtc = NULL;
-+	overlay->active = 0;
- }
- 
- /* overlay needs to be disabled in OCMD reg */
--static int intel_overlay_off(struct intel_overlay *overlay)
-+static int intel_overlay_off(struct intel_overlay *overlay,
-+			     bool interruptible)
- {
--	u32 flip_addr = overlay->flip_addr;
- 	struct drm_device *dev = overlay->dev;
--	drm_i915_private_t *dev_priv = dev->dev_private;
--	int ret;
-+	u32 flip_addr = overlay->flip_addr;
-+	struct drm_i915_gem_request *request;
- 
- 	BUG_ON(!overlay->active);
- 
-+	request = kzalloc(sizeof(*request), GFP_KERNEL);
-+	if (request == NULL)
-+		return -ENOMEM;
-+
- 	/* According to intel docs the overlay hw may hang (when switching
- 	 * off) without loading the filter coeffs. It is however unclear whether
- 	 * this applies to the disabling of the overlay or to the switching off
- 	 * of the hw. Do it in both cases */
- 	flip_addr |= OFC_UPDATE;
- 
-+	BEGIN_LP_RING(6);
- 	/* wait for overlay to go idle */
--	overlay->hw_wedged = SWITCH_OFF_STAGE_1;
--
--	BEGIN_LP_RING(4);
- 	OUT_RING(MI_OVERLAY_FLIP | MI_OVERLAY_CONTINUE);
- 	OUT_RING(flip_addr);
--        OUT_RING(MI_WAIT_FOR_EVENT | MI_WAIT_FOR_OVERLAY_FLIP);
--        OUT_RING(MI_NOOP);
--        ADVANCE_LP_RING();
--
--	overlay->last_flip_req =
--		i915_add_request(dev, NULL, 0, &dev_priv->render_ring);
--	if (overlay->last_flip_req == 0)
--		return -ENOMEM;
--
--	ret = i915_do_wait_request(dev, overlay->last_flip_req,
--			1, &dev_priv->render_ring);
--	if (ret != 0)
--		return ret;
--
-+	OUT_RING(MI_WAIT_FOR_EVENT | MI_WAIT_FOR_OVERLAY_FLIP);
- 	/* turn overlay off */
--	overlay->hw_wedged = SWITCH_OFF_STAGE_2;
--
--	BEGIN_LP_RING(4);
--        OUT_RING(MI_OVERLAY_FLIP | MI_OVERLAY_OFF);
-+	OUT_RING(MI_OVERLAY_FLIP | MI_OVERLAY_OFF);
- 	OUT_RING(flip_addr);
--        OUT_RING(MI_WAIT_FOR_EVENT | MI_WAIT_FOR_OVERLAY_FLIP);
--        OUT_RING(MI_NOOP);
-+	OUT_RING(MI_WAIT_FOR_EVENT | MI_WAIT_FOR_OVERLAY_FLIP);
- 	ADVANCE_LP_RING();
- 
--	overlay->last_flip_req =
--		i915_add_request(dev, NULL, 0, &dev_priv->render_ring);
--	if (overlay->last_flip_req == 0)
--		return -ENOMEM;
--
--	ret = i915_do_wait_request(dev, overlay->last_flip_req,
--			1, &dev_priv->render_ring);
--	if (ret != 0)
--		return ret;
--
--	overlay->hw_wedged = 0;
--	overlay->last_flip_req = 0;
--	return ret;
--}
--
--static void intel_overlay_off_tail(struct intel_overlay *overlay)
--{
--	struct drm_gem_object *obj;
--
--	/* never have the overlay hw on without showing a frame */
--	BUG_ON(!overlay->vid_bo);
--	obj = &overlay->vid_bo->base;
--
--	i915_gem_object_unpin(obj);
--	drm_gem_object_unreference(obj);
--	overlay->vid_bo = NULL;
--
--	overlay->crtc->overlay = NULL;
--	overlay->crtc = NULL;
--	overlay->active = 0;
-+	return intel_overlay_do_wait_request(overlay, request, interruptible,
-+					     intel_overlay_off_tail);
- }
- 
- /* recover from an interruption due to a signal
-  * We have to be careful not to repeat work forever and make forward progress. */
--int intel_overlay_recover_from_interrupt(struct intel_overlay *overlay,
--					 int interruptible)
-+static int intel_overlay_recover_from_interrupt(struct intel_overlay *overlay,
-+						bool interruptible)
- {
- 	struct drm_device *dev = overlay->dev;
--	struct drm_gem_object *obj;
- 	drm_i915_private_t *dev_priv = dev->dev_private;
--	u32 flip_addr;
- 	int ret;
- 
--	if (overlay->hw_wedged == HW_WEDGED)
--		return -EIO;
--
--	if (overlay->last_flip_req == 0) {
--		overlay->last_flip_req =
--			i915_add_request(dev, NULL, 0, &dev_priv->render_ring);
--		if (overlay->last_flip_req == 0)
--			return -ENOMEM;
--	}
-+	if (overlay->last_flip_req == 0)
-+		return 0;
- 
- 	ret = i915_do_wait_request(dev, overlay->last_flip_req,
--			interruptible, &dev_priv->render_ring);
--	if (ret != 0)
-+				   interruptible, &dev_priv->render_ring);
-+	if (ret)
- 		return ret;
- 
--	switch (overlay->hw_wedged) {
--		case RELEASE_OLD_VID:
--			obj = &overlay->old_vid_bo->base;
--			i915_gem_object_unpin(obj);
--			drm_gem_object_unreference(obj);
--			overlay->old_vid_bo = NULL;
--			break;
--		case SWITCH_OFF_STAGE_1:
--			flip_addr = overlay->flip_addr;
--			flip_addr |= OFC_UPDATE;
--
--			overlay->hw_wedged = SWITCH_OFF_STAGE_2;
--
--			BEGIN_LP_RING(4);
--			OUT_RING(MI_OVERLAY_FLIP | MI_OVERLAY_OFF);
--			OUT_RING(flip_addr);
--			OUT_RING(MI_WAIT_FOR_EVENT | MI_WAIT_FOR_OVERLAY_FLIP);
--			OUT_RING(MI_NOOP);
--			ADVANCE_LP_RING();
--
--			overlay->last_flip_req = i915_add_request(dev, NULL,
--					0, &dev_priv->render_ring);
--			if (overlay->last_flip_req == 0)
--				return -ENOMEM;
--
--			ret = i915_do_wait_request(dev, overlay->last_flip_req,
--					interruptible, &dev_priv->render_ring);
--			if (ret != 0)
--				return ret;
--
--		case SWITCH_OFF_STAGE_2:
--			intel_overlay_off_tail(overlay);
--			break;
--		default:
--			BUG_ON(overlay->hw_wedged != NEEDS_WAIT_FOR_FLIP);
--	}
-+	if (overlay->flip_tail)
-+		overlay->flip_tail(overlay);
- 
--	overlay->hw_wedged = 0;
- 	overlay->last_flip_req = 0;
- 	return 0;
- }
- 
- /* Wait for pending overlay flip and release old frame.
-  * Needs to be called before the overlay register are changed
-- * via intel_overlay_(un)map_regs_atomic */
-+ * via intel_overlay_(un)map_regs
-+ */
- static int intel_overlay_release_old_vid(struct intel_overlay *overlay)
- {
-+	struct drm_device *dev = overlay->dev;
-+	drm_i915_private_t *dev_priv = dev->dev_private;
- 	int ret;
--	struct drm_gem_object *obj;
- 
--	/* only wait if there is actually an old frame to release to
--	 * guarantee forward progress */
-+	/* Only wait if there is actually an old frame to release to
-+	 * guarantee forward progress.
-+	 */
- 	if (!overlay->old_vid_bo)
- 		return 0;
- 
--	ret = intel_overlay_wait_flip(overlay);
--	if (ret != 0)
--		return ret;
-+	if (I915_READ(ISR) & I915_OVERLAY_PLANE_FLIP_PENDING_INTERRUPT) {
-+		struct drm_i915_gem_request *request;
- 
--	obj = &overlay->old_vid_bo->base;
--	i915_gem_object_unpin(obj);
--	drm_gem_object_unreference(obj);
--	overlay->old_vid_bo = NULL;
-+		/* synchronous slowpath */
-+		request = kzalloc(sizeof(*request), GFP_KERNEL);
-+		if (request == NULL)
-+			return -ENOMEM;
-+
-+		BEGIN_LP_RING(2);
-+		OUT_RING(MI_WAIT_FOR_EVENT | MI_WAIT_FOR_OVERLAY_FLIP);
-+		OUT_RING(MI_NOOP);
-+		ADVANCE_LP_RING();
-+
-+		ret = intel_overlay_do_wait_request(overlay, request, true,
-+						    intel_overlay_release_old_vid_tail);
-+		if (ret)
-+			return ret;
-+	}
- 
-+	intel_overlay_release_old_vid_tail(overlay);
- 	return 0;
- }
- 
-@@ -506,65 +502,65 @@ struct put_image_params {
- static int packed_depth_bytes(u32 format)
- {
- 	switch (format & I915_OVERLAY_DEPTH_MASK) {
--		case I915_OVERLAY_YUV422:
--			return 4;
--		case I915_OVERLAY_YUV411:
--			/* return 6; not implemented */
--		default:
--			return -EINVAL;
-+	case I915_OVERLAY_YUV422:
-+		return 4;
-+	case I915_OVERLAY_YUV411:
-+		/* return 6; not implemented */
-+	default:
-+		return -EINVAL;
- 	}
- }
- 
- static int packed_width_bytes(u32 format, short width)
- {
- 	switch (format & I915_OVERLAY_DEPTH_MASK) {
--		case I915_OVERLAY_YUV422:
--			return width << 1;
--		default:
--			return -EINVAL;
-+	case I915_OVERLAY_YUV422:
-+		return width << 1;
-+	default:
-+		return -EINVAL;
- 	}
- }
- 
- static int uv_hsubsampling(u32 format)
- {
- 	switch (format & I915_OVERLAY_DEPTH_MASK) {
--		case I915_OVERLAY_YUV422:
--		case I915_OVERLAY_YUV420:
--			return 2;
--		case I915_OVERLAY_YUV411:
--		case I915_OVERLAY_YUV410:
--			return 4;
--		default:
--			return -EINVAL;
-+	case I915_OVERLAY_YUV422:
-+	case I915_OVERLAY_YUV420:
-+		return 2;
-+	case I915_OVERLAY_YUV411:
-+	case I915_OVERLAY_YUV410:
-+		return 4;
-+	default:
-+		return -EINVAL;
- 	}
- }
- 
- static int uv_vsubsampling(u32 format)
- {
- 	switch (format & I915_OVERLAY_DEPTH_MASK) {
--		case I915_OVERLAY_YUV420:
--		case I915_OVERLAY_YUV410:
--			return 2;
--		case I915_OVERLAY_YUV422:
--		case I915_OVERLAY_YUV411:
--			return 1;
--		default:
--			return -EINVAL;
-+	case I915_OVERLAY_YUV420:
-+	case I915_OVERLAY_YUV410:
-+		return 2;
-+	case I915_OVERLAY_YUV422:
-+	case I915_OVERLAY_YUV411:
-+		return 1;
-+	default:
-+		return -EINVAL;
- 	}
- }
- 
- static u32 calc_swidthsw(struct drm_device *dev, u32 offset, u32 width)
- {
- 	u32 mask, shift, ret;
--	if (IS_I9XX(dev)) {
--		mask = 0x3f;
--		shift = 6;
--	} else {
-+	if (IS_GEN2(dev)) {
- 		mask = 0x1f;
- 		shift = 5;
-+	} else {
-+		mask = 0x3f;
-+		shift = 6;
- 	}
- 	ret = ((offset + width + mask) >> shift) - (offset >> shift);
--	if (IS_I9XX(dev))
-+	if (!IS_GEN2(dev))
- 		ret <<= 1;
- 	ret -=1;
- 	return ret << 2;
-@@ -587,7 +583,9 @@ static const u16 y_static_hcoeffs[N_HORIZ_Y_TAPS * N_PHASES] = {
- 	0x3020, 0xb340, 0x1fb8, 0x34a0, 0xb060,
- 	0x3020, 0xb240, 0x1fe0, 0x32e0, 0xb040,
- 	0x3020, 0xb140, 0x1ff8, 0x3160, 0xb020,
--	0xb000, 0x3000, 0x0800, 0x3000, 0xb000};
-+	0xb000, 0x3000, 0x0800, 0x3000, 0xb000
-+};
-+
- static const u16 uv_static_hcoeffs[N_HORIZ_UV_TAPS * N_PHASES] = {
- 	0x3000, 0x1800, 0x1800, 0xb000, 0x18d0, 0x2e60,
- 	0xb000, 0x1990, 0x2ce0, 0xb020, 0x1a68, 0x2b40,
-@@ -597,7 +595,8 @@ static const u16 uv_static_hcoeffs[N_HORIZ_UV_TAPS * N_PHASES] = {
- 	0xb100, 0x1eb8, 0x3620, 0xb100, 0x1f18, 0x34a0,
- 	0xb100, 0x1f68, 0x3360, 0xb0e0, 0x1fa8, 0x3240,
- 	0xb0c0, 0x1fe0, 0x3140, 0xb060, 0x1ff0, 0x30a0,
--	0x3000, 0x0800, 0x3000};
-+	0x3000, 0x0800, 0x3000
-+};
- 
- static void update_polyphase_filter(struct overlay_registers *regs)
- {
-@@ -630,29 +629,31 @@ static bool update_scaling_factors(struct intel_overlay *overlay,
- 		yscale = 1 << FP_SHIFT;
- 
- 	/*if (params->format & I915_OVERLAY_YUV_PLANAR) {*/
--		xscale_UV = xscale/uv_hscale;
--		yscale_UV = yscale/uv_vscale;
--		/* make the Y scale to UV scale ratio an exact multiply */
--		xscale = xscale_UV * uv_hscale;
--		yscale = yscale_UV * uv_vscale;
-+	xscale_UV = xscale/uv_hscale;
-+	yscale_UV = yscale/uv_vscale;
-+	/* make the Y scale to UV scale ratio an exact multiply */
-+	xscale = xscale_UV * uv_hscale;
-+	yscale = yscale_UV * uv_vscale;
- 	/*} else {
--		xscale_UV = 0;
--		yscale_UV = 0;
--	}*/
-+	  xscale_UV = 0;
-+	  yscale_UV = 0;
-+	  }*/
- 
- 	if (xscale != overlay->old_xscale || yscale != overlay->old_yscale)
- 		scale_changed = true;
- 	overlay->old_xscale = xscale;
- 	overlay->old_yscale = yscale;
- 
--	regs->YRGBSCALE = ((yscale & FRACT_MASK) << 20)
--		| ((xscale >> FP_SHIFT) << 16)
--		| ((xscale & FRACT_MASK) << 3);
--	regs->UVSCALE = ((yscale_UV & FRACT_MASK) << 20)
--		| ((xscale_UV >> FP_SHIFT) << 16)
--		| ((xscale_UV & FRACT_MASK) << 3);
--	regs->UVSCALEV = ((yscale >> FP_SHIFT) << 16)
--		| ((yscale_UV >> FP_SHIFT) << 0);
-+	regs->YRGBSCALE = (((yscale & FRACT_MASK) << 20) |
-+			   ((xscale >> FP_SHIFT)  << 16) |
-+			   ((xscale & FRACT_MASK) << 3));
-+
-+	regs->UVSCALE = (((yscale_UV & FRACT_MASK) << 20) |
-+			 ((xscale_UV >> FP_SHIFT)  << 16) |
-+			 ((xscale_UV & FRACT_MASK) << 3));
-+
-+	regs->UVSCALEV = ((((yscale    >> FP_SHIFT) << 16) |
-+			   ((yscale_UV >> FP_SHIFT) << 0)));
- 
- 	if (scale_changed)
- 		update_polyphase_filter(regs);
-@@ -664,22 +665,28 @@ static void update_colorkey(struct intel_overlay *overlay,
- 			    struct overlay_registers *regs)
- {
- 	u32 key = overlay->color_key;
-+
- 	switch (overlay->crtc->base.fb->bits_per_pixel) {
--		case 8:
--			regs->DCLRKV = 0;
--			regs->DCLRKM = CLK_RGB8I_MASK | DST_KEY_ENABLE;
--		case 16:
--			if (overlay->crtc->base.fb->depth == 15) {
--				regs->DCLRKV = RGB15_TO_COLORKEY(key);
--				regs->DCLRKM = CLK_RGB15_MASK | DST_KEY_ENABLE;
--			} else {
--				regs->DCLRKV = RGB16_TO_COLORKEY(key);
--				regs->DCLRKM = CLK_RGB16_MASK | DST_KEY_ENABLE;
--			}
--		case 24:
--		case 32:
--			regs->DCLRKV = key;
--			regs->DCLRKM = CLK_RGB24_MASK | DST_KEY_ENABLE;
-+	case 8:
-+		regs->DCLRKV = 0;
-+		regs->DCLRKM = CLK_RGB8I_MASK | DST_KEY_ENABLE;
-+		break;
-+
-+	case 16:
-+		if (overlay->crtc->base.fb->depth == 15) {
-+			regs->DCLRKV = RGB15_TO_COLORKEY(key);
-+			regs->DCLRKM = CLK_RGB15_MASK | DST_KEY_ENABLE;
-+		} else {
-+			regs->DCLRKV = RGB16_TO_COLORKEY(key);
-+			regs->DCLRKM = CLK_RGB16_MASK | DST_KEY_ENABLE;
-+		}
-+		break;
-+
-+	case 24:
-+	case 32:
-+		regs->DCLRKV = key;
-+		regs->DCLRKM = CLK_RGB24_MASK | DST_KEY_ENABLE;
-+		break;
- 	}
- }
- 
-@@ -689,48 +696,48 @@ static u32 overlay_cmd_reg(struct put_image_params *params)
- 
- 	if (params->format & I915_OVERLAY_YUV_PLANAR) {
- 		switch (params->format & I915_OVERLAY_DEPTH_MASK) {
--			case I915_OVERLAY_YUV422:
--				cmd |= OCMD_YUV_422_PLANAR;
--				break;
--			case I915_OVERLAY_YUV420:
--				cmd |= OCMD_YUV_420_PLANAR;
--				break;
--			case I915_OVERLAY_YUV411:
--			case I915_OVERLAY_YUV410:
--				cmd |= OCMD_YUV_410_PLANAR;
--				break;
-+		case I915_OVERLAY_YUV422:
-+			cmd |= OCMD_YUV_422_PLANAR;
-+			break;
-+		case I915_OVERLAY_YUV420:
-+			cmd |= OCMD_YUV_420_PLANAR;
-+			break;
-+		case I915_OVERLAY_YUV411:
-+		case I915_OVERLAY_YUV410:
-+			cmd |= OCMD_YUV_410_PLANAR;
-+			break;
- 		}
- 	} else { /* YUV packed */
- 		switch (params->format & I915_OVERLAY_DEPTH_MASK) {
--			case I915_OVERLAY_YUV422:
--				cmd |= OCMD_YUV_422_PACKED;
--				break;
--			case I915_OVERLAY_YUV411:
--				cmd |= OCMD_YUV_411_PACKED;
--				break;
-+		case I915_OVERLAY_YUV422:
-+			cmd |= OCMD_YUV_422_PACKED;
-+			break;
-+		case I915_OVERLAY_YUV411:
-+			cmd |= OCMD_YUV_411_PACKED;
-+			break;
- 		}
- 
- 		switch (params->format & I915_OVERLAY_SWAP_MASK) {
--			case I915_OVERLAY_NO_SWAP:
--				break;
--			case I915_OVERLAY_UV_SWAP:
--				cmd |= OCMD_UV_SWAP;
--				break;
--			case I915_OVERLAY_Y_SWAP:
--				cmd |= OCMD_Y_SWAP;
--				break;
--			case I915_OVERLAY_Y_AND_UV_SWAP:
--				cmd |= OCMD_Y_AND_UV_SWAP;
--				break;
-+		case I915_OVERLAY_NO_SWAP:
-+			break;
-+		case I915_OVERLAY_UV_SWAP:
-+			cmd |= OCMD_UV_SWAP;
-+			break;
-+		case I915_OVERLAY_Y_SWAP:
-+			cmd |= OCMD_Y_SWAP;
-+			break;
-+		case I915_OVERLAY_Y_AND_UV_SWAP:
-+			cmd |= OCMD_Y_AND_UV_SWAP;
-+			break;
- 		}
- 	}
- 
- 	return cmd;
- }
- 
--int intel_overlay_do_put_image(struct intel_overlay *overlay,
--			       struct drm_gem_object *new_bo,
--			       struct put_image_params *params)
-+static int intel_overlay_do_put_image(struct intel_overlay *overlay,
-+				      struct drm_gem_object *new_bo,
-+				      struct put_image_params *params)
- {
- 	int ret, tmp_width;
- 	struct overlay_registers *regs;
-@@ -755,24 +762,24 @@ int intel_overlay_do_put_image(struct intel_overlay *overlay,
- 		goto out_unpin;
- 
- 	if (!overlay->active) {
--		regs = intel_overlay_map_regs_atomic(overlay);
-+		regs = intel_overlay_map_regs(overlay);
- 		if (!regs) {
- 			ret = -ENOMEM;
- 			goto out_unpin;
- 		}
- 		regs->OCONFIG = OCONF_CC_OUT_8BIT;
--		if (IS_I965GM(overlay->dev))
-+		if (IS_GEN4(overlay->dev))
- 			regs->OCONFIG |= OCONF_CSC_MODE_BT709;
- 		regs->OCONFIG |= overlay->crtc->pipe == 0 ?
- 			OCONF_PIPE_A : OCONF_PIPE_B;
--		intel_overlay_unmap_regs_atomic(overlay);
-+		intel_overlay_unmap_regs(overlay, regs);
- 
- 		ret = intel_overlay_on(overlay);
- 		if (ret != 0)
- 			goto out_unpin;
- 	}
- 
--	regs = intel_overlay_map_regs_atomic(overlay);
-+	regs = intel_overlay_map_regs(overlay);
- 	if (!regs) {
- 		ret = -ENOMEM;
- 		goto out_unpin;
-@@ -788,7 +795,7 @@ int intel_overlay_do_put_image(struct intel_overlay *overlay,
- 
- 	regs->SWIDTH = params->src_w;
- 	regs->SWIDTHSW = calc_swidthsw(overlay->dev,
--			params->offset_Y, tmp_width);
-+				       params->offset_Y, tmp_width);
- 	regs->SHEIGHT = params->src_h;
- 	regs->OBUF_0Y = bo_priv->gtt_offset + params-> offset_Y;
- 	regs->OSTRIDE = params->stride_Y;
-@@ -799,9 +806,9 @@ int intel_overlay_do_put_image(struct intel_overlay *overlay,
- 		u32 tmp_U, tmp_V;
- 		regs->SWIDTH |= (params->src_w/uv_hscale) << 16;
- 		tmp_U = calc_swidthsw(overlay->dev, params->offset_U,
--				params->src_w/uv_hscale);
-+				      params->src_w/uv_hscale);
- 		tmp_V = calc_swidthsw(overlay->dev, params->offset_V,
--				params->src_w/uv_hscale);
-+				      params->src_w/uv_hscale);
- 		regs->SWIDTHSW |= max_t(u32, tmp_U, tmp_V) << 16;
- 		regs->SHEIGHT |= (params->src_h/uv_vscale) << 16;
- 		regs->OBUF_0U = bo_priv->gtt_offset + params->offset_U;
-@@ -815,9 +822,11 @@ int intel_overlay_do_put_image(struct intel_overlay *overlay,
- 
- 	regs->OCMD = overlay_cmd_reg(params);
- 
--	intel_overlay_unmap_regs_atomic(overlay);
-+	intel_overlay_unmap_regs(overlay, regs);
- 
--	intel_overlay_continue(overlay, scale_changed);
-+	ret = intel_overlay_continue(overlay, scale_changed);
-+	if (ret)
-+		goto out_unpin;
- 
- 	overlay->old_vid_bo = overlay->vid_bo;
- 	overlay->vid_bo = to_intel_bo(new_bo);
-@@ -829,20 +838,19 @@ out_unpin:
- 	return ret;
- }
- 
--int intel_overlay_switch_off(struct intel_overlay *overlay)
-+int intel_overlay_switch_off(struct intel_overlay *overlay,
-+			     bool interruptible)
- {
--	int ret;
- 	struct overlay_registers *regs;
- 	struct drm_device *dev = overlay->dev;
-+	int ret;
- 
- 	BUG_ON(!mutex_is_locked(&dev->struct_mutex));
- 	BUG_ON(!mutex_is_locked(&dev->mode_config.mutex));
- 
--	if (overlay->hw_wedged) {
--		ret = intel_overlay_recover_from_interrupt(overlay, 1);
--		if (ret != 0)
--			return ret;
--	}
-+	ret = intel_overlay_recover_from_interrupt(overlay, interruptible);
-+	if (ret != 0)
-+		return ret;
- 
- 	if (!overlay->active)
- 		return 0;
-@@ -851,33 +859,29 @@ int intel_overlay_switch_off(struct intel_overlay *overlay)
- 	if (ret != 0)
- 		return ret;
- 
--	regs = intel_overlay_map_regs_atomic(overlay);
-+	regs = intel_overlay_map_regs(overlay);
- 	regs->OCMD = 0;
--	intel_overlay_unmap_regs_atomic(overlay);
-+	intel_overlay_unmap_regs(overlay, regs);
- 
--	ret = intel_overlay_off(overlay);
-+	ret = intel_overlay_off(overlay, interruptible);
- 	if (ret != 0)
- 		return ret;
- 
- 	intel_overlay_off_tail(overlay);
--
- 	return 0;
- }
- 
- static int check_overlay_possible_on_crtc(struct intel_overlay *overlay,
- 					  struct intel_crtc *crtc)
- {
--        drm_i915_private_t *dev_priv = overlay->dev->dev_private;
--	u32 pipeconf;
--	int pipeconf_reg = (crtc->pipe == 0) ? PIPEACONF : PIPEBCONF;
-+	drm_i915_private_t *dev_priv = overlay->dev->dev_private;
- 
--	if (!crtc->base.enabled || crtc->dpms_mode != DRM_MODE_DPMS_ON)
-+	if (!crtc->active)
- 		return -EINVAL;
- 
--	pipeconf = I915_READ(pipeconf_reg);
--
- 	/* can't use the overlay with double wide pipe */
--	if (!IS_I965G(overlay->dev) && pipeconf & PIPEACONF_DOUBLE_WIDE)
-+	if (INTEL_INFO(overlay->dev)->gen < 4 &&
-+	    (I915_READ(PIPECONF(crtc->pipe)) & (PIPECONF_DOUBLE_WIDE | PIPECONF_ENABLE)) != PIPECONF_ENABLE)
- 		return -EINVAL;
- 
- 	return 0;
-@@ -886,20 +890,22 @@ static int check_overlay_possible_on_crtc(struct intel_overlay *overlay,
- static void update_pfit_vscale_ratio(struct intel_overlay *overlay)
- {
- 	struct drm_device *dev = overlay->dev;
--        drm_i915_private_t *dev_priv = dev->dev_private;
--	u32 ratio;
-+	drm_i915_private_t *dev_priv = dev->dev_private;
- 	u32 pfit_control = I915_READ(PFIT_CONTROL);
-+	u32 ratio;
- 
- 	/* XXX: This is not the same logic as in the xorg driver, but more in
--	 * line with the intel documentation for the i965 */
--	if (!IS_I965G(dev) && (pfit_control & VERT_AUTO_SCALE)) {
--		ratio = I915_READ(PFIT_AUTO_RATIOS) >> PFIT_VERT_SCALE_SHIFT;
--	} else { /* on i965 use the PGM reg to read out the autoscaler values */
--		ratio = I915_READ(PFIT_PGM_RATIOS);
--		if (IS_I965G(dev))
--			ratio >>= PFIT_VERT_SCALE_SHIFT_965;
-+	 * line with the intel documentation for the i965
-+	 */
-+	if (INTEL_INFO(dev)->gen >= 4) {
-+		/* on i965 use the PGM reg to read out the autoscaler values */
-+		ratio = I915_READ(PFIT_PGM_RATIOS) >> PFIT_VERT_SCALE_SHIFT_965;
-+	} else {
-+		if (pfit_control & VERT_AUTO_SCALE)
-+			ratio = I915_READ(PFIT_AUTO_RATIOS);
- 		else
--			ratio >>= PFIT_VERT_SCALE_SHIFT;
-+			ratio = I915_READ(PFIT_PGM_RATIOS);
-+		ratio >>= PFIT_VERT_SCALE_SHIFT;
- 	}
- 
- 	overlay->pfit_vscale_ratio = ratio;
-@@ -910,12 +916,10 @@ static int check_overlay_dst(struct intel_overlay *overlay,
- {
- 	struct drm_display_mode *mode = &overlay->crtc->base.mode;
- 
--	if ((rec->dst_x < mode->crtc_hdisplay)
--	    && (rec->dst_x + rec->dst_width
--		    <= mode->crtc_hdisplay)
--	    && (rec->dst_y < mode->crtc_vdisplay)
--	    && (rec->dst_y + rec->dst_height
--		    <= mode->crtc_vdisplay))
-+	if (rec->dst_x < mode->crtc_hdisplay &&
-+	    rec->dst_x + rec->dst_width <= mode->crtc_hdisplay &&
-+	    rec->dst_y < mode->crtc_vdisplay &&
-+	    rec->dst_y + rec->dst_height <= mode->crtc_vdisplay)
- 		return 0;
- 	else
- 		return -EINVAL;
-@@ -940,53 +944,59 @@ static int check_overlay_src(struct drm_device *dev,
- 			     struct drm_intel_overlay_put_image *rec,
- 			     struct drm_gem_object *new_bo)
- {
--	u32 stride_mask;
--	int depth;
- 	int uv_hscale = uv_hsubsampling(rec->flags);
- 	int uv_vscale = uv_vsubsampling(rec->flags);
--	size_t tmp;
-+	u32 stride_mask;
-+	int depth;
-+	u32 tmp;
- 
- 	/* check src dimensions */
- 	if (IS_845G(dev) || IS_I830(dev)) {
--		if (rec->src_height > IMAGE_MAX_HEIGHT_LEGACY
--		    || rec->src_width > IMAGE_MAX_WIDTH_LEGACY)
-+		if (rec->src_height > IMAGE_MAX_HEIGHT_LEGACY ||
-+		    rec->src_width  > IMAGE_MAX_WIDTH_LEGACY)
- 			return -EINVAL;
- 	} else {
--		if (rec->src_height > IMAGE_MAX_HEIGHT
--		    || rec->src_width > IMAGE_MAX_WIDTH)
-+		if (rec->src_height > IMAGE_MAX_HEIGHT ||
-+		    rec->src_width  > IMAGE_MAX_WIDTH)
- 			return -EINVAL;
- 	}
-+
- 	/* better safe than sorry, use 4 as the maximal subsampling ratio */
--	if (rec->src_height < N_VERT_Y_TAPS*4
--	    || rec->src_width < N_HORIZ_Y_TAPS*4)
-+	if (rec->src_height < N_VERT_Y_TAPS*4 ||
-+	    rec->src_width  < N_HORIZ_Y_TAPS*4)
- 		return -EINVAL;
- 
- 	/* check alignment constraints */
- 	switch (rec->flags & I915_OVERLAY_TYPE_MASK) {
--		case I915_OVERLAY_RGB:
--			/* not implemented */
-+	case I915_OVERLAY_RGB:
-+		/* not implemented */
-+		return -EINVAL;
-+
-+	case I915_OVERLAY_YUV_PACKED:
-+		if (uv_vscale != 1)
- 			return -EINVAL;
--		case I915_OVERLAY_YUV_PACKED:
--			depth = packed_depth_bytes(rec->flags);
--			if (uv_vscale != 1)
--				return -EINVAL;
--			if (depth < 0)
--				return depth;
--			/* ignore UV planes */
--			rec->stride_UV = 0;
--			rec->offset_U = 0;
--			rec->offset_V = 0;
--			/* check pixel alignment */
--			if (rec->offset_Y % depth)
--				return -EINVAL;
--			break;
--		case I915_OVERLAY_YUV_PLANAR:
--			if (uv_vscale < 0 || uv_hscale < 0)
--				return -EINVAL;
--			/* no offset restrictions for planar formats */
--			break;
--		default:
-+
-+		depth = packed_depth_bytes(rec->flags);
-+		if (depth < 0)
-+			return depth;
-+
-+		/* ignore UV planes */
-+		rec->stride_UV = 0;
-+		rec->offset_U = 0;
-+		rec->offset_V = 0;
-+		/* check pixel alignment */
-+		if (rec->offset_Y % depth)
- 			return -EINVAL;
-+		break;
-+
-+	case I915_OVERLAY_YUV_PLANAR:
-+		if (uv_vscale < 0 || uv_hscale < 0)
-+			return -EINVAL;
-+		/* no offset restrictions for planar formats */
-+		break;
-+
-+	default:
-+		return -EINVAL;
- 	}
- 
- 	if (rec->src_width % uv_hscale)
-@@ -1000,47 +1010,74 @@ static int check_overlay_src(struct drm_device *dev,
- 
- 	if (rec->stride_Y & stride_mask || rec->stride_UV & stride_mask)
- 		return -EINVAL;
--	if (IS_I965G(dev) && rec->stride_Y < 512)
-+	if (IS_GEN4(dev) && rec->stride_Y < 512)
- 		return -EINVAL;
- 
- 	tmp = (rec->flags & I915_OVERLAY_TYPE_MASK) == I915_OVERLAY_YUV_PLANAR ?
--		4 : 8;
--	if (rec->stride_Y > tmp*1024 || rec->stride_UV > 2*1024)
-+		4096 : 8192;
-+	if (rec->stride_Y > tmp || rec->stride_UV > 2*1024)
- 		return -EINVAL;
- 
- 	/* check buffer dimensions */
- 	switch (rec->flags & I915_OVERLAY_TYPE_MASK) {
--		case I915_OVERLAY_RGB:
--		case I915_OVERLAY_YUV_PACKED:
--			/* always 4 Y values per depth pixels */
--			if (packed_width_bytes(rec->flags, rec->src_width)
--					> rec->stride_Y)
--				return -EINVAL;
--
--			tmp = rec->stride_Y*rec->src_height;
--			if (rec->offset_Y + tmp > new_bo->size)
--				return -EINVAL;
--			break;
--		case I915_OVERLAY_YUV_PLANAR:
--			if (rec->src_width > rec->stride_Y)
--				return -EINVAL;
--			if (rec->src_width/uv_hscale > rec->stride_UV)
--				return -EINVAL;
--
--			tmp = rec->stride_Y*rec->src_height;
--			if (rec->offset_Y + tmp > new_bo->size)
--				return -EINVAL;
--			tmp = rec->stride_UV*rec->src_height;
--			tmp /= uv_vscale;
--			if (rec->offset_U + tmp > new_bo->size
--			    || rec->offset_V + tmp > new_bo->size)
--				return -EINVAL;
--			break;
-+	case I915_OVERLAY_RGB:
-+	case I915_OVERLAY_YUV_PACKED:
-+		/* always 4 Y values per depth pixels */
-+		if (packed_width_bytes(rec->flags, rec->src_width) > rec->stride_Y)
-+			return -EINVAL;
-+
-+		tmp = rec->stride_Y*rec->src_height;
-+		if (rec->offset_Y + tmp > new_bo->size)
-+			return -EINVAL;
-+		break;
-+
-+	case I915_OVERLAY_YUV_PLANAR:
-+		if (rec->src_width > rec->stride_Y)
-+			return -EINVAL;
-+		if (rec->src_width/uv_hscale > rec->stride_UV)
-+			return -EINVAL;
-+
-+		tmp = rec->stride_Y * rec->src_height;
-+		if (rec->offset_Y + tmp > new_bo->size)
-+			return -EINVAL;
-+
-+		tmp = rec->stride_UV * (rec->src_height / uv_vscale);
-+		if (rec->offset_U + tmp > new_bo->size ||
-+		    rec->offset_V + tmp > new_bo->size)
-+			return -EINVAL;
-+		break;
- 	}
- 
- 	return 0;
- }
- 
-+/**
-+ * Return the pipe currently connected to the panel fitter,
-+ * or -1 if the panel fitter is not present or not in use
-+ */
-+static int intel_panel_fitter_pipe(struct drm_device *dev)
-+{
-+	struct drm_i915_private *dev_priv = dev->dev_private;
-+	u32  pfit_control;
-+
-+	/* i830 doesn't have a panel fitter */
-+	if (IS_I830(dev))
-+		return -1;
-+
-+	pfit_control = I915_READ(PFIT_CONTROL);
-+
-+	/* See if the panel fitter is in use */
-+	if ((pfit_control & PFIT_ENABLE) == 0)
-+		return -1;
-+
-+	/* 965 can place panel fitter on either pipe */
-+	if (IS_GEN4(dev))
-+		return (pfit_control >> 29) & 0x3;
-+
-+	/* older chips can only use pipe 1 */
-+	return 1;
-+}
-+
- int intel_overlay_put_image(struct drm_device *dev, void *data,
-                             struct drm_file *file_priv)
- {
-@@ -1068,7 +1105,7 @@ int intel_overlay_put_image(struct drm_device *dev, void *data,
- 		mutex_lock(&dev->mode_config.mutex);
- 		mutex_lock(&dev->struct_mutex);
- 
--		ret = intel_overlay_switch_off(overlay);
-+		ret = intel_overlay_switch_off(overlay, true);
- 
- 		mutex_unlock(&dev->struct_mutex);
- 		mutex_unlock(&dev->mode_config.mutex);
-@@ -1081,7 +1118,7 @@ int intel_overlay_put_image(struct drm_device *dev, void *data,
- 		return -ENOMEM;
- 
- 	drmmode_obj = drm_mode_object_find(dev, put_image_rec->crtc_id,
--                        DRM_MODE_OBJECT_CRTC);
-+					   DRM_MODE_OBJECT_CRTC);
- 	if (!drmmode_obj) {
- 		ret = -ENOENT;
- 		goto out_free;
-@@ -1089,7 +1126,7 @@ int intel_overlay_put_image(struct drm_device *dev, void *data,
- 	crtc = to_intel_crtc(obj_to_crtc(drmmode_obj));
- 
- 	new_bo = drm_gem_object_lookup(dev, file_priv,
--			put_image_rec->bo_handle);
-+				       put_image_rec->bo_handle);
- 	if (!new_bo) {
- 		ret = -ENOENT;
- 		goto out_free;
-@@ -1098,15 +1135,13 @@ int intel_overlay_put_image(struct drm_device *dev, void *data,
- 	mutex_lock(&dev->mode_config.mutex);
- 	mutex_lock(&dev->struct_mutex);
- 
--	if (overlay->hw_wedged) {
--		ret = intel_overlay_recover_from_interrupt(overlay, 1);
--		if (ret != 0)
--			goto out_unlock;
--	}
-+	ret = intel_overlay_recover_from_interrupt(overlay, true);
-+	if (ret != 0)
-+		goto out_unlock;
- 
- 	if (overlay->crtc != crtc) {
- 		struct drm_display_mode *mode = &crtc->base.mode;
--		ret = intel_overlay_switch_off(overlay);
-+		ret = intel_overlay_switch_off(overlay, true);
- 		if (ret != 0)
- 			goto out_unlock;
- 
-@@ -1117,9 +1152,9 @@ int intel_overlay_put_image(struct drm_device *dev, void *data,
- 		overlay->crtc = crtc;
- 		crtc->overlay = overlay;
- 
--		if (intel_panel_fitter_pipe(dev) == crtc->pipe
--		    /* and line to wide, i.e. one-line-mode */
--		    && mode->hdisplay > 1024) {
-+		/* line too wide, i.e. one-line-mode */
-+		if (mode->hdisplay > 1024 &&
-+		    intel_panel_fitter_pipe(dev) == crtc->pipe) {
- 			overlay->pfit_active = 1;
- 			update_pfit_vscale_ratio(overlay);
- 		} else
-@@ -1132,10 +1167,10 @@ int intel_overlay_put_image(struct drm_device *dev, void *data,
- 
- 	if (overlay->pfit_active) {
- 		params->dst_y = ((((u32)put_image_rec->dst_y) << 12) /
--			overlay->pfit_vscale_ratio);
-+				 overlay->pfit_vscale_ratio);
- 		/* shifting right rounds downwards, so add 1 */
- 		params->dst_h = ((((u32)put_image_rec->dst_height) << 12) /
--			overlay->pfit_vscale_ratio) + 1;
-+				 overlay->pfit_vscale_ratio) + 1;
- 	} else {
- 		params->dst_y = put_image_rec->dst_y;
- 		params->dst_h = put_image_rec->dst_height;
-@@ -1147,8 +1182,8 @@ int intel_overlay_put_image(struct drm_device *dev, void *data,
- 	params->src_h = put_image_rec->src_height;
- 	params->src_scan_w = put_image_rec->src_scan_width;
- 	params->src_scan_h = put_image_rec->src_scan_height;
--	if (params->src_scan_h > params->src_h
--	    || params->src_scan_w > params->src_w) {
-+	if (params->src_scan_h > params->src_h ||
-+	    params->src_scan_w > params->src_w) {
- 		ret = -EINVAL;
- 		goto out_unlock;
- 	}
-@@ -1204,7 +1239,7 @@ static bool check_gamma_bounds(u32 gamma1, u32 gamma2)
- 		return false;
- 
- 	for (i = 0; i < 3; i++) {
--		if (((gamma1 >> i * 8) & 0xff) >= ((gamma2 >> i*8) & 0xff))
-+		if (((gamma1 >> i*8) & 0xff) >= ((gamma2 >> i*8) & 0xff))
- 			return false;
- 	}
- 
-@@ -1225,16 +1260,18 @@ static bool check_gamma5_errata(u32 gamma5)
- 
- static int check_gamma(struct drm_intel_overlay_attrs *attrs)
- {
--	if (!check_gamma_bounds(0, attrs->gamma0)
--	    || !check_gamma_bounds(attrs->gamma0, attrs->gamma1)
--	    || !check_gamma_bounds(attrs->gamma1, attrs->gamma2)
--	    || !check_gamma_bounds(attrs->gamma2, attrs->gamma3)
--	    || !check_gamma_bounds(attrs->gamma3, attrs->gamma4)
--	    || !check_gamma_bounds(attrs->gamma4, attrs->gamma5)
--	    || !check_gamma_bounds(attrs->gamma5, 0x00ffffff))
-+	if (!check_gamma_bounds(0, attrs->gamma0) ||
-+	    !check_gamma_bounds(attrs->gamma0, attrs->gamma1) ||
-+	    !check_gamma_bounds(attrs->gamma1, attrs->gamma2) ||
-+	    !check_gamma_bounds(attrs->gamma2, attrs->gamma3) ||
-+	    !check_gamma_bounds(attrs->gamma3, attrs->gamma4) ||
-+	    !check_gamma_bounds(attrs->gamma4, attrs->gamma5) ||
-+	    !check_gamma_bounds(attrs->gamma5, 0x00ffffff))
- 		return -EINVAL;
-+
- 	if (!check_gamma5_errata(attrs->gamma5))
- 		return -EINVAL;
-+
- 	return 0;
- }
- 
-@@ -1261,13 +1298,14 @@ int intel_overlay_attrs(struct drm_device *dev, void *data,
- 	mutex_lock(&dev->mode_config.mutex);
- 	mutex_lock(&dev->struct_mutex);
- 
-+	ret = -EINVAL;
- 	if (!(attrs->flags & I915_OVERLAY_UPDATE_ATTRS)) {
--		attrs->color_key = overlay->color_key;
-+		attrs->color_key  = overlay->color_key;
- 		attrs->brightness = overlay->brightness;
--		attrs->contrast = overlay->contrast;
-+		attrs->contrast   = overlay->contrast;
- 		attrs->saturation = overlay->saturation;
- 
--		if (IS_I9XX(dev)) {
-+		if (!IS_GEN2(dev)) {
- 			attrs->gamma0 = I915_READ(OGAMC0);
- 			attrs->gamma1 = I915_READ(OGAMC1);
- 			attrs->gamma2 = I915_READ(OGAMC2);
-@@ -1275,29 +1313,20 @@ int intel_overlay_attrs(struct drm_device *dev, void *data,
- 			attrs->gamma4 = I915_READ(OGAMC4);
- 			attrs->gamma5 = I915_READ(OGAMC5);
- 		}
--		ret = 0;
- 	} else {
--		overlay->color_key = attrs->color_key;
--		if (attrs->brightness >= -128 && attrs->brightness <= 127) {
--			overlay->brightness = attrs->brightness;
--		} else {
--			ret = -EINVAL;
-+		if (attrs->brightness < -128 || attrs->brightness > 127)
- 			goto out_unlock;
--		}
--		if (attrs->contrast <= 255) {
--			overlay->contrast = attrs->contrast;
--		} else {
--			ret = -EINVAL;
-+		if (attrs->contrast > 255)
- 			goto out_unlock;
--		}
--		if (attrs->saturation <= 1023) {
--			overlay->saturation = attrs->saturation;
--		} else {
--			ret = -EINVAL;
-+		if (attrs->saturation > 1023)
- 			goto out_unlock;
--		}
- 
--		regs = intel_overlay_map_regs_atomic(overlay);
-+		overlay->color_key  = attrs->color_key;
-+		overlay->brightness = attrs->brightness;
-+		overlay->contrast   = attrs->contrast;
-+		overlay->saturation = attrs->saturation;
-+
-+		regs = intel_overlay_map_regs(overlay);
- 		if (!regs) {
- 			ret = -ENOMEM;
- 			goto out_unlock;
-@@ -1305,13 +1334,11 @@ int intel_overlay_attrs(struct drm_device *dev, void *data,
- 
- 		update_reg_attrs(overlay, regs);
- 
--		intel_overlay_unmap_regs_atomic(overlay);
-+		intel_overlay_unmap_regs(overlay, regs);
- 
- 		if (attrs->flags & I915_OVERLAY_UPDATE_GAMMA) {
--			if (!IS_I9XX(dev)) {
--				ret = -EINVAL;
-+			if (IS_GEN2(dev))
- 				goto out_unlock;
--			}
- 
- 			if (overlay->active) {
- 				ret = -EBUSY;
-@@ -1319,7 +1346,7 @@ int intel_overlay_attrs(struct drm_device *dev, void *data,
- 			}
- 
- 			ret = check_gamma(attrs);
--			if (ret != 0)
-+			if (ret)
- 				goto out_unlock;
- 
- 			I915_WRITE(OGAMC0, attrs->gamma0);
-@@ -1329,9 +1356,9 @@ int intel_overlay_attrs(struct drm_device *dev, void *data,
- 			I915_WRITE(OGAMC4, attrs->gamma4);
- 			I915_WRITE(OGAMC5, attrs->gamma5);
- 		}
--		ret = 0;
- 	}
- 
-+	ret = 0;
- out_unlock:
- 	mutex_unlock(&dev->struct_mutex);
- 	mutex_unlock(&dev->mode_config.mutex);
-@@ -1347,7 +1374,7 @@ void intel_setup_overlay(struct drm_device *dev)
- 	struct overlay_registers *regs;
- 	int ret;
- 
--	if (!OVERLAY_EXISTS(dev))
-+	if (!HAS_OVERLAY(dev))
- 		return;
- 
- 	overlay = kzalloc(sizeof(struct intel_overlay), GFP_KERNEL);
-@@ -1360,22 +1387,28 @@ void intel_setup_overlay(struct drm_device *dev)
- 		goto out_free;
- 	overlay->reg_bo = to_intel_bo(reg_bo);
- 
--	if (OVERLAY_NONPHYSICAL(dev)) {
--		ret = i915_gem_object_pin(reg_bo, PAGE_SIZE);
--		if (ret) {
--                        DRM_ERROR("failed to pin overlay register bo\n");
--                        goto out_free_bo;
--                }
--		overlay->flip_addr = overlay->reg_bo->gtt_offset;
--	} else {
-+	if (OVERLAY_NEEDS_PHYSICAL(dev)) {
- 		ret = i915_gem_attach_phys_object(dev, reg_bo,
- 						  I915_GEM_PHYS_OVERLAY_REGS,
--						  0);
-+						  PAGE_SIZE);
-                 if (ret) {
-                         DRM_ERROR("failed to attach phys overlay regs\n");
-                         goto out_free_bo;
-                 }
- 		overlay->flip_addr = overlay->reg_bo->phys_obj->handle->busaddr;
-+	} else {
-+		ret = i915_gem_object_pin(reg_bo, PAGE_SIZE);
-+		if (ret) {
-+                        DRM_ERROR("failed to pin overlay register bo\n");
-+                        goto out_free_bo;
-+                }
-+		overlay->flip_addr = overlay->reg_bo->gtt_offset;
-+
-+		ret = i915_gem_object_set_to_gtt_domain(reg_bo, true);
-+		if (ret) {
-+                        DRM_ERROR("failed to move overlay register bo into the GTT\n");
-+                        goto out_unpin_bo;
-+                }
- 	}
- 
- 	/* init all values */
-@@ -1384,21 +1417,22 @@ void intel_setup_overlay(struct drm_device *dev)
- 	overlay->contrast = 75;
- 	overlay->saturation = 146;
- 
--	regs = intel_overlay_map_regs_atomic(overlay);
-+	regs = intel_overlay_map_regs(overlay);
- 	if (!regs)
- 		goto out_free_bo;
- 
- 	memset(regs, 0, sizeof(struct overlay_registers));
- 	update_polyphase_filter(regs);
--
- 	update_reg_attrs(overlay, regs);
- 
--	intel_overlay_unmap_regs_atomic(overlay);
-+	intel_overlay_unmap_regs(overlay, regs);
- 
- 	dev_priv->overlay = overlay;
- 	DRM_INFO("initialized overlay support\n");
- 	return;
- 
-+out_unpin_bo:
-+	i915_gem_object_unpin(reg_bo);
- out_free_bo:
- 	drm_gem_object_unreference(reg_bo);
- out_free:
-@@ -1408,18 +1442,23 @@ out_free:
- 
- void intel_cleanup_overlay(struct drm_device *dev)
- {
--        drm_i915_private_t *dev_priv = dev->dev_private;
-+	drm_i915_private_t *dev_priv = dev->dev_private;
- 
--	if (dev_priv->overlay) {
--		/* The bo's should be free'd by the generic code already.
--		 * Furthermore modesetting teardown happens beforehand so the
--		 * hardware should be off already */
--		BUG_ON(dev_priv->overlay->active);
-+	if (!dev_priv->overlay)
-+		return;
- 
--		kfree(dev_priv->overlay);
--	}
-+	/* The bo's should be free'd by the generic code already.
-+	 * Furthermore modesetting teardown happens beforehand so the
-+	 * hardware should be off already */
-+	BUG_ON(dev_priv->overlay->active);
-+
-+	drm_gem_object_unreference_unlocked(&dev_priv->overlay->reg_bo->base);
-+	kfree(dev_priv->overlay);
- }
- 
-+#ifdef CONFIG_DEBUG_FS
-+#include <linux/seq_file.h>
-+
- struct intel_overlay_error_state {
- 	struct overlay_registers regs;
- 	unsigned long base;
-@@ -1427,6 +1466,30 @@ struct intel_overlay_error_state {
- 	u32 isr;
- };
- 
-+static struct overlay_registers *
-+intel_overlay_map_regs_atomic(struct intel_overlay *overlay)
-+{
-+	drm_i915_private_t *dev_priv = overlay->dev->dev_private;
-+	struct overlay_registers *regs;
-+
-+	if (OVERLAY_NEEDS_PHYSICAL(overlay->dev))
-+		regs = overlay->reg_bo->phys_obj->handle->vaddr;
-+	else
-+		regs = io_mapping_map_atomic_wc(dev_priv->mm.gtt_mapping,
-+						overlay->reg_bo->gtt_offset,
-+						KM_USER0);
-+
-+	return regs;
-+}
-+
-+static void intel_overlay_unmap_regs_atomic(struct intel_overlay *overlay,
-+					    struct overlay_registers *regs)
-+{
-+	if (!OVERLAY_NEEDS_PHYSICAL(overlay->dev))
-+		io_mapping_unmap_atomic(regs, KM_USER0);
-+}
-+
-+
- struct intel_overlay_error_state *
- intel_overlay_capture_error_state(struct drm_device *dev)
- {
-@@ -1444,17 +1507,17 @@ intel_overlay_capture_error_state(struct drm_device *dev)
- 
- 	error->dovsta = I915_READ(DOVSTA);
- 	error->isr = I915_READ(ISR);
--	if (OVERLAY_NONPHYSICAL(overlay->dev))
--		error->base = (long) overlay->reg_bo->gtt_offset;
--	else
-+	if (OVERLAY_NEEDS_PHYSICAL(overlay->dev))
- 		error->base = (long) overlay->reg_bo->phys_obj->handle->vaddr;
-+	else
-+		error->base = (long) overlay->reg_bo->gtt_offset;
- 
- 	regs = intel_overlay_map_regs_atomic(overlay);
- 	if (!regs)
- 		goto err;
- 
- 	memcpy_fromio(&error->regs, regs, sizeof(struct overlay_registers));
--	intel_overlay_unmap_regs_atomic(overlay);
-+	intel_overlay_unmap_regs_atomic(overlay, regs);
- 
- 	return error;
- 
-@@ -1515,3 +1578,4 @@ intel_overlay_print_error_state(struct seq_file *m, struct intel_overlay_error_s
- 	P(UVSCALEV);
- #undef P
- }
-+#endif
-diff --git a/drivers/gpu/drm/i915/intel_panel.c b/drivers/gpu/drm/i915/intel_panel.c
-index e7f5299..92ff8f3 100644
---- a/drivers/gpu/drm/i915/intel_panel.c
-+++ b/drivers/gpu/drm/i915/intel_panel.c
-@@ -30,6 +30,8 @@
- 
- #include "intel_drv.h"
- 
-+#define PCI_LBPC 0xf4 /* legacy/combination backlight modes */
-+
- void
- intel_fixed_panel_mode(struct drm_display_mode *fixed_mode,
- 		       struct drm_display_mode *adjusted_mode)
-@@ -109,3 +111,110 @@ done:
- 	dev_priv->pch_pf_pos = (x << 16) | y;
- 	dev_priv->pch_pf_size = (width << 16) | height;
- }
-+
-+static int is_backlight_combination_mode(struct drm_device *dev)
-+{
-+	struct drm_i915_private *dev_priv = dev->dev_private;
-+
-+	if (INTEL_INFO(dev)->gen >= 4)
-+		return I915_READ(BLC_PWM_CTL2) & BLM_COMBINATION_MODE;
-+
-+	if (IS_GEN2(dev))
-+		return I915_READ(BLC_PWM_CTL) & BLM_LEGACY_MODE;
-+
-+	return 0;
-+}
-+
-+u32 intel_panel_get_max_backlight(struct drm_device *dev)
-+{
-+	struct drm_i915_private *dev_priv = dev->dev_private;
-+	u32 max;
-+
-+	if (HAS_PCH_SPLIT(dev)) {
-+		max = I915_READ(BLC_PWM_PCH_CTL2) >> 16;
-+	} else {
-+		max = I915_READ(BLC_PWM_CTL);
-+		if (IS_PINEVIEW(dev)) {
-+			max >>= 17;
-+		} else {
-+			max >>= 16;
-+			if (INTEL_INFO(dev)->gen < 4)
-+				max &= ~1;
-+		}
-+
-+		if (is_backlight_combination_mode(dev))
-+			max *= 0xff;
-+	}
-+
-+	if (max == 0) {
-+		/* XXX add code here to query mode clock or hardware clock
-+		 * and program max PWM appropriately.
-+		 */
-+		DRM_ERROR("fixme: max PWM is zero.\n");
-+		max = 1;
-+	}
-+
-+	DRM_DEBUG_DRIVER("max backlight PWM = %d\n", max);
-+	return max;
-+}
-+
-+u32 intel_panel_get_backlight(struct drm_device *dev)
-+{
-+	struct drm_i915_private *dev_priv = dev->dev_private;
-+	u32 val;
-+
-+	if (HAS_PCH_SPLIT(dev)) {
-+		val = I915_READ(BLC_PWM_CPU_CTL) & BACKLIGHT_DUTY_CYCLE_MASK;
-+	} else {
-+		val = I915_READ(BLC_PWM_CTL) & BACKLIGHT_DUTY_CYCLE_MASK;
-+		if (IS_PINEVIEW(dev))
-+			val >>= 1;
-+
-+		if (is_backlight_combination_mode(dev)){
-+			u8 lbpc;
-+
-+			val &= ~1;
-+			pci_read_config_byte(dev->pdev, PCI_LBPC, &lbpc);
-+			val *= lbpc;
-+			val >>= 1;
-+		}
-+	}
-+
-+	DRM_DEBUG_DRIVER("get backlight PWM = %d\n", val);
-+	return val;
-+}
-+
-+static void intel_pch_panel_set_backlight(struct drm_device *dev, u32 level)
-+{
-+	struct drm_i915_private *dev_priv = dev->dev_private;
-+	u32 val = I915_READ(BLC_PWM_CPU_CTL) & ~BACKLIGHT_DUTY_CYCLE_MASK;
-+	I915_WRITE(BLC_PWM_CPU_CTL, val | level);
-+}
-+
-+void intel_panel_set_backlight(struct drm_device *dev, u32 level)
-+{
-+	struct drm_i915_private *dev_priv = dev->dev_private;
-+	u32 tmp;
-+
-+	DRM_DEBUG_DRIVER("set backlight PWM = %d\n", level);
-+
-+	if (HAS_PCH_SPLIT(dev))
-+		return intel_pch_panel_set_backlight(dev, level);
-+
-+	if (is_backlight_combination_mode(dev)){
-+		u32 max = intel_panel_get_max_backlight(dev);
-+		u8 lpbc;
-+
-+		lpbc = level * 0xfe / max + 1;
-+		level /= lpbc;
-+		pci_write_config_byte(dev->pdev, PCI_LBPC, lpbc);
-+	}
-+
-+	tmp = I915_READ(BLC_PWM_CTL);
-+	if (IS_PINEVIEW(dev)) {
-+		tmp &= ~(BACKLIGHT_DUTY_CYCLE_MASK - 1);
-+		level <<= 1;
-+	} else
-+		tmp &= ~BACKLIGHT_DUTY_CYCLE_MASK;
-+	I915_WRITE(BLC_PWM_CTL, tmp | level);
-+}
-diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
-index cb3508f..b83306f 100644
---- a/drivers/gpu/drm/i915/intel_ringbuffer.c
-+++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
-@@ -32,6 +32,7 @@
- #include "i915_drv.h"
- #include "i915_drm.h"
- #include "i915_trace.h"
-+#include "intel_drv.h"
- 
- static u32 i915_gem_get_seqno(struct drm_device *dev)
- {
-@@ -49,9 +50,9 @@ static u32 i915_gem_get_seqno(struct drm_device *dev)
- 
- static void
- render_ring_flush(struct drm_device *dev,
--		struct intel_ring_buffer *ring,
--		u32	invalidate_domains,
--		u32	flush_domains)
-+		  struct intel_ring_buffer *ring,
-+		  u32	invalidate_domains,
-+		  u32	flush_domains)
- {
- 	drm_i915_private_t *dev_priv = dev->dev_private;
- 	u32 cmd;
-@@ -97,7 +98,7 @@ render_ring_flush(struct drm_device *dev,
- 		if ((invalidate_domains|flush_domains) &
- 		    I915_GEM_DOMAIN_RENDER)
- 			cmd &= ~MI_NO_WRITE_FLUSH;
--		if (!IS_I965G(dev)) {
-+		if (INTEL_INFO(dev)->gen < 4) {
- 			/*
- 			 * On the 965, the sampler cache always gets flushed
- 			 * and this bit is reserved.
-@@ -118,38 +119,26 @@ render_ring_flush(struct drm_device *dev,
- 	}
- }
- 
--static unsigned int render_ring_get_head(struct drm_device *dev,
--		struct intel_ring_buffer *ring)
--{
--	drm_i915_private_t *dev_priv = dev->dev_private;
--	return I915_READ(PRB0_HEAD) & HEAD_ADDR;
--}
--
--static unsigned int render_ring_get_tail(struct drm_device *dev,
--		struct intel_ring_buffer *ring)
-+static void ring_write_tail(struct drm_device *dev,
-+			    struct intel_ring_buffer *ring,
-+			    u32 value)
- {
- 	drm_i915_private_t *dev_priv = dev->dev_private;
--	return I915_READ(PRB0_TAIL) & TAIL_ADDR;
-+	I915_WRITE_TAIL(ring, value);
- }
- 
--static unsigned int render_ring_get_active_head(struct drm_device *dev,
--		struct intel_ring_buffer *ring)
-+u32 intel_ring_get_active_head(struct drm_device *dev,
-+			       struct intel_ring_buffer *ring)
- {
- 	drm_i915_private_t *dev_priv = dev->dev_private;
--	u32 acthd_reg = IS_I965G(dev) ? ACTHD_I965 : ACTHD;
-+	u32 acthd_reg = INTEL_INFO(dev)->gen >= 4 ?
-+			RING_ACTHD(ring->mmio_base) : ACTHD;
- 
- 	return I915_READ(acthd_reg);
- }
- 
--static void render_ring_advance_ring(struct drm_device *dev,
--		struct intel_ring_buffer *ring)
--{
--	drm_i915_private_t *dev_priv = dev->dev_private;
--	I915_WRITE(PRB0_TAIL, ring->tail);
--}
--
- static int init_ring_common(struct drm_device *dev,
--		struct intel_ring_buffer *ring)
-+			    struct intel_ring_buffer *ring)
- {
- 	u32 head;
- 	drm_i915_private_t *dev_priv = dev->dev_private;
-@@ -157,57 +146,57 @@ static int init_ring_common(struct drm_device *dev,
- 	obj_priv = to_intel_bo(ring->gem_object);
- 
- 	/* Stop the ring if it's running. */
--	I915_WRITE(ring->regs.ctl, 0);
--	I915_WRITE(ring->regs.head, 0);
--	I915_WRITE(ring->regs.tail, 0);
-+	I915_WRITE_CTL(ring, 0);
-+	I915_WRITE_HEAD(ring, 0);
-+	ring->write_tail(dev, ring, 0);
- 
- 	/* Initialize the ring. */
--	I915_WRITE(ring->regs.start, obj_priv->gtt_offset);
--	head = ring->get_head(dev, ring);
-+	I915_WRITE_START(ring, obj_priv->gtt_offset);
-+	head = I915_READ_HEAD(ring) & HEAD_ADDR;
- 
- 	/* G45 ring initialization fails to reset head to zero */
- 	if (head != 0) {
- 		DRM_ERROR("%s head not reset to zero "
- 				"ctl %08x head %08x tail %08x start %08x\n",
- 				ring->name,
--				I915_READ(ring->regs.ctl),
--				I915_READ(ring->regs.head),
--				I915_READ(ring->regs.tail),
--				I915_READ(ring->regs.start));
-+				I915_READ_CTL(ring),
-+				I915_READ_HEAD(ring),
-+				I915_READ_TAIL(ring),
-+				I915_READ_START(ring));
- 
--		I915_WRITE(ring->regs.head, 0);
-+		I915_WRITE_HEAD(ring, 0);
- 
- 		DRM_ERROR("%s head forced to zero "
- 				"ctl %08x head %08x tail %08x start %08x\n",
- 				ring->name,
--				I915_READ(ring->regs.ctl),
--				I915_READ(ring->regs.head),
--				I915_READ(ring->regs.tail),
--				I915_READ(ring->regs.start));
-+				I915_READ_CTL(ring),
-+				I915_READ_HEAD(ring),
-+				I915_READ_TAIL(ring),
-+				I915_READ_START(ring));
- 	}
- 
--	I915_WRITE(ring->regs.ctl,
-+	I915_WRITE_CTL(ring,
- 			((ring->gem_object->size - PAGE_SIZE) & RING_NR_PAGES)
--			| RING_NO_REPORT | RING_VALID);
-+			| RING_REPORT_64K | RING_VALID);
- 
--	head = I915_READ(ring->regs.head) & HEAD_ADDR;
-+	head = I915_READ_HEAD(ring) & HEAD_ADDR;
- 	/* If the head is still not zero, the ring is dead */
- 	if (head != 0) {
- 		DRM_ERROR("%s initialization failed "
- 				"ctl %08x head %08x tail %08x start %08x\n",
- 				ring->name,
--				I915_READ(ring->regs.ctl),
--				I915_READ(ring->regs.head),
--				I915_READ(ring->regs.tail),
--				I915_READ(ring->regs.start));
-+				I915_READ_CTL(ring),
-+				I915_READ_HEAD(ring),
-+				I915_READ_TAIL(ring),
-+				I915_READ_START(ring));
- 		return -EIO;
- 	}
- 
- 	if (!drm_core_check_feature(dev, DRIVER_MODESET))
- 		i915_kernel_lost_context(dev);
- 	else {
--		ring->head = ring->get_head(dev, ring);
--		ring->tail = ring->get_tail(dev, ring);
-+		ring->head = I915_READ_HEAD(ring) & HEAD_ADDR;
-+		ring->tail = I915_READ_TAIL(ring) & TAIL_ADDR;
- 		ring->space = ring->head - (ring->tail + 8);
- 		if (ring->space < 0)
- 			ring->space += ring->size;
-@@ -216,13 +205,13 @@ static int init_ring_common(struct drm_device *dev,
- }
- 
- static int init_render_ring(struct drm_device *dev,
--		struct intel_ring_buffer *ring)
-+			    struct intel_ring_buffer *ring)
- {
- 	drm_i915_private_t *dev_priv = dev->dev_private;
- 	int ret = init_ring_common(dev, ring);
- 	int mode;
- 
--	if (IS_I9XX(dev) && !IS_GEN3(dev)) {
-+	if (INTEL_INFO(dev)->gen > 3) {
- 		mode = VS_TIMER_DISPATCH << 16 | VS_TIMER_DISPATCH;
- 		if (IS_GEN6(dev))
- 			mode |= MI_FLUSH_ENABLE << 16 | MI_FLUSH_ENABLE;
-@@ -250,9 +239,8 @@ do {									\
-  */
- static u32
- render_ring_add_request(struct drm_device *dev,
--		struct intel_ring_buffer *ring,
--		struct drm_file *file_priv,
--		u32 flush_domains)
-+			struct intel_ring_buffer *ring,
-+			u32 flush_domains)
- {
- 	drm_i915_private_t *dev_priv = dev->dev_private;
- 	u32 seqno;
-@@ -315,8 +303,8 @@ render_ring_add_request(struct drm_device *dev,
- }
- 
- static u32
--render_ring_get_gem_seqno(struct drm_device *dev,
--		struct intel_ring_buffer *ring)
-+render_ring_get_seqno(struct drm_device *dev,
-+		      struct intel_ring_buffer *ring)
- {
- 	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
- 	if (HAS_PIPE_CONTROL(dev))
-@@ -327,7 +315,7 @@ render_ring_get_gem_seqno(struct drm_device *dev,
- 
- static void
- render_ring_get_user_irq(struct drm_device *dev,
--		struct intel_ring_buffer *ring)
-+			 struct intel_ring_buffer *ring)
- {
- 	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
- 	unsigned long irqflags;
-@@ -344,7 +332,7 @@ render_ring_get_user_irq(struct drm_device *dev,
- 
- static void
- render_ring_put_user_irq(struct drm_device *dev,
--		struct intel_ring_buffer *ring)
-+			 struct intel_ring_buffer *ring)
- {
- 	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
- 	unsigned long irqflags;
-@@ -360,21 +348,23 @@ render_ring_put_user_irq(struct drm_device *dev,
- 	spin_unlock_irqrestore(&dev_priv->user_irq_lock, irqflags);
- }
- 
--static void render_setup_status_page(struct drm_device *dev,
--	struct	intel_ring_buffer *ring)
-+void intel_ring_setup_status_page(struct drm_device *dev,
-+				  struct intel_ring_buffer *ring)
- {
- 	drm_i915_private_t *dev_priv = dev->dev_private;
- 	if (IS_GEN6(dev)) {
--		I915_WRITE(HWS_PGA_GEN6, ring->status_page.gfx_addr);
--		I915_READ(HWS_PGA_GEN6); /* posting read */
-+		I915_WRITE(RING_HWS_PGA_GEN6(ring->mmio_base),
-+			   ring->status_page.gfx_addr);
-+		I915_READ(RING_HWS_PGA_GEN6(ring->mmio_base)); /* posting read */
- 	} else {
--		I915_WRITE(HWS_PGA, ring->status_page.gfx_addr);
--		I915_READ(HWS_PGA); /* posting read */
-+		I915_WRITE(RING_HWS_PGA(ring->mmio_base),
-+			   ring->status_page.gfx_addr);
-+		I915_READ(RING_HWS_PGA(ring->mmio_base)); /* posting read */
- 	}
- 
- }
- 
--void
-+static void
- bsd_ring_flush(struct drm_device *dev,
- 		struct intel_ring_buffer *ring,
- 		u32     invalidate_domains,
-@@ -386,45 +376,16 @@ bsd_ring_flush(struct drm_device *dev,
- 	intel_ring_advance(dev, ring);
- }
- 
--static inline unsigned int bsd_ring_get_head(struct drm_device *dev,
--		struct intel_ring_buffer *ring)
--{
--	drm_i915_private_t *dev_priv = dev->dev_private;
--	return I915_READ(BSD_RING_HEAD) & HEAD_ADDR;
--}
--
--static inline unsigned int bsd_ring_get_tail(struct drm_device *dev,
--		struct intel_ring_buffer *ring)
--{
--	drm_i915_private_t *dev_priv = dev->dev_private;
--	return I915_READ(BSD_RING_TAIL) & TAIL_ADDR;
--}
--
--static inline unsigned int bsd_ring_get_active_head(struct drm_device *dev,
--		struct intel_ring_buffer *ring)
--{
--	drm_i915_private_t *dev_priv = dev->dev_private;
--	return I915_READ(BSD_RING_ACTHD);
--}
--
--static inline void bsd_ring_advance_ring(struct drm_device *dev,
--		struct intel_ring_buffer *ring)
--{
--	drm_i915_private_t *dev_priv = dev->dev_private;
--	I915_WRITE(BSD_RING_TAIL, ring->tail);
--}
--
- static int init_bsd_ring(struct drm_device *dev,
--		struct intel_ring_buffer *ring)
-+			 struct intel_ring_buffer *ring)
- {
- 	return init_ring_common(dev, ring);
- }
- 
- static u32
--bsd_ring_add_request(struct drm_device *dev,
--		struct intel_ring_buffer *ring,
--		struct drm_file *file_priv,
--		u32 flush_domains)
-+ring_add_request(struct drm_device *dev,
-+		 struct intel_ring_buffer *ring,
-+		 u32 flush_domains)
- {
- 	u32 seqno;
- 
-@@ -443,40 +404,32 @@ bsd_ring_add_request(struct drm_device *dev,
- 	return seqno;
- }
- 
--static void bsd_setup_status_page(struct drm_device *dev,
--		struct  intel_ring_buffer *ring)
--{
--	drm_i915_private_t *dev_priv = dev->dev_private;
--	I915_WRITE(BSD_HWS_PGA, ring->status_page.gfx_addr);
--	I915_READ(BSD_HWS_PGA);
--}
--
- static void
- bsd_ring_get_user_irq(struct drm_device *dev,
--		struct intel_ring_buffer *ring)
-+		      struct intel_ring_buffer *ring)
- {
- 	/* do nothing */
- }
- static void
- bsd_ring_put_user_irq(struct drm_device *dev,
--		struct intel_ring_buffer *ring)
-+		      struct intel_ring_buffer *ring)
- {
- 	/* do nothing */
- }
- 
- static u32
--bsd_ring_get_gem_seqno(struct drm_device *dev,
--		struct intel_ring_buffer *ring)
-+ring_status_page_get_seqno(struct drm_device *dev,
-+			   struct intel_ring_buffer *ring)
- {
- 	return intel_read_status_page(ring, I915_GEM_HWS_INDEX);
- }
- 
- static int
--bsd_ring_dispatch_gem_execbuffer(struct drm_device *dev,
--		struct intel_ring_buffer *ring,
--		struct drm_i915_gem_execbuffer2 *exec,
--		struct drm_clip_rect *cliprects,
--		uint64_t exec_offset)
-+ring_dispatch_gem_execbuffer(struct drm_device *dev,
-+			     struct intel_ring_buffer *ring,
-+			     struct drm_i915_gem_execbuffer2 *exec,
-+			     struct drm_clip_rect *cliprects,
-+			     uint64_t exec_offset)
- {
- 	uint32_t exec_start;
- 	exec_start = (uint32_t) exec_offset + exec->batch_start_offset;
-@@ -488,13 +441,12 @@ bsd_ring_dispatch_gem_execbuffer(struct drm_device *dev,
- 	return 0;
- }
- 
--
- static int
- render_ring_dispatch_gem_execbuffer(struct drm_device *dev,
--		struct intel_ring_buffer *ring,
--		struct drm_i915_gem_execbuffer2 *exec,
--		struct drm_clip_rect *cliprects,
--		uint64_t exec_offset)
-+				    struct intel_ring_buffer *ring,
-+				    struct drm_i915_gem_execbuffer2 *exec,
-+				    struct drm_clip_rect *cliprects,
-+				    uint64_t exec_offset)
- {
- 	drm_i915_private_t *dev_priv = dev->dev_private;
- 	int nbox = exec->num_cliprects;
-@@ -523,8 +475,8 @@ render_ring_dispatch_gem_execbuffer(struct drm_device *dev,
- 			intel_ring_emit(dev, ring, exec_start + exec_len - 4);
- 			intel_ring_emit(dev, ring, 0);
- 		} else {
--			intel_ring_begin(dev, ring, 4);
--			if (IS_I965G(dev)) {
-+			intel_ring_begin(dev, ring, 2);
-+			if (INTEL_INFO(dev)->gen >= 4) {
- 				intel_ring_emit(dev, ring,
- 						MI_BATCH_BUFFER_START | (2 << 6)
- 						| MI_BATCH_NON_SECURE_I965);
-@@ -539,7 +491,7 @@ render_ring_dispatch_gem_execbuffer(struct drm_device *dev,
- 		intel_ring_advance(dev, ring);
- 	}
- 
--	if (IS_G4X(dev) || IS_IRONLAKE(dev)) {
-+	if (IS_G4X(dev) || IS_GEN5(dev)) {
- 		intel_ring_begin(dev, ring, 2);
- 		intel_ring_emit(dev, ring, MI_FLUSH |
- 				MI_NO_WRITE_FLUSH |
-@@ -553,7 +505,7 @@ render_ring_dispatch_gem_execbuffer(struct drm_device *dev,
- }
- 
- static void cleanup_status_page(struct drm_device *dev,
--		struct intel_ring_buffer *ring)
-+				struct intel_ring_buffer *ring)
- {
- 	drm_i915_private_t *dev_priv = dev->dev_private;
- 	struct drm_gem_object *obj;
-@@ -573,7 +525,7 @@ static void cleanup_status_page(struct drm_device *dev,
- }
- 
- static int init_status_page(struct drm_device *dev,
--		struct intel_ring_buffer *ring)
-+			    struct intel_ring_buffer *ring)
- {
- 	drm_i915_private_t *dev_priv = dev->dev_private;
- 	struct drm_gem_object *obj;
-@@ -603,7 +555,7 @@ static int init_status_page(struct drm_device *dev,
- 	ring->status_page.obj = obj;
- 	memset(ring->status_page.page_addr, 0, PAGE_SIZE);
- 
--	ring->setup_status_page(dev, ring);
-+	intel_ring_setup_status_page(dev, ring);
- 	DRM_DEBUG_DRIVER("%s hws offset: 0x%08x\n",
- 			ring->name, ring->status_page.gfx_addr);
- 
-@@ -617,15 +569,18 @@ err:
- 	return ret;
- }
- 
--
- int intel_init_ring_buffer(struct drm_device *dev,
--		struct intel_ring_buffer *ring)
-+			   struct intel_ring_buffer *ring)
- {
-+	struct drm_i915_private *dev_priv = dev->dev_private;
- 	struct drm_i915_gem_object *obj_priv;
- 	struct drm_gem_object *obj;
- 	int ret;
- 
- 	ring->dev = dev;
-+	INIT_LIST_HEAD(&ring->active_list);
-+	INIT_LIST_HEAD(&ring->request_list);
-+	INIT_LIST_HEAD(&ring->gpu_write_list);
- 
- 	if (I915_NEED_GFX_HWS(dev)) {
- 		ret = init_status_page(dev, ring);
-@@ -642,7 +597,7 @@ int intel_init_ring_buffer(struct drm_device *dev,
- 
- 	ring->gem_object = obj;
- 
--	ret = i915_gem_object_pin(obj, ring->alignment);
-+	ret = i915_gem_object_pin(obj, PAGE_SIZE);
- 	if (ret)
- 		goto err_unref;
- 
-@@ -668,14 +623,12 @@ int intel_init_ring_buffer(struct drm_device *dev,
- 	if (!drm_core_check_feature(dev, DRIVER_MODESET))
- 		i915_kernel_lost_context(dev);
- 	else {
--		ring->head = ring->get_head(dev, ring);
--		ring->tail = ring->get_tail(dev, ring);
-+		ring->head = I915_READ_HEAD(ring) & HEAD_ADDR;
-+		ring->tail = I915_READ_TAIL(ring) & TAIL_ADDR;
- 		ring->space = ring->head - (ring->tail + 8);
- 		if (ring->space < 0)
- 			ring->space += ring->size;
- 	}
--	INIT_LIST_HEAD(&ring->active_list);
--	INIT_LIST_HEAD(&ring->request_list);
- 	return ret;
- 
- err_unmap:
-@@ -691,7 +644,7 @@ err_hws:
- }
- 
- void intel_cleanup_ring_buffer(struct drm_device *dev,
--		struct intel_ring_buffer *ring)
-+			       struct intel_ring_buffer *ring)
- {
- 	if (ring->gem_object == NULL)
- 		return;
-@@ -701,11 +654,15 @@ void intel_cleanup_ring_buffer(struct drm_device *dev,
- 	i915_gem_object_unpin(ring->gem_object);
- 	drm_gem_object_unreference(ring->gem_object);
- 	ring->gem_object = NULL;
-+
-+	if (ring->cleanup)
-+		ring->cleanup(ring);
-+
- 	cleanup_status_page(dev, ring);
- }
- 
--int intel_wrap_ring_buffer(struct drm_device *dev,
--		struct intel_ring_buffer *ring)
-+static int intel_wrap_ring_buffer(struct drm_device *dev,
-+				  struct intel_ring_buffer *ring)
- {
- 	unsigned int *virt;
- 	int rem;
-@@ -731,14 +688,26 @@ int intel_wrap_ring_buffer(struct drm_device *dev,
- }
- 
- int intel_wait_ring_buffer(struct drm_device *dev,
--		struct intel_ring_buffer *ring, int n)
-+			   struct intel_ring_buffer *ring, int n)
- {
- 	unsigned long end;
-+	drm_i915_private_t *dev_priv = dev->dev_private;
-+	u32 head;
-+
-+	head = intel_read_status_page(ring, 4);
-+	if (head) {
-+		ring->head = head & HEAD_ADDR;
-+		ring->space = ring->head - (ring->tail + 8);
-+		if (ring->space < 0)
-+			ring->space += ring->size;
-+		if (ring->space >= n)
-+			return 0;
-+	}
- 
- 	trace_i915_ring_wait_begin (dev);
- 	end = jiffies + 3 * HZ;
- 	do {
--		ring->head = ring->get_head(dev, ring);
-+		ring->head = I915_READ_HEAD(ring) & HEAD_ADDR;
- 		ring->space = ring->head - (ring->tail + 8);
- 		if (ring->space < 0)
- 			ring->space += ring->size;
-@@ -753,14 +722,15 @@ int intel_wait_ring_buffer(struct drm_device *dev,
- 				master_priv->sarea_priv->perf_boxes |= I915_BOX_WAIT;
- 		}
- 
--		yield();
-+		msleep(1);
- 	} while (!time_after(jiffies, end));
- 	trace_i915_ring_wait_end (dev);
- 	return -EBUSY;
- }
- 
- void intel_ring_begin(struct drm_device *dev,
--		struct intel_ring_buffer *ring, int num_dwords)
-+		      struct intel_ring_buffer *ring,
-+		      int num_dwords)
- {
- 	int n = 4*num_dwords;
- 	if (unlikely(ring->tail + n > ring->size))
-@@ -772,97 +742,287 @@ void intel_ring_begin(struct drm_device *dev,
- }
- 
- void intel_ring_advance(struct drm_device *dev,
--		struct intel_ring_buffer *ring)
-+			struct intel_ring_buffer *ring)
- {
- 	ring->tail &= ring->size - 1;
--	ring->advance_ring(dev, ring);
-+	ring->write_tail(dev, ring, ring->tail);
- }
- 
--void intel_fill_struct(struct drm_device *dev,
--		struct intel_ring_buffer *ring,
--		void *data,
--		unsigned int len)
--{
--	unsigned int *virt = ring->virtual_start + ring->tail;
--	BUG_ON((len&~(4-1)) != 0);
--	intel_ring_begin(dev, ring, len/4);
--	memcpy(virt, data, len);
--	ring->tail += len;
--	ring->tail &= ring->size - 1;
--	ring->space -= len;
--	intel_ring_advance(dev, ring);
--}
--
--struct intel_ring_buffer render_ring = {
-+static const struct intel_ring_buffer render_ring = {
- 	.name			= "render ring",
--	.regs                   = {
--		.ctl = PRB0_CTL,
--		.head = PRB0_HEAD,
--		.tail = PRB0_TAIL,
--		.start = PRB0_START
--	},
--	.ring_flag		= I915_EXEC_RENDER,
-+	.id			= RING_RENDER,
-+	.mmio_base		= RENDER_RING_BASE,
- 	.size			= 32 * PAGE_SIZE,
--	.alignment		= PAGE_SIZE,
--	.virtual_start		= NULL,
--	.dev			= NULL,
--	.gem_object		= NULL,
--	.head			= 0,
--	.tail			= 0,
--	.space			= 0,
--	.user_irq_refcount	= 0,
--	.irq_gem_seqno		= 0,
--	.waiting_gem_seqno	= 0,
--	.setup_status_page	= render_setup_status_page,
- 	.init			= init_render_ring,
--	.get_head		= render_ring_get_head,
--	.get_tail		= render_ring_get_tail,
--	.get_active_head	= render_ring_get_active_head,
--	.advance_ring		= render_ring_advance_ring,
-+	.write_tail		= ring_write_tail,
- 	.flush			= render_ring_flush,
- 	.add_request		= render_ring_add_request,
--	.get_gem_seqno		= render_ring_get_gem_seqno,
-+	.get_seqno		= render_ring_get_seqno,
- 	.user_irq_get		= render_ring_get_user_irq,
- 	.user_irq_put		= render_ring_put_user_irq,
- 	.dispatch_gem_execbuffer = render_ring_dispatch_gem_execbuffer,
--	.status_page		= {NULL, 0, NULL},
--	.map			= {0,}
- };
- 
- /* ring buffer for bit-stream decoder */
- 
--struct intel_ring_buffer bsd_ring = {
-+static const struct intel_ring_buffer bsd_ring = {
- 	.name                   = "bsd ring",
--	.regs			= {
--		.ctl = BSD_RING_CTL,
--		.head = BSD_RING_HEAD,
--		.tail = BSD_RING_TAIL,
--		.start = BSD_RING_START
--	},
--	.ring_flag		= I915_EXEC_BSD,
-+	.id			= RING_BSD,
-+	.mmio_base		= BSD_RING_BASE,
- 	.size			= 32 * PAGE_SIZE,
--	.alignment		= PAGE_SIZE,
--	.virtual_start		= NULL,
--	.dev			= NULL,
--	.gem_object		= NULL,
--	.head			= 0,
--	.tail			= 0,
--	.space			= 0,
--	.user_irq_refcount	= 0,
--	.irq_gem_seqno		= 0,
--	.waiting_gem_seqno	= 0,
--	.setup_status_page	= bsd_setup_status_page,
- 	.init			= init_bsd_ring,
--	.get_head		= bsd_ring_get_head,
--	.get_tail		= bsd_ring_get_tail,
--	.get_active_head	= bsd_ring_get_active_head,
--	.advance_ring		= bsd_ring_advance_ring,
-+	.write_tail		= ring_write_tail,
- 	.flush			= bsd_ring_flush,
--	.add_request		= bsd_ring_add_request,
--	.get_gem_seqno		= bsd_ring_get_gem_seqno,
-+	.add_request		= ring_add_request,
-+	.get_seqno		= ring_status_page_get_seqno,
- 	.user_irq_get		= bsd_ring_get_user_irq,
- 	.user_irq_put		= bsd_ring_put_user_irq,
--	.dispatch_gem_execbuffer = bsd_ring_dispatch_gem_execbuffer,
--	.status_page		= {NULL, 0, NULL},
--	.map			= {0,}
-+	.dispatch_gem_execbuffer = ring_dispatch_gem_execbuffer,
-+};
-+
-+
-+static void gen6_bsd_ring_write_tail(struct drm_device *dev,
-+				     struct intel_ring_buffer *ring,
-+				     u32 value)
-+{
-+       drm_i915_private_t *dev_priv = dev->dev_private;
-+
-+       /* Every tail move must follow the sequence below */
-+       I915_WRITE(GEN6_BSD_SLEEP_PSMI_CONTROL,
-+	       GEN6_BSD_SLEEP_PSMI_CONTROL_RC_ILDL_MESSAGE_MODIFY_MASK |
-+	       GEN6_BSD_SLEEP_PSMI_CONTROL_RC_ILDL_MESSAGE_DISABLE);
-+       I915_WRITE(GEN6_BSD_RNCID, 0x0);
-+
-+       if (wait_for((I915_READ(GEN6_BSD_SLEEP_PSMI_CONTROL) &
-+                               GEN6_BSD_SLEEP_PSMI_CONTROL_IDLE_INDICATOR) == 0,
-+                       50))
-+               DRM_ERROR("timed out waiting for IDLE Indicator\n");
-+
-+       I915_WRITE_TAIL(ring, value);
-+       I915_WRITE(GEN6_BSD_SLEEP_PSMI_CONTROL,
-+	       GEN6_BSD_SLEEP_PSMI_CONTROL_RC_ILDL_MESSAGE_MODIFY_MASK |
-+	       GEN6_BSD_SLEEP_PSMI_CONTROL_RC_ILDL_MESSAGE_ENABLE);
-+}
-+
-+static void gen6_ring_flush(struct drm_device *dev,
-+			    struct intel_ring_buffer *ring,
-+			    u32 invalidate_domains,
-+			    u32 flush_domains)
-+{
-+       intel_ring_begin(dev, ring, 4);
-+       intel_ring_emit(dev, ring, MI_FLUSH_DW);
-+       intel_ring_emit(dev, ring, 0);
-+       intel_ring_emit(dev, ring, 0);
-+       intel_ring_emit(dev, ring, 0);
-+       intel_ring_advance(dev, ring);
-+}
-+
-+static int
-+gen6_ring_dispatch_gem_execbuffer(struct drm_device *dev,
-+				  struct intel_ring_buffer *ring,
-+				  struct drm_i915_gem_execbuffer2 *exec,
-+				  struct drm_clip_rect *cliprects,
-+				  uint64_t exec_offset)
-+{
-+       uint32_t exec_start;
-+
-+       exec_start = (uint32_t) exec_offset + exec->batch_start_offset;
-+
-+       intel_ring_begin(dev, ring, 2);
-+       intel_ring_emit(dev, ring,
-+		       MI_BATCH_BUFFER_START | MI_BATCH_NON_SECURE_I965);
-+       /* bit0-7 is the length on GEN6+ */
-+       intel_ring_emit(dev, ring, exec_start);
-+       intel_ring_advance(dev, ring);
-+
-+       return 0;
-+}
-+
-+/* ring buffer for Video Codec for Gen6+ */
-+static const struct intel_ring_buffer gen6_bsd_ring = {
-+       .name			= "gen6 bsd ring",
-+       .id			= RING_BSD,
-+       .mmio_base		= GEN6_BSD_RING_BASE,
-+       .size			= 32 * PAGE_SIZE,
-+       .init			= init_bsd_ring,
-+       .write_tail		= gen6_bsd_ring_write_tail,
-+       .flush			= gen6_ring_flush,
-+       .add_request		= ring_add_request,
-+       .get_seqno		= ring_status_page_get_seqno,
-+       .user_irq_get		= bsd_ring_get_user_irq,
-+       .user_irq_put		= bsd_ring_put_user_irq,
-+       .dispatch_gem_execbuffer	= gen6_ring_dispatch_gem_execbuffer,
-+};
-+
-+/* Blitter support (SandyBridge+) */
-+
-+static void
-+blt_ring_get_user_irq(struct drm_device *dev,
-+		      struct intel_ring_buffer *ring)
-+{
-+	/* do nothing */
-+}
-+static void
-+blt_ring_put_user_irq(struct drm_device *dev,
-+		      struct intel_ring_buffer *ring)
-+{
-+	/* do nothing */
-+}
-+
-+
-+/* Workaround for some stepping of SNB,
-+ * each time when BLT engine ring tail moved,
-+ * the first command in the ring to be parsed
-+ * should be MI_BATCH_BUFFER_START
-+ */
-+#define NEED_BLT_WORKAROUND(dev) \
-+	(IS_GEN6(dev) && (dev->pdev->revision < 8))
-+
-+static inline struct drm_i915_gem_object *
-+to_blt_workaround(struct intel_ring_buffer *ring)
-+{
-+	return ring->private;
-+}
-+
-+static int blt_ring_init(struct drm_device *dev,
-+			 struct intel_ring_buffer *ring)
-+{
-+	if (NEED_BLT_WORKAROUND(dev)) {
-+		struct drm_i915_gem_object *obj;
-+		u32 __iomem *ptr;
-+		int ret;
-+
-+		obj = to_intel_bo(i915_gem_alloc_object(dev, 4096));
-+		if (obj == NULL)
-+			return -ENOMEM;
-+
-+		ret = i915_gem_object_pin(&obj->base, 4096);
-+		if (ret) {
-+			drm_gem_object_unreference(&obj->base);
-+			return ret;
-+		}
-+
-+		ptr = kmap(obj->pages[0]);
-+		iowrite32(MI_BATCH_BUFFER_END, ptr);
-+		iowrite32(MI_NOOP, ptr+1);
-+		kunmap(obj->pages[0]);
-+
-+		ret = i915_gem_object_set_to_gtt_domain(&obj->base, false);
-+		if (ret) {
-+			i915_gem_object_unpin(&obj->base);
-+			drm_gem_object_unreference(&obj->base);
-+			return ret;
-+		}
-+
-+		ring->private = obj;
-+	}
-+
-+	return init_ring_common(dev, ring);
-+}
-+
-+static void blt_ring_begin(struct drm_device *dev,
-+			   struct intel_ring_buffer *ring,
-+			  int num_dwords)
-+{
-+	if (ring->private) {
-+		intel_ring_begin(dev, ring, num_dwords+2);
-+		intel_ring_emit(dev, ring, MI_BATCH_BUFFER_START);
-+		intel_ring_emit(dev, ring, to_blt_workaround(ring)->gtt_offset);
-+	} else
-+		intel_ring_begin(dev, ring, 4);
-+}
-+
-+static void blt_ring_flush(struct drm_device *dev,
-+			   struct intel_ring_buffer *ring,
-+			   u32 invalidate_domains,
-+			   u32 flush_domains)
-+{
-+	blt_ring_begin(dev, ring, 4);
-+	intel_ring_emit(dev, ring, MI_FLUSH_DW);
-+	intel_ring_emit(dev, ring, 0);
-+	intel_ring_emit(dev, ring, 0);
-+	intel_ring_emit(dev, ring, 0);
-+	intel_ring_advance(dev, ring);
-+}
-+
-+static u32
-+blt_ring_add_request(struct drm_device *dev,
-+		     struct intel_ring_buffer *ring,
-+		     u32 flush_domains)
-+{
-+	u32 seqno = i915_gem_get_seqno(dev);
-+
-+	blt_ring_begin(dev, ring, 4);
-+	intel_ring_emit(dev, ring, MI_STORE_DWORD_INDEX);
-+	intel_ring_emit(dev, ring,
-+			I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
-+	intel_ring_emit(dev, ring, seqno);
-+	intel_ring_emit(dev, ring, MI_USER_INTERRUPT);
-+	intel_ring_advance(dev, ring);
-+
-+	DRM_DEBUG_DRIVER("%s %d\n", ring->name, seqno);
-+	return seqno;
-+}
-+
-+static void blt_ring_cleanup(struct intel_ring_buffer *ring)
-+{
-+	if (!ring->private)
-+		return;
-+
-+	i915_gem_object_unpin(ring->private);
-+	drm_gem_object_unreference(ring->private);
-+	ring->private = NULL;
-+}
-+
-+static const struct intel_ring_buffer gen6_blt_ring = {
-+       .name			= "blt ring",
-+       .id			= RING_BLT,
-+       .mmio_base		= BLT_RING_BASE,
-+       .size			= 32 * PAGE_SIZE,
-+       .init			= blt_ring_init,
-+       .write_tail		= ring_write_tail,
-+       .flush			= blt_ring_flush,
-+       .add_request		= blt_ring_add_request,
-+       .get_seqno		= ring_status_page_get_seqno,
-+       .user_irq_get		= blt_ring_get_user_irq,
-+       .user_irq_put		= blt_ring_put_user_irq,
-+       .dispatch_gem_execbuffer	= gen6_ring_dispatch_gem_execbuffer,
-+       .cleanup			= blt_ring_cleanup,
- };
-+
-+int intel_init_render_ring_buffer(struct drm_device *dev)
-+{
-+	drm_i915_private_t *dev_priv = dev->dev_private;
-+
-+	dev_priv->render_ring = render_ring;
-+
-+	if (!I915_NEED_GFX_HWS(dev)) {
-+		dev_priv->render_ring.status_page.page_addr
-+			= dev_priv->status_page_dmah->vaddr;
-+		memset(dev_priv->render_ring.status_page.page_addr,
-+				0, PAGE_SIZE);
-+	}
-+
-+	return intel_init_ring_buffer(dev, &dev_priv->render_ring);
-+}
-+
-+int intel_init_bsd_ring_buffer(struct drm_device *dev)
-+{
-+	drm_i915_private_t *dev_priv = dev->dev_private;
-+
-+	if (IS_GEN6(dev))
-+		dev_priv->bsd_ring = gen6_bsd_ring;
-+	else
-+		dev_priv->bsd_ring = bsd_ring;
-+
-+	return intel_init_ring_buffer(dev, &dev_priv->bsd_ring);
-+}
-+
-+int intel_init_blt_ring_buffer(struct drm_device *dev)
-+{
-+	drm_i915_private_t *dev_priv = dev->dev_private;
-+
-+	dev_priv->blt_ring = gen6_blt_ring;
-+
-+	return intel_init_ring_buffer(dev, &dev_priv->blt_ring);
-+}
-diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.h b/drivers/gpu/drm/i915/intel_ringbuffer.h
-index 525e7d3..3126c26 100644
---- a/drivers/gpu/drm/i915/intel_ringbuffer.h
-+++ b/drivers/gpu/drm/i915/intel_ringbuffer.h
-@@ -7,25 +7,32 @@ struct  intel_hw_status_page {
- 	struct		drm_gem_object *obj;
- };
- 
-+#define I915_READ_TAIL(ring) I915_READ(RING_TAIL(ring->mmio_base))
-+#define I915_WRITE_TAIL(ring, val) I915_WRITE(RING_TAIL(ring->mmio_base), val)
-+#define I915_READ_START(ring) I915_READ(RING_START(ring->mmio_base))
-+#define I915_WRITE_START(ring, val) I915_WRITE(RING_START(ring->mmio_base), val)
-+#define I915_READ_HEAD(ring) I915_READ(RING_HEAD(ring->mmio_base))
-+#define I915_WRITE_HEAD(ring, val) I915_WRITE(RING_HEAD(ring->mmio_base), val)
-+#define I915_READ_CTL(ring) I915_READ(RING_CTL(ring->mmio_base))
-+#define I915_WRITE_CTL(ring, val) I915_WRITE(RING_CTL(ring->mmio_base), val)
-+
- struct drm_i915_gem_execbuffer2;
- struct  intel_ring_buffer {
- 	const char	*name;
--	struct		ring_regs {
--			u32 ctl;
--			u32 head;
--			u32 tail;
--			u32 start;
--	} regs;
--	unsigned int	ring_flag;
-+	enum intel_ring_id {
-+		RING_RENDER = 0x1,
-+		RING_BSD = 0x2,
-+		RING_BLT = 0x4,
-+	} id;
-+	u32		mmio_base;
- 	unsigned long	size;
--	unsigned int	alignment;
- 	void		*virtual_start;
- 	struct		drm_device *dev;
- 	struct		drm_gem_object *gem_object;
- 
- 	unsigned int	head;
- 	unsigned int	tail;
--	unsigned int	space;
-+	int		space;
- 	struct intel_hw_status_page status_page;
- 
- 	u32		irq_gem_seqno;		/* last seq seem at irq time */
-@@ -35,35 +42,28 @@ struct  intel_ring_buffer {
- 			struct intel_ring_buffer *ring);
- 	void		(*user_irq_put)(struct drm_device *dev,
- 			struct intel_ring_buffer *ring);
--	void		(*setup_status_page)(struct drm_device *dev,
--			struct	intel_ring_buffer *ring);
- 
- 	int		(*init)(struct drm_device *dev,
- 			struct intel_ring_buffer *ring);
- 
--	unsigned int	(*get_head)(struct drm_device *dev,
--			struct intel_ring_buffer *ring);
--	unsigned int	(*get_tail)(struct drm_device *dev,
--			struct intel_ring_buffer *ring);
--	unsigned int	(*get_active_head)(struct drm_device *dev,
--			struct intel_ring_buffer *ring);
--	void		(*advance_ring)(struct drm_device *dev,
--			struct intel_ring_buffer *ring);
-+	void		(*write_tail)(struct drm_device *dev,
-+				      struct intel_ring_buffer *ring,
-+				      u32 value);
- 	void		(*flush)(struct drm_device *dev,
- 			struct intel_ring_buffer *ring,
- 			u32	invalidate_domains,
- 			u32	flush_domains);
- 	u32		(*add_request)(struct drm_device *dev,
- 			struct intel_ring_buffer *ring,
--			struct drm_file *file_priv,
- 			u32 flush_domains);
--	u32		(*get_gem_seqno)(struct drm_device *dev,
--			struct intel_ring_buffer *ring);
-+	u32		(*get_seqno)(struct drm_device *dev,
-+				     struct intel_ring_buffer *ring);
- 	int		(*dispatch_gem_execbuffer)(struct drm_device *dev,
- 			struct intel_ring_buffer *ring,
- 			struct drm_i915_gem_execbuffer2 *exec,
- 			struct drm_clip_rect *cliprects,
- 			uint64_t exec_offset);
-+	void		(*cleanup)(struct intel_ring_buffer *ring);
- 
- 	/**
- 	 * List of objects currently involved in rendering from the
-@@ -83,8 +83,24 @@ struct  intel_ring_buffer {
- 	 */
- 	struct list_head request_list;
- 
-+	/**
-+	 * List of objects currently pending a GPU write flush.
-+	 *
-+	 * All elements on this list will belong to either the
-+	 * active_list or flushing_list, last_rendering_seqno can
-+	 * be used to differentiate between the two elements.
-+	 */
-+	struct list_head gpu_write_list;
-+
-+	/**
-+	 * Do we have some not yet emitted requests outstanding?
-+	 */
-+	bool outstanding_lazy_request;
-+
- 	wait_queue_head_t irq_queue;
- 	drm_local_map_t map;
-+
-+	void *private;
- };
- 
- static inline u32
-@@ -96,15 +112,13 @@ intel_read_status_page(struct intel_ring_buffer *ring,
- }
- 
- int intel_init_ring_buffer(struct drm_device *dev,
--		struct intel_ring_buffer *ring);
-+			   struct intel_ring_buffer *ring);
- void intel_cleanup_ring_buffer(struct drm_device *dev,
--		struct intel_ring_buffer *ring);
-+			       struct intel_ring_buffer *ring);
- int intel_wait_ring_buffer(struct drm_device *dev,
--		struct intel_ring_buffer *ring, int n);
--int intel_wrap_ring_buffer(struct drm_device *dev,
--		struct intel_ring_buffer *ring);
-+			   struct intel_ring_buffer *ring, int n);
- void intel_ring_begin(struct drm_device *dev,
--		struct intel_ring_buffer *ring, int n);
-+		      struct intel_ring_buffer *ring, int n);
- 
- static inline void intel_ring_emit(struct drm_device *dev,
- 				   struct intel_ring_buffer *ring,
-@@ -115,17 +129,19 @@ static inline void intel_ring_emit(struct drm_device *dev,
- 	ring->tail += 4;
- }
- 
--void intel_fill_struct(struct drm_device *dev,
--		struct intel_ring_buffer *ring,
--		void *data,
--		unsigned int len);
- void intel_ring_advance(struct drm_device *dev,
- 		struct intel_ring_buffer *ring);
- 
- u32 intel_ring_get_seqno(struct drm_device *dev,
- 		struct intel_ring_buffer *ring);
- 
--extern struct intel_ring_buffer render_ring;
--extern struct intel_ring_buffer bsd_ring;
-+int intel_init_render_ring_buffer(struct drm_device *dev);
-+int intel_init_bsd_ring_buffer(struct drm_device *dev);
-+int intel_init_blt_ring_buffer(struct drm_device *dev);
-+
-+u32 intel_ring_get_active_head(struct drm_device *dev,
-+			       struct intel_ring_buffer *ring);
-+void intel_ring_setup_status_page(struct drm_device *dev,
-+				  struct intel_ring_buffer *ring);
- 
- #endif /* _INTEL_RINGBUFFER_H_ */
-diff --git a/drivers/gpu/drm/i915/intel_sdvo.c b/drivers/gpu/drm/i915/intel_sdvo.c
-index ee73e42..de158b7 100644
---- a/drivers/gpu/drm/i915/intel_sdvo.c
-+++ b/drivers/gpu/drm/i915/intel_sdvo.c
-@@ -65,8 +65,11 @@ static const char *tv_format_names[] = {
- struct intel_sdvo {
- 	struct intel_encoder base;
- 
-+	struct i2c_adapter *i2c;
- 	u8 slave_addr;
- 
-+	struct i2c_adapter ddc;
-+
- 	/* Register for the SDVO device: SDVOB or SDVOC */
- 	int sdvo_reg;
- 
-@@ -104,34 +107,24 @@ struct intel_sdvo {
- 	 * This is set if we treat the device as HDMI, instead of DVI.
- 	 */
- 	bool is_hdmi;
-+	bool has_audio;
- 
- 	/**
--	 * This is set if we detect output of sdvo device as LVDS.
-+	 * This is set if we detect output of sdvo device as LVDS and
-+	 * have a valid fixed mode to use with the panel.
- 	 */
- 	bool is_lvds;
- 
- 	/**
--	 * This is sdvo flags for input timing.
--	 */
--	uint8_t sdvo_flags;
--
--	/**
- 	 * This is sdvo fixed pannel mode pointer
- 	 */
- 	struct drm_display_mode *sdvo_lvds_fixed_mode;
- 
--	/*
--	 * supported encoding mode, used to determine whether HDMI is
--	 * supported
--	 */
--	struct intel_sdvo_encode encode;
--
- 	/* DDC bus used by this SDVO encoder */
- 	uint8_t ddc_bus;
- 
--	/* Mac mini hack -- use the same DDC as the analog connector */
--	struct i2c_adapter *analog_ddc_bus;
--
-+	/* Input timings for adjusted_mode */
-+	struct intel_sdvo_dtd input_dtd;
- };
- 
- struct intel_sdvo_connector {
-@@ -140,11 +133,15 @@ struct intel_sdvo_connector {
- 	/* Mark the type of connector */
- 	uint16_t output_flag;
- 
-+	int force_audio;
-+
- 	/* This contains all current supported TV format */
- 	u8 tv_format_supported[TV_FORMAT_NUM];
- 	int   format_supported_num;
- 	struct drm_property *tv_format;
- 
-+	struct drm_property *force_audio_property;
-+
- 	/* add the property for the SDVO-TV */
- 	struct drm_property *left;
- 	struct drm_property *right;
-@@ -186,9 +183,15 @@ struct intel_sdvo_connector {
- 	u32	cur_dot_crawl,	max_dot_crawl;
- };
- 
--static struct intel_sdvo *enc_to_intel_sdvo(struct drm_encoder *encoder)
-+static struct intel_sdvo *to_intel_sdvo(struct drm_encoder *encoder)
-+{
-+	return container_of(encoder, struct intel_sdvo, base.base);
-+}
-+
-+static struct intel_sdvo *intel_attached_sdvo(struct drm_connector *connector)
- {
--	return container_of(enc_to_intel_encoder(encoder), struct intel_sdvo, base);
-+	return container_of(intel_attached_encoder(connector),
-+			    struct intel_sdvo, base);
- }
- 
- static struct intel_sdvo_connector *to_intel_sdvo_connector(struct drm_connector *connector)
-@@ -213,7 +216,7 @@ intel_sdvo_create_enhance_property(struct intel_sdvo *intel_sdvo,
-  */
- static void intel_sdvo_write_sdvox(struct intel_sdvo *intel_sdvo, u32 val)
- {
--	struct drm_device *dev = intel_sdvo->base.enc.dev;
-+	struct drm_device *dev = intel_sdvo->base.base.dev;
- 	struct drm_i915_private *dev_priv = dev->dev_private;
- 	u32 bval = val, cval = val;
- 	int i;
-@@ -245,49 +248,29 @@ static void intel_sdvo_write_sdvox(struct intel_sdvo *intel_sdvo, u32 val)
- 
- static bool intel_sdvo_read_byte(struct intel_sdvo *intel_sdvo, u8 addr, u8 *ch)
- {
--	u8 out_buf[2] = { addr, 0 };
--	u8 buf[2];
- 	struct i2c_msg msgs[] = {
- 		{
--			.addr = intel_sdvo->slave_addr >> 1,
-+			.addr = intel_sdvo->slave_addr,
- 			.flags = 0,
- 			.len = 1,
--			.buf = out_buf,
-+			.buf = &addr,
- 		},
- 		{
--			.addr = intel_sdvo->slave_addr >> 1,
-+			.addr = intel_sdvo->slave_addr,
- 			.flags = I2C_M_RD,
- 			.len = 1,
--			.buf = buf,
-+			.buf = ch,
- 		}
- 	};
- 	int ret;
- 
--	if ((ret = i2c_transfer(intel_sdvo->base.i2c_bus, msgs, 2)) == 2)
--	{
--		*ch = buf[0];
-+	if ((ret = i2c_transfer(intel_sdvo->i2c, msgs, 2)) == 2)
- 		return true;
--	}
- 
- 	DRM_DEBUG_KMS("i2c transfer returned %d\n", ret);
- 	return false;
- }
- 
--static bool intel_sdvo_write_byte(struct intel_sdvo *intel_sdvo, int addr, u8 ch)
--{
--	u8 out_buf[2] = { addr, ch };
--	struct i2c_msg msgs[] = {
--		{
--			.addr = intel_sdvo->slave_addr >> 1,
--			.flags = 0,
--			.len = 2,
--			.buf = out_buf,
--		}
--	};
--
--	return i2c_transfer(intel_sdvo->base.i2c_bus, msgs, 1) == 1;
--}
--
- #define SDVO_CMD_NAME_ENTRY(cmd) {cmd, #cmd}
- /** Mapping of command numbers to names, for debug output */
- static const struct _sdvo_cmd_name {
-@@ -432,22 +415,6 @@ static void intel_sdvo_debug_write(struct intel_sdvo *intel_sdvo, u8 cmd,
- 	DRM_LOG_KMS("\n");
- }
- 
--static bool intel_sdvo_write_cmd(struct intel_sdvo *intel_sdvo, u8 cmd,
--				 const void *args, int args_len)
--{
--	int i;
--
--	intel_sdvo_debug_write(intel_sdvo, cmd, args, args_len);
--
--	for (i = 0; i < args_len; i++) {
--		if (!intel_sdvo_write_byte(intel_sdvo, SDVO_I2C_ARG_0 - i,
--					   ((u8*)args)[i]))
--			return false;
--	}
--
--	return intel_sdvo_write_byte(intel_sdvo, SDVO_I2C_OPCODE, cmd);
--}
--
- static const char *cmd_status_names[] = {
- 	"Power on",
- 	"Success",
-@@ -458,54 +425,115 @@ static const char *cmd_status_names[] = {
- 	"Scaling not supported"
- };
- 
--static void intel_sdvo_debug_response(struct intel_sdvo *intel_sdvo,
--				      void *response, int response_len,
--				      u8 status)
-+static bool intel_sdvo_write_cmd(struct intel_sdvo *intel_sdvo, u8 cmd,
-+				 const void *args, int args_len)
- {
--	int i;
-+	u8 buf[args_len*2 + 2], status;
-+	struct i2c_msg msgs[args_len + 3];
-+	int i, ret;
- 
--	DRM_DEBUG_KMS("%s: R: ", SDVO_NAME(intel_sdvo));
--	for (i = 0; i < response_len; i++)
--		DRM_LOG_KMS("%02X ", ((u8 *)response)[i]);
--	for (; i < 8; i++)
--		DRM_LOG_KMS("   ");
--	if (status <= SDVO_CMD_STATUS_SCALING_NOT_SUPP)
--		DRM_LOG_KMS("(%s)", cmd_status_names[status]);
--	else
--		DRM_LOG_KMS("(??? %d)", status);
--	DRM_LOG_KMS("\n");
-+	intel_sdvo_debug_write(intel_sdvo, cmd, args, args_len);
-+
-+	for (i = 0; i < args_len; i++) {
-+		msgs[i].addr = intel_sdvo->slave_addr;
-+		msgs[i].flags = 0;
-+		msgs[i].len = 2;
-+		msgs[i].buf = buf + 2 *i;
-+		buf[2*i + 0] = SDVO_I2C_ARG_0 - i;
-+		buf[2*i + 1] = ((u8*)args)[i];
-+	}
-+	msgs[i].addr = intel_sdvo->slave_addr;
-+	msgs[i].flags = 0;
-+	msgs[i].len = 2;
-+	msgs[i].buf = buf + 2*i;
-+	buf[2*i + 0] = SDVO_I2C_OPCODE;
-+	buf[2*i + 1] = cmd;
-+
-+	/* the following two are to read the response */
-+	status = SDVO_I2C_CMD_STATUS;
-+	msgs[i+1].addr = intel_sdvo->slave_addr;
-+	msgs[i+1].flags = 0;
-+	msgs[i+1].len = 1;
-+	msgs[i+1].buf = &status;
-+
-+	msgs[i+2].addr = intel_sdvo->slave_addr;
-+	msgs[i+2].flags = I2C_M_RD;
-+	msgs[i+2].len = 1;
-+	msgs[i+2].buf = &status;
-+
-+	ret = i2c_transfer(intel_sdvo->i2c, msgs, i+3);
-+	if (ret < 0) {
-+		DRM_DEBUG_KMS("I2c transfer returned %d\n", ret);
-+		return false;
-+	}
-+	if (ret != i+3) {
-+		/* failure in I2C transfer */
-+		DRM_DEBUG_KMS("I2c transfer returned %d/%d\n", ret, i+3);
-+		return false;
-+	}
-+
-+	i = 3;
-+	while (status == SDVO_CMD_STATUS_PENDING && i--) {
-+		if (!intel_sdvo_read_byte(intel_sdvo,
-+					  SDVO_I2C_CMD_STATUS,
-+					  &status))
-+			return false;
-+	}
-+	if (status != SDVO_CMD_STATUS_SUCCESS) {
-+		DRM_DEBUG_KMS("command returns response %s [%d]\n",
-+			      status <= SDVO_CMD_STATUS_SCALING_NOT_SUPP ? cmd_status_names[status] : "???",
-+			      status);
-+		return false;
-+	}
-+
-+	return true;
- }
- 
- static bool intel_sdvo_read_response(struct intel_sdvo *intel_sdvo,
- 				     void *response, int response_len)
- {
--	int i;
-+	u8 retry = 5;
- 	u8 status;
--	u8 retry = 50;
--
--	while (retry--) {
--		/* Read the command response */
--		for (i = 0; i < response_len; i++) {
--			if (!intel_sdvo_read_byte(intel_sdvo,
--						  SDVO_I2C_RETURN_0 + i,
--						  &((u8 *)response)[i]))
--				return false;
--		}
-+	int i;
- 
--		/* read the return status */
--		if (!intel_sdvo_read_byte(intel_sdvo, SDVO_I2C_CMD_STATUS,
-+	/*
-+	 * The documentation states that all commands will be
-+	 * processed within 15µs, and that we need only poll
-+	 * the status byte a maximum of 3 times in order for the
-+	 * command to be complete.
-+	 *
-+	 * Check 5 times in case the hardware failed to read the docs.
-+	 */
-+	do {
-+		if (!intel_sdvo_read_byte(intel_sdvo,
-+					  SDVO_I2C_CMD_STATUS,
- 					  &status))
- 			return false;
-+	} while (status == SDVO_CMD_STATUS_PENDING && --retry);
- 
--		intel_sdvo_debug_response(intel_sdvo, response, response_len,
--					  status);
--		if (status != SDVO_CMD_STATUS_PENDING)
--			break;
-+	DRM_DEBUG_KMS("%s: R: ", SDVO_NAME(intel_sdvo));
-+	if (status <= SDVO_CMD_STATUS_SCALING_NOT_SUPP)
-+		DRM_LOG_KMS("(%s)", cmd_status_names[status]);
-+	else
-+		DRM_LOG_KMS("(??? %d)", status);
- 
--		mdelay(50);
-+	if (status != SDVO_CMD_STATUS_SUCCESS)
-+		goto log_fail;
-+
-+	/* Read the command response */
-+	for (i = 0; i < response_len; i++) {
-+		if (!intel_sdvo_read_byte(intel_sdvo,
-+					  SDVO_I2C_RETURN_0 + i,
-+					  &((u8 *)response)[i]))
-+			goto log_fail;
-+		DRM_LOG_KMS(" %02X", ((u8 *)response)[i]);
- 	}
-+	DRM_LOG_KMS("\n");
-+	return true;
- 
--	return status == SDVO_CMD_STATUS_SUCCESS;
-+log_fail:
-+	DRM_LOG_KMS("\n");
-+	return false;
- }
- 
- static int intel_sdvo_get_pixel_multiplier(struct drm_display_mode *mode)
-@@ -518,71 +546,17 @@ static int intel_sdvo_get_pixel_multiplier(struct drm_display_mode *mode)
- 		return 4;
- }
- 
--/**
-- * Try to read the response after issuie the DDC switch command. But it
-- * is noted that we must do the action of reading response and issuing DDC
-- * switch command in one I2C transaction. Otherwise when we try to start
-- * another I2C transaction after issuing the DDC bus switch, it will be
-- * switched to the internal SDVO register.
-- */
--static void intel_sdvo_set_control_bus_switch(struct intel_sdvo *intel_sdvo,
--					      u8 target)
-+static bool intel_sdvo_set_control_bus_switch(struct intel_sdvo *intel_sdvo,
-+					      u8 ddc_bus)
- {
--	u8 out_buf[2], cmd_buf[2], ret_value[2], ret;
--	struct i2c_msg msgs[] = {
--		{
--			.addr = intel_sdvo->slave_addr >> 1,
--			.flags = 0,
--			.len = 2,
--			.buf = out_buf,
--		},
--		/* the following two are to read the response */
--		{
--			.addr = intel_sdvo->slave_addr >> 1,
--			.flags = 0,
--			.len = 1,
--			.buf = cmd_buf,
--		},
--		{
--			.addr = intel_sdvo->slave_addr >> 1,
--			.flags = I2C_M_RD,
--			.len = 1,
--			.buf = ret_value,
--		},
--	};
--
--	intel_sdvo_debug_write(intel_sdvo, SDVO_CMD_SET_CONTROL_BUS_SWITCH,
--					&target, 1);
--	/* write the DDC switch command argument */
--	intel_sdvo_write_byte(intel_sdvo, SDVO_I2C_ARG_0, target);
--
--	out_buf[0] = SDVO_I2C_OPCODE;
--	out_buf[1] = SDVO_CMD_SET_CONTROL_BUS_SWITCH;
--	cmd_buf[0] = SDVO_I2C_CMD_STATUS;
--	cmd_buf[1] = 0;
--	ret_value[0] = 0;
--	ret_value[1] = 0;
--
--	ret = i2c_transfer(intel_sdvo->base.i2c_bus, msgs, 3);
--	if (ret != 3) {
--		/* failure in I2C transfer */
--		DRM_DEBUG_KMS("I2c transfer returned %d\n", ret);
--		return;
--	}
--	if (ret_value[0] != SDVO_CMD_STATUS_SUCCESS) {
--		DRM_DEBUG_KMS("DDC switch command returns response %d\n",
--					ret_value[0]);
--		return;
--	}
--	return;
-+	return intel_sdvo_write_cmd(intel_sdvo,
-+				    SDVO_CMD_SET_CONTROL_BUS_SWITCH,
-+				    &ddc_bus, 1);
- }
- 
- static bool intel_sdvo_set_value(struct intel_sdvo *intel_sdvo, u8 cmd, const void *data, int len)
- {
--	if (!intel_sdvo_write_cmd(intel_sdvo, cmd, data, len))
--		return false;
--
--	return intel_sdvo_read_response(intel_sdvo, NULL, 0);
-+	return intel_sdvo_write_cmd(intel_sdvo, cmd, data, len);
- }
- 
- static bool
-@@ -819,17 +793,13 @@ static void intel_sdvo_get_mode_from_dtd(struct drm_display_mode * mode,
- 		mode->flags |= DRM_MODE_FLAG_PVSYNC;
- }
- 
--static bool intel_sdvo_get_supp_encode(struct intel_sdvo *intel_sdvo,
--				       struct intel_sdvo_encode *encode)
-+static bool intel_sdvo_check_supp_encode(struct intel_sdvo *intel_sdvo)
- {
--	if (intel_sdvo_get_value(intel_sdvo,
--				  SDVO_CMD_GET_SUPP_ENCODE,
--				  encode, sizeof(*encode)))
--		return true;
-+	struct intel_sdvo_encode encode;
- 
--	/* non-support means DVI */
--	memset(encode, 0, sizeof(*encode));
--	return false;
-+	return intel_sdvo_get_value(intel_sdvo,
-+				  SDVO_CMD_GET_SUPP_ENCODE,
-+				  &encode, sizeof(encode));
- }
- 
- static bool intel_sdvo_set_encode(struct intel_sdvo *intel_sdvo,
-@@ -874,115 +844,33 @@ static void intel_sdvo_dump_hdmi_buf(struct intel_sdvo *intel_sdvo)
- }
- #endif
- 
--static bool intel_sdvo_set_hdmi_buf(struct intel_sdvo *intel_sdvo,
--				    int index,
--				    uint8_t *data, int8_t size, uint8_t tx_rate)
--{
--    uint8_t set_buf_index[2];
--
--    set_buf_index[0] = index;
--    set_buf_index[1] = 0;
--
--    if (!intel_sdvo_write_cmd(intel_sdvo, SDVO_CMD_SET_HBUF_INDEX,
--			      set_buf_index, 2))
--	    return false;
--
--    for (; size > 0; size -= 8) {
--	if (!intel_sdvo_write_cmd(intel_sdvo, SDVO_CMD_SET_HBUF_DATA, data, 8))
--		return false;
--
--	data += 8;
--    }
--
--    return intel_sdvo_write_cmd(intel_sdvo, SDVO_CMD_SET_HBUF_TXRATE, &tx_rate, 1);
--}
--
--static uint8_t intel_sdvo_calc_hbuf_csum(uint8_t *data, uint8_t size)
--{
--	uint8_t csum = 0;
--	int i;
--
--	for (i = 0; i < size; i++)
--		csum += data[i];
--
--	return 0x100 - csum;
--}
--
--#define DIP_TYPE_AVI	0x82
--#define DIP_VERSION_AVI	0x2
--#define DIP_LEN_AVI	13
--
--struct dip_infoframe {
--	uint8_t type;
--	uint8_t version;
--	uint8_t len;
--	uint8_t checksum;
--	union {
--		struct {
--			/* Packet Byte #1 */
--			uint8_t S:2;
--			uint8_t B:2;
--			uint8_t A:1;
--			uint8_t Y:2;
--			uint8_t rsvd1:1;
--			/* Packet Byte #2 */
--			uint8_t R:4;
--			uint8_t M:2;
--			uint8_t C:2;
--			/* Packet Byte #3 */
--			uint8_t SC:2;
--			uint8_t Q:2;
--			uint8_t EC:3;
--			uint8_t ITC:1;
--			/* Packet Byte #4 */
--			uint8_t VIC:7;
--			uint8_t rsvd2:1;
--			/* Packet Byte #5 */
--			uint8_t PR:4;
--			uint8_t rsvd3:4;
--			/* Packet Byte #6~13 */
--			uint16_t top_bar_end;
--			uint16_t bottom_bar_start;
--			uint16_t left_bar_end;
--			uint16_t right_bar_start;
--		} avi;
--		struct {
--			/* Packet Byte #1 */
--			uint8_t channel_count:3;
--			uint8_t rsvd1:1;
--			uint8_t coding_type:4;
--			/* Packet Byte #2 */
--			uint8_t sample_size:2; /* SS0, SS1 */
--			uint8_t sample_frequency:3;
--			uint8_t rsvd2:3;
--			/* Packet Byte #3 */
--			uint8_t coding_type_private:5;
--			uint8_t rsvd3:3;
--			/* Packet Byte #4 */
--			uint8_t channel_allocation;
--			/* Packet Byte #5 */
--			uint8_t rsvd4:3;
--			uint8_t level_shift:4;
--			uint8_t downmix_inhibit:1;
--		} audio;
--		uint8_t payload[28];
--	} __attribute__ ((packed)) u;
--} __attribute__((packed));
--
--static bool intel_sdvo_set_avi_infoframe(struct intel_sdvo *intel_sdvo,
--					 struct drm_display_mode * mode)
-+static bool intel_sdvo_set_avi_infoframe(struct intel_sdvo *intel_sdvo)
- {
- 	struct dip_infoframe avi_if = {
- 		.type = DIP_TYPE_AVI,
--		.version = DIP_VERSION_AVI,
-+		.ver = DIP_VERSION_AVI,
- 		.len = DIP_LEN_AVI,
- 	};
-+	uint8_t tx_rate = SDVO_HBUF_TX_VSYNC;
-+	uint8_t set_buf_index[2] = { 1, 0 };
-+	uint64_t *data = (uint64_t *)&avi_if;
-+	unsigned i;
-+
-+	intel_dip_infoframe_csum(&avi_if);
-+
-+	if (!intel_sdvo_write_cmd(intel_sdvo, SDVO_CMD_SET_HBUF_INDEX,
-+				  set_buf_index, 2))
-+		return false;
- 
--	avi_if.checksum = intel_sdvo_calc_hbuf_csum((uint8_t *)&avi_if,
--						    4 + avi_if.len);
--	return intel_sdvo_set_hdmi_buf(intel_sdvo, 1, (uint8_t *)&avi_if,
--				       4 + avi_if.len,
--				       SDVO_HBUF_TX_VSYNC);
-+	for (i = 0; i < sizeof(avi_if); i += 8) {
-+		if (!intel_sdvo_write_cmd(intel_sdvo, SDVO_CMD_SET_HBUF_DATA,
-+					  data, 8))
-+			return false;
-+		data++;
-+	}
-+
-+	return intel_sdvo_write_cmd(intel_sdvo, SDVO_CMD_SET_HBUF_TXRATE,
-+				    &tx_rate, 1);
- }
- 
- static bool intel_sdvo_set_tv_format(struct intel_sdvo *intel_sdvo)
-@@ -1022,8 +910,6 @@ intel_sdvo_set_input_timings_for_mode(struct intel_sdvo *intel_sdvo,
- 					struct drm_display_mode *mode,
- 					struct drm_display_mode *adjusted_mode)
- {
--	struct intel_sdvo_dtd input_dtd;
--
- 	/* Reset the input timing to the screen. Assume always input 0. */
- 	if (!intel_sdvo_set_target_input(intel_sdvo))
- 		return false;
-@@ -1035,14 +921,12 @@ intel_sdvo_set_input_timings_for_mode(struct intel_sdvo *intel_sdvo,
- 		return false;
- 
- 	if (!intel_sdvo_get_preferred_input_timing(intel_sdvo,
--						   &input_dtd))
-+						   &intel_sdvo->input_dtd))
- 		return false;
- 
--	intel_sdvo_get_mode_from_dtd(adjusted_mode, &input_dtd);
--	intel_sdvo->sdvo_flags = input_dtd.part2.sdvo_flags;
-+	intel_sdvo_get_mode_from_dtd(adjusted_mode, &intel_sdvo->input_dtd);
- 
- 	drm_mode_set_crtcinfo(adjusted_mode, 0);
--	mode->clock = adjusted_mode->clock;
- 	return true;
- }
- 
-@@ -1050,7 +934,8 @@ static bool intel_sdvo_mode_fixup(struct drm_encoder *encoder,
- 				  struct drm_display_mode *mode,
- 				  struct drm_display_mode *adjusted_mode)
- {
--	struct intel_sdvo *intel_sdvo = enc_to_intel_sdvo(encoder);
-+	struct intel_sdvo *intel_sdvo = to_intel_sdvo(encoder);
-+	int multiplier;
- 
- 	/* We need to construct preferred input timings based on our
- 	 * output timings.  To do that, we have to set the output
-@@ -1065,10 +950,8 @@ static bool intel_sdvo_mode_fixup(struct drm_encoder *encoder,
- 							     mode,
- 							     adjusted_mode);
- 	} else if (intel_sdvo->is_lvds) {
--		drm_mode_set_crtcinfo(intel_sdvo->sdvo_lvds_fixed_mode, 0);
--
- 		if (!intel_sdvo_set_output_timings_from_mode(intel_sdvo,
--							    intel_sdvo->sdvo_lvds_fixed_mode))
-+							     intel_sdvo->sdvo_lvds_fixed_mode))
- 			return false;
- 
- 		(void) intel_sdvo_set_input_timings_for_mode(intel_sdvo,
-@@ -1077,9 +960,10 @@ static bool intel_sdvo_mode_fixup(struct drm_encoder *encoder,
- 	}
- 
- 	/* Make the CRTC code factor in the SDVO pixel multiplier.  The
--	 * SDVO device will be told of the multiplier during mode_set.
-+	 * SDVO device will factor out the multiplier during mode_set.
- 	 */
--	adjusted_mode->clock *= intel_sdvo_get_pixel_multiplier(mode);
-+	multiplier = intel_sdvo_get_pixel_multiplier(adjusted_mode);
-+	intel_mode_set_pixel_multiplier(adjusted_mode, multiplier);
- 
- 	return true;
- }
-@@ -1092,11 +976,12 @@ static void intel_sdvo_mode_set(struct drm_encoder *encoder,
- 	struct drm_i915_private *dev_priv = dev->dev_private;
- 	struct drm_crtc *crtc = encoder->crtc;
- 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
--	struct intel_sdvo *intel_sdvo = enc_to_intel_sdvo(encoder);
--	u32 sdvox = 0;
--	int sdvo_pixel_multiply, rate;
-+	struct intel_sdvo *intel_sdvo = to_intel_sdvo(encoder);
-+	u32 sdvox;
- 	struct intel_sdvo_in_out_map in_out;
- 	struct intel_sdvo_dtd input_dtd;
-+	int pixel_multiplier = intel_mode_get_pixel_multiplier(adjusted_mode);
-+	int rate;
- 
- 	if (!mode)
- 		return;
-@@ -1114,28 +999,23 @@ static void intel_sdvo_mode_set(struct drm_encoder *encoder,
- 			     SDVO_CMD_SET_IN_OUT_MAP,
- 			     &in_out, sizeof(in_out));
- 
--	if (intel_sdvo->is_hdmi) {
--		if (!intel_sdvo_set_avi_infoframe(intel_sdvo, mode))
--			return;
--
--		sdvox |= SDVO_AUDIO_ENABLE;
--	}
-+	/* Set the output timings to the screen */
-+	if (!intel_sdvo_set_target_output(intel_sdvo,
-+					  intel_sdvo->attached_output))
-+		return;
- 
- 	/* We have tried to get input timing in mode_fixup, and filled into
--	   adjusted_mode */
--	intel_sdvo_get_dtd_from_mode(&input_dtd, adjusted_mode);
--	if (intel_sdvo->is_tv || intel_sdvo->is_lvds)
--		input_dtd.part2.sdvo_flags = intel_sdvo->sdvo_flags;
--
--	/* If it's a TV, we already set the output timing in mode_fixup.
--	 * Otherwise, the output timing is equal to the input timing.
-+	 * adjusted_mode.
- 	 */
--	if (!intel_sdvo->is_tv && !intel_sdvo->is_lvds) {
-+	if (intel_sdvo->is_tv || intel_sdvo->is_lvds) {
-+		input_dtd = intel_sdvo->input_dtd;
-+	} else {
- 		/* Set the output timing to the screen */
- 		if (!intel_sdvo_set_target_output(intel_sdvo,
- 						  intel_sdvo->attached_output))
- 			return;
- 
-+		intel_sdvo_get_dtd_from_mode(&input_dtd, adjusted_mode);
- 		(void) intel_sdvo_set_output_timing(intel_sdvo, &input_dtd);
- 	}
- 
-@@ -1143,31 +1023,18 @@ static void intel_sdvo_mode_set(struct drm_encoder *encoder,
- 	if (!intel_sdvo_set_target_input(intel_sdvo))
- 		return;
- 
--	if (intel_sdvo->is_tv) {
--		if (!intel_sdvo_set_tv_format(intel_sdvo))
--			return;
--	}
-+	if (intel_sdvo->is_hdmi &&
-+	    !intel_sdvo_set_avi_infoframe(intel_sdvo))
-+		return;
- 
--	/* We would like to use intel_sdvo_create_preferred_input_timing() to
--	 * provide the device with a timing it can support, if it supports that
--	 * feature.  However, presumably we would need to adjust the CRTC to
--	 * output the preferred timing, and we don't support that currently.
--	 */
--#if 0
--	success = intel_sdvo_create_preferred_input_timing(encoder, clock,
--							   width, height);
--	if (success) {
--		struct intel_sdvo_dtd *input_dtd;
-+	if (intel_sdvo->is_tv &&
-+	    !intel_sdvo_set_tv_format(intel_sdvo))
-+		return;
- 
--		intel_sdvo_get_preferred_input_timing(encoder, &input_dtd);
--		intel_sdvo_set_input_timing(encoder, &input_dtd);
--	}
--#else
- 	(void) intel_sdvo_set_input_timing(intel_sdvo, &input_dtd);
--#endif
- 
--	sdvo_pixel_multiply = intel_sdvo_get_pixel_multiplier(mode);
--	switch (sdvo_pixel_multiply) {
-+	switch (pixel_multiplier) {
-+	default:
- 	case 1: rate = SDVO_CLOCK_RATE_MULT_1X; break;
- 	case 2: rate = SDVO_CLOCK_RATE_MULT_2X; break;
- 	case 4: rate = SDVO_CLOCK_RATE_MULT_4X; break;
-@@ -1176,14 +1043,14 @@ static void intel_sdvo_mode_set(struct drm_encoder *encoder,
- 		return;
- 
- 	/* Set the SDVO control regs. */
--	if (IS_I965G(dev)) {
--		sdvox |= SDVO_BORDER_ENABLE;
-+	if (INTEL_INFO(dev)->gen >= 4) {
-+		sdvox = SDVO_BORDER_ENABLE;
- 		if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC)
- 			sdvox |= SDVO_VSYNC_ACTIVE_HIGH;
- 		if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC)
- 			sdvox |= SDVO_HSYNC_ACTIVE_HIGH;
- 	} else {
--		sdvox |= I915_READ(intel_sdvo->sdvo_reg);
-+		sdvox = I915_READ(intel_sdvo->sdvo_reg);
- 		switch (intel_sdvo->sdvo_reg) {
- 		case SDVOB:
- 			sdvox &= SDVOB_PRESERVE_MASK;
-@@ -1196,16 +1063,18 @@ static void intel_sdvo_mode_set(struct drm_encoder *encoder,
- 	}
- 	if (intel_crtc->pipe == 1)
- 		sdvox |= SDVO_PIPE_B_SELECT;
-+	if (intel_sdvo->has_audio)
-+		sdvox |= SDVO_AUDIO_ENABLE;
- 
--	if (IS_I965G(dev)) {
-+	if (INTEL_INFO(dev)->gen >= 4) {
- 		/* done in crtc_mode_set as the dpll_md reg must be written early */
- 	} else if (IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev)) {
- 		/* done in crtc_mode_set as it lives inside the dpll register */
- 	} else {
--		sdvox |= (sdvo_pixel_multiply - 1) << SDVO_PORT_MULTIPLY_SHIFT;
-+		sdvox |= (pixel_multiplier - 1) << SDVO_PORT_MULTIPLY_SHIFT;
- 	}
- 
--	if (intel_sdvo->sdvo_flags & SDVO_NEED_TO_STALL)
-+	if (input_dtd.part2.sdvo_flags & SDVO_NEED_TO_STALL)
- 		sdvox |= SDVO_STALL_SELECT;
- 	intel_sdvo_write_sdvox(intel_sdvo, sdvox);
- }
-@@ -1214,7 +1083,7 @@ static void intel_sdvo_dpms(struct drm_encoder *encoder, int mode)
- {
- 	struct drm_device *dev = encoder->dev;
- 	struct drm_i915_private *dev_priv = dev->dev_private;
--	struct intel_sdvo *intel_sdvo = enc_to_intel_sdvo(encoder);
-+	struct intel_sdvo *intel_sdvo = to_intel_sdvo(encoder);
- 	struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc);
- 	u32 temp;
- 
-@@ -1260,8 +1129,7 @@ static void intel_sdvo_dpms(struct drm_encoder *encoder, int mode)
- static int intel_sdvo_mode_valid(struct drm_connector *connector,
- 				 struct drm_display_mode *mode)
- {
--	struct drm_encoder *encoder = intel_attached_encoder(connector);
--	struct intel_sdvo *intel_sdvo = enc_to_intel_sdvo(encoder);
-+	struct intel_sdvo *intel_sdvo = intel_attached_sdvo(connector);
- 
- 	if (mode->flags & DRM_MODE_FLAG_DBLSCAN)
- 		return MODE_NO_DBLESCAN;
-@@ -1285,7 +1153,38 @@ static int intel_sdvo_mode_valid(struct drm_connector *connector,
- 
- static bool intel_sdvo_get_capabilities(struct intel_sdvo *intel_sdvo, struct intel_sdvo_caps *caps)
- {
--	return intel_sdvo_get_value(intel_sdvo, SDVO_CMD_GET_DEVICE_CAPS, caps, sizeof(*caps));
-+	if (!intel_sdvo_get_value(intel_sdvo,
-+				  SDVO_CMD_GET_DEVICE_CAPS,
-+				  caps, sizeof(*caps)))
-+		return false;
-+
-+	DRM_DEBUG_KMS("SDVO capabilities:\n"
-+		      "  vendor_id: %d\n"
-+		      "  device_id: %d\n"
-+		      "  device_rev_id: %d\n"
-+		      "  sdvo_version_major: %d\n"
-+		      "  sdvo_version_minor: %d\n"
-+		      "  sdvo_inputs_mask: %d\n"
-+		      "  smooth_scaling: %d\n"
-+		      "  sharp_scaling: %d\n"
-+		      "  up_scaling: %d\n"
-+		      "  down_scaling: %d\n"
-+		      "  stall_support: %d\n"
-+		      "  output_flags: %d\n",
-+		      caps->vendor_id,
-+		      caps->device_id,
-+		      caps->device_rev_id,
-+		      caps->sdvo_version_major,
-+		      caps->sdvo_version_minor,
-+		      caps->sdvo_inputs_mask,
-+		      caps->smooth_scaling,
-+		      caps->sharp_scaling,
-+		      caps->up_scaling,
-+		      caps->down_scaling,
-+		      caps->stall_support,
-+		      caps->output_flags);
-+
-+	return true;
- }
- 
- /* No use! */
-@@ -1389,22 +1288,33 @@ intel_sdvo_multifunc_encoder(struct intel_sdvo *intel_sdvo)
- 	return (caps > 1);
- }
- 
-+static struct edid *
-+intel_sdvo_get_edid(struct drm_connector *connector)
-+{
-+	struct intel_sdvo *sdvo = intel_attached_sdvo(connector);
-+	return drm_get_edid(connector, &sdvo->ddc);
-+}
-+
- static struct drm_connector *
- intel_find_analog_connector(struct drm_device *dev)
- {
- 	struct drm_connector *connector;
--	struct drm_encoder *encoder;
--	struct intel_sdvo *intel_sdvo;
--
--	list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
--		intel_sdvo = enc_to_intel_sdvo(encoder);
--		if (intel_sdvo->base.type == INTEL_OUTPUT_ANALOG) {
--			list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
--				if (encoder == intel_attached_encoder(connector))
-+	struct intel_sdvo *encoder;
-+
-+	list_for_each_entry(encoder,
-+			    &dev->mode_config.encoder_list,
-+			    base.base.head) {
-+		if (encoder->base.type == INTEL_OUTPUT_ANALOG) {
-+			list_for_each_entry(connector,
-+					    &dev->mode_config.connector_list,
-+					    head) {
-+				if (&encoder->base ==
-+				    intel_attached_encoder(connector))
- 					return connector;
- 			}
- 		}
- 	}
-+
- 	return NULL;
- }
- 
-@@ -1424,64 +1334,72 @@ intel_analog_is_connected(struct drm_device *dev)
- 	return true;
- }
- 
-+/* Mac mini hack -- use the same DDC as the analog connector */
-+static struct edid *
-+intel_sdvo_get_analog_edid(struct drm_connector *connector)
-+{
-+	struct drm_i915_private *dev_priv = connector->dev->dev_private;
-+
-+	if (!intel_analog_is_connected(connector->dev))
-+		return NULL;
-+
-+	return drm_get_edid(connector, &dev_priv->gmbus[dev_priv->crt_ddc_pin].adapter);
-+}
-+
- enum drm_connector_status
- intel_sdvo_hdmi_sink_detect(struct drm_connector *connector)
- {
--	struct drm_encoder *encoder = intel_attached_encoder(connector);
--	struct intel_sdvo *intel_sdvo = enc_to_intel_sdvo(encoder);
--	struct intel_sdvo_connector *intel_sdvo_connector = to_intel_sdvo_connector(connector);
--	enum drm_connector_status status = connector_status_connected;
--	struct edid *edid = NULL;
-+	struct intel_sdvo *intel_sdvo = intel_attached_sdvo(connector);
-+	enum drm_connector_status status;
-+	struct edid *edid;
- 
--	edid = drm_get_edid(connector, intel_sdvo->base.ddc_bus);
-+	edid = intel_sdvo_get_edid(connector);
- 
--	/* This is only applied to SDVO cards with multiple outputs */
- 	if (edid == NULL && intel_sdvo_multifunc_encoder(intel_sdvo)) {
--		uint8_t saved_ddc, temp_ddc;
--		saved_ddc = intel_sdvo->ddc_bus;
--		temp_ddc = intel_sdvo->ddc_bus >> 1;
-+		u8 ddc, saved_ddc = intel_sdvo->ddc_bus;
-+
- 		/*
- 		 * Don't use the 1 as the argument of DDC bus switch to get
- 		 * the EDID. It is used for SDVO SPD ROM.
- 		 */
--		while(temp_ddc > 1) {
--			intel_sdvo->ddc_bus = temp_ddc;
--			edid = drm_get_edid(connector, intel_sdvo->base.ddc_bus);
--			if (edid) {
--				/*
--				 * When we can get the EDID, maybe it is the
--				 * correct DDC bus. Update it.
--				 */
--				intel_sdvo->ddc_bus = temp_ddc;
-+		for (ddc = intel_sdvo->ddc_bus >> 1; ddc > 1; ddc >>= 1) {
-+			intel_sdvo->ddc_bus = ddc;
-+			edid = intel_sdvo_get_edid(connector);
-+			if (edid)
- 				break;
--			}
--			temp_ddc >>= 1;
- 		}
-+		/*
-+		 * If we found the EDID on the other bus,
-+		 * assume that is the correct DDC bus.
-+		 */
- 		if (edid == NULL)
- 			intel_sdvo->ddc_bus = saved_ddc;
- 	}
--	/* when there is no edid and no monitor is connected with VGA
--	 * port, try to use the CRT ddc to read the EDID for DVI-connector
-+
-+	/*
-+	 * When there is no edid and no monitor is connected with VGA
-+	 * port, try to use the CRT ddc to read the EDID for DVI-connector.
- 	 */
--	if (edid == NULL && intel_sdvo->analog_ddc_bus &&
--	    !intel_analog_is_connected(connector->dev))
--		edid = drm_get_edid(connector, intel_sdvo->analog_ddc_bus);
-+	if (edid == NULL)
-+		edid = intel_sdvo_get_analog_edid(connector);
- 
-+	status = connector_status_unknown;
- 	if (edid != NULL) {
--		bool is_digital = !!(edid->input & DRM_EDID_INPUT_DIGITAL);
--		bool need_digital = !!(intel_sdvo_connector->output_flag & SDVO_TMDS_MASK);
--
- 		/* DDC bus is shared, match EDID to connector type */
--		if (is_digital && need_digital)
-+		if (edid->input & DRM_EDID_INPUT_DIGITAL) {
-+			status = connector_status_connected;
- 			intel_sdvo->is_hdmi = drm_detect_hdmi_monitor(edid);
--		else if (is_digital != need_digital)
--			status = connector_status_disconnected;
--
-+			intel_sdvo->has_audio = drm_detect_monitor_audio(edid);
-+		}
- 		connector->display_info.raw_edid = NULL;
--	} else
--		status = connector_status_disconnected;
--	
--	kfree(edid);
-+		kfree(edid);
-+	}
-+
-+	if (status == connector_status_connected) {
-+		struct intel_sdvo_connector *intel_sdvo_connector = to_intel_sdvo_connector(connector);
-+		if (intel_sdvo_connector->force_audio)
-+			intel_sdvo->has_audio = intel_sdvo_connector->force_audio > 0;
-+	}
- 
- 	return status;
- }
-@@ -1490,13 +1408,12 @@ static enum drm_connector_status
- intel_sdvo_detect(struct drm_connector *connector, bool force)
- {
- 	uint16_t response;
--	struct drm_encoder *encoder = intel_attached_encoder(connector);
--	struct intel_sdvo *intel_sdvo = enc_to_intel_sdvo(encoder);
-+	struct intel_sdvo *intel_sdvo = intel_attached_sdvo(connector);
- 	struct intel_sdvo_connector *intel_sdvo_connector = to_intel_sdvo_connector(connector);
- 	enum drm_connector_status ret;
- 
- 	if (!intel_sdvo_write_cmd(intel_sdvo,
--			     SDVO_CMD_GET_ATTACHED_DISPLAYS, NULL, 0))
-+				  SDVO_CMD_GET_ATTACHED_DISPLAYS, NULL, 0))
- 		return connector_status_unknown;
- 	if (intel_sdvo->is_tv) {
- 		/* add 30ms delay when the output type is SDVO-TV */
-@@ -1505,7 +1422,9 @@ intel_sdvo_detect(struct drm_connector *connector, bool force)
- 	if (!intel_sdvo_read_response(intel_sdvo, &response, 2))
- 		return connector_status_unknown;
- 
--	DRM_DEBUG_KMS("SDVO response %d %d\n", response & 0xff, response >> 8);
-+	DRM_DEBUG_KMS("SDVO response %d %d [%x]\n",
-+		      response & 0xff, response >> 8,
-+		      intel_sdvo_connector->output_flag);
- 
- 	if (response == 0)
- 		return connector_status_disconnected;
-@@ -1538,12 +1457,10 @@ intel_sdvo_detect(struct drm_connector *connector, bool force)
- 
- static void intel_sdvo_get_ddc_modes(struct drm_connector *connector)
- {
--	struct drm_encoder *encoder = intel_attached_encoder(connector);
--	struct intel_sdvo *intel_sdvo = enc_to_intel_sdvo(encoder);
--	int num_modes;
-+	struct edid *edid;
- 
- 	/* set the bus switch and get the modes */
--	num_modes = intel_ddc_get_modes(connector, intel_sdvo->base.ddc_bus);
-+	edid = intel_sdvo_get_edid(connector);
- 
- 	/*
- 	 * Mac mini hack.  On this device, the DVI-I connector shares one DDC
-@@ -1551,12 +1468,14 @@ static void intel_sdvo_get_ddc_modes(struct drm_connector *connector)
- 	 * DDC fails, check to see if the analog output is disconnected, in
- 	 * which case we'll look there for the digital DDC data.
- 	 */
--	if (num_modes == 0 &&
--	    intel_sdvo->analog_ddc_bus &&
--	    !intel_analog_is_connected(connector->dev)) {
--		/* Switch to the analog ddc bus and try that
--		 */
--		(void) intel_ddc_get_modes(connector, intel_sdvo->analog_ddc_bus);
-+	if (edid == NULL)
-+		edid = intel_sdvo_get_analog_edid(connector);
-+
-+	if (edid != NULL) {
-+		drm_mode_connector_update_edid_property(connector, edid);
-+		drm_add_edid_modes(connector, edid);
-+		connector->display_info.raw_edid = NULL;
-+		kfree(edid);
- 	}
- }
- 
-@@ -1627,8 +1546,7 @@ struct drm_display_mode sdvo_tv_modes[] = {
- 
- static void intel_sdvo_get_tv_modes(struct drm_connector *connector)
- {
--	struct drm_encoder *encoder = intel_attached_encoder(connector);
--	struct intel_sdvo *intel_sdvo = enc_to_intel_sdvo(encoder);
-+	struct intel_sdvo *intel_sdvo = intel_attached_sdvo(connector);
- 	struct intel_sdvo_sdtv_resolution_request tv_res;
- 	uint32_t reply = 0, format_map = 0;
- 	int i;
-@@ -1644,7 +1562,8 @@ static void intel_sdvo_get_tv_modes(struct drm_connector *connector)
- 		return;
- 
- 	BUILD_BUG_ON(sizeof(tv_res) != 3);
--	if (!intel_sdvo_write_cmd(intel_sdvo, SDVO_CMD_GET_SDTV_RESOLUTION_SUPPORT,
-+	if (!intel_sdvo_write_cmd(intel_sdvo,
-+				  SDVO_CMD_GET_SDTV_RESOLUTION_SUPPORT,
- 				  &tv_res, sizeof(tv_res)))
- 		return;
- 	if (!intel_sdvo_read_response(intel_sdvo, &reply, 3))
-@@ -1662,8 +1581,7 @@ static void intel_sdvo_get_tv_modes(struct drm_connector *connector)
- 
- static void intel_sdvo_get_lvds_modes(struct drm_connector *connector)
- {
--	struct drm_encoder *encoder = intel_attached_encoder(connector);
--	struct intel_sdvo *intel_sdvo = enc_to_intel_sdvo(encoder);
-+	struct intel_sdvo *intel_sdvo = intel_attached_sdvo(connector);
- 	struct drm_i915_private *dev_priv = connector->dev->dev_private;
- 	struct drm_display_mode *newmode;
- 
-@@ -1672,7 +1590,7 @@ static void intel_sdvo_get_lvds_modes(struct drm_connector *connector)
- 	 * Assume that the preferred modes are
- 	 * arranged in priority order.
- 	 */
--	intel_ddc_get_modes(connector, intel_sdvo->base.ddc_bus);
-+	intel_ddc_get_modes(connector, intel_sdvo->i2c);
- 	if (list_empty(&connector->probed_modes) == false)
- 		goto end;
- 
-@@ -1693,6 +1611,10 @@ end:
- 		if (newmode->type & DRM_MODE_TYPE_PREFERRED) {
- 			intel_sdvo->sdvo_lvds_fixed_mode =
- 				drm_mode_duplicate(connector->dev, newmode);
-+
-+			drm_mode_set_crtcinfo(intel_sdvo->sdvo_lvds_fixed_mode,
-+					      0);
-+
- 			intel_sdvo->is_lvds = true;
- 			break;
- 		}
-@@ -1775,8 +1697,7 @@ intel_sdvo_set_property(struct drm_connector *connector,
- 			struct drm_property *property,
- 			uint64_t val)
- {
--	struct drm_encoder *encoder = intel_attached_encoder(connector);
--	struct intel_sdvo *intel_sdvo = enc_to_intel_sdvo(encoder);
-+	struct intel_sdvo *intel_sdvo = intel_attached_sdvo(connector);
- 	struct intel_sdvo_connector *intel_sdvo_connector = to_intel_sdvo_connector(connector);
- 	uint16_t temp_value;
- 	uint8_t cmd;
-@@ -1786,6 +1707,21 @@ intel_sdvo_set_property(struct drm_connector *connector,
- 	if (ret)
- 		return ret;
- 
-+	if (property == intel_sdvo_connector->force_audio_property) {
-+		if (val == intel_sdvo_connector->force_audio)
-+			return 0;
-+
-+		intel_sdvo_connector->force_audio = val;
-+
-+		if (val > 0 && intel_sdvo->has_audio)
-+			return 0;
-+		if (val < 0 && !intel_sdvo->has_audio)
-+			return 0;
-+
-+		intel_sdvo->has_audio = val > 0;
-+		goto done;
-+	}
-+
- #define CHECK_PROPERTY(name, NAME) \
- 	if (intel_sdvo_connector->name == property) { \
- 		if (intel_sdvo_connector->cur_##name == temp_value) return 0; \
-@@ -1879,9 +1815,8 @@ set_value:
- 
- 
- done:
--	if (encoder->crtc) {
--		struct drm_crtc *crtc = encoder->crtc;
--
-+	if (intel_sdvo->base.base.crtc) {
-+		struct drm_crtc *crtc = intel_sdvo->base.base.crtc;
- 		drm_crtc_helper_set_mode(crtc, &crtc->mode, crtc->x,
- 					 crtc->y, crtc->fb);
- 	}
-@@ -1909,20 +1844,18 @@ static const struct drm_connector_funcs intel_sdvo_connector_funcs = {
- static const struct drm_connector_helper_funcs intel_sdvo_connector_helper_funcs = {
- 	.get_modes = intel_sdvo_get_modes,
- 	.mode_valid = intel_sdvo_mode_valid,
--	.best_encoder = intel_attached_encoder,
-+	.best_encoder = intel_best_encoder,
- };
- 
- static void intel_sdvo_enc_destroy(struct drm_encoder *encoder)
- {
--	struct intel_sdvo *intel_sdvo = enc_to_intel_sdvo(encoder);
--
--	if (intel_sdvo->analog_ddc_bus)
--		intel_i2c_destroy(intel_sdvo->analog_ddc_bus);
-+	struct intel_sdvo *intel_sdvo = to_intel_sdvo(encoder);
- 
- 	if (intel_sdvo->sdvo_lvds_fixed_mode != NULL)
- 		drm_mode_destroy(encoder->dev,
- 				 intel_sdvo->sdvo_lvds_fixed_mode);
- 
-+	i2c_del_adapter(&intel_sdvo->ddc);
- 	intel_encoder_destroy(encoder);
- }
- 
-@@ -1990,53 +1923,48 @@ intel_sdvo_select_ddc_bus(struct drm_i915_private *dev_priv,
- 		intel_sdvo_guess_ddc_bus(sdvo);
- }
- 
--static bool
--intel_sdvo_get_digital_encoding_mode(struct intel_sdvo *intel_sdvo, int device)
-+static void
-+intel_sdvo_select_i2c_bus(struct drm_i915_private *dev_priv,
-+			  struct intel_sdvo *sdvo, u32 reg)
- {
--	return intel_sdvo_set_target_output(intel_sdvo,
--					    device == 0 ? SDVO_OUTPUT_TMDS0 : SDVO_OUTPUT_TMDS1) &&
--		intel_sdvo_get_value(intel_sdvo, SDVO_CMD_GET_ENCODE,
--				     &intel_sdvo->is_hdmi, 1);
--}
-+	struct sdvo_device_mapping *mapping;
-+	u8 pin, speed;
- 
--static struct intel_sdvo *
--intel_sdvo_chan_to_intel_sdvo(struct intel_i2c_chan *chan)
--{
--	struct drm_device *dev = chan->drm_dev;
--	struct drm_encoder *encoder;
-+	if (IS_SDVOB(reg))
-+		mapping = &dev_priv->sdvo_mappings[0];
-+	else
-+		mapping = &dev_priv->sdvo_mappings[1];
- 
--	list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
--		struct intel_sdvo *intel_sdvo = enc_to_intel_sdvo(encoder);
--		if (intel_sdvo->base.ddc_bus == &chan->adapter)
--			return intel_sdvo;
-+	pin = GMBUS_PORT_DPB;
-+	speed = GMBUS_RATE_1MHZ >> 8;
-+	if (mapping->initialized) {
-+		pin = mapping->i2c_pin;
-+		speed = mapping->i2c_speed;
- 	}
- 
--	return NULL;
-+	sdvo->i2c = &dev_priv->gmbus[pin].adapter;
-+	intel_gmbus_set_speed(sdvo->i2c, speed);
-+	intel_gmbus_force_bit(sdvo->i2c, true);
- }
- 
--static int intel_sdvo_master_xfer(struct i2c_adapter *i2c_adap,
--				  struct i2c_msg msgs[], int num)
-+static bool
-+intel_sdvo_is_hdmi_connector(struct intel_sdvo *intel_sdvo, int device)
- {
--	struct intel_sdvo *intel_sdvo;
--	struct i2c_algo_bit_data *algo_data;
--	const struct i2c_algorithm *algo;
-+	int is_hdmi;
- 
--	algo_data = (struct i2c_algo_bit_data *)i2c_adap->algo_data;
--	intel_sdvo =
--		intel_sdvo_chan_to_intel_sdvo((struct intel_i2c_chan *)
--					      (algo_data->data));
--	if (intel_sdvo == NULL)
--		return -EINVAL;
-+	if (!intel_sdvo_check_supp_encode(intel_sdvo))
-+		return false;
- 
--	algo = intel_sdvo->base.i2c_bus->algo;
-+	if (!intel_sdvo_set_target_output(intel_sdvo,
-+					  device == 0 ? SDVO_OUTPUT_TMDS0 : SDVO_OUTPUT_TMDS1))
-+		return false;
- 
--	intel_sdvo_set_control_bus_switch(intel_sdvo, intel_sdvo->ddc_bus);
--	return algo->master_xfer(i2c_adap, msgs, num);
--}
-+	is_hdmi = 0;
-+	if (!intel_sdvo_get_value(intel_sdvo, SDVO_CMD_GET_ENCODE, &is_hdmi, 1))
-+		return false;
- 
--static struct i2c_algorithm intel_sdvo_i2c_bit_algo = {
--	.master_xfer	= intel_sdvo_master_xfer,
--};
-+	return !!is_hdmi;
-+}
- 
- static u8
- intel_sdvo_get_slave_addr(struct drm_device *dev, int sdvo_reg)
-@@ -2076,26 +2004,44 @@ intel_sdvo_get_slave_addr(struct drm_device *dev, int sdvo_reg)
- }
- 
- static void
--intel_sdvo_connector_init(struct drm_encoder *encoder,
--			  struct drm_connector *connector)
-+intel_sdvo_connector_init(struct intel_sdvo_connector *connector,
-+			  struct intel_sdvo *encoder)
- {
--	drm_connector_init(encoder->dev, connector, &intel_sdvo_connector_funcs,
--			   connector->connector_type);
-+	drm_connector_init(encoder->base.base.dev,
-+			   &connector->base.base,
-+			   &intel_sdvo_connector_funcs,
-+			   connector->base.base.connector_type);
-+
-+	drm_connector_helper_add(&connector->base.base,
-+				 &intel_sdvo_connector_helper_funcs);
-+
-+	connector->base.base.interlace_allowed = 0;
-+	connector->base.base.doublescan_allowed = 0;
-+	connector->base.base.display_info.subpixel_order = SubPixelHorizontalRGB;
- 
--	drm_connector_helper_add(connector, &intel_sdvo_connector_helper_funcs);
-+	intel_connector_attach_encoder(&connector->base, &encoder->base);
-+	drm_sysfs_connector_add(&connector->base.base);
-+}
- 
--	connector->interlace_allowed = 0;
--	connector->doublescan_allowed = 0;
--	connector->display_info.subpixel_order = SubPixelHorizontalRGB;
-+static void
-+intel_sdvo_add_hdmi_properties(struct intel_sdvo_connector *connector)
-+{
-+	struct drm_device *dev = connector->base.base.dev;
- 
--	drm_mode_connector_attach_encoder(connector, encoder);
--	drm_sysfs_connector_add(connector);
-+	connector->force_audio_property =
-+		drm_property_create(dev, DRM_MODE_PROP_RANGE, "force_audio", 2);
-+	if (connector->force_audio_property) {
-+		connector->force_audio_property->values[0] = -1;
-+		connector->force_audio_property->values[1] = 1;
-+		drm_connector_attach_property(&connector->base.base,
-+					      connector->force_audio_property, 0);
-+	}
- }
- 
- static bool
- intel_sdvo_dvi_init(struct intel_sdvo *intel_sdvo, int device)
- {
--	struct drm_encoder *encoder = &intel_sdvo->base.enc;
-+	struct drm_encoder *encoder = &intel_sdvo->base.base;
- 	struct drm_connector *connector;
- 	struct intel_connector *intel_connector;
- 	struct intel_sdvo_connector *intel_sdvo_connector;
-@@ -2118,19 +2064,20 @@ intel_sdvo_dvi_init(struct intel_sdvo *intel_sdvo, int device)
- 	encoder->encoder_type = DRM_MODE_ENCODER_TMDS;
- 	connector->connector_type = DRM_MODE_CONNECTOR_DVID;
- 
--	if (intel_sdvo_get_supp_encode(intel_sdvo, &intel_sdvo->encode)
--		&& intel_sdvo_get_digital_encoding_mode(intel_sdvo, device)
--		&& intel_sdvo->is_hdmi) {
-+	if (intel_sdvo_is_hdmi_connector(intel_sdvo, device)) {
- 		/* enable hdmi encoding mode if supported */
- 		intel_sdvo_set_encode(intel_sdvo, SDVO_ENCODE_HDMI);
- 		intel_sdvo_set_colorimetry(intel_sdvo,
- 					   SDVO_COLORIMETRY_RGB256);
- 		connector->connector_type = DRM_MODE_CONNECTOR_HDMIA;
-+		intel_sdvo->is_hdmi = true;
- 	}
- 	intel_sdvo->base.clone_mask = ((1 << INTEL_SDVO_NON_TV_CLONE_BIT) |
- 				       (1 << INTEL_ANALOG_CLONE_BIT));
- 
--	intel_sdvo_connector_init(encoder, connector);
-+	intel_sdvo_connector_init(intel_sdvo_connector, intel_sdvo);
-+
-+	intel_sdvo_add_hdmi_properties(intel_sdvo_connector);
- 
- 	return true;
- }
-@@ -2138,36 +2085,36 @@ intel_sdvo_dvi_init(struct intel_sdvo *intel_sdvo, int device)
- static bool
- intel_sdvo_tv_init(struct intel_sdvo *intel_sdvo, int type)
- {
--        struct drm_encoder *encoder = &intel_sdvo->base.enc;
--        struct drm_connector *connector;
--        struct intel_connector *intel_connector;
--        struct intel_sdvo_connector *intel_sdvo_connector;
-+	struct drm_encoder *encoder = &intel_sdvo->base.base;
-+	struct drm_connector *connector;
-+	struct intel_connector *intel_connector;
-+	struct intel_sdvo_connector *intel_sdvo_connector;
- 
- 	intel_sdvo_connector = kzalloc(sizeof(struct intel_sdvo_connector), GFP_KERNEL);
- 	if (!intel_sdvo_connector)
- 		return false;
- 
- 	intel_connector = &intel_sdvo_connector->base;
--        connector = &intel_connector->base;
--        encoder->encoder_type = DRM_MODE_ENCODER_TVDAC;
--        connector->connector_type = DRM_MODE_CONNECTOR_SVIDEO;
-+	connector = &intel_connector->base;
-+	encoder->encoder_type = DRM_MODE_ENCODER_TVDAC;
-+	connector->connector_type = DRM_MODE_CONNECTOR_SVIDEO;
- 
--        intel_sdvo->controlled_output |= type;
--        intel_sdvo_connector->output_flag = type;
-+	intel_sdvo->controlled_output |= type;
-+	intel_sdvo_connector->output_flag = type;
- 
--        intel_sdvo->is_tv = true;
--        intel_sdvo->base.needs_tv_clock = true;
--        intel_sdvo->base.clone_mask = 1 << INTEL_SDVO_TV_CLONE_BIT;
-+	intel_sdvo->is_tv = true;
-+	intel_sdvo->base.needs_tv_clock = true;
-+	intel_sdvo->base.clone_mask = 1 << INTEL_SDVO_TV_CLONE_BIT;
- 
--        intel_sdvo_connector_init(encoder, connector);
-+	intel_sdvo_connector_init(intel_sdvo_connector, intel_sdvo);
- 
--        if (!intel_sdvo_tv_create_property(intel_sdvo, intel_sdvo_connector, type))
-+	if (!intel_sdvo_tv_create_property(intel_sdvo, intel_sdvo_connector, type))
- 		goto err;
- 
--        if (!intel_sdvo_create_enhance_property(intel_sdvo, intel_sdvo_connector))
-+	if (!intel_sdvo_create_enhance_property(intel_sdvo, intel_sdvo_connector))
- 		goto err;
- 
--        return true;
-+	return true;
- 
- err:
- 	intel_sdvo_destroy(connector);
-@@ -2177,43 +2124,44 @@ err:
- static bool
- intel_sdvo_analog_init(struct intel_sdvo *intel_sdvo, int device)
- {
--        struct drm_encoder *encoder = &intel_sdvo->base.enc;
--        struct drm_connector *connector;
--        struct intel_connector *intel_connector;
--        struct intel_sdvo_connector *intel_sdvo_connector;
-+	struct drm_encoder *encoder = &intel_sdvo->base.base;
-+	struct drm_connector *connector;
-+	struct intel_connector *intel_connector;
-+	struct intel_sdvo_connector *intel_sdvo_connector;
- 
- 	intel_sdvo_connector = kzalloc(sizeof(struct intel_sdvo_connector), GFP_KERNEL);
- 	if (!intel_sdvo_connector)
- 		return false;
- 
- 	intel_connector = &intel_sdvo_connector->base;
--        connector = &intel_connector->base;
-+	connector = &intel_connector->base;
- 	connector->polled = DRM_CONNECTOR_POLL_CONNECT;
--        encoder->encoder_type = DRM_MODE_ENCODER_DAC;
--        connector->connector_type = DRM_MODE_CONNECTOR_VGA;
--
--        if (device == 0) {
--                intel_sdvo->controlled_output |= SDVO_OUTPUT_RGB0;
--                intel_sdvo_connector->output_flag = SDVO_OUTPUT_RGB0;
--        } else if (device == 1) {
--                intel_sdvo->controlled_output |= SDVO_OUTPUT_RGB1;
--                intel_sdvo_connector->output_flag = SDVO_OUTPUT_RGB1;
--        }
--
--        intel_sdvo->base.clone_mask = ((1 << INTEL_SDVO_NON_TV_CLONE_BIT) |
-+	encoder->encoder_type = DRM_MODE_ENCODER_DAC;
-+	connector->connector_type = DRM_MODE_CONNECTOR_VGA;
-+
-+	if (device == 0) {
-+		intel_sdvo->controlled_output |= SDVO_OUTPUT_RGB0;
-+		intel_sdvo_connector->output_flag = SDVO_OUTPUT_RGB0;
-+	} else if (device == 1) {
-+		intel_sdvo->controlled_output |= SDVO_OUTPUT_RGB1;
-+		intel_sdvo_connector->output_flag = SDVO_OUTPUT_RGB1;
-+	}
-+
-+	intel_sdvo->base.clone_mask = ((1 << INTEL_SDVO_NON_TV_CLONE_BIT) |
- 				       (1 << INTEL_ANALOG_CLONE_BIT));
- 
--        intel_sdvo_connector_init(encoder, connector);
--        return true;
-+	intel_sdvo_connector_init(intel_sdvo_connector,
-+				  intel_sdvo);
-+	return true;
- }
- 
- static bool
- intel_sdvo_lvds_init(struct intel_sdvo *intel_sdvo, int device)
- {
--        struct drm_encoder *encoder = &intel_sdvo->base.enc;
--        struct drm_connector *connector;
--        struct intel_connector *intel_connector;
--        struct intel_sdvo_connector *intel_sdvo_connector;
-+	struct drm_encoder *encoder = &intel_sdvo->base.base;
-+	struct drm_connector *connector;
-+	struct intel_connector *intel_connector;
-+	struct intel_sdvo_connector *intel_sdvo_connector;
- 
- 	intel_sdvo_connector = kzalloc(sizeof(struct intel_sdvo_connector), GFP_KERNEL);
- 	if (!intel_sdvo_connector)
-@@ -2221,22 +2169,22 @@ intel_sdvo_lvds_init(struct intel_sdvo *intel_sdvo, int device)
- 
- 	intel_connector = &intel_sdvo_connector->base;
- 	connector = &intel_connector->base;
--        encoder->encoder_type = DRM_MODE_ENCODER_LVDS;
--        connector->connector_type = DRM_MODE_CONNECTOR_LVDS;
--
--        if (device == 0) {
--                intel_sdvo->controlled_output |= SDVO_OUTPUT_LVDS0;
--                intel_sdvo_connector->output_flag = SDVO_OUTPUT_LVDS0;
--        } else if (device == 1) {
--                intel_sdvo->controlled_output |= SDVO_OUTPUT_LVDS1;
--                intel_sdvo_connector->output_flag = SDVO_OUTPUT_LVDS1;
--        }
--
--        intel_sdvo->base.clone_mask = ((1 << INTEL_ANALOG_CLONE_BIT) |
-+	encoder->encoder_type = DRM_MODE_ENCODER_LVDS;
-+	connector->connector_type = DRM_MODE_CONNECTOR_LVDS;
-+
-+	if (device == 0) {
-+		intel_sdvo->controlled_output |= SDVO_OUTPUT_LVDS0;
-+		intel_sdvo_connector->output_flag = SDVO_OUTPUT_LVDS0;
-+	} else if (device == 1) {
-+		intel_sdvo->controlled_output |= SDVO_OUTPUT_LVDS1;
-+		intel_sdvo_connector->output_flag = SDVO_OUTPUT_LVDS1;
-+	}
-+
-+	intel_sdvo->base.clone_mask = ((1 << INTEL_ANALOG_CLONE_BIT) |
- 				       (1 << INTEL_SDVO_LVDS_CLONE_BIT));
- 
--        intel_sdvo_connector_init(encoder, connector);
--        if (!intel_sdvo_create_enhance_property(intel_sdvo, intel_sdvo_connector))
-+	intel_sdvo_connector_init(intel_sdvo_connector, intel_sdvo);
-+	if (!intel_sdvo_create_enhance_property(intel_sdvo, intel_sdvo_connector))
- 		goto err;
- 
- 	return true;
-@@ -2307,7 +2255,7 @@ static bool intel_sdvo_tv_create_property(struct intel_sdvo *intel_sdvo,
- 					  struct intel_sdvo_connector *intel_sdvo_connector,
- 					  int type)
- {
--	struct drm_device *dev = intel_sdvo->base.enc.dev;
-+	struct drm_device *dev = intel_sdvo->base.base.dev;
- 	struct intel_sdvo_tv_format format;
- 	uint32_t format_map, i;
- 
-@@ -2373,7 +2321,7 @@ intel_sdvo_create_enhance_property_tv(struct intel_sdvo *intel_sdvo,
- 				      struct intel_sdvo_connector *intel_sdvo_connector,
- 				      struct intel_sdvo_enhancements_reply enhancements)
- {
--	struct drm_device *dev = intel_sdvo->base.enc.dev;
-+	struct drm_device *dev = intel_sdvo->base.base.dev;
- 	struct drm_connector *connector = &intel_sdvo_connector->base.base;
- 	uint16_t response, data_value[2];
- 
-@@ -2502,7 +2450,7 @@ intel_sdvo_create_enhance_property_lvds(struct intel_sdvo *intel_sdvo,
- 					struct intel_sdvo_connector *intel_sdvo_connector,
- 					struct intel_sdvo_enhancements_reply enhancements)
- {
--	struct drm_device *dev = intel_sdvo->base.enc.dev;
-+	struct drm_device *dev = intel_sdvo->base.base.dev;
- 	struct drm_connector *connector = &intel_sdvo_connector->base.base;
- 	uint16_t response, data_value[2];
- 
-@@ -2535,7 +2483,43 @@ static bool intel_sdvo_create_enhance_property(struct intel_sdvo *intel_sdvo,
- 		return intel_sdvo_create_enhance_property_lvds(intel_sdvo, intel_sdvo_connector, enhancements.reply);
- 	else
- 		return true;
-+}
-+
-+static int intel_sdvo_ddc_proxy_xfer(struct i2c_adapter *adapter,
-+				     struct i2c_msg *msgs,
-+				     int num)
-+{
-+	struct intel_sdvo *sdvo = adapter->algo_data;
- 
-+	if (!intel_sdvo_set_control_bus_switch(sdvo, sdvo->ddc_bus))
-+		return -EIO;
-+
-+	return sdvo->i2c->algo->master_xfer(sdvo->i2c, msgs, num);
-+}
-+
-+static u32 intel_sdvo_ddc_proxy_func(struct i2c_adapter *adapter)
-+{
-+	struct intel_sdvo *sdvo = adapter->algo_data;
-+	return sdvo->i2c->algo->functionality(sdvo->i2c);
-+}
-+
-+static const struct i2c_algorithm intel_sdvo_ddc_proxy = {
-+	.master_xfer	= intel_sdvo_ddc_proxy_xfer,
-+	.functionality	= intel_sdvo_ddc_proxy_func
-+};
-+
-+static bool
-+intel_sdvo_init_ddc_proxy(struct intel_sdvo *sdvo,
-+			  struct drm_device *dev)
-+{
-+	sdvo->ddc.owner = THIS_MODULE;
-+	sdvo->ddc.class = I2C_CLASS_DDC;
-+	snprintf(sdvo->ddc.name, I2C_NAME_SIZE, "SDVO DDC proxy");
-+	sdvo->ddc.dev.parent = &dev->pdev->dev;
-+	sdvo->ddc.algo_data = sdvo;
-+	sdvo->ddc.algo = &intel_sdvo_ddc_proxy;
-+
-+	return i2c_add_adapter(&sdvo->ddc) == 0;
- }
- 
- bool intel_sdvo_init(struct drm_device *dev, int sdvo_reg)
-@@ -2543,95 +2527,66 @@ bool intel_sdvo_init(struct drm_device *dev, int sdvo_reg)
- 	struct drm_i915_private *dev_priv = dev->dev_private;
- 	struct intel_encoder *intel_encoder;
- 	struct intel_sdvo *intel_sdvo;
--	u8 ch[0x40];
- 	int i;
--	u32 i2c_reg, ddc_reg, analog_ddc_reg;
- 
- 	intel_sdvo = kzalloc(sizeof(struct intel_sdvo), GFP_KERNEL);
- 	if (!intel_sdvo)
- 		return false;
- 
-+	if (!intel_sdvo_init_ddc_proxy(intel_sdvo, dev)) {
-+		kfree(intel_sdvo);
-+		return false;
-+	}
-+
- 	intel_sdvo->sdvo_reg = sdvo_reg;
- 
- 	intel_encoder = &intel_sdvo->base;
- 	intel_encoder->type = INTEL_OUTPUT_SDVO;
-+	/* encoder type will be decided later */
-+	drm_encoder_init(dev, &intel_encoder->base, &intel_sdvo_enc_funcs, 0);
- 
--	if (HAS_PCH_SPLIT(dev)) {
--		i2c_reg = PCH_GPIOE;
--		ddc_reg = PCH_GPIOE;
--		analog_ddc_reg = PCH_GPIOA;
--	} else {
--		i2c_reg = GPIOE;
--		ddc_reg = GPIOE;
--		analog_ddc_reg = GPIOA;
--	}
--
--	/* setup the DDC bus. */
--	if (IS_SDVOB(sdvo_reg))
--		intel_encoder->i2c_bus = intel_i2c_create(dev, i2c_reg, "SDVOCTRL_E for SDVOB");
--	else
--		intel_encoder->i2c_bus = intel_i2c_create(dev, i2c_reg, "SDVOCTRL_E for SDVOC");
--
--	if (!intel_encoder->i2c_bus)
--		goto err_inteloutput;
--
--	intel_sdvo->slave_addr = intel_sdvo_get_slave_addr(dev, sdvo_reg);
--
--	/* Save the bit-banging i2c functionality for use by the DDC wrapper */
--	intel_sdvo_i2c_bit_algo.functionality = intel_encoder->i2c_bus->algo->functionality;
-+	intel_sdvo->slave_addr = intel_sdvo_get_slave_addr(dev, sdvo_reg) >> 1;
-+	intel_sdvo_select_i2c_bus(dev_priv, intel_sdvo, sdvo_reg);
- 
- 	/* Read the regs to test if we can talk to the device */
- 	for (i = 0; i < 0x40; i++) {
--		if (!intel_sdvo_read_byte(intel_sdvo, i, &ch[i])) {
-+		u8 byte;
-+
-+		if (!intel_sdvo_read_byte(intel_sdvo, i, &byte)) {
- 			DRM_DEBUG_KMS("No SDVO device found on SDVO%c\n",
- 				      IS_SDVOB(sdvo_reg) ? 'B' : 'C');
--			goto err_i2c;
-+			goto err;
- 		}
- 	}
- 
--	/* setup the DDC bus. */
--	if (IS_SDVOB(sdvo_reg)) {
--		intel_encoder->ddc_bus = intel_i2c_create(dev, ddc_reg, "SDVOB DDC BUS");
--		intel_sdvo->analog_ddc_bus = intel_i2c_create(dev, analog_ddc_reg,
--						"SDVOB/VGA DDC BUS");
-+	if (IS_SDVOB(sdvo_reg))
- 		dev_priv->hotplug_supported_mask |= SDVOB_HOTPLUG_INT_STATUS;
--	} else {
--		intel_encoder->ddc_bus = intel_i2c_create(dev, ddc_reg, "SDVOC DDC BUS");
--		intel_sdvo->analog_ddc_bus = intel_i2c_create(dev, analog_ddc_reg,
--						"SDVOC/VGA DDC BUS");
-+	else
- 		dev_priv->hotplug_supported_mask |= SDVOC_HOTPLUG_INT_STATUS;
--	}
--	if (intel_encoder->ddc_bus == NULL || intel_sdvo->analog_ddc_bus == NULL)
--		goto err_i2c;
- 
--	/* Wrap with our custom algo which switches to DDC mode */
--	intel_encoder->ddc_bus->algo = &intel_sdvo_i2c_bit_algo;
--
--	/* encoder type will be decided later */
--	drm_encoder_init(dev, &intel_encoder->enc, &intel_sdvo_enc_funcs, 0);
--	drm_encoder_helper_add(&intel_encoder->enc, &intel_sdvo_helper_funcs);
-+	drm_encoder_helper_add(&intel_encoder->base, &intel_sdvo_helper_funcs);
- 
- 	/* In default case sdvo lvds is false */
- 	if (!intel_sdvo_get_capabilities(intel_sdvo, &intel_sdvo->caps))
--		goto err_enc;
-+		goto err;
- 
- 	if (intel_sdvo_output_setup(intel_sdvo,
- 				    intel_sdvo->caps.output_flags) != true) {
- 		DRM_DEBUG_KMS("SDVO output failed to setup on SDVO%c\n",
- 			      IS_SDVOB(sdvo_reg) ? 'B' : 'C');
--		goto err_enc;
-+		goto err;
- 	}
- 
- 	intel_sdvo_select_ddc_bus(dev_priv, intel_sdvo, sdvo_reg);
- 
- 	/* Set the input timing to the screen. Assume always input 0. */
- 	if (!intel_sdvo_set_target_input(intel_sdvo))
--		goto err_enc;
-+		goto err;
- 
- 	if (!intel_sdvo_get_input_pixel_clock_range(intel_sdvo,
- 						    &intel_sdvo->pixel_clock_min,
- 						    &intel_sdvo->pixel_clock_max))
--		goto err_enc;
-+		goto err;
- 
- 	DRM_DEBUG_KMS("%s device VID/DID: %02X:%02X.%02X, "
- 			"clock range %dMHz - %dMHz, "
-@@ -2651,16 +2606,9 @@ bool intel_sdvo_init(struct drm_device *dev, int sdvo_reg)
- 			(SDVO_OUTPUT_TMDS1 | SDVO_OUTPUT_RGB1) ? 'Y' : 'N');
- 	return true;
- 
--err_enc:
--	drm_encoder_cleanup(&intel_encoder->enc);
--err_i2c:
--	if (intel_sdvo->analog_ddc_bus != NULL)
--		intel_i2c_destroy(intel_sdvo->analog_ddc_bus);
--	if (intel_encoder->ddc_bus != NULL)
--		intel_i2c_destroy(intel_encoder->ddc_bus);
--	if (intel_encoder->i2c_bus != NULL)
--		intel_i2c_destroy(intel_encoder->i2c_bus);
--err_inteloutput:
-+err:
-+	drm_encoder_cleanup(&intel_encoder->base);
-+	i2c_del_adapter(&intel_sdvo->ddc);
- 	kfree(intel_sdvo);
- 
- 	return false;
-diff --git a/drivers/gpu/drm/i915/intel_tv.c b/drivers/gpu/drm/i915/intel_tv.c
-index 4a117e3..2f76819 100644
---- a/drivers/gpu/drm/i915/intel_tv.c
-+++ b/drivers/gpu/drm/i915/intel_tv.c
-@@ -48,7 +48,7 @@ struct intel_tv {
- 	struct intel_encoder base;
- 
- 	int type;
--	char *tv_format;
-+	const char *tv_format;
- 	int margin[4];
- 	u32 save_TV_H_CTL_1;
- 	u32 save_TV_H_CTL_2;
-@@ -350,7 +350,7 @@ static const struct video_levels component_levels = {
- 
- 
- struct tv_mode {
--	char *name;
-+	const char *name;
- 	int clock;
- 	int refresh; /* in millihertz (for precision) */
- 	u32 oversample;
-@@ -900,7 +900,14 @@ static const struct tv_mode tv_modes[] = {
- 
- static struct intel_tv *enc_to_intel_tv(struct drm_encoder *encoder)
- {
--	return container_of(enc_to_intel_encoder(encoder), struct intel_tv, base);
-+	return container_of(encoder, struct intel_tv, base.base);
-+}
-+
-+static struct intel_tv *intel_attached_tv(struct drm_connector *connector)
-+{
-+	return container_of(intel_attached_encoder(connector),
-+			    struct intel_tv,
-+			    base);
- }
- 
- static void
-@@ -922,7 +929,7 @@ intel_tv_dpms(struct drm_encoder *encoder, int mode)
- }
- 
- static const struct tv_mode *
--intel_tv_mode_lookup (char *tv_format)
-+intel_tv_mode_lookup(const char *tv_format)
- {
- 	int i;
- 
-@@ -936,22 +943,23 @@ intel_tv_mode_lookup (char *tv_format)
- }
- 
- static const struct tv_mode *
--intel_tv_mode_find (struct intel_tv *intel_tv)
-+intel_tv_mode_find(struct intel_tv *intel_tv)
- {
- 	return intel_tv_mode_lookup(intel_tv->tv_format);
- }
- 
- static enum drm_mode_status
--intel_tv_mode_valid(struct drm_connector *connector, struct drm_display_mode *mode)
-+intel_tv_mode_valid(struct drm_connector *connector,
-+		    struct drm_display_mode *mode)
- {
--	struct drm_encoder *encoder = intel_attached_encoder(connector);
--	struct intel_tv *intel_tv = enc_to_intel_tv(encoder);
-+	struct intel_tv *intel_tv = intel_attached_tv(connector);
- 	const struct tv_mode *tv_mode = intel_tv_mode_find(intel_tv);
- 
- 	/* Ensure TV refresh is close to desired refresh */
- 	if (tv_mode && abs(tv_mode->refresh - drm_mode_vrefresh(mode) * 1000)
- 				< 1000)
- 		return MODE_OK;
-+
- 	return MODE_CLOCK_RANGE;
- }
- 
-@@ -1131,7 +1139,7 @@ intel_tv_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode,
- 			   color_conversion->av);
- 	}
- 
--	if (IS_I965G(dev))
-+	if (INTEL_INFO(dev)->gen >= 4)
- 		I915_WRITE(TV_CLR_KNOBS, 0x00404000);
- 	else
- 		I915_WRITE(TV_CLR_KNOBS, 0x00606000);
-@@ -1157,12 +1165,12 @@ intel_tv_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode,
- 		I915_WRITE(dspbase_reg, I915_READ(dspbase_reg));
- 
- 		/* Wait for vblank for the disable to take effect */
--		if (!IS_I9XX(dev))
-+		if (IS_GEN2(dev))
- 			intel_wait_for_vblank(dev, intel_crtc->pipe);
- 
--		I915_WRITE(pipeconf_reg, pipeconf & ~PIPEACONF_ENABLE);
-+		I915_WRITE(pipeconf_reg, pipeconf & ~PIPECONF_ENABLE);
- 		/* Wait for vblank for the disable to take effect. */
--		intel_wait_for_vblank(dev, intel_crtc->pipe);
-+		intel_wait_for_pipe_off(dev, intel_crtc->pipe);
- 
- 		/* Filter ctl must be set before TV_WIN_SIZE */
- 		I915_WRITE(TV_FILTER_CTL_1, TV_AUTO_SCALE);
-@@ -1196,7 +1204,7 @@ intel_tv_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode,
- 		I915_WRITE(TV_V_LUMA_0 + (i<<2), tv_mode->filter_table[j++]);
- 	for (i = 0; i < 43; i++)
- 		I915_WRITE(TV_V_CHROMA_0 + (i<<2), tv_mode->filter_table[j++]);
--	I915_WRITE(TV_DAC, 0);
-+	I915_WRITE(TV_DAC, I915_READ(TV_DAC) & TV_DAC_SAVE);
- 	I915_WRITE(TV_CTL, tv_ctl);
- }
- 
-@@ -1228,15 +1236,13 @@ static const struct drm_display_mode reported_modes[] = {
- static int
- intel_tv_detect_type (struct intel_tv *intel_tv)
- {
--	struct drm_encoder *encoder = &intel_tv->base.enc;
-+	struct drm_encoder *encoder = &intel_tv->base.base;
- 	struct drm_device *dev = encoder->dev;
- 	struct drm_i915_private *dev_priv = dev->dev_private;
- 	unsigned long irqflags;
- 	u32 tv_ctl, save_tv_ctl;
- 	u32 tv_dac, save_tv_dac;
--	int type = DRM_MODE_CONNECTOR_Unknown;
--
--	tv_dac = I915_READ(TV_DAC);
-+	int type;
- 
- 	/* Disable TV interrupts around load detect or we'll recurse */
- 	spin_lock_irqsave(&dev_priv->user_irq_lock, irqflags);
-@@ -1244,19 +1250,14 @@ intel_tv_detect_type (struct intel_tv *intel_tv)
- 			      PIPE_HOTPLUG_TV_INTERRUPT_ENABLE);
- 	spin_unlock_irqrestore(&dev_priv->user_irq_lock, irqflags);
- 
--	/*
--	 * Detect TV by polling)
--	 */
--	save_tv_dac = tv_dac;
--	tv_ctl = I915_READ(TV_CTL);
--	save_tv_ctl = tv_ctl;
--	tv_ctl &= ~TV_ENC_ENABLE;
--	tv_ctl &= ~TV_TEST_MODE_MASK;
-+	save_tv_dac = tv_dac = I915_READ(TV_DAC);
-+	save_tv_ctl = tv_ctl = I915_READ(TV_CTL);
-+
-+	/* Poll for TV detection */
-+	tv_ctl &= ~(TV_ENC_ENABLE | TV_TEST_MODE_MASK);
- 	tv_ctl |= TV_TEST_MODE_MONITOR_DETECT;
--	tv_dac &= ~TVDAC_SENSE_MASK;
--	tv_dac &= ~DAC_A_MASK;
--	tv_dac &= ~DAC_B_MASK;
--	tv_dac &= ~DAC_C_MASK;
-+
-+	tv_dac &= ~(TVDAC_SENSE_MASK | DAC_A_MASK | DAC_B_MASK | DAC_C_MASK);
- 	tv_dac |= (TVDAC_STATE_CHG_EN |
- 		   TVDAC_A_SENSE_CTL |
- 		   TVDAC_B_SENSE_CTL |
-@@ -1265,37 +1266,40 @@ intel_tv_detect_type (struct intel_tv *intel_tv)
- 		   DAC_A_0_7_V |
- 		   DAC_B_0_7_V |
- 		   DAC_C_0_7_V);
-+
- 	I915_WRITE(TV_CTL, tv_ctl);
- 	I915_WRITE(TV_DAC, tv_dac);
- 	POSTING_READ(TV_DAC);
--	msleep(20);
- 
--	tv_dac = I915_READ(TV_DAC);
--	I915_WRITE(TV_DAC, save_tv_dac);
--	I915_WRITE(TV_CTL, save_tv_ctl);
--	POSTING_READ(TV_CTL);
--	msleep(20);
-+	intel_wait_for_vblank(intel_tv->base.base.dev,
-+			      to_intel_crtc(intel_tv->base.base.crtc)->pipe);
- 
--	/*
--	 *  A B C
--	 *  0 1 1 Composite
--	 *  1 0 X svideo
--	 *  0 0 0 Component
--	 */
--	if ((tv_dac & TVDAC_SENSE_MASK) == (TVDAC_B_SENSE | TVDAC_C_SENSE)) {
--		DRM_DEBUG_KMS("Detected Composite TV connection\n");
--		type = DRM_MODE_CONNECTOR_Composite;
--	} else if ((tv_dac & (TVDAC_A_SENSE|TVDAC_B_SENSE)) == TVDAC_A_SENSE) {
--		DRM_DEBUG_KMS("Detected S-Video TV connection\n");
--		type = DRM_MODE_CONNECTOR_SVIDEO;
--	} else if ((tv_dac & TVDAC_SENSE_MASK) == 0) {
--		DRM_DEBUG_KMS("Detected Component TV connection\n");
--		type = DRM_MODE_CONNECTOR_Component;
--	} else {
--		DRM_DEBUG_KMS("No TV connection detected\n");
--		type = -1;
-+	type = -1;
-+	if (wait_for((tv_dac = I915_READ(TV_DAC)) & TVDAC_STATE_CHG, 20) == 0) {
-+		DRM_DEBUG_KMS("TV detected: %x, %x\n", tv_ctl, tv_dac);
-+		/*
-+		 *  A B C
-+		 *  0 1 1 Composite
-+		 *  1 0 X svideo
-+		 *  0 0 0 Component
-+		 */
-+		if ((tv_dac & TVDAC_SENSE_MASK) == (TVDAC_B_SENSE | TVDAC_C_SENSE)) {
-+			DRM_DEBUG_KMS("Detected Composite TV connection\n");
-+			type = DRM_MODE_CONNECTOR_Composite;
-+		} else if ((tv_dac & (TVDAC_A_SENSE|TVDAC_B_SENSE)) == TVDAC_A_SENSE) {
-+			DRM_DEBUG_KMS("Detected S-Video TV connection\n");
-+			type = DRM_MODE_CONNECTOR_SVIDEO;
-+		} else if ((tv_dac & TVDAC_SENSE_MASK) == 0) {
-+			DRM_DEBUG_KMS("Detected Component TV connection\n");
-+			type = DRM_MODE_CONNECTOR_Component;
-+		} else {
-+			DRM_DEBUG_KMS("Unrecognised TV connection\n");
-+		}
- 	}
- 
-+	I915_WRITE(TV_DAC, save_tv_dac & ~TVDAC_STATE_CHG_EN);
-+	I915_WRITE(TV_CTL, save_tv_ctl);
-+
- 	/* Restore interrupt config */
- 	spin_lock_irqsave(&dev_priv->user_irq_lock, irqflags);
- 	i915_enable_pipestat(dev_priv, 0, PIPE_HOTPLUG_INTERRUPT_ENABLE |
-@@ -1311,8 +1315,7 @@ intel_tv_detect_type (struct intel_tv *intel_tv)
-  */
- static void intel_tv_find_better_format(struct drm_connector *connector)
- {
--	struct drm_encoder *encoder = intel_attached_encoder(connector);
--	struct intel_tv *intel_tv = enc_to_intel_tv(encoder);
-+	struct intel_tv *intel_tv = intel_attached_tv(connector);
- 	const struct tv_mode *tv_mode = intel_tv_mode_find(intel_tv);
- 	int i;
- 
-@@ -1344,14 +1347,13 @@ static enum drm_connector_status
- intel_tv_detect(struct drm_connector *connector, bool force)
- {
- 	struct drm_display_mode mode;
--	struct drm_encoder *encoder = intel_attached_encoder(connector);
--	struct intel_tv *intel_tv = enc_to_intel_tv(encoder);
-+	struct intel_tv *intel_tv = intel_attached_tv(connector);
- 	int type;
- 
- 	mode = reported_modes[0];
- 	drm_mode_set_crtcinfo(&mode, CRTC_INTERLACE_HALVE_V);
- 
--	if (encoder->crtc && encoder->crtc->enabled) {
-+	if (intel_tv->base.base.crtc && intel_tv->base.base.crtc->enabled) {
- 		type = intel_tv_detect_type(intel_tv);
- 	} else if (force) {
- 		struct drm_crtc *crtc;
-@@ -1375,11 +1377,10 @@ intel_tv_detect(struct drm_connector *connector, bool force)
- 	return connector_status_connected;
- }
- 
--static struct input_res {
--	char *name;
-+static const struct input_res {
-+	const char *name;
- 	int w, h;
--} input_res_table[] =
--{
-+} input_res_table[] = {
- 	{"640x480", 640, 480},
- 	{"800x600", 800, 600},
- 	{"1024x768", 1024, 768},
-@@ -1396,8 +1397,7 @@ static void
- intel_tv_chose_preferred_modes(struct drm_connector *connector,
- 			       struct drm_display_mode *mode_ptr)
- {
--	struct drm_encoder *encoder = intel_attached_encoder(connector);
--	struct intel_tv *intel_tv = enc_to_intel_tv(encoder);
-+	struct intel_tv *intel_tv = intel_attached_tv(connector);
- 	const struct tv_mode *tv_mode = intel_tv_mode_find(intel_tv);
- 
- 	if (tv_mode->nbr_end < 480 && mode_ptr->vdisplay == 480)
-@@ -1422,15 +1422,14 @@ static int
- intel_tv_get_modes(struct drm_connector *connector)
- {
- 	struct drm_display_mode *mode_ptr;
--	struct drm_encoder *encoder = intel_attached_encoder(connector);
--	struct intel_tv *intel_tv = enc_to_intel_tv(encoder);
-+	struct intel_tv *intel_tv = intel_attached_tv(connector);
- 	const struct tv_mode *tv_mode = intel_tv_mode_find(intel_tv);
- 	int j, count = 0;
- 	u64 tmp;
- 
- 	for (j = 0; j < ARRAY_SIZE(input_res_table);
- 	     j++) {
--		struct input_res *input = &input_res_table[j];
-+		const struct input_res *input = &input_res_table[j];
- 		unsigned int hactive_s = input->w;
- 		unsigned int vactive_s = input->h;
- 
-@@ -1488,9 +1487,8 @@ intel_tv_set_property(struct drm_connector *connector, struct drm_property *prop
- 		      uint64_t val)
- {
- 	struct drm_device *dev = connector->dev;
--	struct drm_encoder *encoder = intel_attached_encoder(connector);
--	struct intel_tv *intel_tv = enc_to_intel_tv(encoder);
--	struct drm_crtc *crtc = encoder->crtc;
-+	struct intel_tv *intel_tv = intel_attached_tv(connector);
-+	struct drm_crtc *crtc = intel_tv->base.base.crtc;
- 	int ret = 0;
- 	bool changed = false;
- 
-@@ -1555,7 +1553,7 @@ static const struct drm_connector_funcs intel_tv_connector_funcs = {
- static const struct drm_connector_helper_funcs intel_tv_connector_helper_funcs = {
- 	.mode_valid = intel_tv_mode_valid,
- 	.get_modes = intel_tv_get_modes,
--	.best_encoder = intel_attached_encoder,
-+	.best_encoder = intel_best_encoder,
- };
- 
- static const struct drm_encoder_funcs intel_tv_enc_funcs = {
-@@ -1607,7 +1605,7 @@ intel_tv_init(struct drm_device *dev)
- 	struct intel_encoder *intel_encoder;
- 	struct intel_connector *intel_connector;
- 	u32 tv_dac_on, tv_dac_off, save_tv_dac;
--	char **tv_format_names;
-+	char *tv_format_names[ARRAY_SIZE(tv_modes)];
- 	int i, initial_mode = 0;
- 
- 	if ((I915_READ(TV_CTL) & TV_FUSE_STATE_MASK) == TV_FUSE_STATE_DISABLED)
-@@ -1661,15 +1659,15 @@ intel_tv_init(struct drm_device *dev)
- 	drm_connector_init(dev, connector, &intel_tv_connector_funcs,
- 			   DRM_MODE_CONNECTOR_SVIDEO);
- 
--	drm_encoder_init(dev, &intel_encoder->enc, &intel_tv_enc_funcs,
-+	drm_encoder_init(dev, &intel_encoder->base, &intel_tv_enc_funcs,
- 			 DRM_MODE_ENCODER_TVDAC);
- 
--	drm_mode_connector_attach_encoder(&intel_connector->base, &intel_encoder->enc);
-+	intel_connector_attach_encoder(intel_connector, intel_encoder);
- 	intel_encoder->type = INTEL_OUTPUT_TVOUT;
- 	intel_encoder->crtc_mask = (1 << 0) | (1 << 1);
- 	intel_encoder->clone_mask = (1 << INTEL_TV_CLONE_BIT);
--	intel_encoder->enc.possible_crtcs = ((1 << 0) | (1 << 1));
--	intel_encoder->enc.possible_clones = (1 << INTEL_OUTPUT_TVOUT);
-+	intel_encoder->base.possible_crtcs = ((1 << 0) | (1 << 1));
-+	intel_encoder->base.possible_clones = (1 << INTEL_OUTPUT_TVOUT);
- 	intel_tv->type = DRM_MODE_CONNECTOR_Unknown;
- 
- 	/* BIOS margin values */
-@@ -1678,21 +1676,19 @@ intel_tv_init(struct drm_device *dev)
- 	intel_tv->margin[TV_MARGIN_RIGHT] = 46;
- 	intel_tv->margin[TV_MARGIN_BOTTOM] = 37;
- 
--	intel_tv->tv_format = kstrdup(tv_modes[initial_mode].name, GFP_KERNEL);
-+	intel_tv->tv_format = tv_modes[initial_mode].name;
- 
--	drm_encoder_helper_add(&intel_encoder->enc, &intel_tv_helper_funcs);
-+	drm_encoder_helper_add(&intel_encoder->base, &intel_tv_helper_funcs);
- 	drm_connector_helper_add(connector, &intel_tv_connector_helper_funcs);
- 	connector->interlace_allowed = false;
- 	connector->doublescan_allowed = false;
- 
- 	/* Create TV properties then attach current values */
--	tv_format_names = kmalloc(sizeof(char *) * ARRAY_SIZE(tv_modes),
--				  GFP_KERNEL);
--	if (!tv_format_names)
--		goto out;
- 	for (i = 0; i < ARRAY_SIZE(tv_modes); i++)
--		tv_format_names[i] = tv_modes[i].name;
--	drm_mode_create_tv_properties(dev, ARRAY_SIZE(tv_modes), tv_format_names);
-+		tv_format_names[i] = (char *)tv_modes[i].name;
-+	drm_mode_create_tv_properties(dev,
-+				      ARRAY_SIZE(tv_modes),
-+				      tv_format_names);
- 
- 	drm_connector_attach_property(connector, dev->mode_config.tv_mode_property,
- 				   initial_mode);
-@@ -1708,6 +1704,5 @@ intel_tv_init(struct drm_device *dev)
- 	drm_connector_attach_property(connector,
- 				   dev->mode_config.tv_bottom_margin_property,
- 				   intel_tv->margin[TV_MARGIN_BOTTOM]);
--out:
- 	drm_sysfs_connector_add(connector);
- }
-diff --git a/include/drm/drm_crtc.h b/include/drm/drm_crtc.h
-index 3e5a51a..a4c66f6 100644
---- a/include/drm/drm_crtc.h
-+++ b/include/drm/drm_crtc.h
-@@ -762,6 +762,7 @@ extern int drm_mode_gamma_get_ioctl(struct drm_device *dev,
- extern int drm_mode_gamma_set_ioctl(struct drm_device *dev,
- 				    void *data, struct drm_file *file_priv);
- extern bool drm_detect_hdmi_monitor(struct edid *edid);
-+extern bool drm_detect_monitor_audio(struct edid *edid);
- extern int drm_mode_page_flip_ioctl(struct drm_device *dev,
- 				    void *data, struct drm_file *file_priv);
- extern struct drm_display_mode *drm_cvt_mode(struct drm_device *dev,
-diff --git a/include/drm/drm_dp_helper.h b/include/drm/drm_dp_helper.h
-index a49e791..83a389e 100644
---- a/include/drm/drm_dp_helper.h
-+++ b/include/drm/drm_dp_helper.h
-@@ -23,6 +23,9 @@
- #ifndef _DRM_DP_HELPER_H_
- #define _DRM_DP_HELPER_H_
- 
-+#include <linux/types.h>
-+#include <linux/i2c.h>
-+
- /* From the VESA DisplayPort spec */
- 
- #define AUX_NATIVE_WRITE	0x8
-diff --git a/include/drm/i915_drm.h b/include/drm/i915_drm.h
-index e41c74f..8c641be 100644
---- a/include/drm/i915_drm.h
-+++ b/include/drm/i915_drm.h
-@@ -286,6 +286,7 @@ typedef struct drm_i915_irq_wait {
- #define I915_PARAM_HAS_PAGEFLIPPING	 8
- #define I915_PARAM_HAS_EXECBUF2          9
- #define I915_PARAM_HAS_BSD		 10
-+#define I915_PARAM_HAS_BLT		 11
- 
- typedef struct drm_i915_getparam {
- 	int param;
-@@ -627,8 +628,11 @@ struct drm_i915_gem_execbuffer2 {
- 	__u32 num_cliprects;
- 	/** This is a struct drm_clip_rect *cliprects */
- 	__u64 cliprects_ptr;
-+#define I915_EXEC_RING_MASK              (7<<0)
-+#define I915_EXEC_DEFAULT                (0<<0)
- #define I915_EXEC_RENDER                 (1<<0)
--#define I915_EXEC_BSD                    (1<<1)
-+#define I915_EXEC_BSD                    (2<<0)
-+#define I915_EXEC_BLT                    (3<<0)
- 	__u64 flags;
- 	__u64 rsvd1;
- 	__u64 rsvd2;
-diff --git a/include/drm/intel-gtt.h b/include/drm/intel-gtt.h
-new file mode 100644
-index 0000000..d3c8194
---- /dev/null
-+++ b/include/drm/intel-gtt.h
-@@ -0,0 +1,18 @@
-+/* Common header for intel-gtt.ko and i915.ko */
-+
-+#ifndef _DRM_INTEL_GTT_H
-+#define	_DRM_INTEL_GTT_H
-+struct intel_gtt {
-+	/* Number of stolen gtt entries at the beginning. */
-+	unsigned int gtt_stolen_entries;
-+	/* Total number of gtt entries. */
-+	unsigned int gtt_total_entries;
-+	/* Part of the gtt that is mappable by the cpu, for those chips where
-+	 * this is not the full gtt. */
-+	unsigned int gtt_mappable_entries;
-+};
-+
-+struct intel_gtt *intel_gtt_get(void);
-+
-+#endif
-+
diff --git a/drm-intel-big-hammer.patch b/drm-intel-big-hammer.patch
index 0d7f7f08d..63dc016b1 100644
--- a/drm-intel-big-hammer.patch
+++ b/drm-intel-big-hammer.patch
@@ -1,16 +1,16 @@
 diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
-index 6da2c6d..f508b86 100644
+index 37427e4..08af9db 100644
 --- a/drivers/gpu/drm/i915/i915_gem.c
 +++ b/drivers/gpu/drm/i915/i915_gem.c
-@@ -3738,6 +3738,11 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
- 	if (ret)
- 		goto pre_mutex_err;
+@@ -2553,6 +2553,11 @@ i915_gem_execbuffer(struct drm_device *dev, void *data,
+ 
+ 	mutex_lock(&dev->struct_mutex);
  
 +	/* We don't get the flushing right for these chipsets, use the
-+	 * big hammer for now to avoid random crashiness. */
++	 * big hammer for now to avoid random crashiness. */
 +	if (IS_I85X(dev) || IS_I865G(dev))
 +		wbinvd();
 +
- 	if (dev_priv->mm.suspended) {
- 		mutex_unlock(&dev->struct_mutex);
- 		ret = -EBUSY;
+ 	i915_verify_inactive(dev, __FILE__, __LINE__);
+ 
+ 	if (dev_priv->mm.wedged) {
diff --git a/drm-intel-make-lvds-work.patch b/drm-intel-make-lvds-work.patch
index 6c089b89e..5ca0152da 100644
--- a/drm-intel-make-lvds-work.patch
+++ b/drm-intel-make-lvds-work.patch
@@ -1,20 +1,19 @@
-diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
-index 0cece04..63bbb4b 100644
---- a/drivers/gpu/drm/i915/intel_display.c
-+++ b/drivers/gpu/drm/i915/intel_display.c
-@@ -4580,7 +4580,6 @@ void intel_release_load_detect_pipe(struct intel_encoder *intel_encoder,
- 				    struct drm_connector *connector, int dpms_mode)
+diff -up linux-2.6.33.noarch/drivers/gpu/drm/i915/intel_display.c.orig linux-2.6.33.noarch/drivers/gpu/drm/i915/intel_display.c
+--- linux-2.6.33.noarch/drivers/gpu/drm/i915/intel_display.c.orig	2010-03-31 16:59:39.901995671 -0400
++++ linux-2.6.33.noarch/drivers/gpu/drm/i915/intel_display.c	2010-03-31 17:01:05.416996744 -0400
+@@ -3757,7 +3757,6 @@ struct drm_crtc *intel_get_load_detect_p
+ void intel_release_load_detect_pipe(struct intel_encoder *intel_encoder, int dpms_mode)
  {
- 	struct drm_encoder *encoder = &intel_encoder->base;
+ 	struct drm_encoder *encoder = &intel_encoder->enc;
 -	struct drm_device *dev = encoder->dev;
  	struct drm_crtc *crtc = encoder->crtc;
  	struct drm_encoder_helper_funcs *encoder_funcs = encoder->helper_private;
  	struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private;
-@@ -4590,7 +4589,6 @@ void intel_release_load_detect_pipe(struct intel_encoder *intel_encoder,
- 		connector->encoder = NULL;
+@@ -3767,7 +3766,6 @@ void intel_release_load_detect_pipe(stru
+ 		intel_encoder->base.encoder = NULL;
  		intel_encoder->load_detect_temp = false;
  		crtc->enabled = drm_helper_crtc_in_use(crtc);
 -		drm_helper_disable_unused_functions(dev);
  	}
  
- 	/* Switch crtc and encoder back off if necessary */
+ 	/* Switch crtc and output back off if necessary */
diff --git a/kernel.spec b/kernel.spec
index c90b027a7..8ba295ee1 100644
--- a/kernel.spec
+++ b/kernel.spec
@@ -1226,7 +1226,7 @@ ApplyPatch acpi-update-battery-information-on-notification-0x81.patch
 ApplyPatch linux-2.6-debug-sizeof-structs.patch
 ApplyPatch linux-2.6-debug-nmi-timeout.patch
 ApplyPatch linux-2.6-debug-taint-vm.patch
-###FIX###ApplyPatch linux-2.6-debug-vm-would-have-oomkilled.patch
+ApplyPatch linux-2.6-debug-vm-would-have-oomkilled.patch
 ApplyPatch linux-2.6-debug-always-inline-kzalloc.patch
 
 ApplyPatch debug-tty-print-dev-name.patch
@@ -1302,9 +1302,8 @@ ApplyPatch fix_xen_guest_on_old_EC2.patch
 ApplyOptionalPatch drm-nouveau-updates.patch
 
 # Intel DRM
-#ApplyPatch drm-intel-2.6.37-rc2.patch
-#ApplyPatch drm-intel-big-hammer.patch
-#ApplyPatch drm-intel-make-lvds-work.patch
+ApplyPatch drm-intel-big-hammer.patch
+ApplyPatch drm-intel-make-lvds-work.patch
 ApplyPatch linux-2.6-intel-iommu-igfx.patch
 
 ApplyPatch radeon-mc-vram-map-needs-to-be-gt-pci-aperture.patch
diff --git a/linux-2.6-debug-vm-would-have-oomkilled.patch b/linux-2.6-debug-vm-would-have-oomkilled.patch
index dd8ba3f0a..8bd05ab06 100644
--- a/linux-2.6-debug-vm-would-have-oomkilled.patch
+++ b/linux-2.6-debug-vm-would-have-oomkilled.patch
@@ -1,26 +1,30 @@
-From 03657519851cd180983db4bd0c38eaeed4aa2962 Mon Sep 17 00:00:00 2001
-From: Kyle McMartin <kyle@treachery.i.jkkm.org>
-Date: Mon, 11 Jan 2010 08:25:12 -0500
-Subject: linux-2.6-debug-vm-would-have-oomkilled.patch
+From beb764ac03e52eba1a654afb4273fab1f9de3cff Mon Sep 17 00:00:00 2001
+From: Kyle McMartin <kyle@mcmartin.ca>
+Date: Mon, 29 Nov 2010 20:59:14 -0500
+Subject: [PATCH] linux-2.6-debug-vm-would_have_oomkilled
 
 ---
- kernel/sysctl.c |    8 ++++++++
- mm/oom_kill.c   |    7 +++++++
- 2 files changed, 15 insertions(+), 0 deletions(-)
+ include/linux/oom.h |    1 +
+ kernel/sysctl.c     |    7 +++++++
+ mm/oom_kill.c       |    8 ++++++++
+ 3 files changed, 16 insertions(+), 0 deletions(-)
 
+diff --git a/include/linux/oom.h b/include/linux/oom.h
+index 5e3aa83..79a27b4 100644
+--- a/include/linux/oom.h
++++ b/include/linux/oom.h
+@@ -72,5 +72,6 @@ extern struct task_struct *find_lock_task_mm(struct task_struct *p);
+ extern int sysctl_oom_dump_tasks;
+ extern int sysctl_oom_kill_allocating_task;
+ extern int sysctl_panic_on_oom;
++extern int sysctl_would_have_oomkilled;
+ #endif /* __KERNEL__*/
+ #endif /* _INCLUDE_LINUX_OOM_H */
 diff --git a/kernel/sysctl.c b/kernel/sysctl.c
-index 8a68b24..72a4ff1 100644
+index 5abfa15..a0fed6d 100644
 --- a/kernel/sysctl.c
 +++ b/kernel/sysctl.c
-@@ -71,6 +71,7 @@ extern int sysctl_overcommit_ratio;
- extern int sysctl_panic_on_oom;
- extern int sysctl_oom_kill_allocating_task;
- extern int sysctl_oom_dump_tasks;
-+extern int sysctl_would_have_oomkilled;
- extern int max_threads;
- extern int core_uses_pid;
- extern int suid_dumpable;
-@@ -973,6 +974,13 @@ static struct ctl_table vm_table[] = {
+@@ -1000,6 +1000,13 @@ static struct ctl_table vm_table[] = {
  		.proc_handler	= proc_dointvec,
  	},
  	{
@@ -28,37 +32,38 @@ index 8a68b24..72a4ff1 100644
 +		.data		= &sysctl_would_have_oomkilled,
 +		.maxlen		= sizeof(sysctl_would_have_oomkilled),
 +		.mode		= 0644,
-+		.proc_handler	= &proc_dointvec,
++		.proc_handler	= proc_dointvec,
 +	},
 +	{
  		.procname	= "overcommit_ratio",
  		.data		= &sysctl_overcommit_ratio,
  		.maxlen		= sizeof(sysctl_overcommit_ratio),
 diff --git a/mm/oom_kill.c b/mm/oom_kill.c
-index f52481b..a892f07 100644
+index 7dcca55..281ac39 100644
 --- a/mm/oom_kill.c
 +++ b/mm/oom_kill.c
-@@ -31,6 +31,7 @@
+@@ -35,6 +35,7 @@
  int sysctl_panic_on_oom;
  int sysctl_oom_kill_allocating_task;
- int sysctl_oom_dump_tasks;
+ int sysctl_oom_dump_tasks = 1;
 +int sysctl_would_have_oomkilled;
  static DEFINE_SPINLOCK(zone_scan_lock);
- /* #define DEBUG */
  
-@@ -396,6 +397,12 @@ static void __oom_kill_task(struct task_struct *p, int verbose)
- 		return;
+ #ifdef CONFIG_NUMA
+@@ -477,6 +478,13 @@ static int oom_kill_process(struct task_struct *p, gfp_t gfp_mask, int order,
  	}
  
-+	if (sysctl_would_have_oomkilled == 1) {
-+		printk(KERN_ERR "Would have killed process %d (%s). But continuing instead.\n",
-+				task_pid_nr(p), p->comm);
-+		return;
+ 	task_lock(p);
++	if (sysctl_would_have_oomkilled) {
++		printk(KERN_ERR "%s: would have killed process %d (%s), but continuing instead...\n",
++			__func__, task_pid_nr(p), p->comm);
++		task_unlock(p);
++		return 0;
 +	}
 +
- 	if (verbose)
- 		printk(KERN_ERR "Killed process %d (%s) "
- 		       "vsz:%lukB, anon-rss:%lukB, file-rss:%lukB\n",
+ 	pr_err("%s: Kill process %d (%s) score %d or sacrifice child\n",
+ 		message, task_pid_nr(p), p->comm, points);
+ 	task_unlock(p);
 -- 
-1.6.5.2
+1.7.3.2
 

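Side note on the re-enabled would-have-oomkilled debug patch: when the new sysctl is set, oom_kill_process() only logs the chosen victim and returns instead of killing it. A minimal userspace sketch for flipping the knob at runtime, assuming the ctl_table entry (its .procname is outside the hunk shown above) exposes it as /proc/sys/vm/would_have_oomkilled:

    #include <stdio.h>

    int main(void)
    {
        /* Hypothetical path: the .procname is not visible in the hunk above,
         * so verify it in the applied ctl_table entry before relying on it. */
        FILE *f = fopen("/proc/sys/vm/would_have_oomkilled", "w");

        if (!f) {
            perror("fopen");
            return 1;
        }
        /* 1 = only log "would have killed ..."; 0 = kill as usual. */
        fputs("1\n", f);
        return fclose(f) ? 1 : 0;
    }

Writing 0 back restores normal OOM-killer behaviour.
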
From 1c8b1fa25a61651c0436956da3208c264b9d4930 Mon Sep 17 00:00:00 2001
From: Kyle McMartin <kyle@mcmartin.ca>
Date: Tue, 30 Nov 2010 18:38:47 -0500
Subject: [PATCH 47/56] Walters noticed I missed removing it from the manifest

---
 kernel.spec | 1 -
 1 file changed, 1 deletion(-)

diff --git a/kernel.spec b/kernel.spec
index 8ba295ee1..b8af0b452 100644
--- a/kernel.spec
+++ b/kernel.spec
@@ -653,7 +653,6 @@ Patch1555: fix_xen_guest_on_old_EC2.patch
 
 # nouveau + drm fixes
 Patch1810: drm-nouveau-updates.patch
-Patch1811: drm-intel-2.6.37-rc2.patch
 Patch1819: drm-intel-big-hammer.patch
 # make sure the lvds comes back on lid open
 Patch1825: drm-intel-make-lvds-work.patch

From fe7214079351929fe3145b453996a862e1606f4d Mon Sep 17 00:00:00 2001
From: Kyle McMartin <kyle@mcmartin.ca>
Date: Thu, 2 Dec 2010 10:29:34 -0500
Subject: [PATCH 48/56] kswapd fixes from mmotm

---
 kernel.spec                                   |  14 +-
 ...counter-threshold-when-memory-is-low.patch | 389 ++++++++++++++++++
 ...back-for-adjusting-percpu-thresholds.patch | 167 ++++++++
 3 files changed, 569 insertions(+), 1 deletion(-)
 create mode 100644 mm-page-allocator-adjust-the-per-cpu-counter-threshold-when-memory-is-low.patch
 create mode 100644 mm-vmstat-use-a-single-setter-function-and-callback-for-adjusting-percpu-thresholds.patch

diff --git a/kernel.spec b/kernel.spec
index b8af0b452..c8bbe54b7 100644
--- a/kernel.spec
+++ b/kernel.spec
@@ -51,7 +51,7 @@ Summary: The Linux kernel
 # For non-released -rc kernels, this will be prepended with "0.", so
 # for example a 3 here will become 0.3
 #
-%global baserelease 10
+%global baserelease 11
 %global fedora_build %{baserelease}
 
 # base_sublevel is the kernel version we're starting with and patching
@@ -753,6 +753,9 @@ Patch12415: tty-dont-allow-reopen-when-ldisc-is-changing.patch
 Patch12416: tty-ldisc-fix-open-flag-handling.patch
 Patch12417: tty-open-hangup-race-fixup.patch
 
+Patch12420: mm-page-allocator-adjust-the-per-cpu-counter-threshold-when-memory-is-low.patch
+Patch12421: mm-vmstat-use-a-single-setter-function-and-callback-for-adjusting-percpu-thresholds.patch
+
 %endif
 
 BuildRoot: %{_tmppath}/kernel-%{KVERREL}-root
@@ -1408,6 +1411,10 @@ ApplyPatch tty-dont-allow-reopen-when-ldisc-is-changing.patch
 ApplyPatch tty-ldisc-fix-open-flag-handling.patch
 ApplyPatch tty-open-hangup-race-fixup.patch
 
+# backport some fixes for kswapd from mmotm, rhbz#649694
+ApplyPatch mm-page-allocator-adjust-the-per-cpu-counter-threshold-when-memory-is-low.patch
+ApplyPatch mm-vmstat-use-a-single-setter-function-and-callback-for-adjusting-percpu-thresholds.patch
+
 # END OF PATCH APPLICATIONS
 
 %endif
@@ -2021,6 +2028,11 @@ fi
 #                 ||     ||
 
 %changelog
+* Thu Dec 02 2010 Kyle McMartin <kyle@redhat.com> 2.6.36.1-11
+- Grab some of Mel's fixes from -mmotm to hopefully sort out #649694.
+  They've been tested by a few on that bug on 2.6.35, but let's push
+  it out to a bigger audience.
+
 * Mon Nov 29 2010 Kyle McMartin <kyle@redhat.com>
 - PNP: log PNP resources, as we do for PCI [c1f3f281]
   should help us debug resource conflicts (requested by bjorn.)
diff --git a/mm-page-allocator-adjust-the-per-cpu-counter-threshold-when-memory-is-low.patch b/mm-page-allocator-adjust-the-per-cpu-counter-threshold-when-memory-is-low.patch
new file mode 100644
index 000000000..561c5897e
--- /dev/null
+++ b/mm-page-allocator-adjust-the-per-cpu-counter-threshold-when-memory-is-low.patch
@@ -0,0 +1,389 @@
+From df43fae25437d7bc7dfff72599c1e825038b67cf Mon Sep 17 00:00:00 2001
+From: Mel Gorman <mel@csn.ul.ie>
+Date: Wed, 24 Nov 2010 22:18:23 -0500
+Subject: [PATCH 1/2] mm: page allocator: Adjust the per-cpu counter threshold when memory is low
+
+Commit aa45484 ("calculate a better estimate of NR_FREE_PAGES when memory
+is low") noted that watermarks were based on the vmstat NR_FREE_PAGES.  To
+avoid synchronization overhead, these counters are maintained on a per-cpu
+basis and drained both periodically and when the per-cpu delta is above a
+threshold.  On large CPU systems, the difference between the estimate and
+real value of NR_FREE_PAGES can be very high.  The system can get into a
+case where pages are allocated far below the min watermark potentially
+causing livelock issues.  The commit solved the problem by taking a better
+reading of NR_FREE_PAGES when memory was low.
+
+Unfortunately, as reported by Shaohua Li, this accurate reading can consume a
+large amount of CPU time on systems with many sockets due to cache line
+bouncing.  This patch takes a different approach.  For large machines
+where counter drift might be unsafe and while kswapd is awake, the per-cpu
+thresholds for the target pgdat are reduced to limit the level of drift to
+what should be a safe level.  This incurs a performance penalty in heavy
+memory pressure by a factor that depends on the workload and the machine
+but the machine should function correctly without accidentally exhausting
+all memory on a node.  There is an additional cost when kswapd wakes and
+sleeps but the event is not expected to be frequent - in Shaohua's test
+case, there was one recorded sleep and wake event at least.
+
+To ensure that kswapd wakes up, a safe version of zone_watermark_ok() is
+introduced that takes a more accurate reading of NR_FREE_PAGES when called
+from wakeup_kswapd, when deciding whether it is really safe to go back to
+sleep in sleeping_prematurely() and when deciding if a zone is really
+balanced or not in balance_pgdat().  We are still using an expensive
+function but limiting how often it is called.
+
+When the test case is reproduced, the time spent in the watermark
+functions is reduced.  The following report is on the percentage of time
+spent cumulatively spent in the functions zone_nr_free_pages(),
+zone_watermark_ok(), __zone_watermark_ok(), zone_watermark_ok_safe(),
+zone_page_state_snapshot(), zone_page_state().
+
+vanilla                      11.6615%
+disable-threshold            0.2584%
+
+Reported-by: Shaohua Li <shaohua.li@intel.com>
+Signed-off-by: Mel Gorman <mel@csn.ul.ie>
+Reviewed-by: Christoph Lameter <cl@linux.com>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+[[http://userweb.kernel.org/~akpm/mmotm/broken-out/mm-page-allocator-adjust-the-per-cpu-counter-threshold-when-memory-is-low.patch]]
+---
+ include/linux/mmzone.h |   10 ++-----
+ include/linux/vmstat.h |    5 +++
+ mm/mmzone.c            |   21 ---------------
+ mm/page_alloc.c        |   35 +++++++++++++++++++-----
+ mm/vmscan.c            |   23 +++++++++-------
+ mm/vmstat.c            |   68 +++++++++++++++++++++++++++++++++++++++++++++++-
+ 6 files changed, 115 insertions(+), 47 deletions(-)
+
+diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
+index 3984c4e..8d789d7 100644
+--- a/include/linux/mmzone.h
++++ b/include/linux/mmzone.h
+@@ -448,12 +448,6 @@ static inline int zone_is_oom_locked(const struct zone *zone)
+ 	return test_bit(ZONE_OOM_LOCKED, &zone->flags);
+ }
+ 
+-#ifdef CONFIG_SMP
+-unsigned long zone_nr_free_pages(struct zone *zone);
+-#else
+-#define zone_nr_free_pages(zone) zone_page_state(zone, NR_FREE_PAGES)
+-#endif /* CONFIG_SMP */
+-
+ /*
+  * The "priority" of VM scanning is how much of the queues we will scan in one
+  * go. A value of 12 for DEF_PRIORITY implies that we will scan 1/4096th of the
+@@ -651,7 +645,9 @@ typedef struct pglist_data {
+ extern struct mutex zonelists_mutex;
+ void build_all_zonelists(void *data);
+ void wakeup_kswapd(struct zone *zone, int order);
+-int zone_watermark_ok(struct zone *z, int order, unsigned long mark,
++bool zone_watermark_ok(struct zone *z, int order, unsigned long mark,
++		int classzone_idx, int alloc_flags);
++bool zone_watermark_ok_safe(struct zone *z, int order, unsigned long mark,
+ 		int classzone_idx, int alloc_flags);
+ enum memmap_context {
+ 	MEMMAP_EARLY,
+diff --git a/include/linux/vmstat.h b/include/linux/vmstat.h
+index eaaea37..e4cc21c 100644
+--- a/include/linux/vmstat.h
++++ b/include/linux/vmstat.h
+@@ -254,6 +254,8 @@ extern void dec_zone_state(struct zone *, enum zone_stat_item);
+ extern void __dec_zone_state(struct zone *, enum zone_stat_item);
+ 
+ void refresh_cpu_vm_stats(int);
++void reduce_pgdat_percpu_threshold(pg_data_t *pgdat);
++void restore_pgdat_percpu_threshold(pg_data_t *pgdat);
+ #else /* CONFIG_SMP */
+ 
+ /*
+@@ -298,6 +300,9 @@ static inline void __dec_zone_page_state(struct page *page,
+ #define dec_zone_page_state __dec_zone_page_state
+ #define mod_zone_page_state __mod_zone_page_state
+ 
++static inline void reduce_pgdat_percpu_threshold(pg_data_t *pgdat) { }
++static inline void restore_pgdat_percpu_threshold(pg_data_t *pgdat) { }
++
+ static inline void refresh_cpu_vm_stats(int cpu) { }
+ #endif
+ 
+diff --git a/mm/mmzone.c b/mm/mmzone.c
+index e35bfb8..f5b7d17 100644
+--- a/mm/mmzone.c
++++ b/mm/mmzone.c
+@@ -87,24 +87,3 @@ int memmap_valid_within(unsigned long pfn,
+ 	return 1;
+ }
+ #endif /* CONFIG_ARCH_HAS_HOLES_MEMORYMODEL */
+-
+-#ifdef CONFIG_SMP
+-/* Called when a more accurate view of NR_FREE_PAGES is needed */
+-unsigned long zone_nr_free_pages(struct zone *zone)
+-{
+-	unsigned long nr_free_pages = zone_page_state(zone, NR_FREE_PAGES);
+-
+-	/*
+-	 * While kswapd is awake, it is considered the zone is under some
+-	 * memory pressure. Under pressure, there is a risk that
+-	 * per-cpu-counter-drift will allow the min watermark to be breached
+-	 * potentially causing a live-lock. While kswapd is awake and
+-	 * free pages are low, get a better estimate for free pages
+-	 */
+-	if (nr_free_pages < zone->percpu_drift_mark &&
+-			!waitqueue_active(&zone->zone_pgdat->kswapd_wait))
+-		return zone_page_state_snapshot(zone, NR_FREE_PAGES);
+-
+-	return nr_free_pages;
+-}
+-#endif /* CONFIG_SMP */
+diff --git a/mm/page_alloc.c b/mm/page_alloc.c
+index f12ad18..0286150 100644
+--- a/mm/page_alloc.c
++++ b/mm/page_alloc.c
+@@ -1454,24 +1454,24 @@ static inline int should_fail_alloc_page(gfp_t gfp_mask, unsigned int order)
+ #endif /* CONFIG_FAIL_PAGE_ALLOC */
+ 
+ /*
+- * Return 1 if free pages are above 'mark'. This takes into account the order
++ * Return true if free pages are above 'mark'. This takes into account the order
+  * of the allocation.
+  */
+-int zone_watermark_ok(struct zone *z, int order, unsigned long mark,
+-		      int classzone_idx, int alloc_flags)
++static bool __zone_watermark_ok(struct zone *z, int order, unsigned long mark,
++		      int classzone_idx, int alloc_flags, long free_pages)
+ {
+ 	/* free_pages my go negative - that's OK */
+ 	long min = mark;
+-	long free_pages = zone_nr_free_pages(z) - (1 << order) + 1;
+ 	int o;
+ 
++	free_pages -= (1 << order) + 1;
+ 	if (alloc_flags & ALLOC_HIGH)
+ 		min -= min / 2;
+ 	if (alloc_flags & ALLOC_HARDER)
+ 		min -= min / 4;
+ 
+ 	if (free_pages <= min + z->lowmem_reserve[classzone_idx])
+-		return 0;
++		return false;
+ 	for (o = 0; o < order; o++) {
+ 		/* At the next order, this order's pages become unavailable */
+ 		free_pages -= z->free_area[o].nr_free << o;
+@@ -1480,9 +1480,28 @@ int zone_watermark_ok(struct zone *z, int order, unsigned long mark,
+ 		min >>= 1;
+ 
+ 		if (free_pages <= min)
+-			return 0;
++			return false;
+ 	}
+-	return 1;
++	return true;
++}
++
++bool zone_watermark_ok(struct zone *z, int order, unsigned long mark,
++		      int classzone_idx, int alloc_flags)
++{
++	return __zone_watermark_ok(z, order, mark, classzone_idx, alloc_flags,
++					zone_page_state(z, NR_FREE_PAGES));
++}
++
++bool zone_watermark_ok_safe(struct zone *z, int order, unsigned long mark,
++		      int classzone_idx, int alloc_flags)
++{
++	long free_pages = zone_page_state(z, NR_FREE_PAGES);
++
++	if (z->percpu_drift_mark && free_pages < z->percpu_drift_mark)
++		free_pages = zone_page_state_snapshot(z, NR_FREE_PAGES);
++
++	return __zone_watermark_ok(z, order, mark, classzone_idx, alloc_flags,
++								free_pages);
+ }
+ 
+ #ifdef CONFIG_NUMA
+@@ -2436,7 +2455,7 @@ void show_free_areas(void)
+ 			" all_unreclaimable? %s"
+ 			"\n",
+ 			zone->name,
+-			K(zone_nr_free_pages(zone)),
++			K(zone_page_state(zone, NR_FREE_PAGES)),
+ 			K(min_wmark_pages(zone)),
+ 			K(low_wmark_pages(zone)),
+ 			K(high_wmark_pages(zone)),
+diff --git a/mm/vmscan.c b/mm/vmscan.c
+index c5dfabf..3e71cb1 100644
+--- a/mm/vmscan.c
++++ b/mm/vmscan.c
+@@ -2082,7 +2082,7 @@ static int sleeping_prematurely(pg_data_t *pgdat, int order, long remaining)
+ 		if (zone->all_unreclaimable)
+ 			continue;
+ 
+-		if (!zone_watermark_ok(zone, order, high_wmark_pages(zone),
++		if (!zone_watermark_ok_safe(zone, order, high_wmark_pages(zone),
+ 								0, 0))
+ 			return 1;
+ 	}
+@@ -2169,7 +2169,7 @@ loop_again:
+ 				shrink_active_list(SWAP_CLUSTER_MAX, zone,
+ 							&sc, priority, 0);
+ 
+-			if (!zone_watermark_ok(zone, order,
++			if (!zone_watermark_ok_safe(zone, order,
+ 					high_wmark_pages(zone), 0, 0)) {
+ 				end_zone = i;
+ 				break;
+@@ -2215,7 +2215,7 @@ loop_again:
+ 			 * We put equal pressure on every zone, unless one
+ 			 * zone has way too many pages free already.
+ 			 */
+-			if (!zone_watermark_ok(zone, order,
++			if (!zone_watermark_ok_safe(zone, order,
+ 					8*high_wmark_pages(zone), end_zone, 0))
+ 				shrink_zone(priority, zone, &sc);
+ 			reclaim_state->reclaimed_slab = 0;
+@@ -2236,7 +2236,7 @@ loop_again:
+ 			    total_scanned > sc.nr_reclaimed + sc.nr_reclaimed / 2)
+ 				sc.may_writepage = 1;
+ 
+-			if (!zone_watermark_ok(zone, order,
++			if (!zone_watermark_ok_safe(zone, order,
+ 					high_wmark_pages(zone), end_zone, 0)) {
+ 				all_zones_ok = 0;
+ 				/*
+@@ -2244,7 +2244,7 @@ loop_again:
+ 				 * means that we have a GFP_ATOMIC allocation
+ 				 * failure risk. Hurry up!
+ 				 */
+-				if (!zone_watermark_ok(zone, order,
++				if (!zone_watermark_ok_safe(zone, order,
+ 					    min_wmark_pages(zone), end_zone, 0))
+ 					has_under_min_watermark_zone = 1;
+ 			}
+@@ -2378,7 +2378,9 @@ static int kswapd(void *p)
+ 				 */
+ 				if (!sleeping_prematurely(pgdat, order, remaining)) {
+ 					trace_mm_vmscan_kswapd_sleep(pgdat->node_id);
++					restore_pgdat_percpu_threshold(pgdat);
+ 					schedule();
++					reduce_pgdat_percpu_threshold(pgdat);
+ 				} else {
+ 					if (remaining)
+ 						count_vm_event(KSWAPD_LOW_WMARK_HIT_QUICKLY);
+@@ -2417,16 +2419,17 @@ void wakeup_kswapd(struct zone *zone, int order)
+ 	if (!populated_zone(zone))
+ 		return;
+ 
+-	pgdat = zone->zone_pgdat;
+-	if (zone_watermark_ok(zone, order, low_wmark_pages(zone), 0, 0))
++	if (!cpuset_zone_allowed_hardwall(zone, GFP_KERNEL))
+ 		return;
++	pgdat = zone->zone_pgdat;
+ 	if (pgdat->kswapd_max_order < order)
+ 		pgdat->kswapd_max_order = order;
+-	trace_mm_vmscan_wakeup_kswapd(pgdat->node_id, zone_idx(zone), order);
+-	if (!cpuset_zone_allowed_hardwall(zone, GFP_KERNEL))
+-		return;
+ 	if (!waitqueue_active(&pgdat->kswapd_wait))
+ 		return;
++	if (zone_watermark_ok_safe(zone, order, low_wmark_pages(zone), 0, 0))
++		return;
++
++	trace_mm_vmscan_wakeup_kswapd(pgdat->node_id, zone_idx(zone), order);
+ 	wake_up_interruptible(&pgdat->kswapd_wait);
+ }
+ 
+diff --git a/mm/vmstat.c b/mm/vmstat.c
+index 355a9e6..4d7faeb 100644
+--- a/mm/vmstat.c
++++ b/mm/vmstat.c
+@@ -81,6 +81,30 @@ EXPORT_SYMBOL(vm_stat);
+ 
+ #ifdef CONFIG_SMP
+ 
++static int calculate_pressure_threshold(struct zone *zone)
++{
++	int threshold;
++	int watermark_distance;
++
++	/*
++	 * As vmstats are not up to date, there is drift between the estimated
++	 * and real values. For high thresholds and a high number of CPUs, it
++	 * is possible for the min watermark to be breached while the estimated
++	 * value looks fine. The pressure threshold is a reduced value such
++	 * that even the maximum amount of drift will not accidentally breach
++	 * the min watermark
++	 */
++	watermark_distance = low_wmark_pages(zone) - min_wmark_pages(zone);
++	threshold = max(1, (int)(watermark_distance / num_online_cpus()));
++
++	/*
++	 * Maximum threshold is 125
++	 */
++	threshold = min(125, threshold);
++
++	return threshold;
++}
++
+ static int calculate_threshold(struct zone *zone)
+ {
+ 	int threshold;
+@@ -159,6 +183,48 @@ static void refresh_zone_stat_thresholds(void)
+ 	}
+ }
+ 
++void reduce_pgdat_percpu_threshold(pg_data_t *pgdat)
++{
++	struct zone *zone;
++	int cpu;
++	int threshold;
++	int i;
++
++	get_online_cpus();
++	for (i = 0; i < pgdat->nr_zones; i++) {
++		zone = &pgdat->node_zones[i];
++		if (!zone->percpu_drift_mark)
++			continue;
++
++		threshold = calculate_pressure_threshold(zone);
++		for_each_online_cpu(cpu)
++			per_cpu_ptr(zone->pageset, cpu)->stat_threshold
++							= threshold;
++	}
++	put_online_cpus();
++}
++
++void restore_pgdat_percpu_threshold(pg_data_t *pgdat)
++{
++	struct zone *zone;
++	int cpu;
++	int threshold;
++	int i;
++
++	get_online_cpus();
++	for (i = 0; i < pgdat->nr_zones; i++) {
++		zone = &pgdat->node_zones[i];
++		if (!zone->percpu_drift_mark)
++			continue;
++
++		threshold = calculate_threshold(zone);
++		for_each_online_cpu(cpu)
++			per_cpu_ptr(zone->pageset, cpu)->stat_threshold
++							= threshold;
++	}
++	put_online_cpus();
++}
++
+ /*
+  * For use when we know that interrupts are disabled.
+  */
+@@ -826,7 +892,7 @@ static void zoneinfo_show_print(struct seq_file *m, pg_data_t *pgdat,
+ 		   "\n        scanned  %lu"
+ 		   "\n        spanned  %lu"
+ 		   "\n        present  %lu",
+-		   zone_nr_free_pages(zone),
++		   zone_page_state(zone, NR_FREE_PAGES),
+ 		   min_wmark_pages(zone),
+ 		   low_wmark_pages(zone),
+ 		   high_wmark_pages(zone),
+-- 
+1.7.3.2
+
diff --git a/mm-vmstat-use-a-single-setter-function-and-callback-for-adjusting-percpu-thresholds.patch b/mm-vmstat-use-a-single-setter-function-and-callback-for-adjusting-percpu-thresholds.patch
new file mode 100644
index 000000000..058b1399a
--- /dev/null
+++ b/mm-vmstat-use-a-single-setter-function-and-callback-for-adjusting-percpu-thresholds.patch
@@ -0,0 +1,167 @@
+From 82e3d4969144377d13da97d511e849e8cf3e6dcc Mon Sep 17 00:00:00 2001
+From: Mel Gorman <mel@csn.ul.ie>
+Date: Wed, 24 Nov 2010 22:24:24 -0500
+Subject: [PATCH 2/2] mm: vmstat: Use a single setter function and callback for adjusting percpu thresholds
+
+reduce_pgdat_percpu_threshold() and restore_pgdat_percpu_threshold() exist
+to adjust the per-cpu vmstat thresholds while kswapd is awake to avoid
+errors due to counter drift.  The functions duplicate some code so this
+patch replaces them with a single set_pgdat_percpu_threshold() that takes
+a callback function to calculate the desired threshold as a parameter.
+
+Signed-off-by: Mel Gorman <mel@csn.ul.ie>
+Reviewed-by: Christoph Lameter <cl@linux.com>
+Reviewed-by: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+[the various mmotm patches updating this were rolled up. --kyle]
+[[http://userweb.kernel.org/~akpm/mmotm/broken-out/mm-vmstat-use-a-single-setter-function-and-callback-for-adjusting-percpu-thresholds-fix-set_pgdat_percpu_threshold-dont-use-for_each_online_cpu.patch]]
+---
+ include/linux/vmstat.h |   10 ++++++----
+ mm/vmscan.c            |   19 +++++++++++++++++--
+ mm/vmstat.c            |   36 +++++++-----------------------------
+ 3 files changed, 30 insertions(+), 35 deletions(-)
+
+diff --git a/include/linux/vmstat.h b/include/linux/vmstat.h
+index e4cc21c..833e676 100644
+--- a/include/linux/vmstat.h
++++ b/include/linux/vmstat.h
+@@ -254,8 +254,11 @@ extern void dec_zone_state(struct zone *, enum zone_stat_item);
+ extern void __dec_zone_state(struct zone *, enum zone_stat_item);
+ 
+ void refresh_cpu_vm_stats(int);
+-void reduce_pgdat_percpu_threshold(pg_data_t *pgdat);
+-void restore_pgdat_percpu_threshold(pg_data_t *pgdat);
++
++int calculate_pressure_threshold(struct zone *zone);
++int calculate_normal_threshold(struct zone *zone);
++void set_pgdat_percpu_threshold(pg_data_t *pgdat,
++				int (*calculate_pressure)(struct zone *));
+ #else /* CONFIG_SMP */
+ 
+ /*
+@@ -300,8 +303,7 @@ static inline void __dec_zone_page_state(struct page *page,
+ #define dec_zone_page_state __dec_zone_page_state
+ #define mod_zone_page_state __mod_zone_page_state
+ 
+-static inline void reduce_pgdat_percpu_threshold(pg_data_t *pgdat) { }
+-static inline void restore_pgdat_percpu_threshold(pg_data_t *pgdat) { }
++#define set_pgdat_percpu_threshold(pgdat, callback) { }
+ 
+ static inline void refresh_cpu_vm_stats(int cpu) { }
+ #endif
+diff --git a/mm/vmscan.c b/mm/vmscan.c
+index 3e71cb1..ba39948 100644
+--- a/mm/vmscan.c
++++ b/mm/vmscan.c
+@@ -2378,9 +2378,24 @@ static int kswapd(void *p)
+ 				 */
+ 				if (!sleeping_prematurely(pgdat, order, remaining)) {
+ 					trace_mm_vmscan_kswapd_sleep(pgdat->node_id);
+-					restore_pgdat_percpu_threshold(pgdat);
++
++					/*
++					 * vmstat counters are not perfectly
++					 * accurate and the estimated value
++					 * for counters such as NR_FREE_PAGES
++					 * can deviate from the true value by
++					 * nr_online_cpus * threshold. To
++					 * avoid the zone watermarks being
++					 * breached while under pressure, we
++					 * reduce the per-cpu vmstat threshold
++					 * while kswapd is awake and restore
++					 * them before going back to sleep.
++					 */
++					set_pgdat_percpu_threshold(pgdat,
++						calculate_normal_threshold);
+ 					schedule();
+-					reduce_pgdat_percpu_threshold(pgdat);
++					set_pgdat_percpu_threshold(pgdat,
++						calculate_pressure_threshold);
+ 				} else {
+ 					if (remaining)
+ 						count_vm_event(KSWAPD_LOW_WMARK_HIT_QUICKLY);
+diff --git a/mm/vmstat.c b/mm/vmstat.c
+index 4d7faeb..511c2c0 100644
+--- a/mm/vmstat.c
++++ b/mm/vmstat.c
+@@ -81,7 +81,7 @@ EXPORT_SYMBOL(vm_stat);
+ 
+ #ifdef CONFIG_SMP
+ 
+-static int calculate_pressure_threshold(struct zone *zone)
++int calculate_pressure_threshold(struct zone *zone)
+ {
+ 	int threshold;
+ 	int watermark_distance;
+@@ -105,7 +105,7 @@ static int calculate_pressure_threshold(struct zone *zone)
+ 	return threshold;
+ }
+ 
+-static int calculate_threshold(struct zone *zone)
++int calculate_normal_threshold(struct zone *zone)
+ {
+ 	int threshold;
+ 	int mem;	/* memory in 128 MB units */
+@@ -164,7 +164,7 @@ static void refresh_zone_stat_thresholds(void)
+ 	for_each_populated_zone(zone) {
+ 		unsigned long max_drift, tolerate_drift;
+ 
+-		threshold = calculate_threshold(zone);
++		threshold = calculate_normal_threshold(zone);
+ 
+ 		for_each_online_cpu(cpu)
+ 			per_cpu_ptr(zone->pageset, cpu)->stat_threshold
+@@ -183,46 +183,24 @@ static void refresh_zone_stat_thresholds(void)
+ 	}
+ }
+ 
+-void reduce_pgdat_percpu_threshold(pg_data_t *pgdat)
++void set_pgdat_percpu_threshold(pg_data_t *pgdat,
++				int (*calculate_pressure)(struct zone *))
+ {
+ 	struct zone *zone;
+ 	int cpu;
+ 	int threshold;
+ 	int i;
+ 
+-	get_online_cpus();
+-	for (i = 0; i < pgdat->nr_zones; i++) {
+-		zone = &pgdat->node_zones[i];
+-		if (!zone->percpu_drift_mark)
+-			continue;
+-
+-		threshold = calculate_pressure_threshold(zone);
+-		for_each_online_cpu(cpu)
+-			per_cpu_ptr(zone->pageset, cpu)->stat_threshold
+-							= threshold;
+-	}
+-	put_online_cpus();
+-}
+-
+-void restore_pgdat_percpu_threshold(pg_data_t *pgdat)
+-{
+-	struct zone *zone;
+-	int cpu;
+-	int threshold;
+-	int i;
+-
+-	get_online_cpus();
+ 	for (i = 0; i < pgdat->nr_zones; i++) {
+ 		zone = &pgdat->node_zones[i];
+ 		if (!zone->percpu_drift_mark)
+ 			continue;
+ 
+-		threshold = calculate_threshold(zone);
+-		for_each_online_cpu(cpu)
++		threshold = (*calculate_pressure)(zone);
++		for_each_possible_cpu(cpu)
+ 			per_cpu_ptr(zone->pageset, cpu)->stat_threshold
+ 							= threshold;
+ 	}
+-	put_online_cpus();
+ }
+ 
+ /*
+-- 
+1.7.3.2
+

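The key idea in the first mmotm patch above is that, while kswapd is awake, the per-cpu vmstat thresholds are clamped so the worst-case drift (threshold times online CPUs) can no longer silently breach the min watermark. A standalone sketch of that clamp, with made-up watermark numbers rather than real zone data:

    #include <stdio.h>

    /* Mirrors calculate_pressure_threshold(): bound per-cpu drift by the
     * distance between the low and min watermarks, capped at 125. */
    static int pressure_threshold(long low_wmark, long min_wmark, int online_cpus)
    {
        long distance = low_wmark - min_wmark;
        int threshold = (int)(distance / online_cpus);

        if (threshold < 1)
            threshold = 1;
        if (threshold > 125)
            threshold = 125;
        return threshold;
    }

    int main(void)
    {
        /* Example numbers only: a 4096-page low-to-min watermark gap. */
        printf("64 cpus -> %d\n", pressure_threshold(8192, 4096, 64)); /* 64  */
        printf(" 2 cpus -> %d\n", pressure_threshold(8192, 4096, 2));  /* 125 */
        return 0;
    }

With many CPUs the per-CPU threshold shrinks, so the combined drift of all CPUs stays inside the low-to-min watermark gap while kswapd is working.
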
From aa5692446536161e0d9bcc123b5450d26d7d60c6 Mon Sep 17 00:00:00 2001
From: Kyle McMartin <kyle@redhat.com>
Date: Fri, 3 Dec 2010 07:09:39 -0500
Subject: [PATCH 49/56] enable hpilo.ko on x86_64

---
 config-x86_64-generic | 2 ++
 kernel.spec           | 5 ++++-
 2 files changed, 6 insertions(+), 1 deletion(-)

diff --git a/config-x86_64-generic b/config-x86_64-generic
index 3d8d67b06..b2e4a2531 100644
--- a/config-x86_64-generic
+++ b/config-x86_64-generic
@@ -419,3 +419,5 @@ CONFIG_XEN_PLATFORM_PCI=m
 # CONFIG_ACPI_QUICKSTART is not set
 CONFIG_IDEAPAD_ACPI=m
 CONFIG_INTEL_IPS=m
+
+CONFIG_HP_ILO=m
diff --git a/kernel.spec b/kernel.spec
index c8bbe54b7..83f60616d 100644
--- a/kernel.spec
+++ b/kernel.spec
@@ -2028,7 +2028,10 @@ fi
 #                 ||     ||
 
 %changelog
-* Thu Dec 02 2010 Kyle McMartin <kyle@redhat.com> 2.6.36.1-11
+* Fri Dec 03 2010 Kyle McMartin <kyle@redhat.com> 2.6.36.1-11
+- Enable HP ILO on x86_64. (#571329)
+
+* Thu Dec 02 2010 Kyle McMartin <kyle@redhat.com>
 - Grab some of Mel's fixes from -mmotm to hopefully sort out #649694.
   They've been tested by a few on that bug on 2.6.35, but let's push
   it out to a bigger audience.

From a67a18274b175bfa5b43183096d46710270b44d0 Mon Sep 17 00:00:00 2001
From: Kyle McMartin <kyle@redhat.com>
Date: Sat, 4 Dec 2010 12:17:29 -0500
Subject: [PATCH 50/56] enable cplus_demangle in perf

---
 kernel.spec | 6 +++++-
 1 file changed, 5 insertions(+), 1 deletion(-)

diff --git a/kernel.spec b/kernel.spec
index 83f60616d..c9bd53beb 100644
--- a/kernel.spec
+++ b/kernel.spec
@@ -1722,7 +1722,7 @@ BuildKernel %make_target %kernel_image smp
 %endif
 
 %global perf_make \
-  make %{?_smp_mflags} -C tools/perf -s V=1 NO_DEMANGLE=1 prefix=%{_prefix}
+  make %{?_smp_mflags} -C tools/perf -s V=1 HAVE_CPLUS_DEMANGLE=1 prefix=%{_prefix}
 %if %{with_perf}
 %{perf_make} all
 %{perf_make} man || %{doc_build_fail}
@@ -2028,6 +2028,10 @@ fi
 #                 ||     ||
 
 %changelog
+* Sat Dec 04 2010 Kyle McMartin <kyle@redhat.com>
+- Enable C++ symbol demangling with perf by linking against libiberty.a,
+  which is LGPL2.
+
 * Fri Dec 03 2010 Kyle McMartin <kyle@redhat.com> 2.6.36.1-11
 - Enable HP ILO on x86_64. (#571329)
 

From 214d0ae2ff118f73730e4c4532d5c2b758dd4d28 Mon Sep 17 00:00:00 2001
From: Kyle McMartin <kyle@redhat.com>
Date: Mon, 6 Dec 2010 10:41:38 -0500
Subject: [PATCH 51/56] don't prune kernel-devel in the %with_headers install
 section

Commit c1bdfc8b broke the build on s390, which is a headers-only arch and
so has nothing installed in /usr/src/kernels. Since kernel-devel is only
produced when there's a BuildKernel, just prune '*.cmd' when doing
BuildKernel for the same net result.
---
 kernel.spec | 7 ++++---
 1 file changed, 4 insertions(+), 3 deletions(-)

diff --git a/kernel.spec b/kernel.spec
index c9bd53beb..599cc82a3 100644
--- a/kernel.spec
+++ b/kernel.spec
@@ -1685,9 +1685,12 @@ BuildKernel() {
     done
 
     # Move the devel headers out of the root file system
-    mkdir -p $RPM_BUILD_ROOT/usr/src/kernels
+    mkdir -p $RPM_BUILD_ROOT/$DevelDir
     mv $RPM_BUILD_ROOT/lib/modules/$KernelVer/build $RPM_BUILD_ROOT/$DevelDir
     ln -sf ../../..$DevelDir $RPM_BUILD_ROOT/lib/modules/$KernelVer/build
+
+    # prune junk from kernel-devel
+    find $RPM_BUILD_ROOT/usr/src/kernels -name ".*.cmd" -exec rm -f {} \;
 }
 
 ###
@@ -1807,8 +1810,6 @@ find $RPM_BUILD_ROOT/usr/include \
      \( -name .install -o -name .check -o \
      	-name ..install.cmd -o -name ..check.cmd \) | xargs rm -f
 
-find $RPM_BUILD_ROOT/usr/src/kernels -name ".*.cmd" -exec rm -f {} \;
-
 # glibc provides scsi headers for itself, for now
 rm -rf $RPM_BUILD_ROOT/usr/include/scsi
 rm -f $RPM_BUILD_ROOT/usr/include/asm*/atomic.h

From 817eb060dede5b4bfc5f5d136a3c2b853e9e5f93 Mon Sep 17 00:00:00 2001
From: Kyle McMartin <kyle@redhat.com>
Date: Mon, 6 Dec 2010 12:49:34 -0500
Subject: [PATCH 52/56] fix thinko in previous commit, resulting in
 /usr/src/kernels/$ver/build/

---
 kernel.spec | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/kernel.spec b/kernel.spec
index 599cc82a3..9597e73fb 100644
--- a/kernel.spec
+++ b/kernel.spec
@@ -1685,7 +1685,7 @@ BuildKernel() {
     done
 
     # Move the devel headers out of the root file system
-    mkdir -p $RPM_BUILD_ROOT/$DevelDir
+    mkdir -p $RPM_BUILD_ROOT/usr/src/kernels
     mv $RPM_BUILD_ROOT/lib/modules/$KernelVer/build $RPM_BUILD_ROOT/$DevelDir
     ln -sf ../../..$DevelDir $RPM_BUILD_ROOT/lib/modules/$KernelVer/build
 

From fc2adbbad5667fb20d95ae72d6b58d09fa62fc32 Mon Sep 17 00:00:00 2001
From: Kyle McMartin <kyle@redhat.com>
Date: Wed, 8 Dec 2010 17:14:54 -0500
Subject: [PATCH 53/56] ameliorate the load average accounting issues with
 dynticks (rhbz#650934)

---
 kernel.spec                                   |   9 +
 sched-cure-more-NO_HZ-load-average-woes.patch | 273 ++++++++++++++++++
 2 files changed, 282 insertions(+)
 create mode 100644 sched-cure-more-NO_HZ-load-average-woes.patch

diff --git a/kernel.spec b/kernel.spec
index 9597e73fb..cc60e7e7f 100644
--- a/kernel.spec
+++ b/kernel.spec
@@ -756,6 +756,8 @@ Patch12417: tty-open-hangup-race-fixup.patch
 Patch12420: mm-page-allocator-adjust-the-per-cpu-counter-threshold-when-memory-is-low.patch
 Patch12421: mm-vmstat-use-a-single-setter-function-and-callback-for-adjusting-percpu-thresholds.patch
 
+Patch12430: sched-cure-more-NO_HZ-load-average-woes.patch
+
 %endif
 
 BuildRoot: %{_tmppath}/kernel-%{KVERREL}-root
@@ -1415,6 +1417,9 @@ ApplyPatch tty-open-hangup-race-fixup.patch
 ApplyPatch mm-page-allocator-adjust-the-per-cpu-counter-threshold-when-memory-is-low.patch
 ApplyPatch mm-vmstat-use-a-single-setter-function-and-callback-for-adjusting-percpu-thresholds.patch
 
+# rhbz#650934
+ApplyPatch sched-cure-more-NO_HZ-load-average-woes.patch
+
 # END OF PATCH APPLICATIONS
 
 %endif
@@ -2029,6 +2034,10 @@ fi
 #                 ||     ||
 
 %changelog
+* Wed Dec 08 2010 Kyle McMartin <kyle@redhat.com>
+- sched-cure-more-NO_HZ-load-average-woes.patch: fix some of the complaints
+  in 2.6.35+ about load average with dynticks. (rhbz#650934)
+
 * Sat Dec 04 2010 Kyle McMartin <kyle@redhat.com>
 - Enable C++ symbol demangling with perf by linking against libiberty.a,
   which is LGPL2.
diff --git a/sched-cure-more-NO_HZ-load-average-woes.patch b/sched-cure-more-NO_HZ-load-average-woes.patch
new file mode 100644
index 000000000..a4053c3b4
--- /dev/null
+++ b/sched-cure-more-NO_HZ-load-average-woes.patch
@@ -0,0 +1,273 @@
+From bounces.tip@hpa.at.zytor.com Wed Dec  8 15:40:48 2010
+From: tip-bot for Peter Zijlstra <a.p.zijlstra@chello.nl>
+In-Reply-To: <1291129145.32004.874.camel@laptop>
+References: <1291129145.32004.874.camel@laptop>
+Subject: [tip:sched/urgent] sched: Cure more NO_HZ load average woes
+Message-ID: <tip-0f004f5a696a9434b7214d0d3cbd0525ee77d428@git.kernel.org>
+Git-Commit-ID: 0f004f5a696a9434b7214d0d3cbd0525ee77d428
+
+Commit-ID:  0f004f5a696a9434b7214d0d3cbd0525ee77d428
+Gitweb:     http://git.kernel.org/tip/0f004f5a696a9434b7214d0d3cbd0525ee77d428
+Author:     Peter Zijlstra <a.p.zijlstra@chello.nl>
+AuthorDate: Tue, 30 Nov 2010 19:48:45 +0100
+Committer:  Ingo Molnar <mingo@elte.hu>
+CommitDate: Wed, 8 Dec 2010 20:15:04 +0100
+
+sched: Cure more NO_HZ load average woes
+
+There's a long-running regression that proved difficult to fix and
+which is hitting certain people and is rather annoying in its effects.
+
+Damien reported that after 74f5187ac8 (sched: Cure load average vs
+NO_HZ woes) his load average is unnaturally high, he also noted that
+even with that patch reverted the load average numbers are not
+correct.
+
+The problem is that the previous patch only solved half the NO_HZ
+problem, it addressed the part of going into NO_HZ mode, not of
+coming out of NO_HZ mode. This patch implements that missing half.
+
+When coming out of NO_HZ mode there are two important things to take
+care of:
+
+ - Folding the pending idle delta into the global active count.
+ - Correctly aging the averages for the idle-duration.
+
+So with this patch the NO_HZ interaction should be complete and
+behaviour between CONFIG_NO_HZ=[yn] should be equivalent.
+
+Furthermore, this patch slightly changes the load average computation
+by adding a rounding term to the fixed point multiplication.
+
+Reported-by: Damien Wyart <damien.wyart@free.fr>
+Reported-by: Tim McGrath <tmhikaru@gmail.com>
+Tested-by: Damien Wyart <damien.wyart@free.fr>
+Tested-by: Orion Poplawski <orion@cora.nwra.com>
+Tested-by: Kyle McMartin <kyle@mcmartin.ca>
+Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
+Cc: stable@kernel.org
+Cc: Chase Douglas <chase.douglas@canonical.com>
+LKML-Reference: <1291129145.32004.874.camel@laptop>
+Signed-off-by: Ingo Molnar <mingo@elte.hu>
+---
+ include/linux/sched.h |    2 +-
+ kernel/sched.c        |  150 +++++++++++++++++++++++++++++++++++++++++++++----
+ kernel/timer.c        |    2 +-
+ 3 files changed, 141 insertions(+), 13 deletions(-)
+
+diff --git a/include/linux/sched.h b/include/linux/sched.h
+index 2c79e92..2238745 100644
+--- a/include/linux/sched.h
++++ b/include/linux/sched.h
+@@ -143,7 +143,7 @@ extern unsigned long nr_iowait_cpu(int cpu);
+ extern unsigned long this_cpu_load(void);
+ 
+ 
+-extern void calc_global_load(void);
++extern void calc_global_load(unsigned long ticks);
+ 
+ extern unsigned long get_parent_ip(unsigned long addr);
+ 
+diff --git a/kernel/sched.c b/kernel/sched.c
+index dc91a4d..6b7c26a 100644
+--- a/kernel/sched.c
++++ b/kernel/sched.c
+@@ -3119,6 +3119,15 @@ static long calc_load_fold_active(struct rq *this_rq)
+ 	return delta;
+ }
+ 
++static unsigned long
++calc_load(unsigned long load, unsigned long exp, unsigned long active)
++{
++	load *= exp;
++	load += active * (FIXED_1 - exp);
++	load += 1UL << (FSHIFT - 1);
++	return load >> FSHIFT;
++}
++
+ #ifdef CONFIG_NO_HZ
+ /*
+  * For NO_HZ we delay the active fold to the next LOAD_FREQ update.
+@@ -3148,6 +3157,128 @@ static long calc_load_fold_idle(void)
+ 
+ 	return delta;
+ }
++
++/**
++ * fixed_power_int - compute: x^n, in O(log n) time
++ *
++ * @x:         base of the power
++ * @frac_bits: fractional bits of @x
++ * @n:         power to raise @x to.
++ *
++ * By exploiting the relation between the definition of the natural power
++ * function: x^n := x*x*...*x (x multiplied by itself for n times), and
++ * the binary encoding of numbers used by computers: n := \Sum n_i * 2^i,
++ * (where: n_i \elem {0, 1}, the binary vector representing n),
++ * we find: x^n := x^(\Sum n_i * 2^i) := \Prod x^(n_i * 2^i), which is
++ * of course trivially computable in O(log_2 n), the length of our binary
++ * vector.
++ */
++static unsigned long
++fixed_power_int(unsigned long x, unsigned int frac_bits, unsigned int n)
++{
++	unsigned long result = 1UL << frac_bits;
++
++	if (n) for (;;) {
++		if (n & 1) {
++			result *= x;
++			result += 1UL << (frac_bits - 1);
++			result >>= frac_bits;
++		}
++		n >>= 1;
++		if (!n)
++			break;
++		x *= x;
++		x += 1UL << (frac_bits - 1);
++		x >>= frac_bits;
++	}
++
++	return result;
++}
++
++/*
++ * a1 = a0 * e + a * (1 - e)
++ *
++ * a2 = a1 * e + a * (1 - e)
++ *    = (a0 * e + a * (1 - e)) * e + a * (1 - e)
++ *    = a0 * e^2 + a * (1 - e) * (1 + e)
++ *
++ * a3 = a2 * e + a * (1 - e)
++ *    = (a0 * e^2 + a * (1 - e) * (1 + e)) * e + a * (1 - e)
++ *    = a0 * e^3 + a * (1 - e) * (1 + e + e^2)
++ *
++ *  ...
++ *
++ * an = a0 * e^n + a * (1 - e) * (1 + e + ... + e^n-1) [1]
++ *    = a0 * e^n + a * (1 - e) * (1 - e^n)/(1 - e)
++ *    = a0 * e^n + a * (1 - e^n)
++ *
++ * [1] application of the geometric series:
++ *
++ *              n         1 - x^(n+1)
++ *     S_n := \Sum x^i = -------------
++ *             i=0          1 - x
++ */
++static unsigned long
++calc_load_n(unsigned long load, unsigned long exp,
++	    unsigned long active, unsigned int n)
++{
++
++	return calc_load(load, fixed_power_int(exp, FSHIFT, n), active);
++}
++
++/*
++ * NO_HZ can leave us missing all per-cpu ticks calling
++ * calc_load_account_active(), but since an idle CPU folds its delta into
++ * calc_load_tasks_idle per calc_load_account_idle(), all we need to do is fold
++ * in the pending idle delta if our idle period crossed a load cycle boundary.
++ *
++ * Once we've updated the global active value, we need to apply the exponential
++ * weights adjusted to the number of cycles missed.
++ */
++static void calc_global_nohz(unsigned long ticks)
++{
++	long delta, active, n;
++
++	if (time_before(jiffies, calc_load_update))
++		return;
++
++	/*
++	 * If we crossed a calc_load_update boundary, make sure to fold
++	 * any pending idle changes, the respective CPUs might have
++	 * missed the tick driven calc_load_account_active() update
++	 * due to NO_HZ.
++	 */
++	delta = calc_load_fold_idle();
++	if (delta)
++		atomic_long_add(delta, &calc_load_tasks);
++
++	/*
++	 * If we were idle for multiple load cycles, apply them.
++	 */
++	if (ticks >= LOAD_FREQ) {
++		n = ticks / LOAD_FREQ;
++
++		active = atomic_long_read(&calc_load_tasks);
++		active = active > 0 ? active * FIXED_1 : 0;
++
++		avenrun[0] = calc_load_n(avenrun[0], EXP_1, active, n);
++		avenrun[1] = calc_load_n(avenrun[1], EXP_5, active, n);
++		avenrun[2] = calc_load_n(avenrun[2], EXP_15, active, n);
++
++		calc_load_update += n * LOAD_FREQ;
++	}
++
++	/*
++	 * Its possible the remainder of the above division also crosses
++	 * a LOAD_FREQ period, the regular check in calc_global_load()
++	 * which comes after this will take care of that.
++	 *
++	 * Consider us being 11 ticks before a cycle completion, and us
++	 * sleeping for 4*LOAD_FREQ + 22 ticks, then the above code will
++	 * age us 4 cycles, and the test in calc_global_load() will
++	 * pick up the final one.
++	 */
++}
+ #else
+ static void calc_load_account_idle(struct rq *this_rq)
+ {
+@@ -3157,6 +3288,10 @@ static inline long calc_load_fold_idle(void)
+ {
+ 	return 0;
+ }
++
++static void calc_global_nohz(unsigned long ticks)
++{
++}
+ #endif
+ 
+ /**
+@@ -3174,24 +3309,17 @@ void get_avenrun(unsigned long *loads, unsigned long offset, int shift)
+ 	loads[2] = (avenrun[2] + offset) << shift;
+ }
+ 
+-static unsigned long
+-calc_load(unsigned long load, unsigned long exp, unsigned long active)
+-{
+-	load *= exp;
+-	load += active * (FIXED_1 - exp);
+-	return load >> FSHIFT;
+-}
+-
+ /*
+  * calc_load - update the avenrun load estimates 10 ticks after the
+  * CPUs have updated calc_load_tasks.
+  */
+-void calc_global_load(void)
++void calc_global_load(unsigned long ticks)
+ {
+-	unsigned long upd = calc_load_update + 10;
+ 	long active;
+ 
+-	if (time_before(jiffies, upd))
++	calc_global_nohz(ticks);
++
++	if (time_before(jiffies, calc_load_update + 10))
+ 		return;
+ 
+ 	active = atomic_long_read(&calc_load_tasks);
+diff --git a/kernel/timer.c b/kernel/timer.c
+index 68a9ae7..7bd715f 100644
+--- a/kernel/timer.c
++++ b/kernel/timer.c
+@@ -1319,7 +1319,7 @@ void do_timer(unsigned long ticks)
+ {
+ 	jiffies_64 += ticks;
+ 	update_wall_time();
+-	calc_global_load();
++	calc_global_load(ticks);
+ }
+ 
+ #ifdef __ARCH_WANT_SYS_ALARM
+

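The half of the fix that ages the averages over a long idle period leans on the geometric-series identity spelled out in the comments above: applying the per-cycle decay n times collapses into one update with e^n. A userspace sketch that checks this, using the kernel's usual avenrun constants (FSHIFT=11, FIXED_1=2048, EXP_1=1884), which are not part of the patch itself:

    #include <stdio.h>

    #define FSHIFT  11
    #define FIXED_1 (1UL << FSHIFT)
    #define EXP_1   1884UL          /* 1/exp(5sec/1min) in fixed point */

    /* Same single-cycle update as the patch, including the rounding term. */
    static unsigned long calc_load(unsigned long load, unsigned long exp,
                                   unsigned long active)
    {
        load *= exp;
        load += active * (FIXED_1 - exp);
        load += 1UL << (FSHIFT - 1);
        return load >> FSHIFT;
    }

    /* x^n in O(log n) fixed-point steps, like fixed_power_int(). */
    static unsigned long fixed_power(unsigned long x, unsigned int frac_bits,
                                     unsigned int n)
    {
        unsigned long result = 1UL << frac_bits;

        while (n) {
            if (n & 1) {
                result *= x;
                result += 1UL << (frac_bits - 1);
                result >>= frac_bits;
            }
            n >>= 1;
            x *= x;
            x += 1UL << (frac_bits - 1);
            x >>= frac_bits;
        }
        return result;
    }

    int main(void)
    {
        unsigned long a = 4 * FIXED_1;  /* 1-minute load of 4.00 when going idle */
        unsigned long step = a;
        unsigned long once;
        unsigned int n = 20;            /* 20 missed LOAD_FREQ cycles */
        unsigned int i;

        for (i = 0; i < n; i++)
            step = calc_load(step, EXP_1, 0);

        /* One-shot aging, the way calc_global_nohz() uses calc_load_n(). */
        once = calc_load(a, fixed_power(EXP_1, FSHIFT, n), 0);

        /* Both print roughly 0.75; they agree to within fixed-point rounding. */
        printf("step-by-step: %lu.%02lu\n",
               step >> FSHIFT, (step & (FIXED_1 - 1)) * 100 / FIXED_1);
        printf("one-shot    : %lu.%02lu\n",
               once >> FSHIFT, (once & (FIXED_1 - 1)) * 100 / FIXED_1);
        return 0;
    }

Because the two paths agree, calc_global_nohz() can fold an arbitrarily long idle stretch in O(log n) instead of replaying every missed tick.
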
From ee99a68c51ebd08409e45b63796f3c971c27e3d6 Mon Sep 17 00:00:00 2001
From: Kyle McMartin <kyle@redhat.com>
Date: Wed, 8 Dec 2010 18:21:02 -0500
Subject: [PATCH 54/56] update to stable 2.6.36.2-rc1

---
 .gitignore                                    |   1 +
 ...ery-information-on-notification-0x81.patch |  35 +--
 ...altek-handle-unset-external-amp-bits.patch |  12 -
 ipc-shm-fix-information-leak-to-user.patch    |  30 ---
 ipc-zero-struct-memory-for-compat-fns.patch   |  73 ------
 kernel.spec                                   |  60 ++---
 linux-2.6-rcu-sched-warning.patch             | 215 ------------------
 pnpacpi-cope-with-invalid-device-ids.patch    |  85 -------
 ...vram-map-needs-to-be-gt-pci-aperture.patch |  32 ---
 sources                                       |   1 +
 ...-allow-reopen-when-ldisc-is-changing.patch |  84 -------
 tty-ldisc-fix-open-flag-handling.patch        |  54 -----
 tty-open-hangup-race-fixup.patch              |  76 -------
 tty-restore-tty_ldisc_wait_idle.patch         | 117 ----------
 xhci_hcd-suspend-resume.patch                 |   6 +-
 15 files changed, 34 insertions(+), 847 deletions(-)
 delete mode 100644 hda_realtek-handle-unset-external-amp-bits.patch
 delete mode 100644 ipc-shm-fix-information-leak-to-user.patch
 delete mode 100644 ipc-zero-struct-memory-for-compat-fns.patch
 delete mode 100644 linux-2.6-rcu-sched-warning.patch
 delete mode 100644 pnpacpi-cope-with-invalid-device-ids.patch
 delete mode 100644 radeon-mc-vram-map-needs-to-be-gt-pci-aperture.patch
 delete mode 100644 tty-dont-allow-reopen-when-ldisc-is-changing.patch
 delete mode 100644 tty-ldisc-fix-open-flag-handling.patch
 delete mode 100644 tty-open-hangup-race-fixup.patch
 delete mode 100644 tty-restore-tty_ldisc_wait_idle.patch

diff --git a/.gitignore b/.gitignore
index 688b3b38b..8ee6321e0 100644
--- a/.gitignore
+++ b/.gitignore
@@ -4,3 +4,4 @@ clog
 *.rpm
 kernel-2.6.*/
 /patch-2.6.36.1.bz2
+/patch-2.6.36.2-rc1.bz2
diff --git a/acpi-update-battery-information-on-notification-0x81.patch b/acpi-update-battery-information-on-notification-0x81.patch
index 5fe34b9c0..3a8d619ce 100644
--- a/acpi-update-battery-information-on-notification-0x81.patch
+++ b/acpi-update-battery-information-on-notification-0x81.patch
@@ -12,14 +12,14 @@ and recreate the battery in order to populate the fields correctly.
 
 Signed-off-by: Matthew Garrett <mjg@redhat.com>
 ---
- drivers/acpi/battery.c |   22 +++++++++++++++++-----
- 1 files changed, 17 insertions(+), 5 deletions(-)
+ drivers/acpi/battery.c |   20 +++++++++++++++-----
+ 1 files changed, 15 insertions(+), 5 deletions(-)
 
 diff --git a/drivers/acpi/battery.c b/drivers/acpi/battery.c
-index dc58402..69638c4 100644
+index 95649d3..2a774a8 100644
 --- a/drivers/acpi/battery.c
 +++ b/drivers/acpi/battery.c
-@@ -562,9 +562,10 @@ static void acpi_battery_quirks(struct acpi_battery *battery)
+@@ -605,9 +605,10 @@ static void acpi_battery_quirks2(struct acpi_battery *battery)
  	}
  }
  
@@ -31,24 +31,22 @@ index dc58402..69638c4 100644
  	result = acpi_battery_get_status(battery);
  	if (result)
  		return result;
-@@ -587,6 +588,16 @@ static int acpi_battery_update(struct acpi_battery *battery)
- 	if (!battery->bat.dev)
+@@ -628,6 +629,14 @@ static int acpi_battery_update(struct acpi_battery *battery)
  		sysfs_add_battery(battery);
- #endif
+ 	result = acpi_battery_get_state(battery);
+ 	acpi_battery_quirks2(battery);
 +	if (get_info) {
 +		acpi_battery_get_info(battery);
-+#ifdef CONFIG_ACPI_SYSFS_POWER
 +		if (old_power_unit != battery->power_unit) {
 +			/* The battery has changed its reporting units */
 +			sysfs_remove_battery(battery);
 +			sysfs_add_battery(battery);
 +		}
-+#endif
 +	}
- 	return acpi_battery_get_state(battery);
+ 	return result;
  }
  
-@@ -762,7 +773,7 @@ static print_func acpi_print_funcs[ACPI_BATTERY_NUMFILES] = {
+@@ -803,7 +812,7 @@ static print_func acpi_print_funcs[ACPI_BATTERY_NUMFILES] = {
  static int acpi_battery_read(int fid, struct seq_file *seq)
  {
  	struct acpi_battery *battery = seq->private;
@@ -57,7 +55,7 @@ index dc58402..69638c4 100644
  	return acpi_print_funcs[fid](seq, result);
  }
  
-@@ -877,7 +888,8 @@ static void acpi_battery_notify(struct acpi_device *device, u32 event)
+@@ -914,7 +923,8 @@ static void acpi_battery_notify(struct acpi_device *device, u32 event)
  #ifdef CONFIG_ACPI_SYSFS_POWER
  	old = battery->bat.dev;
  #endif
@@ -67,7 +65,7 @@ index dc58402..69638c4 100644
  	acpi_bus_generate_proc_event(device, event,
  				     acpi_battery_present(battery));
  	acpi_bus_generate_netlink_event(device->pnp.device_class,
-@@ -908,7 +920,7 @@ static int acpi_battery_add(struct acpi_device *device)
+@@ -943,7 +953,7 @@ static int acpi_battery_add(struct acpi_device *device)
  	if (ACPI_SUCCESS(acpi_get_handle(battery->device->handle,
  			"_BIX", &handle)))
  		set_bit(ACPI_BATTERY_XINFO_PRESENT, &battery->flags);
@@ -76,7 +74,7 @@ index dc58402..69638c4 100644
  #ifdef CONFIG_ACPI_PROCFS_POWER
  	result = acpi_battery_add_fs(device);
  #endif
-@@ -951,7 +963,7 @@ static int acpi_battery_resume(struct acpi_device *device)
+@@ -984,7 +994,7 @@ static int acpi_battery_resume(struct acpi_device *device)
  		return -EINVAL;
  	battery = acpi_driver_data(device);
  	battery->update_time = 0;
@@ -85,12 +83,3 @@ index dc58402..69638c4 100644
  	return 0;
  }
  
--- 
-1.7.2.1
-
---
-To unsubscribe from this list: send the line "unsubscribe linux-kernel" in
-the body of a message to majordomo@vger.kernel.org
-More majordomo info at  http://vger.kernel.org/majordomo-info.html
-Please read the FAQ at  http://www.tux.org/lkml/
-
diff --git a/hda_realtek-handle-unset-external-amp-bits.patch b/hda_realtek-handle-unset-external-amp-bits.patch
deleted file mode 100644
index 8519fd32a..000000000
--- a/hda_realtek-handle-unset-external-amp-bits.patch
+++ /dev/null
@@ -1,12 +0,0 @@
-diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
-index 0ac6aed..53f503d 100644
---- a/sound/pci/hda/patch_realtek.c
-+++ b/sound/pci/hda/patch_realtek.c
-@@ -1614,6 +1614,7 @@ do_sku:
- 		spec->init_amp = ALC_INIT_GPIO3;
- 		break;
- 	case 5:
-+	default:
- 		spec->init_amp = ALC_INIT_DEFAULT;
- 		break;
- 	}
diff --git a/ipc-shm-fix-information-leak-to-user.patch b/ipc-shm-fix-information-leak-to-user.patch
deleted file mode 100644
index b23ad439d..000000000
--- a/ipc-shm-fix-information-leak-to-user.patch
+++ /dev/null
@@ -1,30 +0,0 @@
-From: Vasiliy Kulikov <segooon@gmail.com>
-Date: Sat, 30 Oct 2010 14:22:49 +0000 (+0400)
-Subject: ipc: shm: fix information leak to userland
-X-Git-Tag: v2.6.37-rc1~24
-X-Git-Url: http://git.kernel.org/?p=linux%2Fkernel%2Fgit%2Ftorvalds%2Flinux-2.6.git;a=commitdiff_plain;h=3af54c9bd9e6f14f896aac1bb0e8405ae0bc7a44
-
-ipc: shm: fix information leak to userland
-
-The shmid_ds structure is copied to userland with shm_unused{,2,3}
-fields unitialized.  It leads to leaking of contents of kernel stack
-memory.
-
-Signed-off-by: Vasiliy Kulikov <segooon@gmail.com>
-Acked-by: Al Viro <viro@ZenIV.linux.org.uk>
-Cc: stable@kernel.org
-Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
----
-
-diff --git a/ipc/shm.c b/ipc/shm.c
-index fd658a1..7d3bb22 100644
---- a/ipc/shm.c
-+++ b/ipc/shm.c
-@@ -479,6 +479,7 @@ static inline unsigned long copy_shmid_to_user(void __user *buf, struct shmid64_
- 	    {
- 		struct shmid_ds out;
- 
-+		memset(&out, 0, sizeof(out));
- 		ipc64_perm_to_ipc_perm(&in->shm_perm, &out.shm_perm);
- 		out.shm_segsz	= in->shm_segsz;
- 		out.shm_atime	= in->shm_atime;
diff --git a/ipc-zero-struct-memory-for-compat-fns.patch b/ipc-zero-struct-memory-for-compat-fns.patch
deleted file mode 100644
index b682c7df0..000000000
--- a/ipc-zero-struct-memory-for-compat-fns.patch
+++ /dev/null
@@ -1,73 +0,0 @@
-From: Dan Rosenberg <drosenberg@vsecurity.com>
-Date: Wed, 27 Oct 2010 22:34:17 +0000 (-0700)
-Subject: ipc: initialize structure memory to zero for compat functions
-X-Git-Tag: v2.6.37-rc1~85^2~50
-X-Git-Url: http://git.kernel.org/?p=linux%2Fkernel%2Fgit%2Ftorvalds%2Flinux-2.6.git;a=commitdiff_plain;h=03145beb455cf5c20a761e8451e30b8a74ba58d9
-
-ipc: initialize structure memory to zero for compat functions
-
-This takes care of leaking uninitialized kernel stack memory to
-userspace from non-zeroed fields in structs in compat ipc functions.
-
-Signed-off-by: Dan Rosenberg <drosenberg@vsecurity.com>
-Cc: Manfred Spraul <manfred@colorfullife.com>
-Cc: Arnd Bergmann <arnd@arndb.de>
-Cc: <stable@kernel.org>
-Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
-Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
----
-
-diff --git a/ipc/compat.c b/ipc/compat.c
-index 9dc2c7d..845a287 100644
---- a/ipc/compat.c
-+++ b/ipc/compat.c
-@@ -241,6 +241,8 @@ long compat_sys_semctl(int first, int second, int third, void __user *uptr)
- 	struct semid64_ds __user *up64;
- 	int version = compat_ipc_parse_version(&third);
- 
-+	memset(&s64, 0, sizeof(s64));
-+
- 	if (!uptr)
- 		return -EINVAL;
- 	if (get_user(pad, (u32 __user *) uptr))
-@@ -421,6 +423,8 @@ long compat_sys_msgctl(int first, int second, void __user *uptr)
- 	int version = compat_ipc_parse_version(&second);
- 	void __user *p;
- 
-+	memset(&m64, 0, sizeof(m64));
-+
- 	switch (second & (~IPC_64)) {
- 	case IPC_INFO:
- 	case IPC_RMID:
-@@ -594,6 +598,8 @@ long compat_sys_shmctl(int first, int second, void __user *uptr)
- 	int err, err2;
- 	int version = compat_ipc_parse_version(&second);
- 
-+	memset(&s64, 0, sizeof(s64));
-+
- 	switch (second & (~IPC_64)) {
- 	case IPC_RMID:
- 	case SHM_LOCK:
-diff --git a/ipc/compat_mq.c b/ipc/compat_mq.c
-index d8d1e9f..380ea4f 100644
---- a/ipc/compat_mq.c
-+++ b/ipc/compat_mq.c
-@@ -53,6 +53,9 @@ asmlinkage long compat_sys_mq_open(const char __user *u_name,
- 	void __user *p = NULL;
- 	if (u_attr && oflag & O_CREAT) {
- 		struct mq_attr attr;
-+
-+		memset(&attr, 0, sizeof(attr));
-+
- 		p = compat_alloc_user_space(sizeof(attr));
- 		if (get_compat_mq_attr(&attr, u_attr) ||
- 		    copy_to_user(p, &attr, sizeof(attr)))
-@@ -127,6 +130,8 @@ asmlinkage long compat_sys_mq_getsetattr(mqd_t mqdes,
- 	struct mq_attr __user *p = compat_alloc_user_space(2 * sizeof(*p));
- 	long ret;
- 
-+	memset(&mqstat, 0, sizeof(mqstat));
-+
- 	if (u_mqstat) {
- 		if (get_compat_mq_attr(&mqstat, u_mqstat) ||
- 		    copy_to_user(p, &mqstat, sizeof(mqstat)))
diff --git a/kernel.spec b/kernel.spec
index cc60e7e7f..8b30de6c4 100644
--- a/kernel.spec
+++ b/kernel.spec
@@ -51,7 +51,7 @@ Summary: The Linux kernel
 # For non-released -rc kernels, this will be prepended with "0.", so
 # for example a 3 here will become 0.3
 #
-%global baserelease 11
+%global baserelease 12
 %global fedora_build %{baserelease}
 
 # base_sublevel is the kernel version we're starting with and patching
@@ -63,9 +63,9 @@ Summary: The Linux kernel
 %if 0%{?released_kernel}
 
 # Do we have a -stable update to apply?
-%define stable_update 1
+%define stable_update 2
 # Is it a -stable RC?
-%define stable_rc 0
+%define stable_rc 1
 # Set rpm version accordingly
 %if 0%{?stable_update}
 %define stablerev .%{stable_update}
@@ -658,8 +658,6 @@ Patch1819: drm-intel-big-hammer.patch
 Patch1825: drm-intel-make-lvds-work.patch
 Patch1900: linux-2.6-intel-iommu-igfx.patch
 
-Patch1920: radeon-mc-vram-map-needs-to-be-gt-pci-aperture.patch
-
 # linux1394 git patches
 Patch2200: linux-2.6-firewire-git-update.patch
 Patch2201: linux-2.6-firewire-git-pending.patch
@@ -680,8 +678,6 @@ Patch2912: linux-2.6-v4l-dvb-ir-core-update.patch
 #Patch2916: lirc-staging-2.6.36-fixes.patch
 Patch2917: hdpvr-ir-enable.patch
 
-Patch3000: linux-2.6-rcu-sched-warning.patch
-
 # fs fixes
 
 # NFSv4
@@ -724,35 +720,22 @@ Patch12300: btusb-macbookpro-7-1.patch
 Patch12301: btusb-macbookpro-6-2.patch
 Patch12304: add-macbookair3-ids.patch
 
-Patch12302: pnpacpi-cope-with-invalid-device-ids.patch
-
 Patch12303: dmar-disable-when-ricoh-multifunction.patch
 
 Patch12305: xhci_hcd-suspend-resume.patch
 
-Patch12307: tty-restore-tty_ldisc_wait_idle.patch
-
 Patch12308: fix-i8k-inline-asm.patch
 
-Patch12400: ipc-zero-struct-memory-for-compat-fns.patch
-Patch12401: ipc-shm-fix-information-leak-to-user.patch
-
 Patch12405: inet_diag-make-sure-we-run-the-same-bytecode-we-audited.patch
 Patch12408: netlink-make-nlmsg_find_attr-take-a-const-ptr.patch
 
 Patch12406: posix-cpu-timers-workaround-to-suppress-problems-with-mt-exec.patch
 
-Patch12407: hda_realtek-handle-unset-external-amp-bits.patch
-
 Patch12410: tty-make-tiocgicount-a-handler.patch
 Patch12411: tty-icount-changeover-for-other-main-devices.patch
 
 Patch12413: tpm-autodetect-itpm-devices.patch
 
-Patch12415: tty-dont-allow-reopen-when-ldisc-is-changing.patch
-Patch12416: tty-ldisc-fix-open-flag-handling.patch
-Patch12417: tty-open-hangup-race-fixup.patch
-
 Patch12420: mm-page-allocator-adjust-the-per-cpu-counter-threshold-when-memory-is-low.patch
 Patch12421: mm-vmstat-use-a-single-setter-function-and-callback-for-adjusting-percpu-thresholds.patch
 
@@ -1310,8 +1293,6 @@ ApplyPatch drm-intel-big-hammer.patch
 ApplyPatch drm-intel-make-lvds-work.patch
 ApplyPatch linux-2.6-intel-iommu-igfx.patch
 
-ApplyPatch radeon-mc-vram-map-needs-to-be-gt-pci-aperture.patch
-
 # linux1394 git patches
 #ApplyPatch linux-2.6-firewire-git-update.patch
 #ApplyOptionalPatch linux-2.6-firewire-git-pending.patch
@@ -1334,9 +1315,6 @@ ApplyOptionalPatch linux-2.6-v4l-dvb-experimental.patch
 # enable IR receiver on Hauppauge HD PVR (v4l-dvb merge pending)
 ApplyPatch hdpvr-ir-enable.patch
 
-# silence another rcu_reference warning
-ApplyPatch linux-2.6-rcu-sched-warning.patch
-
 # Patches headed upstream
 ApplyPatch disable-i8042-check-on-apple-mac.patch
 
@@ -1375,24 +1353,13 @@ ApplyPatch btusb-macbookpro-7-1.patch
 ApplyPatch btusb-macbookpro-6-2.patch
 ApplyPatch add-macbookair3-ids.patch
 
-# rhbz#641468
-ApplyPatch pnpacpi-cope-with-invalid-device-ids.patch
-
 # rhbz#605888
 ApplyPatch dmar-disable-when-ricoh-multifunction.patch
 
 ApplyPatch xhci_hcd-suspend-resume.patch
 
-ApplyPatch tty-restore-tty_ldisc_wait_idle.patch
-
 ApplyPatch fix-i8k-inline-asm.patch
 
-# rhbz#648658 (CVE-2010-4073)
-ApplyPatch ipc-zero-struct-memory-for-compat-fns.patch
-
-# rhbz#648656 (CVE-2010-4072)
-ApplyPatch ipc-shm-fix-information-leak-to-user.patch
-
 # rhbz#651264 (CVE-2010-3880)
 ApplyPatch inet_diag-make-sure-we-run-the-same-bytecode-we-audited.patch
 ApplyPatch netlink-make-nlmsg_find_attr-take-a-const-ptr.patch
@@ -1400,19 +1367,12 @@ ApplyPatch netlink-make-nlmsg_find_attr-take-a-const-ptr.patch
 # rhbz#656264
 ApplyPatch posix-cpu-timers-workaround-to-suppress-problems-with-mt-exec.patch
 
-# rhbz#657388
-ApplyPatch hda_realtek-handle-unset-external-amp-bits.patch
-
 # CVE-2010-4077, CVE-2010-4075 (rhbz#648660, #648663)
 ApplyPatch tty-make-tiocgicount-a-handler.patch
 ApplyPatch tty-icount-changeover-for-other-main-devices.patch
 
 ApplyPatch tpm-autodetect-itpm-devices.patch
 
-ApplyPatch tty-dont-allow-reopen-when-ldisc-is-changing.patch
-ApplyPatch tty-ldisc-fix-open-flag-handling.patch
-ApplyPatch tty-open-hangup-race-fixup.patch
-
 # backport some fixes for kswapd from mmotm, rhbz#649694
 ApplyPatch mm-page-allocator-adjust-the-per-cpu-counter-threshold-when-memory-is-low.patch
 ApplyPatch mm-vmstat-use-a-single-setter-function-and-callback-for-adjusting-percpu-thresholds.patch
@@ -2034,6 +1994,20 @@ fi
 #                 ||     ||
 
 %changelog
+* Wed Dec 08 2010 Kyle McMartin <kyle@redhat.com> 2.6.36.2-12.rc1
+- Linux stable 2.6.36.2-rc1
+- Drop patches merged in stable series:
+   tty-dont-allow-reopen-when-ldisc-is-changing.patch
+   tty-ldisc-fix-open-flag-handling.patch
+   tty-open-hangup-race-fixup.patch
+   tty-restore-tty_ldisc_wait_idle.patch
+   hda_realtek-handle-unset-external-amp-bits.patch
+   ipc-shm-fix-information-leak-to-user.patch
+   ipc-zero-struct-memory-for-compat-fns.patch
+   linux-2.6-rcu-sched-warning.patch
+   pnpacpi-cope-with-invalid-device-ids.patch
+   radeon-mc-vram-map-needs-to-be-gt-pci-aperture.patch
+
 * Wed Dec 08 2010 Kyle McMartin <kyle@redhat.com>
 - sched-cure-more-NO_HZ-load-average-woes.patch: fix some of the complaints
   in 2.6.35+ about load average with dynticks. (rhbz#650934)
diff --git a/linux-2.6-rcu-sched-warning.patch b/linux-2.6-rcu-sched-warning.patch
deleted file mode 100644
index ab3ff006b..000000000
--- a/linux-2.6-rcu-sched-warning.patch
+++ /dev/null
@@ -1,215 +0,0 @@
-From davej  Thu Sep 16 11:55:58 2010
-Return-Path: linux-kernel-owner@vger.kernel.org
-X-Spam-Checker-Version: SpamAssassin 3.3.1 (2010-03-16) on gelk
-X-Spam-Level: 
-X-Spam-Status: No, score=-4.2 required=5.0 tests=BAYES_00,RCVD_IN_DNSWL_MED,
-	T_RP_MATCHES_RCVD,UNPARSEABLE_RELAY autolearn=ham version=3.3.1
-Received: from mail.corp.redhat.com [10.5.5.52]
-	by gelk with IMAP (fetchmail-6.3.17)
-	for <davej@localhost> (single-drop); Thu, 16 Sep 2010 11:55:58 -0400 (EDT)
-Received: from zmta02.collab.prod.int.phx2.redhat.com (LHLO
- zmta02.collab.prod.int.phx2.redhat.com) (10.5.5.32) by
- mail04.corp.redhat.com with LMTP; Thu, 16 Sep 2010 11:51:27 -0400 (EDT)
-Received: from localhost (localhost.localdomain [127.0.0.1])
-	by zmta02.collab.prod.int.phx2.redhat.com (Postfix) with ESMTP id 4889C9FC56;
-	Thu, 16 Sep 2010 11:51:27 -0400 (EDT)
-Received: from zmta02.collab.prod.int.phx2.redhat.com ([127.0.0.1])
-	by localhost (zmta02.collab.prod.int.phx2.redhat.com [127.0.0.1]) (amavisd-new, port 10024)
-	with ESMTP id 94mQrmwfCpY4; Thu, 16 Sep 2010 11:51:27 -0400 (EDT)
-Received: from int-mx03.intmail.prod.int.phx2.redhat.com (int-mx03.intmail.prod.int.phx2.redhat.com [10.5.11.16])
-	by zmta02.collab.prod.int.phx2.redhat.com (Postfix) with ESMTP id 0DBDB9FC4B;
-	Thu, 16 Sep 2010 11:51:27 -0400 (EDT)
-Received: from mx1.redhat.com (ext-mx05.extmail.prod.ext.phx2.redhat.com [10.5.110.9])
-	by int-mx03.intmail.prod.int.phx2.redhat.com (8.13.8/8.13.8) with ESMTP id o8GFpQnO003857;
-	Thu, 16 Sep 2010 11:51:26 -0400
-Received: from vger.kernel.org (vger.kernel.org [209.132.180.67])
-	by mx1.redhat.com (8.13.8/8.13.8) with ESMTP id o8GFFCFE031066;
-	Thu, 16 Sep 2010 11:51:17 -0400
-Received: (majordomo@vger.kernel.org) by vger.kernel.org via listexpand
-	id S1755493Ab0IPPvH (ORCPT <rfc822;jasowang@redhat.com> + 41 others);
-	Thu, 16 Sep 2010 11:51:07 -0400
-Received: from casper.infradead.org ([85.118.1.10]:41834 "EHLO
-	casper.infradead.org" rhost-flags-OK-OK-OK-OK) by vger.kernel.org
-	with ESMTP id S1754921Ab0IPPvC convert rfc822-to-8bit (ORCPT
-	<rfc822;linux-kernel@vger.kernel.org>);
-	Thu, 16 Sep 2010 11:51:02 -0400
-Received: from f199130.upc-f.chello.nl ([80.56.199.130] helo=laptop)
-	by casper.infradead.org with esmtpsa (Exim 4.72 #1 (Red Hat Linux))
-	id 1OwGjI-0003VE-Ux; Thu, 16 Sep 2010 15:50:33 +0000
-Received: by laptop (Postfix, from userid 1000)
-	id 6DCDB100AEB1D; Thu, 16 Sep 2010 17:50:32 +0200 (CEST)
-Subject: Re: 2.6.35-stable/ppc64/p7: suspicious rcu_dereference_check()
- usage detected during 2.6.35-stable boot
-From: Peter Zijlstra <peterz@infradead.org>
-To: paulmck@linux.vnet.ibm.com
-Cc: Subrata Modak <subrata@linux.vnet.ibm.com>,
-        linux-kernel <linux-kernel@vger.kernel.org>,
-        Li Zefan <lizf@cn.fujitsu.com>, Linuxppc-dev <Linuxppc-dev@ozlabs.org>,
-        sachinp <sachinp@linux.vnet.ibm.com>,
-        DIVYA PRAKASH <dipraksh@linux.vnet.ibm.com>,
-        "Valdis.Kletnieks" <Valdis.Kletnieks@vt.edu>
-In-Reply-To: <20100809161200.GC3026@linux.vnet.ibm.com>
-References: <1280739132.15317.9.camel@subratamodak.linux.ibm.com>
-	 <20100809161200.GC3026@linux.vnet.ibm.com>
-Content-Type: text/plain; charset="UTF-8"
-Content-Transfer-Encoding: 8BIT
-Date: 	Thu, 16 Sep 2010 17:50:31 +0200
-Message-ID: <1284652231.2275.569.camel@laptop>
-Mime-Version: 1.0
-Sender: linux-kernel-owner@vger.kernel.org
-Precedence: bulk
-List-ID: <linux-kernel.vger.kernel.org>
-X-Mailing-List: 	linux-kernel@vger.kernel.org
-X-RedHat-Spam-Score: -2.31  (RCVD_IN_DNSWL_MED,T_RP_MATCHES_RCVD)
-X-Scanned-By: MIMEDefang 2.67 on 10.5.11.16
-X-Scanned-By: MIMEDefang 2.67 on 10.5.110.9
-Status: RO
-Content-Length: 6752
-Lines: 145
-
-On Mon, 2010-08-09 at 09:12 -0700, Paul E. McKenney wrote:
-
-> > [    0.051203] CPU0: AMD QEMU Virtual CPU version 0.12.4 stepping 03
-> > [    0.052999] lockdep: fixing up alternatives.
-> > [    0.054105]
-> > [    0.054106] ===================================================
-> > [    0.054999] [ INFO: suspicious rcu_dereference_check() usage. ]
-> > [    0.054999] ---------------------------------------------------
-> > [    0.054999] kernel/sched.c:616 invoked rcu_dereference_check() without protection!
-> > [    0.054999]
-> > [    0.054999] other info that might help us debug this:
-> > [    0.054999]
-> > [    0.054999]
-> > [    0.054999] rcu_scheduler_active = 1, debug_locks = 1
-> > [    0.054999] 3 locks held by swapper/1:
-> > [    0.054999]  #0:  (cpu_add_remove_lock){+.+.+.}, at: [<ffffffff814be933>] cpu_up+0x42/0x6a
-> > [    0.054999]  #1:  (cpu_hotplug.lock){+.+.+.}, at: [<ffffffff810400d8>] cpu_hotplug_begin+0x2a/0x51
-> > [    0.054999]  #2:  (&rq->lock){-.-...}, at: [<ffffffff814be2f7>] init_idle+0x2f/0x113
-> > [    0.054999]
-> > [    0.054999] stack backtrace:
-> > [    0.054999] Pid: 1, comm: swapper Not tainted 2.6.35 #1
-> > [    0.054999] Call Trace:
-> > [    0.054999]  [<ffffffff81068054>] lockdep_rcu_dereference+0x9b/0xa3
-> > [    0.054999]  [<ffffffff810325c3>] task_group+0x7b/0x8a
-> > [    0.054999]  [<ffffffff810325e5>] set_task_rq+0x13/0x40
-> > [    0.054999]  [<ffffffff814be39a>] init_idle+0xd2/0x113
-> > [    0.054999]  [<ffffffff814be78a>] fork_idle+0xb8/0xc7
-> > [    0.054999]  [<ffffffff81068717>] ? mark_held_locks+0x4d/0x6b
-> > [    0.054999]  [<ffffffff814bcebd>] do_fork_idle+0x17/0x2b
-> > [    0.054999]  [<ffffffff814bc89b>] native_cpu_up+0x1c1/0x724
-> > [    0.054999]  [<ffffffff814bcea6>] ? do_fork_idle+0x0/0x2b
-> > [    0.054999]  [<ffffffff814be876>] _cpu_up+0xac/0x127
-> > [    0.054999]  [<ffffffff814be946>] cpu_up+0x55/0x6a
-> > [    0.054999]  [<ffffffff81ab562a>] kernel_init+0xe1/0x1ff
-> > [    0.054999]  [<ffffffff81003854>] kernel_thread_helper+0x4/0x10
-> > [    0.054999]  [<ffffffff814c353c>] ? restore_args+0x0/0x30
-> > [    0.054999]  [<ffffffff81ab5549>] ? kernel_init+0x0/0x1ff
-> > [    0.054999]  [<ffffffff81003850>] ? kernel_thread_helper+0x0/0x10
-> > [    0.056074] Booting Node   0, Processors  #1lockdep: fixing up alternatives.
-> > [    0.130045]  #2lockdep: fixing up alternatives.
-> > [    0.203089]  #3 Ok.
-> > [    0.275286] Brought up 4 CPUs
-> > [    0.276005] Total of 4 processors activated (16017.17 BogoMIPS).
-> 
-> This does look like a new one, thank you for reporting it!
-> 
-> Here is my analysis, which should at least provide some humor value to
-> those who understand the code better than I do.  ;-)
-> 
-> So the corresponding rcu_dereference_check() is in
-> task_subsys_state_check(), and is fetching the cpu_cgroup_subsys_id
-> element of the newly created task's task->cgroups->subsys[] array.
-> The "git grep" command finds only three uses of cpu_cgroup_subsys_id,
-> but no definition.
-> 
-> Now, fork_idle() invokes copy_process(), which invokes cgroup_fork(),
-> which sets the child process's ->cgroups pointer to that of the parent,
-> also invoking get_css_set(), which increments the corresponding reference
-> count, doing both operations under task_lock() protection (->alloc_lock).
-> Because fork_idle() does not specify any of CLONE_NEWNS, CLONE_NEWUTS,
-> CLONE_NEWIPC, CLONE_NEWPID, or CLONE_NEWNET, copy_namespaces() should
-> not create a new namespace, and so there should be no ns_cgroup_clone().
-> We should thus retain the parent's ->cgroups pointer.  And copy_process()
-> installs the new task in the various lists, so that the task is externally
-> accessible upon return.
-> 
-> After a non-error return from copy_process(), fork_init() invokes
-> init_idle_pid(), which does not appear to affect the task's cgroup
-> state.  Next fork_init() invokes init_idle(), which in turn invokes
-> __set_task_cpu(), which invokes set_task_rq(), which calls task_group()
-> several times, which calls task_subsys_state_check(), which calls the
-> rcu_dereference_check() that complained above.
-> 
-> However, the result returns by rcu_dereference_check() is stored into
-> the task structure:
-> 
-> 	p->se.cfs_rq = task_group(p)->cfs_rq[cpu];
-> 	p->se.parent = task_group(p)->se[cpu];
-> 
-> This means that the corresponding structure must have been tied down with
-> a reference count or some such.  If such a reference has been taken, then
-> this complaint is a false positive, and could be suppressed by putting
-> rcu_read_lock() and rcu_read_unlock() around the call to init_idle()
-> from fork_idle().  However, although, reference to the enclosing ->cgroups
-> struct css_set is held, it is not clear to me that this reference applies
-> to the structures pointed to by the ->subsys[] array, especially given
-> that the cgroup_subsys_state structures referenced by this array have
-> their own reference count, which does not appear to me to be acquired
-> by this code path.
-> 
-> Or are the cgroup_subsys_state structures referenced by idle tasks
-> never freed or some such?
-
-I would hope so!, the idle tasks should be part of the root cgroup,
-which is not removable.
-
-The problem is that while we do in-fact hold rq->lock, the newly spawned
-idle thread's cpu is not yet set to the correct cpu so the lockdep check
-in task_group():
-
-  lockdep_is_held(&task_rq(p)->lock)
-
-will fail.
-
-Bit of a chicken and egg problem. Setting the cpu needs to have the cpu
-set ;-)
-
-Ingo, why do we have rq->lock there at all? The CPU isn't up and running
-yet, nothing should be touching it.
-
-Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
----
- kernel/sched.c |   12 ++++++++++++
- 1 files changed, 12 insertions(+), 0 deletions(-)
-
-diff --git a/kernel/sched.c b/kernel/sched.c
-index bd8b487..6241049 100644
---- a/kernel/sched.c
-+++ b/kernel/sched.c
-@@ -5332,7 +5332,19 @@ void __cpuinit init_idle(struct task_struct *idle, int cpu)
- 	idle->se.exec_start = sched_clock();
- 
- 	cpumask_copy(&idle->cpus_allowed, cpumask_of(cpu));
-+	/*
-+	 * We're having a chicken and egg problem, even though we are
-+	 * holding rq->lock, the cpu isn't yet set to this cpu so the
-+	 * lockdep check in task_group() will fail.
-+	 *
-+	 * Similar case to sched_fork(). / Alternatively we could
-+	 * use task_rq_lock() here and obtain the other rq->lock.
-+	 *
-+	 * Silence PROVE_RCU
-+	 */
-+	rcu_read_lock();
- 	__set_task_cpu(idle, cpu);
-+	rcu_read_unlock();
- 
- 	rq->curr = rq->idle = idle;
- #if defined(CONFIG_SMP) && defined(__ARCH_WANT_UNLOCKED_CTXSW)
-
---
-To unsubscribe from this list: send the line "unsubscribe linux-kernel" in
-the body of a message to majordomo@vger.kernel.org
-More majordomo info at  http://vger.kernel.org/majordomo-info.html
-Please read the FAQ at  http://www.tux.org/lkml/
-
diff --git a/pnpacpi-cope-with-invalid-device-ids.patch b/pnpacpi-cope-with-invalid-device-ids.patch
deleted file mode 100644
index 70b2ab968..000000000
--- a/pnpacpi-cope-with-invalid-device-ids.patch
+++ /dev/null
@@ -1,85 +0,0 @@
-commit 420a0f66378c84b00b0e603e4d38210102dbe367
-Author: Dmitry Torokhov <dmitry.torokhov@gmail.com>
-Date:   Sat Sep 18 10:11:09 2010 -0700
-
-    PNPACPI: cope with invalid device IDs
-    
-    If primary ID (HID) is invalid try locating first valid ID on compatible
-    ID list before giving up.
-    
-    This helps, for example, to recognize i8042 AUX port on Sony Vaio VPCZ1
-    which uses SNYSYN0003 as HID. Without the patch users are forced to
-    boot with i8042.nopnp to make use of their touchpads.
-    
-    Tested-by: Jan-Hendrik Zab <jan@jhz.name>
-    Signed-off-by: Dmitry Torokhov <dtor@mail.ru>
-    Signed-off-by: Len Brown <len.brown@intel.com>
-
-diff --git a/drivers/pnp/pnpacpi/core.c b/drivers/pnp/pnpacpi/core.c
-index dc4e32e..0d943ee 100644
---- a/drivers/pnp/pnpacpi/core.c
-+++ b/drivers/pnp/pnpacpi/core.c
-@@ -28,7 +28,7 @@
- #include "../base.h"
- #include "pnpacpi.h"
- 
--static int num = 0;
-+static int num;
- 
- /* We need only to blacklist devices that have already an acpi driver that
-  * can't use pnp layer. We don't need to blacklist device that are directly
-@@ -180,11 +180,24 @@ struct pnp_protocol pnpacpi_protocol = {
- };
- EXPORT_SYMBOL(pnpacpi_protocol);
- 
-+static char *pnpacpi_get_id(struct acpi_device *device)
-+{
-+	struct acpi_hardware_id *id;
-+
-+	list_for_each_entry(id, &device->pnp.ids, list) {
-+		if (ispnpidacpi(id->id))
-+			return id->id;
-+	}
-+
-+	return NULL;
-+}
-+
- static int __init pnpacpi_add_device(struct acpi_device *device)
- {
- 	acpi_handle temp = NULL;
- 	acpi_status status;
- 	struct pnp_dev *dev;
-+	char *pnpid;
- 	struct acpi_hardware_id *id;
- 
- 	/*
-@@ -192,11 +205,17 @@ static int __init pnpacpi_add_device(struct acpi_device *device)
- 	 * driver should not be loaded.
- 	 */
- 	status = acpi_get_handle(device->handle, "_CRS", &temp);
--	if (ACPI_FAILURE(status) || !ispnpidacpi(acpi_device_hid(device)) ||
--	    is_exclusive_device(device) || (!device->status.present))
-+	if (ACPI_FAILURE(status))
-+		return 0;
-+
-+	pnpid = pnpacpi_get_id(device);
-+	if (!pnpid)
-+		return 0;
-+
-+	if (is_exclusive_device(device) || !device->status.present)
- 		return 0;
- 
--	dev = pnp_alloc_dev(&pnpacpi_protocol, num, acpi_device_hid(device));
-+	dev = pnp_alloc_dev(&pnpacpi_protocol, num, pnpid);
- 	if (!dev)
- 		return -ENOMEM;
- 
-@@ -227,7 +246,7 @@ static int __init pnpacpi_add_device(struct acpi_device *device)
- 		pnpacpi_parse_resource_option_data(dev);
- 
- 	list_for_each_entry(id, &device->pnp.ids, list) {
--		if (!strcmp(id->id, acpi_device_hid(device)))
-+		if (!strcmp(id->id, pnpid))
- 			continue;
- 		if (!ispnpidacpi(id->id))
- 			continue;
diff --git a/radeon-mc-vram-map-needs-to-be-gt-pci-aperture.patch b/radeon-mc-vram-map-needs-to-be-gt-pci-aperture.patch
deleted file mode 100644
index 88fa35e2f..000000000
--- a/radeon-mc-vram-map-needs-to-be-gt-pci-aperture.patch
+++ /dev/null
@@ -1,32 +0,0 @@
-commit b7d8cce5b558e0c0aa6898c9865356481598b46d
-Author: Alex Deucher <alexdeucher@gmail.com>
-Date:   Mon Oct 25 19:44:00 2010 -0400
-
-    drm/radeon/kms: MC vram map needs to be >= pci aperture size
-    
-    The vram map in the radeon memory controller needs to be
-    >= the pci aperture size.  Fixes:
-    https://bugs.freedesktop.org/show_bug.cgi?id=28402
-    
-    The problematic cards in the above bug have 64 MB of vram,
-    but the pci aperture is 128 MB and the MC vram map was only
-    64 MB.  This can lead to hangs.
-    
-    Signed-off-by: Alex Deucher <alexdeucher@gmail.com>
-    Cc: stable@kernel.org
-    Signed-off-by: Dave Airlie <airlied@redhat.com>
-
-diff --git a/drivers/gpu/drm/radeon/r100.c b/drivers/gpu/drm/radeon/r100.c
-index 6112ac9..6d1540c 100644
---- a/drivers/gpu/drm/radeon/r100.c
-+++ b/drivers/gpu/drm/radeon/r100.c
-@@ -2270,6 +2270,9 @@ void r100_vram_init_sizes(struct radeon_device *rdev)
- 		/* Fix for RN50, M6, M7 with 8/16/32(??) MBs of VRAM - 
- 		 * Novell bug 204882 + along with lots of ubuntu ones
- 		 */
-+		if (rdev->mc.aper_size > config_aper_size)
-+			config_aper_size = rdev->mc.aper_size;
-+
- 		if (config_aper_size > rdev->mc.real_vram_size)
- 			rdev->mc.mc_vram_size = config_aper_size;
- 		else
diff --git a/sources b/sources
index de818e4f3..df0066685 100644
--- a/sources
+++ b/sources
@@ -1,2 +1,3 @@
 61f3739a73afb6914cb007f37fb09b62  linux-2.6.36.tar.bz2
 dd38a6caf08df2822f93541ee95aed7d  patch-2.6.36.1.bz2
+33b11b4b8fcd47601a0e1e51586c4b04  patch-2.6.36.2-rc1.bz2
diff --git a/tty-dont-allow-reopen-when-ldisc-is-changing.patch b/tty-dont-allow-reopen-when-ldisc-is-changing.patch
deleted file mode 100644
index 4b822d5f9..000000000
--- a/tty-dont-allow-reopen-when-ldisc-is-changing.patch
+++ /dev/null
@@ -1,84 +0,0 @@
-From jirislaby@gmail.com Thu Nov 25 12:16:42 2010
-From: Jiri Slaby <jslaby@suse.cz>
-Subject: [PATCH 1/1] TTY: don't allow reopen when ldisc is changing
-Date: Thu, 25 Nov 2010 18:16:23 +0100
-
-There are many WARNINGs like the following reported nowadays:
-WARNING: at drivers/tty/tty_io.c:1331 tty_open+0x2a2/0x49a()
-Hardware name: Latitude E6500
-Modules linked in:
-Pid: 1207, comm: plymouthd Not tainted 2.6.37-rc3-mmotm1123 #3
-Call Trace:
- [<ffffffff8103b189>] warn_slowpath_common+0x80/0x98
- [<ffffffff8103b1b6>] warn_slowpath_null+0x15/0x17
- [<ffffffff8128a3ab>] tty_open+0x2a2/0x49a
- [<ffffffff810fd53f>] chrdev_open+0x11d/0x146
-...
-
-This means tty_reopen is called without TTY_LDISC set. For further
-considerations, note tty_lock is held in tty_open. TTY_LDISC is cleared in:
-1) __tty_hangup from tty_ldisc_hangup to tty_ldisc_enable. During this
-section tty_lock is held.
-
-2) tty_release via tty_ldisc_release till the end of tty existence. If
-tty->count <= 1, tty_lock is taken, TTY_CLOSING bit set and then
-tty_ldisc_release called. tty_reopen checks TTY_CLOSING before checking
-TTY_LDISC.
-
-3) tty_set_ldisc from tty_ldisc_halt to tty_ldisc_enable. We:
-   * take tty_lock, set TTY_LDISC_CHANGING, put tty_lock
-   * call tty_ldisc_halt (clear TTY_LDISC), tty_lock is _not_ held
-   * do some other work
-   * take tty_lock, call tty_ldisc_enable (set TTY_LDISC), put
-     tty_lock
-
-So the only option I see is 3). The solution is to check
-TTY_LDISC_CHANGING along with TTY_CLOSING in tty_reopen.
-
-Nicely reproducible with two processes:
-while (1) {
-	fd = open("/dev/ttyS1", O_RDWR);
-	if (fd < 0) {
-		warn("open");
-		continue;
-	}
-	close(fd);
-}
---------
-while (1) {
-        fd = open("/dev/ttyS1", O_RDWR);
-        ld1 = 0; ld2 = 2;
-        while (1) {
-                ioctl(fd, TIOCSETD, &ld1);
-                ioctl(fd, TIOCSETD, &ld2);
-        }
-        close(fd);
-}
-
-Signed-off-by: Jiri Slaby <jslaby@suse.cz>
-Reported-by: <Valdis.Kletnieks@vt.edu>
-Cc: Kyle McMartin <kyle@mcmartin.ca>
-Cc: Alan Cox <alan@lxorguk.ukuu.org.uk>
----
- drivers/tty/tty_io.c |    3 ++-
- 1 files changed, 2 insertions(+), 1 deletions(-)
-
-diff --git a/drivers/tty/tty_io.c b/drivers/tty/tty_io.c
-index c05c5af..878f6d6 100644
---- a/drivers/char/tty_io.c
-+++ b/drivers/char/tty_io.c
-@@ -1310,7 +1310,8 @@ static int tty_reopen(struct tty_struct *tty)
- {
- 	struct tty_driver *driver = tty->driver;
- 
--	if (test_bit(TTY_CLOSING, &tty->flags))
-+	if (test_bit(TTY_CLOSING, &tty->flags) ||
-+			test_bit(TTY_LDISC_CHANGING, &tty->flags))
- 		return -EIO;
- 
- 	if (driver->type == TTY_DRIVER_TYPE_PTY &&
--- 
-1.7.3.1
-
-
-
diff --git a/tty-ldisc-fix-open-flag-handling.patch b/tty-ldisc-fix-open-flag-handling.patch
deleted file mode 100644
index 72c67a976..000000000
--- a/tty-ldisc-fix-open-flag-handling.patch
+++ /dev/null
@@ -1,54 +0,0 @@
-From linux-kernel-owner@vger.kernel.org Wed Nov 24 18:28:11 2010
-From:	Jiri Slaby <jslaby@suse.cz>
-Subject: [PATCH 1/2] TTY: ldisc, fix open flag handling
-Date:	Thu, 25 Nov 2010 00:27:54 +0100
-
-When a concrete ldisc open fails in tty_ldisc_open, we forget to clear
-TTY_LDISC_OPEN. This causes a false warning on the next ldisc open:
-WARNING: at drivers/char/tty_ldisc.c:445 tty_ldisc_open+0x26/0x38()
-Hardware name: System Product Name
-Modules linked in: ...
-Pid: 5251, comm: a.out Tainted: G        W  2.6.32-5-686 #1
-Call Trace:
- [<c1030321>] ? warn_slowpath_common+0x5e/0x8a
- [<c1030357>] ? warn_slowpath_null+0xa/0xc
- [<c119311c>] ? tty_ldisc_open+0x26/0x38
- [<c11936c5>] ? tty_set_ldisc+0x218/0x304
-...
-
-So clear the bit when failing...
-
-Introduced in c65c9bc3efa (tty: rewrite the ldisc locking) back in
-2.6.31-rc1.
-
-Signed-off-by: Jiri Slaby <jslaby@suse.cz>
-Cc: Alan Cox <alan@linux.intel.com>
-Reported-by: Sergey Lapin <slapin@ossfans.org>
-Tested-by: Sergey Lapin <slapin@ossfans.org>
----
- drivers/tty/tty_ldisc.c |    2 ++
- 1 files changed, 2 insertions(+), 0 deletions(-)
-
-diff --git a/drivers/tty/tty_ldisc.c b/drivers/tty/tty_ldisc.c
-index d8e96b0..4214d58 100644
---- a/drivers/char/tty_ldisc.c
-+++ b/drivers/char/tty_ldisc.c
-@@ -454,6 +454,8 @@ static int tty_ldisc_open(struct tty_struct *tty, struct tty_ldisc *ld)
-                 /* BTM here locks versus a hangup event */
- 		WARN_ON(!tty_locked());
- 		ret = ld->ops->open(tty);
-+		if (ret)
-+			clear_bit(TTY_LDISC_OPEN, &tty->flags);
- 		return ret;
- 	}
- 	return 0;
--- 
-1.7.3.1
-
-
---
-To unsubscribe from this list: send the line "unsubscribe linux-kernel" in
-the body of a message to majordomo@vger.kernel.org
-More majordomo info at  http://vger.kernel.org/majordomo-info.html
-Please read the FAQ at  http://www.tux.org/lkml/
-
diff --git a/tty-open-hangup-race-fixup.patch b/tty-open-hangup-race-fixup.patch
deleted file mode 100644
index bfd29ecf7..000000000
--- a/tty-open-hangup-race-fixup.patch
+++ /dev/null
@@ -1,76 +0,0 @@
-From 9e88e8b9915b5e067507a087437d80e6a133d612 Mon Sep 17 00:00:00 2001
-From: Jiri Slaby <jslaby@suse.cz>
-Date: Sat, 27 Nov 2010 16:06:46 +0100
-Subject: [PATCH 1/1] TTY: open/hangup race fixup
-
-
-Signed-off-by: Jiri Slaby <jslaby@suse.cz>
----
- drivers/tty/tty_io.c |   10 +++++++++-
- include/linux/tty.h  |    1 +
- 2 files changed, 10 insertions(+), 1 deletions(-)
-
-diff --git a/drivers/tty/tty_io.c b/drivers/tty/tty_io.c
-index 878f6d6..35480dd 100644
---- a/drivers/char/tty_io.c
-+++ b/drivers/char/tty_io.c
-@@ -559,6 +559,9 @@ void __tty_hangup(struct tty_struct *tty)
- 
- 	tty_lock();
- 
-+	/* some functions below drop BTM, so we need this bit */
-+	set_bit(TTY_HUPPING, &tty->flags);
-+
- 	/* inuse_filps is protected by the single tty lock,
- 	   this really needs to change if we want to flush the
- 	   workqueue with the lock held */
-@@ -578,6 +581,10 @@ void __tty_hangup(struct tty_struct *tty)
- 	}
- 	spin_unlock(&tty_files_lock);
- 
-+	/*
-+	 * it drops BTM and thus races with reopen
-+	 * we protect the race by TTY_HUPPING
-+	 */
- 	tty_ldisc_hangup(tty);
- 
- 	read_lock(&tasklist_lock);
-@@ -615,7 +622,6 @@ void __tty_hangup(struct tty_struct *tty)
- 	tty->session = NULL;
- 	tty->pgrp = NULL;
- 	tty->ctrl_status = 0;
--	set_bit(TTY_HUPPED, &tty->flags);
- 	spin_unlock_irqrestore(&tty->ctrl_lock, flags);
- 
- 	/* Account for the p->signal references we killed */
-@@ -641,6 +647,7 @@ void __tty_hangup(struct tty_struct *tty)
- 	 * can't yet guarantee all that.
- 	 */
- 	set_bit(TTY_HUPPED, &tty->flags);
-+	clear_bit(TTY_HUPPING, &tty->flags);
- 	tty_ldisc_enable(tty);
- 
- 	tty_unlock();
-@@ -1311,6 +1318,7 @@ static int tty_reopen(struct tty_struct *tty)
- 	struct tty_driver *driver = tty->driver;
- 
- 	if (test_bit(TTY_CLOSING, &tty->flags) ||
-+			test_bit(TTY_HUPPING, &tty->flags) ||
- 			test_bit(TTY_LDISC_CHANGING, &tty->flags))
- 		return -EIO;
- 
-diff --git a/include/linux/tty.h b/include/linux/tty.h
-index 032d79f..54e4eaa 100644
---- a/include/linux/tty.h
-+++ b/include/linux/tty.h
-@@ -366,6 +366,7 @@ struct tty_file_private {
- #define TTY_HUPPED 		18	/* Post driver->hangup() */
- #define TTY_FLUSHING		19	/* Flushing to ldisc in progress */
- #define TTY_FLUSHPENDING	20	/* Queued buffer flush pending */
-+#define TTY_HUPPING 		21	/* ->hangup() in progress */
- 
- #define TTY_WRITE_FLUSH(tty) tty_write_flush((tty))
- 
--- 
-1.7.3.1
-
diff --git a/tty-restore-tty_ldisc_wait_idle.patch b/tty-restore-tty_ldisc_wait_idle.patch
deleted file mode 100644
index 3e784dd57..000000000
--- a/tty-restore-tty_ldisc_wait_idle.patch
+++ /dev/null
@@ -1,117 +0,0 @@
-From 4d458f558d5b904f14080b073b549d18c9503f93 Mon Sep 17 00:00:00 2001
-From: Jiri Slaby <jslaby@suse.cz>
-Date: Sun, 31 Oct 2010 23:17:51 +0100
-Subject: TTY: restore tty_ldisc_wait_idle
-
-It was removed in 65b770468e98 (tty-ldisc: turn ldisc user count into
-a proper refcount), but we need to wait for last user to quit the
-ldisc before we close it in tty_set_ldisc.
-
-Otherwise weird things start to happen. There might be processes
-waiting in tty_read->n_tty_read on tty->read_wait for input to appear
-and at that moment, a change of ldisc is fatal. n_tty_close is called,
-it frees read_buf and the waiting process is still in the middle of
-reading and goes nuts after it is woken.
-
-Previously we prevented close to happen when others are in ldisc ops
-by tty_ldisc_wait_idle in tty_set_ldisc. But the commit above removed
-that. So revoke the change and test whether there is 1 user (=we), and
-allow the close then.
-
-We can do that without ldisc/tty locks, because nobody else can open
-the device due to TTY_LDISC_CHANGING bit set, so we in fact wait for
-everybody to leave.
-
-I don't understand why tty_ldisc_lock would be needed either when the
-counter is an atomic variable, so this is a lockless
-tty_ldisc_wait_idle.
-
-On the other hand, if we fail to wait (timeout or signal), we have to
-reenable the halted ldiscs, so we take ldisc lock and reuse the setup
-path at the end of tty_set_ldisc.
-
-Signed-off-by: Jiri Slaby <jslaby@suse.cz>
-Acked-by: Linus Torvalds <torvalds@linux-foundation.org>
-Tested-by: Sebastian Andrzej Siewior <bigeasy@breakpoint.cc>
-LKML-Reference: <20101031104136.GA511@Chamillionaire.breakpoint.cc>
-LKML-Reference: <1287669539-22644-1-git-send-email-jslaby@suse.cz>
-Cc: Alan Cox <alan@linux.intel.com>
-Cc: stable@kernel.org [32, 33, 36]
-Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
----
- drivers/char/tty_ldisc.c |   29 +++++++++++++++++++++++++++++
- 1 files changed, 29 insertions(+), 0 deletions(-)
-
-diff --git a/drivers/char/tty_ldisc.c b/drivers/char/tty_ldisc.c
-index 412f977..5bbf33a 100644
---- a/drivers/char/tty_ldisc.c
-+++ b/drivers/char/tty_ldisc.c
-@@ -47,6 +47,7 @@
- 
- static DEFINE_SPINLOCK(tty_ldisc_lock);
- static DECLARE_WAIT_QUEUE_HEAD(tty_ldisc_wait);
-+static DECLARE_WAIT_QUEUE_HEAD(tty_ldisc_idle);
- /* Line disc dispatch table */
- static struct tty_ldisc_ops *tty_ldiscs[NR_LDISCS];
- 
-@@ -83,6 +84,7 @@ static void put_ldisc(struct tty_ldisc *ld)
- 		return;
- 	}
- 	local_irq_restore(flags);
-+	wake_up(&tty_ldisc_idle);
- }
- 
- /**
-@@ -531,6 +533,23 @@ static int tty_ldisc_halt(struct tty_struct *tty)
- }
- 
- /**
-+ *	tty_ldisc_wait_idle	-	wait for the ldisc to become idle
-+ *	@tty: tty to wait for
-+ *
-+ *	Wait for the line discipline to become idle. The discipline must
-+ *	have been halted for this to guarantee it remains idle.
-+ */
-+static int tty_ldisc_wait_idle(struct tty_struct *tty)
-+{
-+	int ret;
-+	ret = wait_event_interruptible_timeout(tty_ldisc_idle,
-+			atomic_read(&tty->ldisc->users) == 1, 5 * HZ);
-+	if (ret < 0)
-+		return ret;
-+	return ret > 0 ? 0 : -EBUSY;
-+}
-+
-+/**
-  *	tty_set_ldisc		-	set line discipline
-  *	@tty: the terminal to set
-  *	@ldisc: the line discipline
-@@ -634,8 +653,17 @@ int tty_set_ldisc(struct tty_struct *tty, int ldisc)
- 
- 	flush_scheduled_work();
- 
-+	retval = tty_ldisc_wait_idle(tty);
-+
- 	tty_lock();
- 	mutex_lock(&tty->ldisc_mutex);
-+
-+	/* handle wait idle failure locked */
-+	if (retval) {
-+		tty_ldisc_put(new_ldisc);
-+		goto enable;
-+	}
-+
- 	if (test_bit(TTY_HUPPED, &tty->flags)) {
- 		/* We were raced by the hangup method. It will have stomped
- 		   the ldisc data and closed the ldisc down */
-@@ -669,6 +697,7 @@ int tty_set_ldisc(struct tty_struct *tty, int ldisc)
- 
- 	tty_ldisc_put(o_ldisc);
- 
-+enable:
- 	/*
- 	 *	Allow ldisc referencing to occur again
- 	 */
--- 
-1.7.3.2
-
diff --git a/xhci_hcd-suspend-resume.patch b/xhci_hcd-suspend-resume.patch
index ffbaeb421..2f8816973 100644
--- a/xhci_hcd-suspend-resume.patch
+++ b/xhci_hcd-suspend-resume.patch
@@ -1240,9 +1240,9 @@ index 34a60d9..b6d8033 100644
   *
   * xHC interrupts have been disabled and a watchdog timer will (or has already)
 @@ -1199,6 +1228,10 @@ struct xhci_hcd {
- #define	XHCI_LINK_TRB_QUIRK	(1 << 0)
- #define XHCI_RESET_EP_QUIRK	(1 << 1)
- #define XHCI_NEC_HOST		(1 << 2)
+	/* Array of pointers to USB 2.0 PORTSC registers */
+	u32 __iomem		**usb2_ports;
+	unsigned int		num_usb2_ports;
 +	u32			port_c_suspend[8];	/* port suspend change*/
 +	u32			suspended_ports[8];	/* which ports are
 +							   suspended */

From 738764956426a09304f07623f75f711a013ef55a Mon Sep 17 00:00:00 2001
From: Kyle McMartin <kyle@redhat.com>
Date: Thu, 9 Dec 2010 13:40:55 -0500
Subject: [PATCH 55/56] fix merge remnants

---
 kernel.spec | 41 -----------------------------------------
 1 file changed, 41 deletions(-)

diff --git a/kernel.spec b/kernel.spec
index 36dda5f23..2d354f16b 100644
--- a/kernel.spec
+++ b/kernel.spec
@@ -703,31 +703,12 @@ Patch12205: runtime_pm_fixups.patch
 
 Patch12303: dmar-disable-when-ricoh-multifunction.patch
 
-Patch12401: debug-tty-print-dev-name.patch
-
 Patch12410: mm-page-allocator-adjust-the-per-cpu-counter-threshold-when-memory-is-low.patch
 Patch12411: mm-vmstat-use-a-single-setter-function-and-callback-for-adjusting-percpu-thresholds.patch
 
 # rhbz#650934
 Patch12420: sched-cure-more-NO_HZ-load-average-woes.patch
 
-Patch12308: fix-i8k-inline-asm.patch
-
-Patch12405: inet_diag-make-sure-we-run-the-same-bytecode-we-audited.patch
-Patch12408: netlink-make-nlmsg_find_attr-take-a-const-ptr.patch
-
-Patch12406: posix-cpu-timers-workaround-to-suppress-problems-with-mt-exec.patch
-
-Patch12410: tty-make-tiocgicount-a-handler.patch
-Patch12411: tty-icount-changeover-for-other-main-devices.patch
-
-Patch12413: tpm-autodetect-itpm-devices.patch
-
-Patch12420: mm-page-allocator-adjust-the-per-cpu-counter-threshold-when-memory-is-low.patch
-Patch12421: mm-vmstat-use-a-single-setter-function-and-callback-for-adjusting-percpu-thresholds.patch
-
-Patch12430: sched-cure-more-NO_HZ-load-average-woes.patch
-
 %endif
 
 BuildRoot: %{_tmppath}/kernel-%{KVERREL}-root
@@ -1318,28 +1299,6 @@ ApplyPatch runtime_pm_fixups.patch
 # rhbz#605888
 ApplyPatch dmar-disable-when-ricoh-multifunction.patch
 
-# rhbz#630464
-ApplyPatch debug-tty-print-dev-name.patch
-
-# backport some fixes for kswapd from mmotm, rhbz#649694
-ApplyPatch mm-page-allocator-adjust-the-per-cpu-counter-threshold-when-memory-is-low.patch
-ApplyPatch mm-vmstat-use-a-single-setter-function-and-callback-for-adjusting-percpu-thresholds.patch
-
-ApplyPatch fix-i8k-inline-asm.patch
-
-# rhbz#651264 (CVE-2010-3880)
-ApplyPatch inet_diag-make-sure-we-run-the-same-bytecode-we-audited.patch
-ApplyPatch netlink-make-nlmsg_find_attr-take-a-const-ptr.patch
-
-# rhbz#656264
-ApplyPatch posix-cpu-timers-workaround-to-suppress-problems-with-mt-exec.patch
-
-# CVE-2010-4077, CVE-2010-4075 (rhbz#648660, #648663)
-ApplyPatch tty-make-tiocgicount-a-handler.patch
-ApplyPatch tty-icount-changeover-for-other-main-devices.patch
-
-ApplyPatch tpm-autodetect-itpm-devices.patch
-
 # backport some fixes for kswapd from mmotm, rhbz#649694
 ApplyPatch mm-page-allocator-adjust-the-per-cpu-counter-threshold-when-memory-is-low.patch
 ApplyPatch mm-vmstat-use-a-single-setter-function-and-callback-for-adjusting-percpu-thresholds.patch

From 72b3729657a0c46f187e4a6c500b239a0e4d391a Mon Sep 17 00:00:00 2001
From: Kyle McMartin <kyle@redhat.com>
Date: Thu, 9 Dec 2010 13:46:11 -0500
Subject: [PATCH 56/56] Revert "Merge branch 'kernel-git' strategy 'theirs'"

This reverts commit a3f45c4a9c4acadb3b0e3531a0d4d3649f711ebf, reversing
changes made to ee99a68c51ebd08409e45b63796f3c971c27e3d6.
---
 .gitignore                                    |    4 +-
 Makefile                                      |   10 +-
 acpi-ec-add-delay-before-write.patch          |    5 +-
 ...ery-information-on-notification-0x81.patch |    4 +-
 btusb-macbookpro-6-2.patch                    |   41 +
 btusb-macbookpro-7-1.patch                    |   42 +
 config-arm                                    |    7 -
 config-debug                                  |    6 +-
 config-generic                                |  107 +-
 config-nodebug                                |   12 +-
 config-powerpc-generic                        |    2 -
 config-s390x                                  |    5 -
 config-sparc64-generic                        |    2 -
 config-x86-generic                            |   21 +-
 config-x86_64-generic                         |   18 -
 debug-tty-print-dev-name.patch                |   14 +-
 drm-intel-big-hammer.patch                    |   21 +-
 drm-intel-edp-fixes.patch                     |   44 -
 drm-intel-make-lvds-work.patch                |   22 +-
 hdpvr-ir-enable.patch                         |   15 +-
 kernel.spec                                   |  262 ++--
 linux-2.6-32bit-mmap-exec-randomization.patch |    2 +-
 linux-2.6-bluetooth-autosuspend.patch         |  159 ++
 linux-2.6-crash-driver.patch                  |   24 +-
 linux-2.6-enable-more-pci-autosuspend.patch   |  276 ++--
 linux-2.6-i386-nx-emulation.patch             |    2 +-
 linux-2.6-qcserial-autosuspend.patch          |   23 +
 linux-2.6-utrace-ptrace.patch                 |   35 +-
 linux-2.6-utrace.patch                        |    6 +-
 linux-2.6-uvc-autosuspend.patch               |   22 +
 pci-crs-fixes.patch                           |  593 ++++++++
 runtime_pm_fixups.patch                       |    2 +-
 sources                                       |    4 +-
 ...m-01-add-fuzz-parameters-to-features.patch |   69 +
 wacom-02-parse-the-bamboo-device-family.patch |  122 ++
 ...t-device-quirks-into-single-function.patch |  107 ++
 ...upport-for-the-bamboo-touch-trackpad.patch |  172 +++
 ...rk-for-low-resolution-bamboo-devices.patch |   69 +
 ...-request-tablet-data-for-bamboo-pens.patch |   55 +
 ...bamboo-touch-irq-to-its-own-function.patch |   52 +
 wacom-08-add-support-for-bamboo-pen.patch     |  130 ++
 ...mboo-touchpad-when-pen-is-being-used.patch |   31 +
 xhci_hcd-suspend-resume.patch                 | 1289 +++++++++++++++++
 43 files changed, 3430 insertions(+), 478 deletions(-)
 create mode 100644 btusb-macbookpro-6-2.patch
 create mode 100644 btusb-macbookpro-7-1.patch
 delete mode 100644 drm-intel-edp-fixes.patch
 create mode 100644 linux-2.6-bluetooth-autosuspend.patch
 create mode 100644 linux-2.6-qcserial-autosuspend.patch
 create mode 100644 linux-2.6-uvc-autosuspend.patch
 create mode 100644 pci-crs-fixes.patch
 create mode 100644 wacom-01-add-fuzz-parameters-to-features.patch
 create mode 100644 wacom-02-parse-the-bamboo-device-family.patch
 create mode 100644 wacom-03-collect-device-quirks-into-single-function.patch
 create mode 100644 wacom-04-add-support-for-the-bamboo-touch-trackpad.patch
 create mode 100644 wacom-05-add-a-quirk-for-low-resolution-bamboo-devices.patch
 create mode 100644 wacom-06-request-tablet-data-for-bamboo-pens.patch
 create mode 100644 wacom-07-move-bamboo-touch-irq-to-its-own-function.patch
 create mode 100644 wacom-08-add-support-for-bamboo-pen.patch
 create mode 100644 wacom-09-disable-bamboo-touchpad-when-pen-is-being-used.patch
 create mode 100644 xhci_hcd-suspend-resume.patch

diff --git a/.gitignore b/.gitignore
index 05a9df52c..8ee6321e0 100644
--- a/.gitignore
+++ b/.gitignore
@@ -3,5 +3,5 @@ patch-*.bz2
 clog
 *.rpm
 kernel-2.6.*/
-/patch-2.6.37-rc5.bz2
-/patch-2.6.37-rc5-git2.bz2
+/patch-2.6.36.1.bz2
+/patch-2.6.36.2-rc1.bz2
diff --git a/Makefile b/Makefile
index ca62cce42..f925397d1 100644
--- a/Makefile
+++ b/Makefile
@@ -77,13 +77,12 @@ debug:
 	@perl -pi -e 's/# CONFIG_DEBUG_CFQ_IOSCHED is not set/CONFIG_DEBUG_CFQ_IOSCHED=y/' config-nodebug
 	@perl -pi -e 's/# CONFIG_DRBD_FAULT_INJECTION is not set/CONFIG_DRBD_FAULT_INJECTION=y/' config-nodebug
 	@perl -pi -e 's/# CONFIG_ATH_DEBUG is not set/CONFIG_ATH_DEBUG=y/' config-nodebug
-	@perl -pi -e 's/# CONFIG_CARL9170_DEBUGFS is not set/CONFIG_CARL9170_DEBUGFS=y/' config-nodebug
 	@perl -pi -e 's/# CONFIG_IWLWIFI_DEVICE_TRACING is not set/CONFIG_IWLWIFI_DEVICE_TRACING=y/' config-nodebug
 	@perl -pi -e 's/# CONFIG_DEBUG_OBJECTS_WORK is not set/CONFIG_DEBUG_OBJECTS_WORK=y/' config-nodebug
 	@perl -pi -e 's/# CONFIG_DEBUG_STRICT_USER_COPY_CHECKS is not set/CONFIG_DEBUG_STRICT_USER_COPY_CHECKS=y/' config-nodebug
 	@perl -pi -e 's/# CONFIG_DMADEVICES_DEBUG is not set/CONFIG_DMADEVICES_DEBUG=y/' config-nodebug
 	@perl -pi -e 's/# CONFIG_DMADEVICES_VDEBUG is not set/CONFIG_DMADEVICES_VDEBUG=y/' config-nodebug
-	@perl -pi -e 's/# CONFIG_CEPH_LIB_PRETTYDEBUG is not set/CONFIG_CEPH_LIB_PRETTYDEBUG=y/' config-nodebug
+	@perl -pi -e 's/# CONFIG_CEPH_FS_PRETTYDEBUG is not set/CONFIG_CEPH_FS_PRETTYDEBUG=y/' config-nodebug
 	@perl -pi -e 's/# CONFIG_QUOTA_DEBUG is not set/CONFIG_QUOTA_DEBUG=y/' config-nodebug
 	@perl -pi -e 's/# CONFIG_KGDB_KDB is not set/CONFIG_KGDB_KDB=y/' config-nodebug
 	@perl -pi -e 's/# CONFIG_KDB_KEYBOARD is not set/CONFIG_KDB_KEYBOARD=y/' config-nodebug
@@ -91,8 +90,6 @@ debug:
 	@perl -pi -e 's/# CONFIG_DEBUG_PER_CPU_MAPS is not set/CONFIG_DEBUG_PER_CPU_MAPS=y/' config-nodebug
 	@perl -pi -e 's/CONFIG_CRYPTO_MANAGER_DISABLE_TESTS=y/# CONFIG_CRYPTO_MANAGER_DISABLE_TESTS is not set/' config-nodebug
 	#@perl -pi -e 's/# CONFIG_PCI_DEFAULT_USE_CRS is not set/CONFIG_PCI_DEFAULT_USE_CRS=y/' config-nodebug
-	@perl -pi -e 's/# CONFIG_DEBUG_OBJECTS_PERCPU_COUNTER is not set/CONFIG_DEBUG_OBJECTS_PERCPU_COUNTER=y/' config-nodebug
-	@perl -pi -e 's/# CONFIG_TEST_LIST_SORT is not set/CONFIG_TEST_LIST_SORT=y/' config-nodebug
 
 	@# just in case we're going from extremedebug -> debug
 	@perl -pi -e 's/CONFIG_DEBUG_PAGEALLOC=y/# CONFIG_DEBUG_PAGEALLOC is not set/' config-nodebug
@@ -154,13 +151,12 @@ release:
 	@perl -pi -e 's/CONFIG_DEBUG_CFQ_IOSCHED=y/# CONFIG_DEBUG_CFQ_IOSCHED is not set/' config-nodebug
 	@perl -pi -e 's/CONFIG_DRBD_FAULT_INJECTION=y/# CONFIG_DRBD_FAULT_INJECTION is not set/' config-nodebug
 	@perl -pi -e 's/CONFIG_ATH_DEBUG=y/# CONFIG_ATH_DEBUG is not set/' config-nodebug
-	@perl -pi -e 's/CONFIG_CARL9170_DEBUGFS=y/# CONFIG_CARL9170_DEBUGFS is not set/' config-nodebug
 	@perl -pi -e 's/CONFIG_IWLWIFI_DEVICE_TRACING=y/# CONFIG_IWLWIFI_DEVICE_TRACING is not set/' config-nodebug
 	@perl -pi -e 's/CONFIG_DEBUG_OBJECTS_WORK=y/# CONFIG_DEBUG_OBJECTS_WORK is not set/' config-nodebug
 	@perl -pi -e 's/CONFIG_DEBUG_STRICT_USER_COPY_CHECKS=y/# CONFIG_DEBUG_STRICT_USER_COPY_CHECKS is not set/' config-nodebug
 	@perl -pi -e 's/CONFIG_DMADEVICES_DEBUG=y/# CONFIG_DMADEVICES_DEBUG is not set/' config-nodebug
 	@perl -pi -e 's/CONFIG_DMADEVICES_VDEBUG=y/# CONFIG_DMADEVICES_VDEBUG is not set/' config-nodebug
-	@perl -pi -e 's/CONFIG_CEPH_LIB_PRETTYDEBUG=y/# CONFIG_CEPH_LIB_PRETTYDEBUG is not set/' config-nodebug
+	@perl -pi -e 's/CONFIG_CEPH_FS_PRETTYDEBUG=y/# CONFIG_CEPH_FS_PRETTYDEBUG is not set/' config-nodebug
 	@perl -pi -e 's/CONFIG_QUOTA_DEBUG=y/# CONFIG_QUOTA_DEBUG is not set/' config-nodebug
 	@perl -pi -e 's/CONFIG_CPU_NOTIFIER_ERROR_INJECT=m/# CONFIG_CPU_NOTIFIER_ERROR_INJECT is not set/' config-nodebug
 	#@perl -pi -e 's/CONFIG_KGDB_KDB=y/# CONFIG_KGDB_KDB is not set/' config-nodebug
@@ -168,8 +164,6 @@ release:
 	@perl -pi -e 's/CONFIG_DEBUG_PER_CPU_MAPS=y/# CONFIG_DEBUG_PER_CPU_MAPS is not set/' config-nodebug
 	@perl -pi -e 's/# CONFIG_CRYPTO_MANAGER_DISABLE_TESTS is not set/CONFIG_CRYPTO_MANAGER_DISABLE_TESTS=y/' config-nodebug
 	#@perl -pi -e 's/CONFIG_PCI_DEFAULT_USE_CRS=y/# CONFIG_PCI_DEFAULT_USE_CRS is not set/' config-nodebug
-	@perl -pi -e 's/CONFIG_DEBUG_OBJECTS_PERCPU_COUNTER=y/# CONFIG_DEBUG_OBJECTS_PERCPU_COUNTER is not set/' config-nodebug
-	@perl -pi -e 's/CONFIG_TEST_LIST_SORT=y/# CONFIG_TEST_LIST_SORT is not set/' config-nodebug
 
 	@perl -pi -e 's/CONFIG_DEBUG_PAGEALLOC=y/# CONFIG_DEBUG_PAGEALLOC is not set/' config-debug
 	@perl -pi -e 's/CONFIG_DEBUG_PAGEALLOC=y/# CONFIG_DEBUG_PAGEALLOC is not set/' config-nodebug
diff --git a/acpi-ec-add-delay-before-write.patch b/acpi-ec-add-delay-before-write.patch
index f1cb50817..af49cccbd 100644
--- a/acpi-ec-add-delay-before-write.patch
+++ b/acpi-ec-add-delay-before-write.patch
@@ -23,7 +23,7 @@ index 27e0b92..09fbb69 100644
  		pr_debug(PREFIX "controller reset, restart transaction\n");
  		spin_lock_irqsave(&ec->curr_lock, flags);
  		start_transaction(ec);
-@@ -271,15 +272,24 @@ static int ec_check_ibf0(struct acpi_ec *ec)
+@@ -271,15 +272,25 @@ static int ec_check_ibf0(struct acpi_ec *ec)
  	return (status & ACPI_EC_FLAG_IBF) == 0;
  }
  
@@ -38,7 +38,8 @@ index 27e0b92..09fbb69 100644
 +
  static int ec_wait_ibf0(struct acpi_ec *ec)
  {
- 	unsigned long delay = jiffies + msecs_to_jiffies(ec_delay);
++	
+ 	unsigned long delay = jiffies + msecs_to_jiffies(ACPI_EC_DELAY);
  	/* interrupt wait manually if GPE mode is not active */
  	while (time_before(jiffies, delay))
  		if (wait_event_timeout(ec->wait, ec_check_ibf0(ec),
diff --git a/acpi-update-battery-information-on-notification-0x81.patch b/acpi-update-battery-information-on-notification-0x81.patch
index 3704b054d..3a8d619ce 100644
--- a/acpi-update-battery-information-on-notification-0x81.patch
+++ b/acpi-update-battery-information-on-notification-0x81.patch
@@ -56,9 +56,9 @@ index 95649d3..2a774a8 100644
  }
  
 @@ -914,7 +923,8 @@ static void acpi_battery_notify(struct acpi_device *device, u32 event)
- 	if (!battery)
- 		return;
+ #ifdef CONFIG_ACPI_SYSFS_POWER
  	old = battery->bat.dev;
+ #endif
 -	acpi_battery_update(battery);
 +	acpi_battery_update(battery, (event == ACPI_BATTERY_NOTIFY_INFO ? true
 +				      : false));
diff --git a/btusb-macbookpro-6-2.patch b/btusb-macbookpro-6-2.patch
new file mode 100644
index 000000000..d65a30960
--- /dev/null
+++ b/btusb-macbookpro-6-2.patch
@@ -0,0 +1,41 @@
+From kernel-bounces@lists.fedoraproject.org Fri Sep 17 17:09:15 2010
+From: Will Woods <wwoods@redhat.com>
+To: Marcel Holtmann <marcel@holtmann.org>
+Subject: [PATCH 2/2] bluetooth: add support for controller in MacBookPro6,2
+Date: Fri, 17 Sep 2010 17:09:21 -0400
+
+Once again the device class is ff(vend.) instead of e0(wlcon).
+
+output from 'usb-devices':
+T:  Bus=01 Lev=03 Prnt=03 Port=02 Cnt=03 Dev#=  8 Spd=12  MxCh= 0
+D:  Ver= 2.00 Cls=ff(vend.) Sub=01 Prot=01 MxPS=64 #Cfgs=  1
+P:  Vendor=05ac ProdID=8218 Rev=00.22
+S:  Manufacturer=Apple Inc.
+S:  Product=Bluetooth USB Host Controller
+C:  #Ifs= 4 Cfg#= 1 Atr=e0 MxPwr=0mA
+I:  If#= 0 Alt= 0 #EPs= 3 Cls=ff(vend.) Sub=01 Prot=01 Driver=(none)
+I:  If#= 1 Alt= 0 #EPs= 2 Cls=e0(wlcon) Sub=01 Prot=01 Driver=(none)
+I:  If#= 2 Alt= 0 #EPs= 2 Cls=ff(vend.) Sub=ff Prot=ff Driver=(none)
+I:  If#= 3 Alt= 0 #EPs= 0 Cls=fe(app. ) Sub=01 Prot=01 Driver=(none)
+
+Signed-off-by: Will Woods <wwoods@redhat.com>
+---
+ drivers/bluetooth/btusb.c |    3 +++
+ 1 files changed, 3 insertions(+), 0 deletions(-)
+
+diff --git a/drivers/bluetooth/btusb.c b/drivers/bluetooth/btusb.c
+index eac44e4..320e798 100644
+--- a/drivers/bluetooth/btusb.c
++++ b/drivers/bluetooth/btusb.c
+@@ -65,6 +65,9 @@ static struct usb_device_id btusb_table[] = {
+ 	/* Apple iMac11,1 */
+ 	{ USB_DEVICE(0x05ac, 0x8215) },
+ 
++	/* Apple MacBookPro6,2 */
++	{ USB_DEVICE(0x05ac, 0x8218) },
++
+ 	/* AVM BlueFRITZ! USB v2.0 */
+ 	{ USB_DEVICE(0x057c, 0x3800) },
+ 
+-- 
+1.7.2.3
diff --git a/btusb-macbookpro-7-1.patch b/btusb-macbookpro-7-1.patch
new file mode 100644
index 000000000..3e07dd583
--- /dev/null
+++ b/btusb-macbookpro-7-1.patch
@@ -0,0 +1,42 @@
+From kernel-bounces@lists.fedoraproject.org Fri Sep 17 17:09:18 2010
+From: Will Woods <wwoods@redhat.com>
+To: Marcel Holtmann <marcel@holtmann.org>
+Subject: [PATCH 1/2] bluetooth: add support for controller in MacBookPro7,1
+Date: Fri, 17 Sep 2010 17:09:20 -0400
+
+As with iMac11,1 the device class is ff(vend.) instead of e0(wlcon).
+
+output from 'usb-devices':
+T:  Bus=04 Lev=02 Prnt=04 Port=00 Cnt=01 Dev#=  5 Spd=12  MxCh= 0
+D:  Ver= 2.00 Cls=ff(vend.) Sub=01 Prot=01 MxPS=64 #Cfgs=  1
+P:  Vendor=05ac ProdID=8213 Rev=01.86
+S:  Manufacturer=Apple Inc.
+S:  Product=Bluetooth USB Host Controller
+S:  SerialNumber=58B0359C28ED
+C:  #Ifs= 4 Cfg#= 1 Atr=e0 MxPwr=0mA
+I:  If#= 0 Alt= 0 #EPs= 3 Cls=ff(vend.) Sub=01 Prot=01 Driver=btusb
+I:  If#= 1 Alt= 0 #EPs= 2 Cls=e0(wlcon) Sub=01 Prot=01 Driver=btusb
+I:  If#= 2 Alt= 0 #EPs= 2 Cls=ff(vend.) Sub=ff Prot=ff Driver=(none)
+I:  If#= 3 Alt= 0 #EPs= 0 Cls=fe(app. ) Sub=01 Prot=00 Driver=(none)
+
+Signed-off-by: Will Woods <wwoods@redhat.com>
+---
+ drivers/bluetooth/btusb.c |    3 +++
+ 1 files changed, 3 insertions(+), 0 deletions(-)
+
+diff --git a/drivers/bluetooth/btusb.c b/drivers/bluetooth/btusb.c
+index d22ce3c..eac44e4 100644
+--- a/drivers/bluetooth/btusb.c
++++ b/drivers/bluetooth/btusb.c
+@@ -59,6 +59,9 @@ static struct usb_device_id btusb_table[] = {
+ 	/* Generic Bluetooth USB device */
+ 	{ USB_DEVICE_INFO(0xe0, 0x01, 0x01) },
+ 
++	/* Apple MacBookPro7,1 */
++	{ USB_DEVICE(0x05ac, 0x8213) },
++
+ 	/* Apple iMac11,1 */
+ 	{ USB_DEVICE(0x05ac, 0x8215) },
+ 
+-- 
+1.7.2.3
diff --git a/config-arm b/config-arm
index 22335ef40..8d7e46a49 100644
--- a/config-arm
+++ b/config-arm
@@ -122,10 +122,3 @@ CONFIG_AUTO_ZRELADDR=y
 # CONFIG_DEPRECATED_PARAM_STRUCT is not set
 
 # CONFIG_ARM_SP805_WATCHDOG is not set
-
-CONFIG_PM_OPP=y
-
-CONFIG_SECCOMP=y
-CONFIG_STRICT_DEVMEM=y
-
-# CONFIG_AMBA_PL08X is not set
diff --git a/config-debug b/config-debug
index 5366a8461..441445364 100644
--- a/config-debug
+++ b/config-debug
@@ -74,7 +74,6 @@ CONFIG_DEBUG_CFQ_IOSCHED=y
 CONFIG_DRBD_FAULT_INJECTION=y
 
 CONFIG_ATH_DEBUG=y
-CONFIG_CARL9170_DEBUGFS=y
 CONFIG_IWLWIFI_DEVICE_TRACING=y
 
 CONFIG_DEBUG_OBJECTS_WORK=y
@@ -85,7 +84,7 @@ CONFIG_DMADEVICES_VDEBUG=y
 
 CONFIG_PM_ADVANCED_DEBUG=y
 
-CONFIG_CEPH_LIB_PRETTYDEBUG=y
+CONFIG_CEPH_FS_PRETTYDEBUG=y
 CONFIG_QUOTA_DEBUG=y
 
 # CONFIG_CRYPTO_MANAGER_DISABLE_TESTS is not set
@@ -94,6 +93,3 @@ CONFIG_PCI_DEFAULT_USE_CRS=y
 
 CONFIG_KGDB_KDB=y
 CONFIG_KDB_KEYBOARD=y
-
-CONFIG_DEBUG_OBJECTS_PERCPU_COUNTER=y
-CONFIG_TEST_LIST_SORT=y
diff --git a/config-generic b/config-generic
index 492df86ec..28597be1f 100644
--- a/config-generic
+++ b/config-generic
@@ -119,13 +119,12 @@ CONFIG_PCMCIA_IOCTL=y
 
 CONFIG_PCCARD=y
 CONFIG_MMC=m
+CONFIG_MMC_BLOCK_BOUNCE=y
 CONFIG_SDIO_UART=m
 # CONFIG_MMC_TEST is not set
 # CONFIG_MMC_DEBUG is not set
 # CONFIG_MMC_UNSAFE_RESUME is not set
 CONFIG_MMC_BLOCK=m
-CONFIG_MMC_BLOCK_MINORS=8
-CONFIG_MMC_BLOCK_BOUNCE=y
 CONFIG_MMC_SDHCI=m
 CONFIG_MMC_SDHCI_PCI=m
 CONFIG_MMC_SDRICOH_CS=m
@@ -135,7 +134,6 @@ CONFIG_MMC_VIA_SDMMC=m
 CONFIG_MMC_SDHCI_PLTFM=m
 CONFIG_MMC_CB710=m
 CONFIG_MMC_RICOH_MMC=y
-CONFIG_MMC_USHC=m
 
 CONFIG_CB710_CORE=m
 # CONFIG_CB710_DEBUG is not set
@@ -156,7 +154,6 @@ CONFIG_INFINIBAND_AMSO1100=m
 # CONFIG_INFINIBAND_AMSO1100_DEBUG is not set
 CONFIG_INFINIBAND_CXGB3=m
 CONFIG_INFINIBAND_CXGB4=m
-CONFIG_SCSI_CXGB4_ISCSI=m
 # CONFIG_INFINIBAND_CXGB3_DEBUG is not set
 CONFIG_MLX4_INFINIBAND=m
 CONFIG_INFINIBAND_NES=m
@@ -368,7 +365,6 @@ CONFIG_BLK_DEV_DELKIN=m
 CONFIG_LBDAF=y
 CONFIG_BLK_DEV_BSG=y
 CONFIG_BLK_DEV_INTEGRITY=y
-CONFIG_BLK_DEV_THROTTLING=y
 
 
 #
@@ -763,7 +759,6 @@ CONFIG_IP_ROUTE_VERBOSE=y
 CONFIG_IP_NF_SECURITY=m
 # CONFIG_IP_PNP is not set
 CONFIG_NET_IPIP=m
-CONFIG_NET_IPGRE_DEMUX=m
 CONFIG_NET_IPGRE=m
 CONFIG_NET_IPGRE_BROADCAST=y
 CONFIG_IP_MROUTE=y
@@ -802,7 +797,6 @@ CONFIG_IP_VS_SH=m
 CONFIG_IP_VS_SED=m
 CONFIG_IP_VS_NQ=m
 CONFIG_IP_VS_FTP=m
-CONFIG_IP_VS_PE_SIP=m
 
 CONFIG_IPV6=m
 CONFIG_IPV6_PRIVACY=y
@@ -1106,7 +1100,6 @@ CONFIG_NET_ACT_NAT=m
 CONFIG_NET_ACT_PEDIT=m
 CONFIG_NET_ACT_POLICE=m
 CONFIG_NET_ACT_SIMP=m
-CONFIG_NET_ACT_CSUM=m
 
 CONFIG_DCB=y
 
@@ -1157,7 +1150,6 @@ CONFIG_ATM_FORE200E_TX_RETRY=16
 CONFIG_ATM_FORE200E_DEBUG=0
 
 CONFIG_ATM_HE=m
-CONFIG_PPTP=m
 CONFIG_PPPOATM=m
 CONFIG_PPPOL2TP=m
 CONFIG_ATM_NICSTAR=m
@@ -1203,7 +1195,6 @@ CONFIG_FIXED_PHY=y
 CONFIG_MDIO_BITBANG=m
 CONFIG_NATIONAL_PHY=m
 CONFIG_ICPLUS_PHY=m
-CONFIG_BCM63XX_PHY=m
 CONFIG_LSI_ET1011C_PHY=m
 CONFIG_LXT_PHY=m
 CONFIG_MARVELL_PHY=m
@@ -1221,12 +1212,6 @@ CONFIG_NET_VENDOR_3COM=y
 CONFIG_VORTEX=m
 CONFIG_TYPHOON=m
 CONFIG_DNET=m
-CONFIG_STMMAC_ETH=m
-# CONFIG_STMMAC_DA is not set
-# CONFIG_STMMAC_DUAL_MAC is not set
-# CONFIG_STMMAC_TIMER is not set
-
-# CONFIG_PCH_GBE is not set
 
 #
 # Tulip family network device support
@@ -1344,7 +1329,6 @@ CONFIG_MLX4_EN=m
 # CONFIG_MLX4_DEBUG is not set
 CONFIG_QLCNIC=m
 CONFIG_QLGE=m
-CONFIG_BNA=m
 CONFIG_SFC=m
 CONFIG_SFC_MTD=y
 CONFIG_BE2NET=m
@@ -1424,9 +1408,6 @@ CONFIG_ATH9K=m
 CONFIG_ATH9K_DEBUGFS=y
 CONFIG_ATH9K_HTC=m
 # CONFIG_ATH9K_HTC_DEBUGFS is not set
-CONFIG_ATH9K_RATE_CONTROL=y
-CONFIG_CARL9170=m
-CONFIG_CARL9170_LEDS=y
 CONFIG_AT76C50X_USB=m
 CONFIG_AIRO=m
 CONFIG_AIRO_CS=m
@@ -1481,7 +1462,6 @@ CONFIG_IWL4965=y
 CONFIG_IWL5000=y
 CONFIG_IWL3945=m
 CONFIG_IWL3945_SPECTRUM_MEASUREMENT=y
-# CONFIG_IWLWIFI_DEBUG_EXPERIMENTAL_UCODE is not set
 CONFIG_IWM=m
 # CONFIG_IWM_DEBUG is not set
 # CONFIG_IWM_TRACING is not set
@@ -1582,8 +1562,6 @@ CONFIG_CAN_EMS_USB=m
 CONFIG_CAN_ESD_USB2=m
 CONFIG_CAN_KVASER_PCI=m
 CONFIG_CAN_PLX_PCI=m
-CONFIG_CAN_TSCAN1=m
-CONFIG_PCH_CAN=m
 CONFIG_NETROM=m
 CONFIG_ROSE=m
 CONFIG_MKISS=m
@@ -1822,7 +1800,6 @@ CONFIG_INPUT_TABLET=y
 CONFIG_TABLET_USB_ACECAD=m
 CONFIG_TABLET_USB_AIPTEK=m
 CONFIG_TABLET_USB_GTCO=m
-CONFIG_TABLET_USB_HANWANG=m
 CONFIG_TABLET_USB_KBTAB=m
 CONFIG_TABLET_USB_WACOM=m
 
@@ -1846,7 +1823,6 @@ CONFIG_SERIO_I8042=y
 CONFIG_SERIO_SERPORT=y
 CONFIG_SERIO_RAW=m
 CONFIG_SERIO_ALTERA_PS2=m
-# CONFIG_SERIO_PS2MULT is not set
 
 # CONFIG_SERIO_CT82C710 is not set
 # CONFIG_SERIO_PARKBD is not set
@@ -1940,7 +1916,6 @@ CONFIG_TOUCHSCREEN_USB_E2I=y
 CONFIG_TOUCHSCREEN_USB_COMPOSITE=m
 # CONFIG_TOUCHSCREEN_WM97XX is not set
 CONFIG_TOUCHSCREEN_W90X900=m
-# CONFIG_TOUCHSCREEN_BU21013 is not set
 
 CONFIG_INPUT_MISC=y
 CONFIG_INPUT_PCSPKR=m
@@ -2066,7 +2041,6 @@ CONFIG_I2C_ALGOPCA=m
 # CONFIG_I2C_I801 is not set
 # CONFIG_I2C_ISCH is not set
 # CONFIG_I2C_NFORCE2_S4985 is not set
-# CONFIG_I2C_INTEL_MID is not set
 
 CONFIG_EEPROM_AT24=m
 CONFIG_EEPROM_LEGACY=m
@@ -2161,11 +2135,6 @@ CONFIG_SENSORS_SMSC47M192=m
 CONFIG_SENSORS_SMSC47B397=m
 CONFIG_SENSORS_THMC50=m
 CONFIG_SENSORS_TMP401=m
-CONFIG_APDS9802ALS=m
-CONFIG_ISL29020=m
-CONFIG_ISL29003=m
-CONFIG_SENSORS_BH1770=m
-CONFIG_SENSORS_APDS990X=m
 CONFIG_SENSORS_TSL2550=m
 CONFIG_SENSORS_VIA686A=m
 CONFIG_SENSORS_VIA_CPUTEMP=m
@@ -2190,18 +2159,13 @@ CONFIG_SENSORS_ADT7411=m
 CONFIG_SENSORS_ASC7621=m
 CONFIG_SENSORS_EMC1403=m
 CONFIG_SENSORS_TMP102=m
-CONFIG_SENSORS_LTC4261=m
 # CONFIG_SENSORS_BH1780 is not set
 # CONFIG_SENSORS_JC42 is not set
 # CONFIG_SENSORS_SMM665 is not set
 # CONFIG_SENSORS_EMC2103 is not set
-# CONFIG_SENSORS_GPIO_FAN is not set
-CONFIG_SENSORS_W83795=m
-# CONFIG_SENSORS_W83795_FANCTRL is not set
 
 # CONFIG_HMC6352 is not set
 # CONFIG_BMP085 is not set
-# CONFIG_PCH_PHUB is not set
 
 CONFIG_W1=m
 CONFIG_W1_CON=y
@@ -2340,7 +2304,6 @@ CONFIG_AGP_VIA=y
 CONFIG_AGP_EFFICEON=y
 CONFIG_VGA_ARB=y
 CONFIG_VGA_ARB_MAX_GPUS=16
-CONFIG_STUB_POULSBO=m
 CONFIG_DRM=m
 CONFIG_DRM_TDFX=m
 CONFIG_DRM_R128=m
@@ -2399,8 +2362,7 @@ CONFIG_VIDEO_AU0828=m
 CONFIG_VIDEO_BT848=m
 CONFIG_VIDEO_BT848_DVB=y
 CONFIG_VIDEO_BWQCAM=m
-CONFIG_VIDEO_SR030PC30=m
-CONFIG_VIDEO_CAFE_CCIC=m
+# CONFIG_VIDEO_CAFE_CCIC is not set
 # CONFIG_VIDEO_CPIA is not set
 CONFIG_VIDEO_CPIA2=m
 CONFIG_VIDEO_CQCAM=m
@@ -2432,7 +2394,6 @@ CONFIG_VIDEO_SAA6588=m
 CONFIG_VIDEO_SAA7134=m
 CONFIG_VIDEO_SAA7134_ALSA=m
 CONFIG_VIDEO_SAA7134_DVB=m
-CONFIG_VIDEO_SAA7134_RC=y
 CONFIG_VIDEO_STRADIS=m
 CONFIG_VIDEO_USBVISION=m
 CONFIG_VIDEO_W9966=m
@@ -2475,7 +2436,6 @@ CONFIG_MEDIA_TUNER_MXL5005S=m
 CONFIG_MEDIA_TUNER_MXL5007T=m
 CONFIG_MEDIA_TUNER_MC44S803=m
 CONFIG_MEDIA_TUNER_MAX2165=m
-CONFIG_MEDIA_TUNER_TDA18218=m
 
 #
 # Digital Video Broadcasting Devices
@@ -2515,8 +2475,6 @@ CONFIG_DVB_SP8870=m
 CONFIG_DVB_SP887X=m
 CONFIG_DVB_CX22700=m
 CONFIG_DVB_CX22702=m
-CONFIG_DVB_S5H1432=m
-CONFIG_DVB_IX2505V=m
 CONFIG_DVB_L64781=m
 CONFIG_DVB_NXT6000=m
 CONFIG_DVB_MT352=m
@@ -2621,7 +2579,6 @@ CONFIG_DVB_USB_UMT_010=m
 CONFIG_DVB_USB_VP702X=m
 CONFIG_DVB_USB_VP7045=m
 CONFIG_DVB_USB_AZ6027=m
-CONFIG_DVB_USB_LME2510=m
 
 CONFIG_DVB_PT1=m
 
@@ -2643,11 +2600,9 @@ CONFIG_IR_RC5_DECODER=m
 CONFIG_IR_RC6_DECODER=m
 CONFIG_IR_JVC_DECODER=m
 CONFIG_IR_SONY_DECODER=m
-CONFIG_IR_RC5_SZ_DECODER=m
 CONFIG_IR_LIRC_CODEC=m
 CONFIG_IR_IMON=m
 CONFIG_IR_MCEUSB=m
-CONFIG_IR_NUVOTON=m
 
 CONFIG_V4L_MEM2MEM_DRIVERS=y
 # CONFIG_VIDEO_MEM2MEM_TESTDEV is not set
@@ -2792,7 +2747,6 @@ CONFIG_SND_DYNAMIC_MINORS=y
 # Generic devices
 #
 CONFIG_SND_DUMMY=m
-CONFIG_SND_ALOOP=m
 CONFIG_SND_VIRMIDI=m
 CONFIG_SND_MTPAV=m
 CONFIG_SND_MTS64=m
@@ -2811,8 +2765,6 @@ CONFIG_SND_AD1889=m
 # CONFIG_SND_WAVEFRONT is not set
 # CONFIG_SND_MSND_PINNACLE is not set
 # CONFIG_SND_MSND_CLASSIC is not set
-# CONFIG_SND_AZT1605 is not set
-# CONFIG_SND_AZT2316 is not set
 
 #
 # PCI devices
@@ -2867,7 +2819,6 @@ CONFIG_SND_HDA_CODEC_CMEDIA=y
 CONFIG_SND_HDA_CODEC_INTELHDMI=y
 CONFIG_SND_HDA_CODEC_SI3054=y
 CONFIG_SND_HDA_CODEC_NVHDMI=y
-CONFIG_SND_HDA_CODEC_HDMI=y
 CONFIG_SND_HDA_GENERIC=y
 CONFIG_SND_HDA_POWER_SAVE=y
 CONFIG_SND_HDA_POWER_SAVE_DEFAULT=0
@@ -2985,8 +2936,6 @@ CONFIG_USB_STORAGE_ONETOUCH=y
 CONFIG_USB_STORAGE_ALAUDA=y
 CONFIG_USB_STORAGE_KARMA=y
 # CONFIG_USB_LIBUSUAL is not set
-CONFIG_USB_UAS=m
-
 
 #
 # USB Human Interface Devices (HID)
@@ -3000,7 +2949,6 @@ CONFIG_HID=m
 CONFIG_HIDRAW=y
 CONFIG_HID_PID=y
 CONFIG_LOGITECH_FF=y
-CONFIG_LOGIWII_FF=y
 CONFIG_LOGIRUMBLEPAD2_FF=y
 CONFIG_PANTHERLORD_FF=y
 CONFIG_THRUSTMASTER_FF=y
@@ -3042,9 +2990,7 @@ CONFIG_HID_ZEROPLUS=m
 CONFIG_HID_ZYDACRON=m
 CONFIG_HID_ACRUX_FF=m
 CONFIG_HID_ELECOM=m
-CONFIG_HID_UCLOGIC=m
-CONFIG_HID_WALTOP=m
-CONFIG_HID_ROCCAT_PYRA=m
+
 
 #
 # USB Imaging devices
@@ -3099,8 +3045,6 @@ CONFIG_USB_GSPCA_PAC7302=m
 CONFIG_USB_GSPCA_STV0680=m
 CONFIG_USB_GL860=m
 CONFIG_USB_GSPCA_JEILINJ=m
-CONFIG_USB_GSPCA_KONICA=m
-CONFIG_USB_GSPCA_XIRLINK_CIT=m
 CONFIG_USB_GSPCA_SPCA1528=m
 CONFIG_USB_GSPCA_SQ930X=m
 
@@ -3125,8 +3069,6 @@ CONFIG_SOC_CAMERA_OV772X=m
 CONFIG_SOC_CAMERA_MT9T112=m
 CONFIG_SOC_CAMERA_RJ54N1=m
 CONFIG_SOC_CAMERA_OV9640=m
-CONFIG_SOC_CAMERA_OV6650=m
-CONFIG_SOC_CAMERA_IMX074=m
 
 #
 # USB Network adaptors
@@ -3149,7 +3091,6 @@ CONFIG_USB_NET_RNDIS_HOST=m
 CONFIG_USB_NET_CDC_SUBSET=m
 CONFIG_USB_NET_CDC_EEM=m
 CONFIG_USB_NET_ZAURUS=m
-CONFIG_USB_NET_CX82310_ETH=m
 CONFIG_USB_NET_INT51X1=m
 CONFIG_USB_CDC_PHONET=m
 CONFIG_USB_IPHETH=m
@@ -3242,7 +3183,6 @@ CONFIG_USB_SERIAL_QCAUX=m
 CONFIG_USB_SERIAL_VIVOPAY_SERIAL=m
 CONFIG_USB_SERIAL_DEBUG=m
 CONFIG_USB_SERIAL_SSU100=m
-CONFIG_USB_SERIAL_SAMBA=m
 
 CONFIG_USB_EZUSB=y
 CONFIG_USB_EMI62=m
@@ -3276,7 +3216,6 @@ CONFIG_USB_FILE_STORAGE=m
 # CONFIG_USB_OXU210HP_HCD is not set
 CONFIG_USB_IOWARRIOR=m
 CONFIG_USB_ISIGHTFW=m
-CONFIG_USB_YUREX=m
 CONFIG_USB_VST=m
 CONFIG_USB_LCD=m
 CONFIG_USB_LD=m
@@ -3331,7 +3270,6 @@ CONFIG_RTC_DRV_DS3232=m
 CONFIG_RTC_DRV_ISL12022=m
 
 CONFIG_MFD_SUPPORT=y
-CONFIG_MFD_VX855=m
 CONFIG_MFD_SM501=m
 CONFIG_MFD_SM501_GPIO=y
 # CONFIG_MFD_TC6393XB is not set
@@ -3352,7 +3290,6 @@ CONFIG_MFD_WM8400=m
 # CONFIG_ABX500_CORE is not set
 # CONFIG_MFD_RDC321X is not set
 # CONFIG_MFD_JANZ_CMODIO is not set
-# CONFIG_MFD_WM831X_I2C is not set
 
 #
 # File systems
@@ -3411,8 +3348,6 @@ CONFIG_EXOFS_FS=m
 CONFIG_NILFS2_FS=m
 CONFIG_LOGFS=m
 CONFIG_CEPH_FS=m
-CONFIG_BLK_DEV_RBD=m
-CONFIG_CEPH_LIB=m
 
 CONFIG_FSCACHE=m
 CONFIG_FSCACHE_STATS=y
@@ -3489,7 +3424,6 @@ CONFIG_UFS_FS=m
 # CONFIG_UFS_DEBUG is not set
 CONFIG_9P_FS=m
 CONFIG_9P_FSCACHE=y
-CONFIG_9P_FS_POSIX_ACL=y
 CONFIG_FUSE_FS=m
 CONFIG_OMFS_FS=m
 CONFIG_CUSE=m
@@ -3509,8 +3443,6 @@ CONFIG_NFSD_V3_ACL=y
 CONFIG_NFSD_V4=y
 CONFIG_NFS_FSCACHE=y
 # CONFIG_NFS_USE_LEGACY_DNS is not set
-# CONFIG_NFS_USE_NEW_IDMAPPER is not set
-# CONFIG_NFSD_DEPRECATED is not set
 CONFIG_LOCKD=m
 CONFIG_LOCKD_V4=y
 CONFIG_EXPORTFS=m
@@ -3528,8 +3460,7 @@ CONFIG_CIFS_EXPERIMENTAL=y
 CONFIG_CIFS_UPCALL=y
 CONFIG_CIFS_XATTR=y
 CONFIG_CIFS_POSIX=y
-CONFIG_CIFS_FSCACHE=y
-CONFIG_CIFS_ACL=y
+# CONFIG_CIFS_FSCACHE is not set
 CONFIG_CIFS_WEAK_PW_HASH=y
 # CONFIG_CIFS_DEBUG2 is not set
 CONFIG_CIFS_DFS_UPCALL=y
@@ -3671,7 +3602,6 @@ CONFIG_KGDB_LOW_LEVEL_TRAP=y
 # Security options
 #
 CONFIG_SECURITY=y
-# CONFIG_SECURITY_DMESG_RESTRICT is not set
 CONFIG_SECURITY_NETWORK=y
 CONFIG_SECURITY_NETWORK_XFRM=y
 # CONFIG_SECURITY_PATH is not set
@@ -3811,12 +3741,10 @@ CONFIG_CGROUP_DEVICE=y
 CONFIG_CGROUP_FREEZER=y
 CONFIG_CGROUP_SCHED=y
 CONFIG_CGROUP_MEM_RES_CTLR=y
-CONFIG_CGROUP_MEM_RES_CTLR_SWAP=y # XXX disabled by default, pass 'swapaccount'
-# CONFIG_CGROUP_MEM_RES_CTLR_SWAP_ENABLED is not set 
+CONFIG_CGROUP_MEM_RES_CTLR_SWAP=y
 CONFIG_BLK_CGROUP=y
 # CONFIG_DEBUG_BLK_CGROUP is not set
 
-# CONFIG_SYSFS_DEPRECATED is not set
 # CONFIG_SYSFS_DEPRECATED_V2 is not set
 
 CONFIG_RELAY=y
@@ -4022,8 +3950,6 @@ CONFIG_LEDS_REGULATOR=m
 CONFIG_LEDS_LT3593=m
 CONFIG_LEDS_TRIGGER_GPIO=m
 CONFIG_LEDS_INTEL_SS4200=m
-CONFIG_LEDS_LP5521=m
-CONFIG_LEDS_LP5523=m
 
 CONFIG_DMADEVICES=y
 CONFIG_DMA_ENGINE=y
@@ -4077,8 +4003,6 @@ CONFIG_APM_POWER=m
 CONFIG_WM831X_POWER=m
 # CONFIG_BATTERY_DS2760 is not set
 # CONFIG_BATTERY_DS2782 is not set
-# CONFIG_BATTERY_BQ20Z75 is not set
-# CONFIG_CHARGER_ISP1704 is not set
 CONFIG_BATTERY_PMU=m
 CONFIG_BATTERY_BQ27x00=m
 CONFIG_BATTERY_MAX17040=m
@@ -4139,10 +4063,13 @@ CONFIG_RESOURCE_COUNTERS=y
 #FIXME: x86 generic?
 CONFIG_LEDS_CLEVO_MAIL=m
 CONFIG_I8K=m
+CONFIG_EDAC_I3000=m
+CONFIG_EDAC_X38=m
 CONFIG_INPUT_APANEL=m
 
 # CONFIG_INTEL_MENLOW is not set
 CONFIG_ENCLOSURE_SERVICES=m
+CONFIG_ISL29003=m
 CONFIG_IPWIRELESS=m
 CONFIG_RTC_DRV_DS1511=m
 
@@ -4188,7 +4115,6 @@ CONFIG_NET_SCH_MULTIQ=m
 CONFIG_NET_ACT_SKBEDIT=m
 
 CONFIG_PHONET=m
-# CONFIG_PHONET_PIPECTRLR is not set
 
 CONFIG_ICS932S401=m
 # CONFIG_C2PORT is not set
@@ -4271,13 +4197,6 @@ CONFIG_USB_ATMEL=m
 # CONFIG_EASYCAP is not set
 # CONFIG_SOLO6X10 is not set
 # CONFIG_ACPI_QUICKSTART is not set
-# CONFIG_BRCM80211 is not set
-# CONFIG_R8712U is not set
-# CONFIG_ATH6K_LEGACY is not set
-# CONFIG_USB_ENESTORAGE is not set
-# CONFIG_BCM_WIMAX is not set
-# CONFIG_FT1000 is not set
-# CONFIG_SPEAKUP is not set
 
 #
 # Android
@@ -4399,9 +4318,7 @@ CONFIG_GPIO_SYSFS=y
 # CONFIG_GPIO_SCH is not set
 # CONFIG_GPIO_LANGWELL is not set
 # CONFIG_GPIO_RDC321X is not set
-# CONFIG_GPIO_BASIC_MMIO is not set
-# CONFIG_GPIO_VX855 is not set
-# CONFIG_GPIO_PCH is not set
+
 
 CONFIG_KSYM_TRACER=y
 CONFIG_PROFILE_KSYM_TRACER=y
@@ -4417,9 +4334,3 @@ CONFIG_IR_STREAMZAP=m
 # CONFIG_MFD_STMPE is not set
 # CONFIG_MFD_MAX8998 is not set
 # CONFIG_MFD_TPS6586X is not set
-
-CONFIG_SPARSE_RCU_POINTER=y
-
-# CONFIG_PM_OPP is not set
-
-CONFIG_BKL=y
diff --git a/config-nodebug b/config-nodebug
index 3f289940f..b4472f9a5 100644
--- a/config-nodebug
+++ b/config-nodebug
@@ -73,9 +73,8 @@ CONFIG_DEBUG_OBJECTS_ENABLE_DEFAULT=1
 
 # CONFIG_DRBD_FAULT_INJECTION is not set
 
-CONFIG_ATH_DEBUG=y
-CONFIG_CARL9170_DEBUGFS=y
-CONFIG_IWLWIFI_DEVICE_TRACING=y
+# CONFIG_ATH_DEBUG is not set
+# CONFIG_IWLWIFI_DEVICE_TRACING is not set
 
 # CONFIG_DEBUG_OBJECTS_WORK is not set
 # CONFIG_DEBUG_STRICT_USER_COPY_CHECKS is not set
@@ -85,8 +84,8 @@ CONFIG_IWLWIFI_DEVICE_TRACING=y
 
 CONFIG_PM_ADVANCED_DEBUG=y
 
-CONFIG_CEPH_LIB_PRETTYDEBUG=y
-CONFIG_QUOTA_DEBUG=y
+# CONFIG_CEPH_FS_PRETTYDEBUG is not set
+# CONFIG_QUOTA_DEBUG is not set
 
 CONFIG_CRYPTO_MANAGER_DISABLE_TESTS=y
 
@@ -94,6 +93,3 @@ CONFIG_PCI_DEFAULT_USE_CRS=y
 
 CONFIG_KGDB_KDB=y
 CONFIG_KDB_KEYBOARD=y
-
-CONFIG_DEBUG_OBJECTS_PERCPU_COUNTER=y
-CONFIG_TEST_LIST_SORT=y
diff --git a/config-powerpc-generic b/config-powerpc-generic
index 1b05f4a90..0effe3d0d 100644
--- a/config-powerpc-generic
+++ b/config-powerpc-generic
@@ -334,5 +334,3 @@ CONFIG_SERIAL_GRLIB_GAISLER_APBUART=m
 # CONFIG_GPIO_SCH is not set
 
 # CONFIG_PPC_MPC512x is not set
-
-CONFIG_KVM_GUEST=y
diff --git a/config-s390x b/config-s390x
index 780fc9146..d9294023d 100644
--- a/config-s390x
+++ b/config-s390x
@@ -228,8 +228,3 @@ CONFIG_SMSGIUCV_EVENT=m
 CONFIG_VMCP=y
 
 CONFIG_ZFCP_DIF=y
-
-CONFIG_SCHED_MC=y
-CONFIG_SCHED_BOOK=y
-
-CONFIG_STRICT_DEVMEM=y
diff --git a/config-sparc64-generic b/config-sparc64-generic
index 61612cb71..1d21fa781 100644
--- a/config-sparc64-generic
+++ b/config-sparc64-generic
@@ -201,5 +201,3 @@ CONFIG_GRETH=m
 CONFIG_FB_XVR1000=y
 
 CONFIG_CRYPTO_DEV_NIAGARA2=y
-
-CONFIG_JUMP_LABEL=y
diff --git a/config-x86-generic b/config-x86-generic
index 68bd39b2d..ba3993563 100644
--- a/config-x86-generic
+++ b/config-x86-generic
@@ -284,9 +284,6 @@ CONFIG_EDAC_R82600=m
 CONFIG_EDAC_AMD8131=m
 CONFIG_EDAC_AMD8111=m
 CONFIG_EDAC_I7CORE=m
-CONFIG_EDAC_I3000=m
-CONFIG_EDAC_I7300=m
-CONFIG_EDAC_X38=m
 
 CONFIG_SCHED_MC=y
 
@@ -312,7 +309,6 @@ CONFIG_EEEPC_LAPTOP=m
 CONFIG_EEEPC_WMI=m
 CONFIG_FUJITSU_LAPTOP=m
 # CONFIG_FUJITSU_LAPTOP_DEBUG is not set
-CONFIG_IDEAPAD_LAPTOP=m
 CONFIG_MSI_LAPTOP=m
 CONFIG_SONY_LAPTOP=m
 CONFIG_DELL_LAPTOP=m
@@ -356,7 +352,6 @@ CONFIG_PARAVIRT=y
 # CONFIG_PARAVIRT_SPINLOCKS is not set
 CONFIG_KVM_CLOCK=y
 CONFIG_KVM_GUEST=y
-CONFIG_KVM_MMU_AUDIT=y # default $x would be nice...
 CONFIG_LGUEST_GUEST=y
 CONFIG_VMI=y
 
@@ -370,7 +365,6 @@ CONFIG_XEN_FBDEV_FRONTEND=y
 CONFIG_XEN_KBDDEV_FRONTEND=y
 CONFIG_XEN_BLKDEV_FRONTEND=m
 CONFIG_XEN_NETDEV_FRONTEND=m
-CONFIG_XEN_PCIDEV_FRONTEND=m
 CONFIG_XENFS=m
 CONFIG_XEN_COMPAT_XENFS=y
 
@@ -476,7 +470,7 @@ CONFIG_SBC_FITPC2_WATCHDOG=m
 CONFIG_EDAC_I3200=m
 CONFIG_EDAC_DECODE_MCE=m
 
-# CONFIG_GPIO_LANGWELL is not set
+CONFIG_GPIO_LANGWELL=y
 
 # CONFIG_INTEL_TXT is not set
 
@@ -512,17 +506,4 @@ CONFIG_XEN_PLATFORM_PCI=m
 # CONFIG_ACPI_QUICKSTART is not set
 CONFIG_IDEAPAD_ACPI=m
 CONFIG_INTEL_IPS=m
-# CONFIG_IBM_RTL is not set
 
-CONFIG_OLPC_XO1=m
-CONFIG_XO1_RFKILL=m
-CONFIG_VIDEO_VIA_CAMERA=m
-
-CONFIG_EDAC_MCE_INJ=m
-CONFIG_IRQ_TIME_ACCOUNTING=y
-CONFIG_X86_RESERVE_LOW=64
-
-CONFIG_PCH_GBE=m
-CONFIG_PCH_PHUB=m
-
-CONFIG_JUMP_LABEL=y
diff --git a/config-x86_64-generic b/config-x86_64-generic
index 0498d8581..b2e4a2531 100644
--- a/config-x86_64-generic
+++ b/config-x86_64-generic
@@ -204,9 +204,6 @@ CONFIG_EDAC_AMD64=m
 # CONFIG_EDAC_AMD64_ERROR_INJECTION is not set
 CONFIG_EDAC_DECODE_MCE=m
 CONFIG_EDAC_I7CORE=m
-CONFIG_EDAC_I3000=m
-CONFIG_EDAC_I7300=m
-CONFIG_EDAC_X38=m
 
 CONFIG_SCHED_MC=y
 
@@ -287,7 +284,6 @@ CONFIG_PARAVIRT=y
 # CONFIG_PARAVIRT_SPINLOCKS is not set
 CONFIG_KVM_CLOCK=y
 CONFIG_KVM_GUEST=y
-CONFIG_KVM_MMU_AUDIT=y
 
 CONFIG_XEN=y
 CONFIG_XEN_MAX_DOMAIN_MEMORY=32
@@ -299,7 +295,6 @@ CONFIG_XEN_FBDEV_FRONTEND=y
 CONFIG_XEN_KBDDEV_FRONTEND=y
 CONFIG_XEN_BLKDEV_FRONTEND=m
 CONFIG_XEN_NETDEV_FRONTEND=m
-CONFIG_XEN_PCIDEV_FRONTEND=m
 CONFIG_XENFS=m
 CONFIG_XEN_COMPAT_XENFS=y
 CONFIG_XEN_DEV_EVTCHN=m
@@ -424,18 +419,5 @@ CONFIG_XEN_PLATFORM_PCI=m
 # CONFIG_ACPI_QUICKSTART is not set
 CONFIG_IDEAPAD_ACPI=m
 CONFIG_INTEL_IPS=m
-CONFIG_IDEAPAD_LAPTOP=m
-# CONFIG_IBM_RTL is not set
-
-CONFIG_EDAC_MCE_INJ=m
-CONFIG_IRQ_TIME_ACCOUNTING=y
-CONFIG_X86_RESERVE_LOW=64
-
-CONFIG_PCH_GBE=m
-CONFIG_PCH_PHUB=m
-
-CONFIG_VIDEO_VIA_CAMERA=m
-
-CONFIG_JUMP_LABEL=y
 
 CONFIG_HP_ILO=m
diff --git a/debug-tty-print-dev-name.patch b/debug-tty-print-dev-name.patch
index 5c06dd991..507dfe5f1 100644
--- a/debug-tty-print-dev-name.patch
+++ b/debug-tty-print-dev-name.patch
@@ -1,15 +1,15 @@
-diff --git a/drivers/tty/tty_io.c b/drivers/tty/tty_io.c
-index 878f6d6..8d6867d 100644
---- a/drivers/tty/tty_io.c
-+++ b/drivers/tty/tty_io.c
-@@ -1329,7 +1330,11 @@ static int tty_reopen(struct tty_struct *tty)
+diff --git a/drivers/char/tty_io.c b/drivers/char/tty_io.c
+index 613c852..09c86d2 100644
+--- a/drivers/char/tty_io.c
++++ b/drivers/char/tty_io.c
+@@ -1322,7 +1322,11 @@ static int tty_reopen(struct tty_struct *tty)
  	tty->driver = driver; /* N.B. why do this every time?? */
  
  	mutex_lock(&tty->ldisc_mutex);
 -	WARN_ON(!test_bit(TTY_LDISC, &tty->flags));
 +	if (!test_bit(TTY_LDISC, &tty->flags)) {
-+		printk("%s: !test_bit(TTY_LDISC, &tty->flags) dev=%s ldisc=%s flags=%x\n",
-+			__func__, tty->name, tty->ldisc ? tty->ldisc->ops ? tty->ldisc->ops->name : NULL : NULL, tty->flags);
++		printk("%s: !test_bit(TTY_LDISC, &tty->flags) dev=%s ldisc=%s\n",
++			__func__, tty->name, tty->ldisc ? tty->ldisc->ops ? tty->ldisc->ops->name : NULL : NULL);
 +		WARN_ON(1);
 +	}
  	mutex_unlock(&tty->ldisc_mutex);
diff --git a/drm-intel-big-hammer.patch b/drm-intel-big-hammer.patch
index 97bb2e8cd..63dc016b1 100644
--- a/drm-intel-big-hammer.patch
+++ b/drm-intel-big-hammer.patch
@@ -1,21 +1,16 @@
-omgwtfbbqchainsaw?
----
- drivers/gpu/drm/i915/i915_gem.c |    5 +++++
- 1 files changed, 5 insertions(+), 0 deletions(-)
-
 diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
-index 8eb8453..36fa9d7 100644
+index 37427e4..08af9db 100644
 --- a/drivers/gpu/drm/i915/i915_gem.c
 +++ b/drivers/gpu/drm/i915/i915_gem.c
-@@ -3692,6 +3692,11 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
- 	if (ret)
- 		goto pre_mutex_err;
+@@ -2553,6 +2553,11 @@ i915_gem_execbuffer(struct drm_device *dev, void *data,
+ 
+ 	mutex_lock(&dev->struct_mutex);
  
 +	/* We don't get the flushing right for these chipsets, use the
-+	 * big hammer for now to avoid random crashiness. */
++	 * big hamer for now to avoid random crashiness. */
 +	if (IS_I85X(dev) || IS_I865G(dev))
 +		wbinvd();
 +
- 	if (dev_priv->mm.suspended) {
- 		mutex_unlock(&dev->struct_mutex);
- 		ret = -EBUSY;
+ 	i915_verify_inactive(dev, __FILE__, __LINE__);
+ 
+ 	if (dev_priv->mm.wedged) {
diff --git a/drm-intel-edp-fixes.patch b/drm-intel-edp-fixes.patch
deleted file mode 100644
index c77ef116c..000000000
--- a/drm-intel-edp-fixes.patch
+++ /dev/null
@@ -1,44 +0,0 @@
-diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
-index f737960..b1f8164 100644
---- a/drivers/gpu/drm/i915/i915_drv.c
-+++ b/drivers/gpu/drm/i915/i915_drv.c
-@@ -509,6 +509,8 @@ i915_pci_remove(struct pci_dev *pdev)
- {
- 	struct drm_device *dev = pci_get_drvdata(pdev);
- 
-+	pci_disable_device(pdev); /* core did previous enable */
-+
- 	drm_put_dev(dev);
- }
- 
-diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c
-index 300f64b..2e3db37 100644
---- a/drivers/gpu/drm/i915/intel_dp.c
-+++ b/drivers/gpu/drm/i915/intel_dp.c
-@@ -795,7 +795,8 @@ static bool ironlake_edp_panel_on (struct intel_dp *intel_dp)
- {
- 	struct drm_device *dev = intel_dp->base.base.dev;
- 	struct drm_i915_private *dev_priv = dev->dev_private;
--	u32 pp, idle_on_mask = PP_ON | PP_SEQUENCE_STATE_ON_IDLE;
-+	u32 pp, idle_on = PP_ON | PP_SEQUENCE_STATE_ON_IDLE;
-+	u32 idle_on_mask = PP_ON | PP_SEQUENCE_STATE_MASK;
- 
- 	if (I915_READ(PCH_PP_STATUS) & PP_ON)
- 		return true;
-@@ -816,7 +817,7 @@ static bool ironlake_edp_panel_on (struct intel_dp *intel_dp)
- 	 */
- 	msleep(300);
- 
--	if (wait_for((I915_READ(PCH_PP_STATUS) & idle_on_mask) == idle_on_mask,
-+	if (wait_for((I915_READ(PCH_PP_STATUS) & idle_on_mask) == idle_on,
- 		     5000))
- 		DRM_ERROR("panel on wait timed out: 0x%08x\n",
- 			  I915_READ(PCH_PP_STATUS));
-@@ -922,6 +923,7 @@ static void intel_dp_prepare(struct drm_encoder *encoder)
- 
- 	if (is_edp(intel_dp)) {
- 		ironlake_edp_backlight_off(dev);
-+		ironlake_edp_panel_off(dev);
- 		ironlake_edp_panel_on(intel_dp);
- 		if (!is_pch_edp(intel_dp))
- 			ironlake_edp_pll_on(encoder);
diff --git a/drm-intel-make-lvds-work.patch b/drm-intel-make-lvds-work.patch
index 209deb295..5ca0152da 100644
--- a/drm-intel-make-lvds-work.patch
+++ b/drm-intel-make-lvds-work.patch
@@ -1,23 +1,19 @@
- drivers/gpu/drm/i915/intel_display.c |    2 --
- 1 files changed, 0 insertions(+), 2 deletions(-)
-
-diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
-index 990f065..171a797 100644
---- a/drivers/gpu/drm/i915/intel_display.c
-+++ b/drivers/gpu/drm/i915/intel_display.c
-@@ -4568,7 +4568,6 @@ void intel_release_load_detect_pipe(struct intel_encoder *intel_encoder,
- 				    struct drm_connector *connector, int dpms_mode)
+diff -up linux-2.6.33.noarch/drivers/gpu/drm/i915/intel_display.c.orig linux-2.6.33.noarch/drivers/gpu/drm/i915/intel_display.c
+--- linux-2.6.33.noarch/drivers/gpu/drm/i915/intel_display.c.orig	2010-03-31 16:59:39.901995671 -0400
++++ linux-2.6.33.noarch/drivers/gpu/drm/i915/intel_display.c	2010-03-31 17:01:05.416996744 -0400
+@@ -3757,7 +3757,6 @@ struct drm_crtc *intel_get_load_detect_p
+ void intel_release_load_detect_pipe(struct intel_encoder *intel_encoder, int dpms_mode)
  {
- 	struct drm_encoder *encoder = &intel_encoder->base;
+ 	struct drm_encoder *encoder = &intel_encoder->enc;
 -	struct drm_device *dev = encoder->dev;
  	struct drm_crtc *crtc = encoder->crtc;
  	struct drm_encoder_helper_funcs *encoder_funcs = encoder->helper_private;
  	struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private;
-@@ -4578,7 +4577,6 @@ void intel_release_load_detect_pipe(struct intel_encoder *intel_encoder,
- 		connector->encoder = NULL;
+@@ -3767,7 +3766,6 @@ void intel_release_load_detect_pipe(stru
+ 		intel_encoder->base.encoder = NULL;
  		intel_encoder->load_detect_temp = false;
  		crtc->enabled = drm_helper_crtc_in_use(crtc);
 -		drm_helper_disable_unused_functions(dev);
  	}
  
- 	/* Switch crtc and encoder back off if necessary */
+ 	/* Switch crtc and output back off if necessary */
diff --git a/hdpvr-ir-enable.patch b/hdpvr-ir-enable.patch
index e73c42122..787c8f84c 100644
--- a/hdpvr-ir-enable.patch
+++ b/hdpvr-ir-enable.patch
@@ -110,7 +110,7 @@ index 463b81b..60cdc06 100644
  						 msgs[i].len);
  	}
  
-@@ -115,30 +120,58 @@ static struct i2c_algorithm hdpvr_algo = {
+@@ -115,31 +120,59 @@ static struct i2c_algorithm hdpvr_algo = {
  	.functionality = hdpvr_functionality,
  };
  
@@ -119,6 +119,7 @@ index 463b81b..60cdc06 100644
 +	.owner 	= THIS_MODULE,
 +	.id 	= I2C_HW_B_HDPVR,
 +	.algo 	= &hdpvr_algo,
++	.class 	= I2C_CLASS_TV_ANALOG,
 +};
 +
 +static struct i2c_board_info hdpvr_i2c_board_info = {
@@ -159,6 +160,7 @@ index 463b81b..60cdc06 100644
 -	strlcpy(i2c_adap->name, "Hauppauge HD PVR I2C",
 -		sizeof(i2c_adap->name));
 -	i2c_adap->algo  = &hdpvr_algo;
+-	i2c_adap->class = I2C_CLASS_TV_ANALOG;
 -	i2c_adap->owner = THIS_MODULE;
 -	i2c_adap->dev.parent = &dev->udev->dev;
 +	memcpy(&dev->i2c_adapter, &hdpvr_i2c_adapter_template,
@@ -216,14 +218,3 @@ index b0f046d..2107055 100644
  	/* I2C lock */
  	struct mutex		i2c_mutex;
  
-diff --git a/include/linux/i2c-id.h b/include/linux/i2c-id.h
-index 4bef5c5..4385341 100644
---- a/include/linux/i2c-id.h
-+++ b/include/linux/i2c-id.h
-@@ -33,5 +33,6 @@
- 
- /* --- Bit algorithm adapters						*/
- #define I2C_HW_B_CX2388x	0x01001b /* connexant 2388x based tv cards */
-+#define I2C_HW_B_HDPVR		0x010025 /* Hauppauge HD PVR */
- 
- #endif /* LINUX_I2C_ID_H */
diff --git a/kernel.spec b/kernel.spec
index 2d354f16b..8b30de6c4 100644
--- a/kernel.spec
+++ b/kernel.spec
@@ -6,7 +6,7 @@ Summary: The Linux kernel
 # For a stable, released kernel, released_kernel should be 1. For rawhide
 # and/or a kernel built from an rc or git snapshot, released_kernel should
 # be 0.
-%global released_kernel 0
+%global released_kernel 1
 
 # Save original buildid for later if it's defined
 %if 0%{?buildid:1}
@@ -82,9 +82,9 @@ Summary: The Linux kernel
 # The next upstream release sublevel (base_sublevel+1)
 %define upstream_sublevel %(echo $((%{base_sublevel} + 1)))
 # The rc snapshot level
-%define rcrev 5
+%define rcrev 0
 # The git snapshot level
-%define gitrev 2
+%define gitrev 0
 # Set rpm version accordingly
 %define rpmversion 2.6.%{upstream_sublevel}
 %endif
@@ -175,7 +175,7 @@ Summary: The Linux kernel
 %else
 %define gittag .git0
 %endif
-%define pkg_release 0%{?rctag}%{?gittag}.%{fedora_build}%{?buildid}%{?dist}
+%define pkg_release 0.%{fedora_build}%{?rctag}%{?gittag}%{?buildid}%{?dist}
 
 %endif
 
@@ -656,8 +656,6 @@ Patch1810: drm-nouveau-updates.patch
 Patch1819: drm-intel-big-hammer.patch
 # make sure the lvds comes back on lid open
 Patch1825: drm-intel-make-lvds-work.patch
-Patch1826: drm-intel-edp-fixes.patch
-
 Patch1900: linux-2.6-intel-iommu-igfx.patch
 
 # linux1394 git patches
@@ -696,18 +694,52 @@ Patch12018: neuter_intel_microcode_load.patch
 
 Patch12030: tpm-fix-stall-on-boot.patch
 
+# Wacom Bamboo
+Patch12100: wacom-01-add-fuzz-parameters-to-features.patch
+Patch12105: wacom-02-parse-the-bamboo-device-family.patch
+Patch12110: wacom-03-collect-device-quirks-into-single-function.patch
+Patch12115: wacom-04-add-support-for-the-bamboo-touch-trackpad.patch
+Patch12120: wacom-05-add-a-quirk-for-low-resolution-bamboo-devices.patch
+Patch12125: wacom-06-request-tablet-data-for-bamboo-pens.patch
+Patch12130: wacom-07-move-bamboo-touch-irq-to-its-own-function.patch
+Patch12135: wacom-08-add-support-for-bamboo-pen.patch
+Patch12140: wacom-09-disable-bamboo-touchpad-when-pen-is-being-used.patch
+
 # Runtime power management
+Patch12200: linux-2.6-bluetooth-autosuspend.patch
+Patch12201: linux-2.6-uvc-autosuspend.patch
+Patch12202: linux-2.6-qcserial-autosuspend.patch
 Patch12203: linux-2.6-usb-pci-autosuspend.patch
 Patch12204: linux-2.6-enable-more-pci-autosuspend.patch
 Patch12205: runtime_pm_fixups.patch
 
+Patch12225: pci-crs-fixes.patch
+Patch12226: x86-never-alloc-pci-from-the-last-1M-below-4G.patch
+
+Patch12300: btusb-macbookpro-7-1.patch
+Patch12301: btusb-macbookpro-6-2.patch
+Patch12304: add-macbookair3-ids.patch
+
 Patch12303: dmar-disable-when-ricoh-multifunction.patch
 
-Patch12410: mm-page-allocator-adjust-the-per-cpu-counter-threshold-when-memory-is-low.patch
-Patch12411: mm-vmstat-use-a-single-setter-function-and-callback-for-adjusting-percpu-thresholds.patch
+Patch12305: xhci_hcd-suspend-resume.patch
 
-# rhbz#650934
-Patch12420: sched-cure-more-NO_HZ-load-average-woes.patch
+Patch12308: fix-i8k-inline-asm.patch
+
+Patch12405: inet_diag-make-sure-we-run-the-same-bytecode-we-audited.patch
+Patch12408: netlink-make-nlmsg_find_attr-take-a-const-ptr.patch
+
+Patch12406: posix-cpu-timers-workaround-to-suppress-problems-with-mt-exec.patch
+
+Patch12410: tty-make-tiocgicount-a-handler.patch
+Patch12411: tty-icount-changeover-for-other-main-devices.patch
+
+Patch12413: tpm-autodetect-itpm-devices.patch
+
+Patch12420: mm-page-allocator-adjust-the-per-cpu-counter-threshold-when-memory-is-low.patch
+Patch12421: mm-vmstat-use-a-single-setter-function-and-callback-for-adjusting-percpu-thresholds.patch
+
+Patch12430: sched-cure-more-NO_HZ-load-average-woes.patch
 
 %endif
 
@@ -1195,7 +1227,10 @@ ApplyPatch linux-2.6-defaults-pci_use_crs.patch
 # enable ASPM by default on hardware we expect to work
 ApplyPatch linux-2.6-defaults-aspm.patch
 
-#ApplyPatch ima-allow-it-to-be-completely-disabled-and-default-off.patch
+# helps debug resource conflicts [c1f3f281]
+ApplyPatch pnp-log-pnp-resources-as-we-do-for-pci.patch
+
+ApplyPatch ima-allow-it-to-be-completely-disabled-and-default-off.patch
 
 #
 # SCSI Bits.
@@ -1257,7 +1292,6 @@ ApplyOptionalPatch drm-nouveau-updates.patch
 ApplyPatch drm-intel-big-hammer.patch
 ApplyPatch drm-intel-make-lvds-work.patch
 ApplyPatch linux-2.6-intel-iommu-igfx.patch
-ApplyPatch drm-intel-edp-fixes.patch
 
 # linux1394 git patches
 #ApplyPatch linux-2.6-firewire-git-update.patch
@@ -1291,14 +1325,54 @@ ApplyPatch neuter_intel_microcode_load.patch
 # try to fix stalls during boot (#530393)
 ApplyPatch tpm-fix-stall-on-boot.patch
 
+# Wacom Bamboo
+ApplyPatch wacom-01-add-fuzz-parameters-to-features.patch
+ApplyPatch wacom-02-parse-the-bamboo-device-family.patch
+ApplyPatch wacom-03-collect-device-quirks-into-single-function.patch
+ApplyPatch wacom-04-add-support-for-the-bamboo-touch-trackpad.patch
+ApplyPatch wacom-05-add-a-quirk-for-low-resolution-bamboo-devices.patch
+ApplyPatch wacom-06-request-tablet-data-for-bamboo-pens.patch
+ApplyPatch wacom-07-move-bamboo-touch-irq-to-its-own-function.patch
+ApplyPatch wacom-08-add-support-for-bamboo-pen.patch
+ApplyPatch wacom-09-disable-bamboo-touchpad-when-pen-is-being-used.patch
+
 # Runtime PM
+ApplyPatch linux-2.6-bluetooth-autosuspend.patch
+ApplyPatch linux-2.6-uvc-autosuspend.patch
+ApplyPatch linux-2.6-qcserial-autosuspend.patch
 ApplyPatch linux-2.6-usb-pci-autosuspend.patch
 ApplyPatch linux-2.6-enable-more-pci-autosuspend.patch
 ApplyPatch runtime_pm_fixups.patch
 
+# PCI patches to fix problems with _CRS
+# ( from linux-pci list )
+ApplyPatch pci-crs-fixes.patch
+ApplyPatch x86-never-alloc-pci-from-the-last-1M-below-4G.patch
+
+ApplyPatch btusb-macbookpro-7-1.patch
+ApplyPatch btusb-macbookpro-6-2.patch
+ApplyPatch add-macbookair3-ids.patch
+
 # rhbz#605888
 ApplyPatch dmar-disable-when-ricoh-multifunction.patch
 
+ApplyPatch xhci_hcd-suspend-resume.patch
+
+ApplyPatch fix-i8k-inline-asm.patch
+
+# rhbz#651264 (CVE-2010-3880)
+ApplyPatch inet_diag-make-sure-we-run-the-same-bytecode-we-audited.patch
+ApplyPatch netlink-make-nlmsg_find_attr-take-a-const-ptr.patch
+
+# rhbz#656264
+ApplyPatch posix-cpu-timers-workaround-to-suppress-problems-with-mt-exec.patch
+
+# CVE-2010-4077, CVE-2010-4075 (rhbz#648660, #648663)
+ApplyPatch tty-make-tiocgicount-a-handler.patch
+ApplyPatch tty-icount-changeover-for-other-main-devices.patch
+
+ApplyPatch tpm-autodetect-itpm-devices.patch
+
 # backport some fixes for kswapd from mmotm, rhbz#649694
 ApplyPatch mm-page-allocator-adjust-the-per-cpu-counter-threshold-when-memory-is-low.patch
 ApplyPatch mm-vmstat-use-a-single-setter-function-and-callback-for-adjusting-percpu-thresholds.patch
@@ -1920,113 +1994,123 @@ fi
 #                 ||     ||
 
 %changelog
-* Wed Dec 08 2010 Kyle McMartin <kyle@redhat.com> 2.6.37-0.rc5.git2.1
-- Linux 2.6.37-rc5-git2
+* Wed Dec 08 2010 Kyle McMartin <kyle@redhat.com> 2.6.36.2-12.rc1
+- Linux stable 2.6.36.2-rc1
+- Drop patches merged in stable series:
+   tty-dont-allow-reopen-when-ldisc-is-changing.patch
+   tty-ldisc-fix-open-flag-handling.patch
+   tty-open-hangup-race-fixup.patch
+   tty-restore-tty_ldisc_wait_idle.patch
+   hda_realtek-handle-unset-external-amp-bits.patch
+   ipc-shm-fix-information-leak-to-user.patch
+   ipc-zero-struct-memory-for-compat-fns.patch
+   linux-2.6-rcu-sched-warning.patch
+   pnpacpi-cope-with-invalid-device-ids.patch
+   radeon-mc-vram-map-needs-to-be-gt-pci-aperture.patch
+
+* Wed Dec 08 2010 Kyle McMartin <kyle@redhat.com>
 - sched-cure-more-NO_HZ-load-average-woes.patch: fix some of the complaints
   in 2.6.35+ about load average with dynticks. (rhbz#650934)
 
-* Tue Dec 07 2010 Kyle McMartin <kyle@redhat.com> 2.6.37-0.rc5.git0.1
-- Linux 2.6.37-rc5
-
 * Sat Dec 04 2010 Kyle McMartin <kyle@redhat.com>
 - Enable C++ symbol demangling with perf by linking against libiberty.a,
   which is LGPL2.
 
-* Fri Dec 03 2010 Kyle McMartin <kyle@redhat.com>
-- Linux 2.6.37-rc4-git3
-- Enable HP ILO on x86_64 for (#571329)
-- Drop merged drm-fixes.patch, split out edp-fixes.
-- tty-dont-allow-reopen-when-ldisc-is-changing.patch: upstream.
-- tty-ldisc-fix-open-flag-handling.patch: upstream.
-- Enable CIFS_ACL.
+* Fri Dec 03 2010 Kyle McMartin <kyle@redhat.com> 2.6.36.1-11
+- Enable HP ILO on x86_64. (#571329)
 
 * Thu Dec 02 2010 Kyle McMartin <kyle@redhat.com>
 - Grab some of Mel's fixes from -mmotm to hopefully sort out #649694.
-
-* Wed Dec 01 2010 Kyle McMartin <kyle@redhat.com> 2.6.37-0.rc4.git1.1
-- Linux 2.6.37-rc4-git1
-- Pull in DRM fixes that are queued for -rc5 [3074adc8]
-  + edp-fixes on top
-
-* Tue Nov 30 2010 Kyle McMartin <kyle@redhat.com> 2.6.37-0.rc4.git0.1
-- Linux 2.6.37-rc4
+  They've been tested by a few on that bug on 2.6.35, but let's push
+  it out to a bigger audience.
 
 * Mon Nov 29 2010 Kyle McMartin <kyle@redhat.com>
-- Update debug-vm-would_have_oomkilled patch.
+- PNP: log PNP resources, as we do for PCI [c1f3f281]
+  should help us debug resource conflicts (requested by bjorn.)
 
-* Mon Nov 29 2010 Kyle McMartin <kyle@redhat.com> 2.6.37-0.rc3.git6.1
-- Linux 2.6.37-rc3-git6
-- TTY: open/hangup race fixup (rhbz#630464)
-
-* Fri Nov 26 2010 Kyle McMartin <kyle@redhat.com> 2.6.37-0.rc3.git3.1
-- Linux 2.6.37-rc3-git3
-- Print tty->flags as well in debugging patch...
+* Mon Nov 29 2010 Kyle McMartin <kyle@redhat.com> 2.6.36.1-10
+- tpm-autodetect-itpm-devices.patch: Auto-fix TPM issues on various
+  laptops which prevented suspend/resume. (#647132)
+- tty fixes from kernel-git (#630464)
 
 * Fri Nov 26 2010 Kyle McMartin <kyle@redhat.com>
-- Copy tty_open WARN_ON debugging patch from rawhide.
+- Quiet a build warning the previous INET_DIAG fix caused.
 
-* Fri Nov 26 2010 Kyle McMartin <kyle@redhat.com> 2.6.37-0.rc3.git2.1
-- Linux 2.6.37-rc3-git2
-- CGROUP_MEM_RES_CTLR_SWAP_ENABLED is not set, so the cgroup memory
-  resource controller swap accounting is disabled by default. You can
-  enable it with 'swapaccount' if desired.
-- TTY: don't allow reopen when ldisc is changing (rhbz#630464)
+* Fri Nov 26 2010 Kyle McMartin <kyle@redhat.com>
+- Plug stack leaks in tty/serial drivers. (#648663, #648660)
 
-* Wed Nov 24 2010 Kyle McMartin <kyle@redhat.com> 2.6.37-0.rc3.git1.1
-- Linux 2.6.37-rc3-git1
+* Fri Nov 26 2010 Kyle McMartin <kyle@redhat.com>
+- hda/realtek: handle unset external amp config (#657388)
 
-* Mon Nov 22 2010 Kyle McMartin <kyle@redhat.com> 2.6.37-0.rc3.git0.1
-- Linux 2.6.37-rc3
+* Wed Nov 24 2010 Kyle McMartin <kyle@redhat.com>
+- Disable FSCACHE for CIFS until issues are addressed. (#656498)
 
-* Sat Nov 20 2010 Kyle McMartin <kyle@redhat.com> 2.6.37-0.rc2.git7.1
-- Linux 2.6.37-rc2-git7
+* Wed Nov 24 2010 Kyle McMartin <kyle@redhat.com>
+- drm/radeon/kms: MC vram map needs to be >= pci aperture size (fdo#28402)
 
-* Fri Nov 19 2010 Kyle McMartin <kyle@redhat.com> 2.6.37-0.rc2.git5.1
-- Linux 2.6.37-rc2-git5
+* Wed Nov 24 2010 Kyle McMartin <kyle@redhat.com>
+- Fix graphics on HP 2530p (korg#23542)
 
-* Thu Nov 18 2010 Kyle McMartin <kyle@redhat.com>
-- Move %{fedora_build} in the un-released (ie: -git/-rc) kernel case for
-  a variety of reasons, principally so that:
-  1: Bumping %baserelease isn't needed if we're just updating snapshots.
-  2: %buildid will sort as newer so we don't need to bump baserelease when
-     building bugzilla fixes.
+* Tue Nov 23 2010 Kyle McMartin <kyle@redhat.com>
+- zero struct memory in ipc compat (CVE-2010-4073) (#648658)
+- zero struct memory in ipc shm (CVE-2010-4072) (#648656)
+- fix logic error in INET_DIAG bytecode auditing (CVE-2010-3880) (#651264)
+- posix-cpu-timers: workaround to suppress the problems with mt exec
+  (rhbz#656264)
 
-* Wed Nov 17 2010 Kyle McMartin <kyle@redhat.com> 2.6.37-0.1.rc2.git2
-- Linux 2.6.37-rc2-git2
-- enable STRICT_DEVMEM on s390x.
+* Tue Nov 23 2010 Kyle McMartin <kyle@redhat.com>
+- fix-i8k-inline-asm.patch: backport gcc miscompilation fix from git
+  [22d3243d, 6b4e81db] (rhbz#647677)
+
+* Mon Nov 22 2010 Kyle McMartin <kyle@redhat.com>
+- Add a debugging patch to help track down which tty is being
+  poked by plymouth.
+
+* Mon Nov 22 2010 Kyle McMartin <kyle@redhat.com> 2.6.36.1-9
+- Linux stable 2.6.36.1
+
+* Mon Nov 22 2010 Kyle McMartin <kyle@redhat.com> 2.6.36.1-8.rc1
+- Merge 100eeae2 (TTY: restore tty_ldisc_wait_idle) which should fix the WARN
+  in tty_open in rawhide.
+
+* Mon Nov 22 2010 Kyle McMartin <kyle@redhat.com> 2.6.36.1-7.rc1
+- Make vmlinuz world readable again.
+
+* Sat Nov 20 2010 Kyle McMartin <kyle@redhat.com>
+- Merge patch from Aris to allow kernel-debuginfo to be multiply-installed
+  (means we had to move the build dir, kind of a bummer, but I verified
+   that a -gitN to -gitN+1 worked.)
+
+* Sat Nov 20 2010 Chuck Ebbert <cebbert@redhat.com> 2.6.36.1-6.rc1
+- Linux 2.6.36.1-rc1
+- Comment out upstreamed patches:
+  secmark-do-not-return-early-if-there-was-no-error.patch
+
+* Sat Nov 20 2010 Kyle McMartin <kyle@redhat.com>
+- secmark-do-not-return-early-if-there-was-no-error.patch: requested
+  by eparis@. (Fixes a BUG when using secmark.)
+
+* Wed Nov 17 2010 Kyle McMartin <kyle@redhat.com> 2.6.36-5
+- Disable drm/intel rebase until it can be fixed.
 
 * Wed Nov 17 2010 Kyle McMartin <kyle@redhat.com>
 - Make vmlinuz/System.map root read-write only by default. You can just
   chmod 644 them later if you (unlikely) need them without root.
 
-* Mon Nov 15 2010 Kyle McMartin <kyle@redhat.com> 2.6.37-0.1.rc2.git0
-- Linux 2.6.37-rc2
+* Tue Nov 16 2010 Kyle McMartin <kyle@redhat.com> 2.6.36-4
+- Disable parallel doc builds, they fail. Constantly.
 
-* Sat Nov 13 2010 Kyle McMartin <kyle@redhat.com> 2.6.37-0.1.rc1.git10
-- Linux 2.6.37-rc1-git10
-- SECURITY_DMESG_RESTRICT added, the principle of least surprise dictates
-  we should probably have it off. If you want to restrict dmesg access
-  you may use the kernel.dmesg_restrict sysctl.
-- linux-2.6-bluetooth-autosuspend.patch: merged upstream.
+* Tue Nov 16 2010 Kyle McMartin <kyle@redhat.com> 2.6.36-3
+- Rebase drm/intel to 2.6.37-rc2+edp_fixes, hopefully to sort out most of
+  the issues folks with eDP are having.
+- Switch to release builds and turn on debugging flavours.
 
-* Tue Nov 09 2010 Kyle McMartin <kyle@redhat.com> 2.6.37-0.1.rc1.git7
-- Linux 2.6.37-rc1-git7
+* Mon Nov 15 2010 Kyle McMartin <kyle@redhat.com>
+- rhbz#651019: pull in support for MBA3.
 
-* Mon Nov 08 2010 Kyle McMartin <kyle@redhat.com> 2.6.37-0.1.rc1.git5
-- Linux 2.6.37-rc1-git5
-
-* Mon Nov 08 2010 Kyle McMartin <kyle@redhat.com>
-- Cherry-pick utrace-ptrace fixes from mayoung. Thanks!
-
-* Tue Nov 02 2010 Kyle McMartin <kyle@redhat.com> 2.6.37-0.1.rc1.git0
-- Linux 2.6.37-rc1
-
-* Tue Oct 26 2010 Kyle McMartin <kyle@redhat.com> 2.6.37-0.1.rc0.git8
-- Linux 2.6.36-git8
-
-* Fri Oct 22 2010 Kyle McMartin <kyle@redhat.com> 2.6.37-0.1.rc0.git2
-- Switch to tracking git snapshots of what will become 2.6.37.
-- Fix context rejects in utrace and a few other patches.
+* Mon Nov 15 2010 Kyle McMartin <kyle@redhat.com> 2.6.36-2
+- drm-i915-reprogram-power-monitoring-registers-on-resume.patch: fix intel_ips
+  driver.
 
 * Wed Oct 20 2010 Chuck Ebbert <cebbert@redhat.com> 2.6.36-1
 - Linux 2.6.36
diff --git a/linux-2.6-32bit-mmap-exec-randomization.patch b/linux-2.6-32bit-mmap-exec-randomization.patch
index d42638c27..fe91e0abf 100644
--- a/linux-2.6-32bit-mmap-exec-randomization.patch
+++ b/linux-2.6-32bit-mmap-exec-randomization.patch
@@ -14,9 +14,9 @@
 --- b/mm/mmap.c
 +++ b/mm/mmap.c
 @@ -28,6 +28,7 @@
+ #include <linux/rmap.h>
  #include <linux/mmu_notifier.h>
  #include <linux/perf_event.h>
- #include <linux/audit.h>
 +#include <linux/random.h>
  
  #include <asm/uaccess.h>
diff --git a/linux-2.6-bluetooth-autosuspend.patch b/linux-2.6-bluetooth-autosuspend.patch
new file mode 100644
index 000000000..663b79979
--- /dev/null
+++ b/linux-2.6-bluetooth-autosuspend.patch
@@ -0,0 +1,159 @@
+commit 6aa42966dea9a1fc02a714211ea489c3278bf8d4
+Author: Matthew Garrett <mjg@redhat.com>
+Date:   Thu Sep 16 13:34:55 2010 -0400
+
+    bluetooth: Take a runtime pm reference on hid connections
+    
+    Bluetooth runtime PM interacts badly with input devices - the connection
+    will be dropped if the device becomes idle, resulting in noticable lag when
+    the user interacts with the input device again. Bump the pm runtime count
+    when the device is associated and release it when it's disassociated in
+    order to avoid this.
+    
+    Signed-off-by: Matthew Garrett <mjg@redhat.com>
+
+diff --git a/net/bluetooth/hidp/core.c b/net/bluetooth/hidp/core.c
+index bfe641b..a4489a7 100644
+--- a/net/bluetooth/hidp/core.c
++++ b/net/bluetooth/hidp/core.c
+@@ -36,6 +36,7 @@
+ #include <linux/file.h>
+ #include <linux/init.h>
+ #include <linux/wait.h>
++#include <linux/pm_runtime.h>
+ #include <net/sock.h>
+ 
+ #include <linux/input.h>
+@@ -622,6 +623,14 @@ static int hidp_session(void *arg)
+ 	return 0;
+ }
+ 
++static struct hci_dev *hidp_get_hci(struct hidp_session *session)
++{
++	bdaddr_t *src = &bt_sk(session->ctrl_sock->sk)->src;
++	bdaddr_t *dst = &bt_sk(session->ctrl_sock->sk)->dst;
++
++	return hci_get_route(dst, src);
++}
++
+ static struct device *hidp_get_device(struct hidp_session *session)
+ {
+ 	bdaddr_t *src = &bt_sk(session->ctrl_sock->sk)->src;
+@@ -819,6 +828,7 @@ fault:
+ int hidp_add_connection(struct hidp_connadd_req *req, struct socket *ctrl_sock, struct socket *intr_sock)
+ {
+ 	struct hidp_session *session, *s;
++	struct hci_dev *hdev;
+ 	int err;
+ 
+ 	BT_DBG("");
+@@ -889,6 +899,10 @@ int hidp_add_connection(struct hidp_connadd_req *req, struct socket *ctrl_sock,
+ 		hidp_input_event(session->input, EV_LED, 0, 0);
+ 	}
+ 
++	hdev = hidp_get_hci(session);
++	pm_runtime_get(hdev->parent);
++	hci_dev_put(hdev);
++
+ 	up_write(&hidp_session_sem);
+ 	return 0;
+ 
+@@ -925,6 +939,7 @@ failed:
+ int hidp_del_connection(struct hidp_conndel_req *req)
+ {
+ 	struct hidp_session *session;
++	struct hci_dev *hdev;
+ 	int err = 0;
+ 
+ 	BT_DBG("");
+@@ -952,6 +967,9 @@ int hidp_del_connection(struct hidp_conndel_req *req)
+ 	} else
+ 		err = -ENOENT;
+ 
++	hdev = hidp_get_hci(session);
++	pm_runtime_put(hdev->parent);
++	hci_dev_put(hdev);
+ 	up_read(&hidp_session_sem);
+ 	return err;
+ }
+
+commit 482eca592615e85b048753750b101d051b77fde9
+Author: Matthew Garrett <mjg@redhat.com>
+Date:   Thu Sep 16 13:49:24 2010 -0400
+
+    bluetooth: Remove some unnecessary error messages
+    
+    The main reason for these urbs to error out on submission is that runtime
+    pm has kicked in, which is unnecessary noise. Let's just drop them.
+    
+    Signed-off-by: Matthew Garrett <mjg@redhat.com>
+
+diff --git a/drivers/bluetooth/btusb.c b/drivers/bluetooth/btusb.c
+index d22ce3c..3ace025 100644
+--- a/drivers/bluetooth/btusb.c
++++ b/drivers/bluetooth/btusb.c
+@@ -229,11 +229,8 @@ static void btusb_intr_complete(struct urb *urb)
+ 	usb_anchor_urb(urb, &data->intr_anchor);
+ 
+ 	err = usb_submit_urb(urb, GFP_ATOMIC);
+-	if (err < 0) {
+-		BT_ERR("%s urb %p failed to resubmit (%d)",
+-						hdev->name, urb, -err);
++	if (err < 0)
+ 		usb_unanchor_urb(urb);
+-	}
+ }
+ 
+ static int btusb_submit_intr_urb(struct hci_dev *hdev, gfp_t mem_flags)
+@@ -313,11 +310,8 @@ static void btusb_bulk_complete(struct urb *urb)
+ 	usb_mark_last_busy(data->udev);
+ 
+ 	err = usb_submit_urb(urb, GFP_ATOMIC);
+-	if (err < 0) {
+-		BT_ERR("%s urb %p failed to resubmit (%d)",
+-						hdev->name, urb, -err);
++	if (err < 0)
+ 		usb_unanchor_urb(urb);
+-	}
+ }
+ 
+ static int btusb_submit_bulk_urb(struct hci_dev *hdev, gfp_t mem_flags)
+@@ -402,11 +396,8 @@ static void btusb_isoc_complete(struct urb *urb)
+ 	usb_anchor_urb(urb, &data->isoc_anchor);
+ 
+ 	err = usb_submit_urb(urb, GFP_ATOMIC);
+-	if (err < 0) {
+-		BT_ERR("%s urb %p failed to resubmit (%d)",
+-						hdev->name, urb, -err);
++	if (err < 0)
+ 		usb_unanchor_urb(urb);
+-	}
+ }
+ 
+ static void inline __fill_isoc_descriptor(struct urb *urb, int len, int mtu)
+
+commit fd763c5b14ed99ac2401f8e8f1a07c3687ae01cc
+Author: Matthew Garrett <mjg@redhat.com>
+Date:   Thu Sep 16 13:37:38 2010 -0400
+
+    bluetooth: Enable USB autosuspend by default on btusb
+    
+    We've done this for a while in Fedora without any obvious problems other
+    than some interaction with input devices. Those should be fixed now, so
+    let's try this in mainline.
+    
+    Signed-off-by: Matthew Garrett <mjg@redhat.com>
+
+diff --git a/drivers/bluetooth/btusb.c b/drivers/bluetooth/btusb.c
+index 3ace025..03b64e4 100644
+--- a/drivers/bluetooth/btusb.c
++++ b/drivers/bluetooth/btusb.c
+@@ -1014,6 +1014,8 @@ static int btusb_probe(struct usb_interface *intf,
+ 
+ 	usb_set_intfdata(intf, data);
+ 
++	usb_enable_autosuspend(interface_to_usbdev(intf));
++
+ 	return 0;
+ }
+ 
diff --git a/linux-2.6-crash-driver.patch b/linux-2.6-crash-driver.patch
index 0f11aba0d..7b518bb88 100644
--- a/linux-2.6-crash-driver.patch
+++ b/linux-2.6-crash-driver.patch
@@ -233,6 +233,18 @@ index 3141dd3..153658c 100644
  
  config LEGACY_PTY_COUNT
  	int "Maximum number of legacy PTY in use"
+diff --git a/drivers/char/Makefile b/drivers/char/Makefile
+index f957edf..604c418 100644
+--- a/drivers/char/Makefile
++++ b/drivers/char/Makefile
+@@ -111,6 +111,8 @@ obj-$(CONFIG_PS3_FLASH)		+= ps3flash.o
+ obj-$(CONFIG_JS_RTC)		+= js-rtc.o
+ js-rtc-y = rtc.o
+ 
++obj-$(CONFIG_CRASH)		+= crash.o
++
+ # Files generated that shall be removed upon make clean
+ clean-files := consolemap_deftbl.c defkeymap.c
  
 diff --git a/drivers/char/crash.c b/drivers/char/crash.c
 new file mode 100644
@@ -368,14 +380,6 @@ index 0000000..e5437de
 +module_exit(crash_cleanup_module);
 +
 +MODULE_LICENSE("GPL");
+-- 
+1.7.0.1
 
-diff --git a/drivers/char/Makefile b/drivers/char/Makefile
-index ba53ec9..6588b33 100644
---- a/drivers/char/Makefile
-+++ b/drivers/char/Makefile
-@@ -98,3 +98,5 @@ obj-$(CONFIG_RAMOOPS)		+= ramoops.o
- 
- obj-$(CONFIG_JS_RTC)		+= js-rtc.o
- js-rtc-y = rtc.o
-+
-+obj-$(CONFIG_CRASH)		+= crash.o
diff --git a/linux-2.6-enable-more-pci-autosuspend.patch b/linux-2.6-enable-more-pci-autosuspend.patch
index 666ded640..b1f92b134 100644
--- a/linux-2.6-enable-more-pci-autosuspend.patch
+++ b/linux-2.6-enable-more-pci-autosuspend.patch
@@ -1,20 +1,7 @@
- drivers/acpi/acpica/aclocal.h    |    7 +--
- drivers/acpi/acpica/evgpe.c      |   75 +++++++++++++++-----------------
- drivers/acpi/acpica/evgpeinit.c  |   11 +----
- drivers/acpi/acpica/evgpeutil.c  |    5 +-
- drivers/acpi/acpica/evxface.c    |   23 +++++-----
- drivers/acpi/ec.c                |    2 +-
- drivers/acpi/pci_bind.c          |   86 ++++++++++++++++++++++++++++++++++++++
- drivers/acpi/sleep.c             |    2 +-
- drivers/char/ipmi/ipmi_si_intf.c |    2 +-
- include/acpi/acpixf.h            |    3 +-
- 10 files changed, 143 insertions(+), 73 deletions(-)
-
-diff --git a/drivers/acpi/acpica/aclocal.h b/drivers/acpi/acpica/aclocal.h
-index 2ceb0c0..3663362 100644
---- a/drivers/acpi/acpica/aclocal.h
-+++ b/drivers/acpi/acpica/aclocal.h
-@@ -406,17 +406,16 @@ struct acpi_predefined_data {
+diff -up linux-2.6.35.x86_64/drivers/acpi/acpica/aclocal.h.mjg linux-2.6.35.x86_64/drivers/acpi/acpica/aclocal.h
+--- linux-2.6.35.x86_64/drivers/acpi/acpica/aclocal.h.mjg	2010-10-04 13:52:05.086789354 -0400
++++ linux-2.6.35.x86_64/drivers/acpi/acpica/aclocal.h	2010-10-04 13:52:50.948801001 -0400
+@@ -406,16 +406,15 @@ struct acpi_predefined_data {
   *
   ****************************************************************************/
  
@@ -26,7 +13,6 @@ index 2ceb0c0..3663362 100644
  	void *context;		/* Context to be passed to handler */
 -	struct acpi_namespace_node *method_node;	/* Method node for this GPE level (saved) */
  	u8 orig_flags;		/* Original misc info about this GPE */
- 	u8 orig_enabled;	/* Set if the GPE was originally enabled */
  };
  
 -union acpi_gpe_dispatch_info {
@@ -34,7 +20,7 @@ index 2ceb0c0..3663362 100644
  	struct acpi_namespace_node *method_node;	/* Method node for this GPE level */
  	struct acpi_handler_info *handler;
  };
-@@ -426,7 +425,7 @@ union acpi_gpe_dispatch_info {
+@@ -425,7 +424,7 @@ union acpi_gpe_dispatch_info {
   * NOTE: Important to keep this struct as small as possible.
   */
  struct acpi_gpe_event_info {
@@ -43,11 +29,10 @@ index 2ceb0c0..3663362 100644
  	struct acpi_gpe_register_info *register_info;	/* Backpointer to register info */
  	u8 flags;		/* Misc info about this GPE */
  	u8 gpe_number;		/* This GPE */
-diff --git a/drivers/acpi/acpica/evgpe.c b/drivers/acpi/acpica/evgpe.c
-index f226eac..c4b1c4c 100644
---- a/drivers/acpi/acpica/evgpe.c
-+++ b/drivers/acpi/acpica/evgpe.c
-@@ -474,9 +474,7 @@ static void ACPI_SYSTEM_XFACE acpi_ev_asynch_execute_gpe_method(void *context)
+diff -up linux-2.6.35.x86_64/drivers/acpi/acpica/evgpe.c.mjg linux-2.6.35.x86_64/drivers/acpi/acpica/evgpe.c
+--- linux-2.6.35.x86_64/drivers/acpi/acpica/evgpe.c.mjg	2010-10-04 13:52:05.088789399 -0400
++++ linux-2.6.35.x86_64/drivers/acpi/acpica/evgpe.c	2010-10-04 13:52:50.950801045 -0400
+@@ -474,9 +474,7 @@ static void ACPI_SYSTEM_XFACE acpi_ev_as
  	 * Must check for control method type dispatch one more time to avoid a
  	 * race with ev_gpe_install_handler
  	 */
@@ -58,7 +43,7 @@ index f226eac..c4b1c4c 100644
  		/* Allocate the evaluation information block */
  
  		info = ACPI_ALLOCATE_ZEROED(sizeof(struct acpi_evaluate_info));
-@@ -575,41 +573,15 @@ acpi_ev_gpe_dispatch(struct acpi_gpe_event_info *gpe_event_info, u32 gpe_number)
+@@ -575,41 +573,15 @@ acpi_ev_gpe_dispatch(struct acpi_gpe_eve
  	}
  
  	/*
@@ -108,7 +93,7 @@ index f226eac..c4b1c4c 100644
  		/*
  		 * Disable the GPE, so it doesn't keep firing before the method has a
  		 * chance to run (it runs asynchronously with interrupts enabled).
-@@ -634,10 +606,34 @@ acpi_ev_gpe_dispatch(struct acpi_gpe_event_info *gpe_event_info, u32 gpe_number)
+@@ -634,10 +606,34 @@ acpi_ev_gpe_dispatch(struct acpi_gpe_eve
  					"Unable to queue handler for GPE[0x%2X] - event disabled",
  					gpe_number));
  		}
@@ -126,7 +111,7 @@ index f226eac..c4b1c4c 100644
 +								dispatch.
 +								handler->
 +								context);
- 
++
 +		/* It is now safe to clear level-triggered events. */
 +
 +		if ((gpe_event_info->flags & ACPI_GPE_XRUPT_TYPE_MASK) ==
@@ -140,12 +125,12 @@ index f226eac..c4b1c4c 100644
 +			}
 +		}
 +	}
-+
+ 
 +	if (!(gpe_event_info->flags & ACPI_GPE_DISPATCH_MASK)) {
  		/*
  		 * No handler or method to run!
  		 * 03/2010: This case should no longer be possible. We will not allow
-@@ -658,7 +654,6 @@ acpi_ev_gpe_dispatch(struct acpi_gpe_event_info *gpe_event_info, u32 gpe_number)
+@@ -658,7 +654,6 @@ acpi_ev_gpe_dispatch(struct acpi_gpe_eve
  					gpe_number));
  			return_UINT32(ACPI_INTERRUPT_NOT_HANDLED);
  		}
@@ -153,11 +138,10 @@ index f226eac..c4b1c4c 100644
  	}
  
  	return_UINT32(ACPI_INTERRUPT_HANDLED);
-diff --git a/drivers/acpi/acpica/evgpeinit.c b/drivers/acpi/acpica/evgpeinit.c
-index 2c7def9..9915b52 100644
---- a/drivers/acpi/acpica/evgpeinit.c
-+++ b/drivers/acpi/acpica/evgpeinit.c
-@@ -386,16 +386,7 @@ acpi_ev_match_gpe_method(acpi_handle obj_handle,
+diff -up linux-2.6.35.x86_64/drivers/acpi/acpica/evgpeinit.c.mjg linux-2.6.35.x86_64/drivers/acpi/acpica/evgpeinit.c
+--- linux-2.6.35.x86_64/drivers/acpi/acpica/evgpeinit.c.mjg	2010-10-04 13:52:05.089789421 -0400
++++ linux-2.6.35.x86_64/drivers/acpi/acpica/evgpeinit.c	2010-10-04 13:52:50.951801067 -0400
+@@ -392,16 +392,7 @@ acpi_ev_match_gpe_method(acpi_handle obj
  		return_ACPI_STATUS(AE_OK);
  	}
  
@@ -175,11 +159,10 @@ index 2c7def9..9915b52 100644
  		/*
  		 * If there is already a method, ignore this method. But check
  		 * for a type mismatch (if both the _Lxx AND _Exx exist)
-diff --git a/drivers/acpi/acpica/evgpeutil.c b/drivers/acpi/acpica/evgpeutil.c
-index 19a0e51..434ad1b 100644
---- a/drivers/acpi/acpica/evgpeutil.c
-+++ b/drivers/acpi/acpica/evgpeutil.c
-@@ -323,12 +323,11 @@ acpi_ev_delete_gpe_handlers(struct acpi_gpe_xrupt_info *gpe_xrupt_info,
+diff -up linux-2.6.35.x86_64/drivers/acpi/acpica/evgpeutil.c.mjg linux-2.6.35.x86_64/drivers/acpi/acpica/evgpeutil.c
+--- linux-2.6.35.x86_64/drivers/acpi/acpica/evgpeutil.c.mjg	2010-10-04 13:52:05.090789443 -0400
++++ linux-2.6.35.x86_64/drivers/acpi/acpica/evgpeutil.c	2010-10-04 13:52:50.952801089 -0400
+@@ -323,12 +323,11 @@ acpi_ev_delete_gpe_handlers(struct acpi_
  								 ACPI_GPE_REGISTER_WIDTH)
  								+ j];
  
@@ -194,11 +177,10 @@ index 19a0e51..434ad1b 100644
  			}
  		}
  	}
-diff --git a/drivers/acpi/acpica/evxface.c b/drivers/acpi/acpica/evxface.c
-index 36af222..b097136 100644
---- a/drivers/acpi/acpica/evxface.c
-+++ b/drivers/acpi/acpica/evxface.c
-@@ -662,6 +662,8 @@ ACPI_EXPORT_SYMBOL(acpi_remove_notify_handler)
+diff -up linux-2.6.35.x86_64/drivers/acpi/acpica/evxface.c.mjg linux-2.6.35.x86_64/drivers/acpi/acpica/evxface.c
+--- linux-2.6.35.x86_64/drivers/acpi/acpica/evxface.c.mjg	2010-10-04 13:52:05.092789487 -0400
++++ linux-2.6.35.x86_64/drivers/acpi/acpica/evxface.c	2010-10-04 13:52:50.954801133 -0400
+@@ -662,6 +662,8 @@ ACPI_EXPORT_SYMBOL(acpi_remove_notify_ha
   *                                edge- or level-triggered interrupt.
   *              Address         - Address of the handler
   *              Context         - Value passed to the handler on each GPE
@@ -207,7 +189,7 @@ index 36af222..b097136 100644
   *
   * RETURN:      Status
   *
-@@ -671,7 +673,8 @@ ACPI_EXPORT_SYMBOL(acpi_remove_notify_handler)
+@@ -671,7 +673,8 @@ ACPI_EXPORT_SYMBOL(acpi_remove_notify_ha
  acpi_status
  acpi_install_gpe_handler(acpi_handle gpe_device,
  			 u32 gpe_number,
@@ -217,7 +199,7 @@ index 36af222..b097136 100644
  {
  	struct acpi_gpe_event_info *gpe_event_info;
  	struct acpi_handler_info *handler;
-@@ -711,8 +714,7 @@ acpi_install_gpe_handler(acpi_handle gpe_device,
+@@ -711,8 +714,7 @@ acpi_install_gpe_handler(acpi_handle gpe
  
  	/* Make sure that there isn't a handler there already */
  
@@ -227,7 +209,7 @@ index 36af222..b097136 100644
  		status = AE_ALREADY_EXISTS;
  		goto free_and_exit;
  	}
-@@ -721,7 +723,6 @@ acpi_install_gpe_handler(acpi_handle gpe_device,
+@@ -721,7 +723,6 @@ acpi_install_gpe_handler(acpi_handle gpe
  
  	handler->address = address;
  	handler->context = context;
@@ -235,16 +217,15 @@ index 36af222..b097136 100644
  	handler->orig_flags = gpe_event_info->flags &
  			(ACPI_GPE_XRUPT_TYPE_MASK | ACPI_GPE_DISPATCH_MASK);
  
-@@ -732,7 +733,7 @@ acpi_install_gpe_handler(acpi_handle gpe_device,
+@@ -733,17 +734,17 @@ acpi_install_gpe_handler(acpi_handle gpe
  	 */
  
  	if ((handler->orig_flags & ACPI_GPE_DISPATCH_METHOD)
--	    && gpe_event_info->runtime_count) {
-+	    && gpe_event_info->runtime_count && !keep_method) {
- 		handler->orig_enabled = 1;
+-	    && !(gpe_event_info->flags & ACPI_GPE_CAN_WAKE))
++	    && !(gpe_event_info->flags & ACPI_GPE_CAN_WAKE) && !keep_method)
  		(void)acpi_raw_disable_gpe(gpe_event_info);
- 	}
-@@ -741,10 +742,10 @@ acpi_install_gpe_handler(acpi_handle gpe_device,
+ 
+ 	/* Install the handler */
  
  	gpe_event_info->dispatch.handler = handler;
  
@@ -258,7 +239,7 @@ index 36af222..b097136 100644
  	gpe_event_info->flags |= (u8) (type | ACPI_GPE_DISPATCH_HANDLER);
  
  	acpi_os_release_lock(acpi_gbl_gpe_lock, flags);
-@@ -813,8 +814,7 @@ acpi_remove_gpe_handler(acpi_handle gpe_device,
+@@ -812,8 +813,7 @@ acpi_remove_gpe_handler(acpi_handle gpe_
  
  	/* Make sure that a handler is indeed installed */
  
@@ -268,7 +249,7 @@ index 36af222..b097136 100644
  		status = AE_NOT_EXIST;
  		goto unlock_and_exit;
  	}
-@@ -830,9 +830,8 @@ acpi_remove_gpe_handler(acpi_handle gpe_device,
+@@ -829,9 +829,8 @@ acpi_remove_gpe_handler(acpi_handle gpe_
  
  	handler = gpe_event_info->dispatch.handler;
  
@@ -279,11 +260,10 @@ index 36af222..b097136 100644
  	gpe_event_info->flags &=
  		~(ACPI_GPE_XRUPT_TYPE_MASK | ACPI_GPE_DISPATCH_MASK);
  	gpe_event_info->flags |= handler->orig_flags;
-diff --git a/drivers/acpi/ec.c b/drivers/acpi/ec.c
-index 372ff80..9a39f82 100644
---- a/drivers/acpi/ec.c
-+++ b/drivers/acpi/ec.c
-@@ -740,7 +740,7 @@ static int ec_install_handlers(struct acpi_ec *ec)
+diff -up linux-2.6.35.x86_64/drivers/acpi/ec.c.mjg linux-2.6.35.x86_64/drivers/acpi/ec.c
+--- linux-2.6.35.x86_64/drivers/acpi/ec.c.mjg	2010-10-04 13:52:05.094789531 -0400
++++ linux-2.6.35.x86_64/drivers/acpi/ec.c	2010-10-04 13:52:50.955801156 -0400
+@@ -746,7 +746,7 @@ static int ec_install_handlers(struct ac
  		return 0;
  	status = acpi_install_gpe_handler(NULL, ec->gpe,
  				  ACPI_GPE_EDGE_TRIGGERED,
@@ -292,10 +272,9 @@ index 372ff80..9a39f82 100644
  	if (ACPI_FAILURE(status))
  		return -ENODEV;
  
-diff --git a/drivers/acpi/pci_bind.c b/drivers/acpi/pci_bind.c
-index 2ef0409..8b3cc6a 100644
---- a/drivers/acpi/pci_bind.c
-+++ b/drivers/acpi/pci_bind.c
+diff -up linux-2.6.35.x86_64/drivers/acpi/pci_bind.c.mjg linux-2.6.35.x86_64/drivers/acpi/pci_bind.c
+--- linux-2.6.35.x86_64/drivers/acpi/pci_bind.c.mjg	2010-10-04 13:52:05.102789707 -0400
++++ linux-2.6.35.x86_64/drivers/acpi/pci_bind.c	2010-10-04 13:52:50.962801311 -0400
 @@ -28,6 +28,7 @@
  #include <linux/pci.h>
  #include <linux/pci-acpi.h>
@@ -348,7 +327,7 @@ index 2ef0409..8b3cc6a 100644
  static int acpi_pci_unbind(struct acpi_device *device)
  {
  	struct pci_dev *dev;
-@@ -43,6 +81,30 @@ static int acpi_pci_unbind(struct acpi_device *device)
+@@ -43,6 +81,30 @@ static int acpi_pci_unbind(struct acpi_d
  	if (!dev)
  		goto out;
  
@@ -379,7 +358,7 @@ index 2ef0409..8b3cc6a 100644
  	device_set_run_wake(&dev->dev, false);
  	pci_acpi_remove_pm_notifier(device);
  
-@@ -71,6 +133,30 @@ static int acpi_pci_bind(struct acpi_device *device)
+@@ -71,6 +133,30 @@ static int acpi_pci_bind(struct acpi_dev
  		return 0;
  
  	pci_acpi_add_pm_notifier(device, dev);
@@ -410,24 +389,25 @@ index 2ef0409..8b3cc6a 100644
  	if (device->wakeup.flags.run_wake)
  		device_set_run_wake(&dev->dev, true);
  
-diff --git a/drivers/acpi/sleep.c b/drivers/acpi/sleep.c
-index 721d93b..b0ddef6 100644
---- a/drivers/acpi/sleep.c
-+++ b/drivers/acpi/sleep.c
-@@ -643,7 +643,7 @@ int acpi_pm_device_sleep_state(struct device *dev, int *d_min_p)
- 			if (acpi_target_sleep_state != ACPI_STATE_S0 ||
- 			    status != AE_NOT_FOUND)
- 				d_max = d_min;
+diff -up linux-2.6.35.x86_64/drivers/acpi/sleep.c.mjg linux-2.6.35.x86_64/drivers/acpi/sleep.c
+--- linux-2.6.35.x86_64/drivers/acpi/sleep.c.mjg	2010-10-04 13:52:05.103789729 -0400
++++ linux-2.6.35.x86_64/drivers/acpi/sleep.c	2010-10-04 13:52:50.963801333 -0400
+@@ -631,9 +631,9 @@ int acpi_pm_device_sleep_state(struct de
+ 		acpi_method[3] = 'W';
+ 		status = acpi_evaluate_integer(handle, acpi_method, NULL,
+ 						&d_max);
+-		if (ACPI_FAILURE(status)) {
++		if (ACPI_FAILURE(status) && status != AE_NOT_FOUND) {
+ 			d_max = d_min;
 -		} else if (d_max < d_min) {
 +		} else if (ACPI_SUCCESS(status) && d_max < d_min) {
  			/* Warn the user of the broken DSDT */
  			printk(KERN_WARNING "ACPI: Wrong value from %s\n",
  				acpi_method);
-diff --git a/drivers/char/ipmi/ipmi_si_intf.c b/drivers/char/ipmi/ipmi_si_intf.c
-index 035da9e..62a48b2 100644
---- a/drivers/char/ipmi/ipmi_si_intf.c
-+++ b/drivers/char/ipmi/ipmi_si_intf.c
-@@ -1970,7 +1970,7 @@ static int acpi_gpe_irq_setup(struct smi_info *info)
+diff -up linux-2.6.35.x86_64/drivers/char/ipmi/ipmi_si_intf.c.mjg linux-2.6.35.x86_64/drivers/char/ipmi/ipmi_si_intf.c
+--- linux-2.6.35.x86_64/drivers/char/ipmi/ipmi_si_intf.c.mjg	2010-10-04 13:52:05.097789597 -0400
++++ linux-2.6.35.x86_64/drivers/char/ipmi/ipmi_si_intf.c	2010-10-04 13:52:50.958801223 -0400
+@@ -1959,7 +1959,7 @@ static int acpi_gpe_irq_setup(struct smi
  					  info->irq,
  					  ACPI_GPE_LEVEL_TRIGGERED,
  					  &ipmi_acpi_gpe,
@@ -436,11 +416,126 @@ index 035da9e..62a48b2 100644
  	if (status != AE_OK) {
  		dev_warn(info->dev, "%s unable to claim ACPI GPE %d,"
  			 " running polled\n", DEVICE_NAME, info->irq);
-diff --git a/include/acpi/acpixf.h b/include/acpi/acpixf.h
-index 53b7cfd..740eb7e 100644
---- a/include/acpi/acpixf.h
-+++ b/include/acpi/acpixf.h
-@@ -258,7 +258,8 @@ acpi_remove_address_space_handler(acpi_handle device,
+diff -up linux-2.6.35.x86_64/drivers/pci/pci.c.mjg linux-2.6.35.x86_64/drivers/pci/pci.c
+--- linux-2.6.35.x86_64/drivers/pci/pci.c.mjg	2010-10-04 13:52:05.105789773 -0400
++++ linux-2.6.35.x86_64/drivers/pci/pci.c	2010-10-04 13:52:50.965801377 -0400
+@@ -38,6 +38,19 @@ EXPORT_SYMBOL(pci_pci_problems);
+ 
+ unsigned int pci_pm_d3_delay;
+ 
++static void pci_pme_list_scan(struct work_struct *work);
++
++static LIST_HEAD(pci_pme_list);
++static DEFINE_MUTEX(pci_pme_list_mutex);
++static DECLARE_DELAYED_WORK(pci_pme_work, pci_pme_list_scan);
++
++struct pci_pme_device {
++	struct list_head list;
++	struct pci_dev *dev;
++};
++
++#define PME_TIMEOUT 1000 /* How long between PME checks */
++
+ static void pci_dev_d3_sleep(struct pci_dev *dev)
+ {
+ 	unsigned int delay = dev->d3_delay;
+@@ -1331,6 +1344,32 @@ bool pci_pme_capable(struct pci_dev *dev
+ 	return !!(dev->pme_support & (1 << state));
+ }
+ 
++static void pci_pme_list_scan(struct work_struct *work)
++{
++	struct pci_pme_device *pme_dev;
++
++	mutex_lock(&pci_pme_list_mutex);
++	if (!list_empty(&pci_pme_list)) {
++		list_for_each_entry(pme_dev, &pci_pme_list, list)
++			pci_pme_wakeup(pme_dev->dev, NULL);
++		schedule_delayed_work(&pci_pme_work, msecs_to_jiffies(PME_TIMEOUT));
++	}
++	mutex_unlock(&pci_pme_list_mutex);
++}
++
++/**
++ * pci_external_pme - is a device an external PCI PME source?
++ * @dev: PCI device to check
++ *
++ */
++
++static bool pci_external_pme(struct pci_dev *dev)
++{
++	if (pci_is_pcie(dev) || dev->bus->number == 0)
++		return false;
++	return true;
++}
++
+ /**
+  * pci_pme_active - enable or disable PCI device's PME# function
+  * @dev: PCI device to handle.
+@@ -1354,6 +1393,44 @@ void pci_pme_active(struct pci_dev *dev,
+ 
+ 	pci_write_config_word(dev, dev->pm_cap + PCI_PM_CTRL, pmcsr);
+ 
++	/* PCI (as opposed to PCIe) PME requires that the device have
++	   its PME# line hooked up correctly. Not all hardware vendors
++	   do this, so the PME never gets delivered and the device
++	   remains asleep. The easiest way around this is to
++	   periodically walk the list of suspended devices and check
++	   whether any have their PME flag set. The assumption is that
++	   we'll wake up often enough anyway that this won't be a huge
++	   hit, and the power savings from the devices will still be a
++	   win. */
++
++	if (pci_external_pme(dev)) {
++		struct pci_pme_device *pme_dev;
++		if (enable) {
++			pme_dev = kmalloc(sizeof(struct pci_pme_device),
++					  GFP_KERNEL);
++			if (!pme_dev)
++				goto out;
++			pme_dev->dev = dev;
++			mutex_lock(&pci_pme_list_mutex);
++			list_add(&pme_dev->list, &pci_pme_list);
++			if (list_is_singular(&pci_pme_list))
++				schedule_delayed_work(&pci_pme_work,
++						      msecs_to_jiffies(PME_TIMEOUT));
++			mutex_unlock(&pci_pme_list_mutex);
++		} else {
++			mutex_lock(&pci_pme_list_mutex);
++			list_for_each_entry(pme_dev, &pci_pme_list, list) {
++				if (pme_dev->dev == dev) {
++					list_del(&pme_dev->list);
++					kfree(pme_dev);
++					break;
++				}
++			}
++			mutex_unlock(&pci_pme_list_mutex);
++		}
++	}
++
++out:
+ 	dev_printk(KERN_DEBUG, &dev->dev, "PME# %s\n",
+ 			enable ? "enabled" : "disabled");
+ }
+diff -up linux-2.6.35.x86_64/drivers/pci/pci.h.mjg linux-2.6.35.x86_64/drivers/pci/pci.h
+--- linux-2.6.35.x86_64/drivers/pci/pci.h.mjg	2010-10-04 13:52:05.100789663 -0400
++++ linux-2.6.35.x86_64/drivers/pci/pci.h	2010-10-04 13:52:50.960801267 -0400
+@@ -63,11 +63,8 @@ struct pci_platform_pm_ops {
+ extern int pci_set_platform_pm(struct pci_platform_pm_ops *ops);
+ extern void pci_update_current_state(struct pci_dev *dev, pci_power_t state);
+ extern void pci_disable_enabled_device(struct pci_dev *dev);
+-extern bool pci_check_pme_status(struct pci_dev *dev);
+ extern int pci_finish_runtime_suspend(struct pci_dev *dev);
+-extern void pci_wakeup_event(struct pci_dev *dev);
+ extern int __pci_pme_wakeup(struct pci_dev *dev, void *ign);
+-extern void pci_pme_wakeup_bus(struct pci_bus *bus);
+ extern void pci_pm_init(struct pci_dev *dev);
+ extern void platform_pci_wakeup_init(struct pci_dev *dev);
+ extern void pci_allocate_cap_save_buffers(struct pci_dev *dev);
+diff -up linux-2.6.35.x86_64/include/acpi/acpixf.h.mjg linux-2.6.35.x86_64/include/acpi/acpixf.h
+--- linux-2.6.35.x86_64/include/acpi/acpixf.h.mjg	2010-10-04 13:52:05.099789641 -0400
++++ linux-2.6.35.x86_64/include/acpi/acpixf.h	2010-10-04 13:52:50.959801245 -0400
+@@ -253,7 +253,8 @@ acpi_remove_address_space_handler(acpi_h
  acpi_status
  acpi_install_gpe_handler(acpi_handle gpe_device,
  			 u32 gpe_number,
@@ -450,3 +545,16 @@ index 53b7cfd..740eb7e 100644
  
  acpi_status
  acpi_remove_gpe_handler(acpi_handle gpe_device,
+diff -up linux-2.6.35.x86_64/include/linux/pci.h.mjg linux-2.6.35.x86_64/include/linux/pci.h
+--- linux-2.6.35.x86_64/include/linux/pci.h.mjg	2010-10-04 13:52:05.101789685 -0400
++++ linux-2.6.35.x86_64/include/linux/pci.h	2010-10-04 13:52:50.962801311 -0400
+@@ -819,6 +819,9 @@ pci_power_t pci_target_state(struct pci_
+ int pci_prepare_to_sleep(struct pci_dev *dev);
+ int pci_back_from_sleep(struct pci_dev *dev);
+ bool pci_dev_run_wake(struct pci_dev *dev);
++bool pci_check_pme_status(struct pci_dev *dev);
++void pci_wakeup_event(struct pci_dev *dev);
++void pci_pme_wakeup_bus(struct pci_bus *bus);
+ 
+ static inline int pci_enable_wake(struct pci_dev *dev, pci_power_t state,
+ 				  bool enable)
diff --git a/linux-2.6-i386-nx-emulation.patch b/linux-2.6-i386-nx-emulation.patch
index 24a2ed500..094c5b845 100644
--- a/linux-2.6-i386-nx-emulation.patch
+++ b/linux-2.6-i386-nx-emulation.patch
@@ -394,8 +394,8 @@
 --- a/arch/x86/mm/tlb.c
 +++ b/arch/x86/mm/tlb.c
 @@ -6,6 +6,7 @@
+ #include <linux/interrupt.h>
  #include <linux/module.h>
- #include <linux/cpu.h>
  
 +#include <asm/desc.h>
  #include <asm/tlbflush.h>
diff --git a/linux-2.6-qcserial-autosuspend.patch b/linux-2.6-qcserial-autosuspend.patch
new file mode 100644
index 000000000..524898813
--- /dev/null
+++ b/linux-2.6-qcserial-autosuspend.patch
@@ -0,0 +1,23 @@
+commit 0fe584342da141957c8642191b508ad39e9b19e6
+Author: Matthew Garrett <mjg@redhat.com>
+Date:   Thu Sep 16 13:39:31 2010 -0400
+
+    usbserial: Enable USB autosuspend by default on qcserial
+    
+    Seems to work fine in my testing.
+    
+    Signed-off-by: Matthew Garrett <mjg@redhat.com>
+
+diff --git a/drivers/usb/serial/qcserial.c b/drivers/usb/serial/qcserial.c
+index cde67ca..2846ad8 100644
+--- a/drivers/usb/serial/qcserial.c
++++ b/drivers/usb/serial/qcserial.c
+@@ -118,6 +118,8 @@ static int qcprobe(struct usb_serial *serial, const struct usb_device_id *id)
+ 
+ 	spin_lock_init(&data->susp_lock);
+ 
++	usb_enable_autosuspend(serial->dev);
++
+ 	switch (nintf) {
+ 	case 1:
+ 		/* QDL mode */
diff --git a/linux-2.6-utrace-ptrace.patch b/linux-2.6-utrace-ptrace.patch
index caeae6760..a609fb628 100644
--- a/linux-2.6-utrace-ptrace.patch
+++ b/linux-2.6-utrace-ptrace.patch
@@ -29,9 +29,9 @@ index a85fb41..235c1b0 100644
  
 -
 +extern void ptrace_notify_stop(struct task_struct *tracee);
- extern long arch_ptrace(struct task_struct *child, long request,
- 			unsigned long addr, unsigned long data);
+ extern long arch_ptrace(struct task_struct *child, long request, long addr, long data);
  extern int ptrace_traceme(void);
+ extern int ptrace_readdata(struct task_struct *tsk, unsigned long src, char __user *dst, int len);
 diff --git a/kernel/Makefile b/kernel/Makefile
 index 6004913..b09c9a5 100644  
 --- a/kernel/Makefile
@@ -673,7 +673,7 @@ index ...a90078d 100644
 +	 * under ptrace.
 +	 */
 +	retval = -ERESTARTNOINTR;
-+	if (mutex_lock_interruptible(&task->signal->cred_guard_mutex))
++	if (mutex_lock_interruptible(&task->cred_guard_mutex))
 +		goto out;
 +
 +	task_lock(task);
@@ -703,7 +703,7 @@ index ...a90078d 100644
 +unlock_tasklist:
 +	write_unlock_irq(&tasklist_lock);
 +unlock_creds:
-+	mutex_unlock(&task->signal->cred_guard_mutex);
++	mutex_unlock(&task->cred_guard_mutex);
 +out:
 +	return retval;
 +}
@@ -1014,7 +1014,7 @@ index ...a90078d 100644
 +			 struct iovec *kiov);
 +
 +int ptrace_request(struct task_struct *child, long request,
-+		   unsigned long addr, unsigned long data)
++		   long addr, long data)
 +{
 +	struct utrace_engine *engine = ptrace_lookup_engine(child);
 +	siginfo_t siginfo;
@@ -1412,7 +1412,7 @@ index 23bde94..daed9e8 100644
 +#define arch_ptrace_attach(child)	do { } while (0)
 +#endif
 +
-+SYSCALL_DEFINE4(ptrace, long, request, long, pid, unsigned long, addr, unsigned long, data)
++SYSCALL_DEFINE4(ptrace, long, request, long, pid, long, addr, long, data)
 +{
 +	struct task_struct *child;
 +	long ret;
@@ -1453,7 +1453,7 @@ index 23bde94..daed9e8 100644
 +	return ret;
 +}
 +
-+int generic_ptrace_peekdata(struct task_struct *tsk, unsigned long addr, unsigned long data)
++int generic_ptrace_peekdata(struct task_struct *tsk, long addr, long data)
 +{
 +	unsigned long tmp;
 +	int copied;
@@ -1464,7 +1464,7 @@ index 23bde94..daed9e8 100644
 +	return put_user(tmp, (unsigned long __user *)data);
 +}
 +
-+int generic_ptrace_pokedata(struct task_struct *tsk, unsigned long addr, unsigned long data)
++int generic_ptrace_pokedata(struct task_struct *tsk, long addr, long data)
 +{
 +	int copied;
 +
@@ -1595,7 +1595,7 @@ index 23bde94..daed9e8 100644
  unlock_tasklist:
 -	write_unlock_irq(&tasklist_lock);
 -unlock_creds:
--	mutex_unlock(&task->signal->cred_guard_mutex);
+-	mutex_unlock(&task->cred_guard_mutex);
 -out:
 -	return retval;
 -}
@@ -1656,7 +1656,7 @@ index 23bde94..daed9e8 100644
 - * If it should reap itself, return true.
 +	write_unlock_irq(&tasklist_lock);
 +unlock_creds:
-+	mutex_unlock(&task->signal->cred_guard_mutex);
++	mutex_unlock(&task->cred_guard_mutex);
 +out:
 +	return retval;
 +}
@@ -1765,7 +1765,7 @@ index 23bde94..daed9e8 100644
 -	return copied;
 -}
 -
- static int ptrace_setoptions(struct task_struct *child, unsigned long data)
+ static int ptrace_setoptions(struct task_struct *child, long data)
  {
  	child->ptrace &= ~PT_TRACE_MASK;
 @@ -530,47 +677,6 @@ static int ptrace_resume(struct task_str
@@ -1814,9 +1814,9 @@ index 23bde94..daed9e8 100644
 -#endif
 -
  int ptrace_request(struct task_struct *child, long request,
- 		   unsigned long addr, unsigned long data)
+ 		   long addr, long data)
  {
-@@ -686,91 +792,7 @@ int ptrace_request(struct task_struct *c
+@@ -686,88 +792,7 @@ int ptrace_request(struct task_struct *c
  	return ret;
  }
  
@@ -1839,8 +1839,7 @@ index 23bde94..daed9e8 100644
 -#define arch_ptrace_attach(child)	do { } while (0)
 -#endif
 -
--SYSCALL_DEFINE4(ptrace, long, request, long, pid, unsigned long, addr,
--		unsigned long, data)
+-SYSCALL_DEFINE4(ptrace, long, request, long, pid, long, addr, long, data)
 -{
 -	struct task_struct *child;
 -	long ret;
@@ -1881,8 +1880,7 @@ index 23bde94..daed9e8 100644
 -	return ret;
 -}
 -
--int generic_ptrace_peekdata(struct task_struct *tsk, unsigned long addr,
--			    unsigned long data)
+-int generic_ptrace_peekdata(struct task_struct *tsk, long addr, long data)
 -{
 -	unsigned long tmp;
 -	int copied;
@@ -1893,8 +1891,7 @@ index 23bde94..daed9e8 100644
 -	return put_user(tmp, (unsigned long __user *)data);
 -}
 -
--int generic_ptrace_pokedata(struct task_struct *tsk, unsigned long addr,
--			    unsigned long data)
+-int generic_ptrace_pokedata(struct task_struct *tsk, long addr, long data)
 -{
 -	int copied;
 -
diff --git a/linux-2.6-utrace.patch b/linux-2.6-utrace.patch
index c50b83ea8..a12357e68 100644
--- a/linux-2.6-utrace.patch
+++ b/linux-2.6-utrace.patch
@@ -44,7 +44,7 @@ index 34929f2..884c36b 100644
 +++ b/Documentation/DocBook/Makefile
 @@ -14,7 +14,7 @@ DOCBOOKS := z8530book.xml mcabook.xml de
  	    genericirq.xml s390-drivers.xml uio-howto.xml scsi.xml \
- 	    80211.xml debugobjects.xml sh.xml regulator.xml \
+ 	    mac80211.xml debugobjects.xml sh.xml regulator.xml \
  	    alsa-driver-api.xml writing-an-alsa-driver.xml \
 -	    tracepoint.xml media.xml drm.xml
 +	    tracepoint.xml utrace.xml media.xml drm.xml
@@ -1636,9 +1636,9 @@ index 2de5b1c..a283086 100644
 +	  kernel interface exported to kernel modules, to track events in
 +	  user threads, extract and change user thread state.
 +
- source "kernel/irq/Kconfig"
- 
  menu "RCU Subsystem"
+ 
+ choice
 diff --git a/kernel/Makefile b/kernel/Makefile
 index 0b72d1a..6004913 100644  
 --- a/kernel/Makefile
diff --git a/linux-2.6-uvc-autosuspend.patch b/linux-2.6-uvc-autosuspend.patch
new file mode 100644
index 000000000..6c965c62f
--- /dev/null
+++ b/linux-2.6-uvc-autosuspend.patch
@@ -0,0 +1,22 @@
+commit 4a3757e0ae269f710292dd75013532c5a57ccb00
+Author: Matthew Garrett <mjg@redhat.com>
+Date:   Thu Sep 16 13:38:38 2010 -0400
+
+    uvc: Enable USB autosuspend by default on uvcvideo
+    
+    We've been doing this for a while in Fedora without any complaints.
+    
+    Signed-off-by: Matthew Garrett <mjg@redhat.com>
+
+diff --git a/drivers/media/video/uvc/uvc_driver.c b/drivers/media/video/uvc/uvc_driver.c
+index 8bdd940..28ed5b4 100644
+--- a/drivers/media/video/uvc/uvc_driver.c
++++ b/drivers/media/video/uvc/uvc_driver.c
+@@ -1814,6 +1814,7 @@ static int uvc_probe(struct usb_interface *intf,
+ 	}
+ 
+ 	uvc_trace(UVC_TRACE_PROBE, "UVC device initialized.\n");
++	usb_enable_autosuspend(udev);
+ 	return 0;
+ 
+ error:
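Both autosuspend patches above reduce to the same one-line idiom: once probe has succeeded, opt the underlying USB device into runtime autosuspend. A minimal, hypothetical probe fragment (not taken from either driver; the function name is made up) showing that idiom in isolation:

#include <linux/usb.h>

/* Hypothetical probe: enable USB autosuspend by default once the device
 * is known to work.  Userspace can still override the policy through the
 * device's power/control attribute in sysfs. */
static int demo_probe(struct usb_interface *intf,
		      const struct usb_device_id *id)
{
	struct usb_device *udev = interface_to_usbdev(intf);

	/* ... normal per-device setup would go here ... */

	usb_enable_autosuspend(udev);

	return 0;
}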
diff --git a/pci-crs-fixes.patch b/pci-crs-fixes.patch
new file mode 100644
index 000000000..b4fd01502
--- /dev/null
+++ b/pci-crs-fixes.patch
@@ -0,0 +1,593 @@
+    This revision is to address two problems found by Horst H. von Brand while
+    testing the v2 patches in Fedora:
+      https://bugzilla.redhat.com/show_bug.cgi?id=637647
+    On his machine, we don't use _CRS by default, and the BIOS left some bridge
+    windows disabled.
+
+    Problem 1: When we assigned space for the windows, we started at the top
+    and allocated [mem 0xffffffffffe00000-0xffffffffffffffff], which is
+    obviously useless because the CPU doesn't support physical addresses that
+    large.
+
+    Problem 2: Subsequent allocations failed because I made an error in
+    find_resource().  We look for available space in [child->end + 1,
+    root->end], and if the last child ends exactly at 0xffffffffffffffff, we
+    wrap around and start from zero.
+
+    I made the top-down allocation conditional: an arch can select it at
+    boot-time, and there's a kernel command line option to change it for
+    debugging.
+
+
+When we move PCI devices, we currently allocate space bottom-up: we look at
+PCI bus resources in the order we found them, we scan gaps between child
+resources from the bottom up, and we align the new space at the bottom of an
+available region.
+
+On x86, we move PCI devices more than we used to because we now pay attention
+to the PCI host bridge windows from ACPI.  For example, when we find a device
+that's outside all the known host bridge windows, we try to move it into a
+window, and we look for space starting at the bottom.
+
+Windows does similar device moves, but it looks for space top-down rather than
+bottom-up.  Since most machines are better-tested with Windows than Linux, this
+difference means that Linux is more likely to trip over BIOS bugs in the PCI
+host bridge window descriptions than Windows is.
+
+We've had several reports of Dell machines where the BIOS leaves the AHCI
+controller outside the host bridge windows (BIOS bug #1), *and* the lowest
+host bridge window includes an area that doesn't actually reach PCI (BIOS
+bug #2).  The result is that Windows (which moves AHCI to the top of a window)
+works fine, while Linux (which moves AHCI to the bottom, buggy, area) doesn't
+work.
+
+These patches change Linux to allocate space more like Windows does:
+
+    1) The x86 pcibios_align_resource() will choose space from the
+       end of an available area, not the beginning.
+
+    2) In the generic allocate_resource() path, we'll look for space
+       between existing children from the top, not from the bottom.
+
+    3) When pci_bus_alloc_resource() looks for available space, it
+       will start from the highest window, not the first one we found.
+
+This series fixes a 2.6.34 regression that prevents many Dell Precision
+workstations from booting:
+
+    https://bugzilla.kernel.org/show_bug.cgi?id=16228
+
+Changes from v3 to v4:
+    - Use round_down() rather than adding ALIGN_DOWN().
+    - Replace ARCH_HAS_TOP_DOWN_ALLOC #define with a boot-time architecture
+      choice and add a "resource_alloc_from_bottom" command line option to
+      revert to the old behavior (NOTE: this only affects allocate_resource(),
+      not pcibios_align_resource() or pci_bus_alloc_resource()).
+    - Fixed find_resource_from_top() again; it still didn't handle a child
+      that ended at the parent's end correctly.
+
+Changes from v2 to v3:
+    - Updated iomem_resource.end to reflect the end of usable physical address
+      space.  Otherwise, we might allocate right up to 0xffffffff_ffffffff,
+      which isn't usable.
+    - Make allocate_resource() change conditional on ARCH_HAS_TOP_DOWN_ALLOC.
+      Without arch-specific changes like the above, it's too dangerous to
+      make this change for everybody at once.
+    - Fix 64-bit wraparound in find_resource().  If the last child happened
+      to end at ~0, we computed the highest available space as [child->end + 1,
+      root->end], which makes us think the available space started at 0,
+      which makes us return space that may already be allocated.
+
+Changes from v1 to v2:
+    - Moved check for allocating before the available area from
+      pcibios_align_resource() to find_resource().  Better to do it
+      after the alignment callback is done, and make it generic.
+    - Fixed pcibios_align_resource() alignment.  If we start from the
+      end of the available area, we must align *downward*, not upward.
+    - Fixed pcibios_align_resource() ISA alias avoidance.  Again, since
+      the starting point is the end of the area, we must align downward
+      when we avoid aliased areas.
+---
+
+Bjorn Helgaas (6):
+      resources: ensure alignment callback doesn't allocate below available start
+      resources: support allocating space within a region from the top down
+      PCI: allocate bus resources from the top down
+      x86/PCI: allocate space from the end of a region, not the beginning
+      x86: update iomem_resource end based on CPU physical address capabilities
+      x86: allocate space within a region top-down
+
+
+ Documentation/kernel-parameters.txt |    5 ++
+ arch/x86/kernel/setup.c             |    2 +
+ arch/x86/pci/i386.c                 |   17 ++++--
+ drivers/pci/bus.c                   |   53 +++++++++++++++++--
+ include/linux/ioport.h              |    1 
+ kernel/resource.c                   |   99 ++++++++++++++++++++++++++++++++++-
+ 6 files changed, 163 insertions(+), 14 deletions(-)
+--
+To unsubscribe from this list: send the line "unsubscribe linux-pci" in
+the body of a message to majordomo@vger.kernel.org
+More majordomo info at  http://vger.kernel.org/majordomo-info.html
+The alignment callback returns a proposed location, which may have been
+adjusted to avoid ISA aliases or for other architecture-specific reasons.
+We already had a check ("tmp.start < tmp.end") to make sure the callback
+doesn't return a location above the available area.
+
+This patch adds a check to make sure the callback doesn't return something
+*below* the available area, as may happen if the callback tries to allocate
+top-down.
+
+Signed-off-by: Bjorn Helgaas <bjorn.helgaas@hp.com>
+---
+
+ kernel/resource.c |   10 ++++++++--
+ 1 files changed, 8 insertions(+), 2 deletions(-)
+
+
+diff --git a/kernel/resource.c b/kernel/resource.c
+index 7b36976..ace2269 100644
+--- a/kernel/resource.c
++++ b/kernel/resource.c
+@@ -371,6 +371,7 @@ static int find_resource(struct resource *root, struct resource *new,
+ {
+ 	struct resource *this = root->child;
+ 	struct resource tmp = *new;
++	resource_size_t start;
+ 
+ 	tmp.start = root->start;
+ 	/*
+@@ -391,8 +392,13 @@ static int find_resource(struct resource *root, struct resource *new,
+ 		if (tmp.end > max)
+ 			tmp.end = max;
+ 		tmp.start = ALIGN(tmp.start, align);
+-		if (alignf)
+-			tmp.start = alignf(alignf_data, &tmp, size, align);
++		if (alignf) {
++			start = alignf(alignf_data, &tmp, size, align);
++			if (tmp.start <= start && start <= tmp.end)
++				tmp.start = start;
++			else
++				tmp.start = tmp.end;
++		}
+ 		if (tmp.start < tmp.end && tmp.end - tmp.start >= size - 1) {
+ 			new->start = tmp.start;
+ 			new->end = tmp.start + size - 1;
+
+--
+To unsubscribe from this list: send the line "unsubscribe linux-pci" in
+the body of a message to majordomo@vger.kernel.org
+More majordomo info at  http://vger.kernel.org/majordomo-info.html
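A hedged sketch of what the new check guards against (the callback and its preferred_base argument are hypothetical, not part of the series): an alignment callback that favours a fixed base may return a start below the window it was handed, and find_resource() now treats that as "no fit in this gap" instead of allocating outside the available range.

#include <linux/kernel.h>
#include <linux/ioport.h>

/* Hypothetical alignment callback: try to place the resource at a fixed
 * preferred base.  When that base lies below avail->start, the returned
 * value fails the new "tmp.start <= start && start <= tmp.end" test in
 * find_resource(), so the gap is skipped rather than misused. */
static resource_size_t demo_align(void *data, const struct resource *avail,
				  resource_size_t size, resource_size_t align)
{
	resource_size_t preferred_base = *(resource_size_t *)data;

	if (preferred_base + size - 1 <= avail->end)
		return preferred_base;	/* may be below avail->start */

	return ALIGN(avail->start, align);
}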
+Allocate space from the top of a region first, then work downward,
+if an architecture desires this.
+
+When we allocate space from a resource, we look for gaps between children
+of the resource.  Previously, we always looked at gaps from the bottom up.
+For example, given this:
+
+    [mem 0xbff00000-0xf7ffffff] PCI Bus 0000:00
+      [mem 0xbff00000-0xbfffffff] gap -- available
+      [mem 0xc0000000-0xdfffffff] PCI Bus 0000:02
+      [mem 0xe0000000-0xf7ffffff] gap -- available
+
+we attempted to allocate from the [mem 0xbff00000-0xbfffffff] gap first,
+then the [mem 0xe0000000-0xf7ffffff] gap.
+
+With this patch an architecture can choose to allocate from the top gap
+[mem 0xe0000000-0xf7ffffff] first.
+
+We can't do this across the board because iomem_resource.end is initialized
+to 0xffffffff_ffffffff on 64-bit architectures, and most machines can't
+address the entire 64-bit physical address space.  Therefore, we only
+allocate top-down if the arch requests it by clearing
+"resource_alloc_from_bottom".
+
+Signed-off-by: Bjorn Helgaas <bjorn.helgaas@hp.com>
+---
+
+ Documentation/kernel-parameters.txt |    5 ++
+ include/linux/ioport.h              |    1 
+ kernel/resource.c                   |   89 +++++++++++++++++++++++++++++++++++
+ 3 files changed, 94 insertions(+), 1 deletions(-)
+
+
+diff --git a/Documentation/kernel-parameters.txt b/Documentation/kernel-parameters.txt
+index 8dd7248..fe50cbd 100644
+--- a/Documentation/kernel-parameters.txt
++++ b/Documentation/kernel-parameters.txt
+@@ -2156,6 +2156,11 @@ and is between 256 and 4096 characters. It is defined in the file
+ 	reset_devices	[KNL] Force drivers to reset the underlying device
+ 			during initialization.
+ 
++	resource_alloc_from_bottom
++			Allocate new resources from the beginning of available
++			space, not the end.  If you need to use this, please
++			report a bug.
++
+ 	resume=		[SWSUSP]
+ 			Specify the partition device for software suspend
+ 
+diff --git a/include/linux/ioport.h b/include/linux/ioport.h
+index b227902..d377ea8 100644
+--- a/include/linux/ioport.h
++++ b/include/linux/ioport.h
+@@ -112,6 +112,7 @@ struct resource_list {
+ /* PC/ISA/whatever - the normal PC address spaces: IO and memory */
+ extern struct resource ioport_resource;
+ extern struct resource iomem_resource;
++extern int resource_alloc_from_bottom;
+ 
+ extern struct resource *request_resource_conflict(struct resource *root, struct resource *new);
+ extern int request_resource(struct resource *root, struct resource *new);
+diff --git a/kernel/resource.c b/kernel/resource.c
+index ace2269..8d337a9 100644
+--- a/kernel/resource.c
++++ b/kernel/resource.c
+@@ -40,6 +40,23 @@ EXPORT_SYMBOL(iomem_resource);
+ 
+ static DEFINE_RWLOCK(resource_lock);
+ 
++/*
++ * By default, we allocate free space bottom-up.  The architecture can request
++ * top-down by clearing this flag.  The user can override the architecture's
++ * choice with the "resource_alloc_from_bottom" kernel boot option, but that
++ * should only be a debugging tool.
++ */
++int resource_alloc_from_bottom = 1;
++
++static __init int setup_alloc_from_bottom(char *s)
++{
++	printk(KERN_INFO
++	       "resource: allocating from bottom-up; please report a bug\n");
++	resource_alloc_from_bottom = 1;
++	return 0;
++}
++early_param("resource_alloc_from_bottom", setup_alloc_from_bottom);
++
+ static void *r_next(struct seq_file *m, void *v, loff_t *pos)
+ {
+ 	struct resource *p = v;
+@@ -358,7 +375,74 @@ int __weak page_is_ram(unsigned long pfn)
+ }
+ 
+ /*
++ * Find the resource before "child" in the sibling list of "root" children.
++ */
++static struct resource *find_sibling_prev(struct resource *root, struct resource *child)
++{
++	struct resource *this;
++
++	for (this = root->child; this; this = this->sibling)
++		if (this->sibling == child)
++			return this;
++
++	return NULL;
++}
++
++/*
++ * Find empty slot in the resource tree given range and alignment.
++ * This version allocates from the end of the root resource first.
++ */
++static int find_resource_from_top(struct resource *root, struct resource *new,
++				  resource_size_t size, resource_size_t min,
++				  resource_size_t max, resource_size_t align,
++				  resource_size_t (*alignf)(void *,
++						   const struct resource *,
++						   resource_size_t,
++						   resource_size_t),
++				  void *alignf_data)
++{
++	struct resource *this;
++	struct resource tmp = *new;
++	resource_size_t start;
++
++	tmp.start = root->end;
++	tmp.end = root->end;
++
++	this = find_sibling_prev(root, NULL);
++	for (;;) {
++		if (this) {
++			if (this->end < root->end)
++				tmp.start = this->end + 1;
++		} else
++			tmp.start = root->start;
++		if (tmp.start < min)
++			tmp.start = min;
++		if (tmp.end > max)
++			tmp.end = max;
++		tmp.start = ALIGN(tmp.start, align);
++		if (alignf) {
++			start = alignf(alignf_data, &tmp, size, align);
++			if (tmp.start <= start && start <= tmp.end)
++				tmp.start = start;
++			else
++				tmp.start = tmp.end;
++		}
++		if (tmp.start < tmp.end && tmp.end - tmp.start >= size - 1) {
++			new->start = tmp.start;
++			new->end = tmp.start + size - 1;
++			return 0;
++		}
++		if (!this || this->start == root->start)
++			break;
++		tmp.end = this->start - 1;
++		this = find_sibling_prev(root, this);
++	}
++	return -EBUSY;
++}
++
++/*
+  * Find empty slot in the resource tree given range and alignment.
++ * This version allocates from the beginning of the root resource first.
+  */
+ static int find_resource(struct resource *root, struct resource *new,
+ 			 resource_size_t size, resource_size_t min,
+@@ -435,7 +519,10 @@ int allocate_resource(struct resource *root, struct resource *new,
+ 	int err;
+ 
+ 	write_lock(&resource_lock);
+-	err = find_resource(root, new, size, min, max, align, alignf, alignf_data);
++	if (resource_alloc_from_bottom)
++		err = find_resource(root, new, size, min, max, align, alignf, alignf_data);
++	else
++		err = find_resource_from_top(root, new, size, min, max, align, alignf, alignf_data);
+ 	if (err >= 0 && __request_resource(root, new))
+ 		err = -EBUSY;
+ 	write_unlock(&resource_lock);
+
+--
+To unsubscribe from this list: send the line "unsubscribe linux-pci" in
+the body of a message to majordomo@vger.kernel.org
+More majordomo info at  http://vger.kernel.org/majordomo-info.html
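A small caller sketch (hypothetical, not from the series) of what the new path does with the example window above: with resource_alloc_from_bottom cleared, find_resource_from_top() examines the [mem 0xe0000000-0xf7ffffff] gap first. Only the gap selection is top-down; with a NULL alignment callback the request still lands at the low end of the chosen gap, and it is the later x86 pcibios_align_resource() change that moves it to the end.

#include <linux/ioport.h>

/* bus_window is assumed to be the [mem 0xbff00000-0xf7ffffff] resource
 * with a single child at [mem 0xc0000000-0xdfffffff], as in the example. */
static int demo_alloc(struct resource *bus_window)
{
	static struct resource new_res = {
		.name  = "demo",
		.flags = IORESOURCE_MEM,
	};

	int err = allocate_resource(bus_window, &new_res, 0x100000,
				    bus_window->start, bus_window->end,
				    0x100000, NULL, NULL);

	/* With resource_alloc_from_bottom == 0 the top gap is tried first,
	 * so on success new_res spans [mem 0xe0000000-0xe00fffff]. */
	return err;
}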
+Allocate space from the highest-address PCI bus resource first, then work
+downward.
+
+Previously, we looked for space in PCI host bridge windows in the order
+we discovered the windows.  For example, given the following windows
+(discovered via an ACPI _CRS method):
+
+    pci_root PNP0A03:00: host bridge window [mem 0x000a0000-0x000bffff]
+    pci_root PNP0A03:00: host bridge window [mem 0x000c0000-0x000effff]
+    pci_root PNP0A03:00: host bridge window [mem 0x000f0000-0x000fffff]
+    pci_root PNP0A03:00: host bridge window [mem 0xbff00000-0xf7ffffff]
+    pci_root PNP0A03:00: host bridge window [mem 0xff980000-0xff980fff]
+    pci_root PNP0A03:00: host bridge window [mem 0xff97c000-0xff97ffff]
+    pci_root PNP0A03:00: host bridge window [mem 0xfed20000-0xfed9ffff]
+
+we attempted to allocate from [mem 0x000a0000-0x000bffff] first, then
+[mem 0x000c0000-0x000effff], and so on.
+
+With this patch, we allocate from [mem 0xff980000-0xff980fff] first, then
+[mem 0xff97c000-0xff97ffff], [mem 0xfed20000-0xfed9ffff], etc.
+
+Allocating top-down follows Windows practice, so we're less likely to
+trip over BIOS defects in the _CRS description.
+
+On the machine above (a Dell T3500), the [mem 0xbff00000-0xbfffffff] region
+doesn't actually work and is likely a BIOS defect.  The symptom is that we
+move the AHCI controller to 0xbff00000, which leads to "Boot has failed,
+sleeping forever," a BUG in ahci_stop_engine(), or some other boot failure.
+
+Reference: https://bugzilla.kernel.org/show_bug.cgi?id=16228#c43
+Reference: https://bugzilla.redhat.com/show_bug.cgi?id=620313
+Reference: https://bugzilla.redhat.com/show_bug.cgi?id=629933
+Reported-by: Brian Bloniarz <phunge0@hotmail.com>
+Reported-and-tested-by: Stefan Becker <chemobejk@gmail.com>
+Reported-by: Denys Vlasenko <dvlasenk@redhat.com>
+Signed-off-by: Bjorn Helgaas <bjorn.helgaas@hp.com>
+---
+
+ drivers/pci/bus.c |   53 ++++++++++++++++++++++++++++++++++++++++++++++++-----
+ 1 files changed, 48 insertions(+), 5 deletions(-)
+
+
+diff --git a/drivers/pci/bus.c b/drivers/pci/bus.c
+index 7f0af0e..172bf26 100644
+--- a/drivers/pci/bus.c
++++ b/drivers/pci/bus.c
+@@ -64,6 +64,49 @@ void pci_bus_remove_resources(struct pci_bus *bus)
+ 	}
+ }
+ 
++/*
++ * Find the highest-address bus resource below the cursor "res".  If the
++ * cursor is NULL, return the highest resource.
++ */
++static struct resource *pci_bus_find_resource_prev(struct pci_bus *bus,
++						   unsigned int type,
++						   struct resource *res)
++{
++	struct resource *r, *prev = NULL;
++	int i;
++
++	pci_bus_for_each_resource(bus, r, i) {
++		if (!r)
++			continue;
++
++		if ((r->flags & IORESOURCE_TYPE_BITS) != type)
++			continue;
++
++		/* If this resource is at or past the cursor, skip it */
++		if (res) {
++			if (r == res)
++				continue;
++			if (r->end > res->end)
++				continue;
++			if (r->end == res->end && r->start > res->start)
++				continue;
++		}
++
++		if (!prev)
++			prev = r;
++
++		/*
++		 * A small resource is higher than a large one that ends at
++		 * the same address.
++		 */
++		if (r->end > prev->end ||
++		    (r->end == prev->end && r->start > prev->start))
++			prev = r;
++	}
++
++	return prev;
++}
++
+ /**
+  * pci_bus_alloc_resource - allocate a resource from a parent bus
+  * @bus: PCI bus
+@@ -89,9 +132,10 @@ pci_bus_alloc_resource(struct pci_bus *bus, struct resource *res,
+ 					  resource_size_t),
+ 		void *alignf_data)
+ {
+-	int i, ret = -ENOMEM;
++	int ret = -ENOMEM;
+ 	struct resource *r;
+ 	resource_size_t max = -1;
++	unsigned int type = res->flags & IORESOURCE_TYPE_BITS;
+ 
+ 	type_mask |= IORESOURCE_IO | IORESOURCE_MEM;
+ 
+@@ -99,10 +143,9 @@ pci_bus_alloc_resource(struct pci_bus *bus, struct resource *res,
+ 	if (!(res->flags & IORESOURCE_MEM_64))
+ 		max = PCIBIOS_MAX_MEM_32;
+ 
+-	pci_bus_for_each_resource(bus, r, i) {
+-		if (!r)
+-			continue;
+-
++	/* Look for space at highest addresses first */
++	r = pci_bus_find_resource_prev(bus, type, NULL);
++	for ( ; r; r = pci_bus_find_resource_prev(bus, type, r)) {
+ 		/* type_mask must match */
+ 		if ((res->flags ^ r->flags) & type_mask)
+ 			continue;
+
+--
+To unsubscribe from this list: send the line "unsubscribe linux-pci" in
+the body of a message to majordomo@vger.kernel.org
+More majordomo info at  http://vger.kernel.org/majordomo-info.html
+Allocate from the end of a region, not the beginning.
+
+For example, if we need to allocate 0x800 bytes for a device on bus
+0000:00 given these resources:
+
+    [mem 0xbff00000-0xdfffffff] PCI Bus 0000:00
+      [mem 0xc0000000-0xdfffffff] PCI Bus 0000:02
+
+the available space at [mem 0xbff00000-0xbfffffff] is passed to the
+alignment callback (pcibios_align_resource()).  Prior to this patch, we
+would put the new 0x800 byte resource at the beginning of that available
+space, i.e., at [mem 0xbff00000-0xbff007ff].
+
+With this patch, we put it at the end, at [mem 0xbffff800-0xbfffffff].
+
+Reference: https://bugzilla.kernel.org/show_bug.cgi?id=16228#c41
+Signed-off-by: Bjorn Helgaas <bjorn.helgaas@hp.com>
+---
+
+ arch/x86/pci/i386.c |   17 +++++++++++------
+ 1 files changed, 11 insertions(+), 6 deletions(-)
+
+
+diff --git a/arch/x86/pci/i386.c b/arch/x86/pci/i386.c
+index 5525309..826140a 100644
+--- a/arch/x86/pci/i386.c
++++ b/arch/x86/pci/i386.c
+@@ -65,16 +65,21 @@ pcibios_align_resource(void *data, const struct resource *res,
+ 			resource_size_t size, resource_size_t align)
+ {
+ 	struct pci_dev *dev = data;
+-	resource_size_t start = res->start;
++	resource_size_t start = round_down(res->end - size + 1, align);
+ 
+ 	if (res->flags & IORESOURCE_IO) {
+-		if (skip_isa_ioresource_align(dev))
+-			return start;
+-		if (start & 0x300)
+-			start = (start + 0x3ff) & ~0x3ff;
++
++		/*
++		 * If we're avoiding ISA aliases, the largest contiguous I/O
++		 * port space is 256 bytes.  Clearing bits 9 and 10 preserves
++		 * all 256-byte and smaller alignments, so the result will
++		 * still be correctly aligned.
++		 */
++		if (!skip_isa_ioresource_align(dev))
++			start &= ~0x300;
+ 	} else if (res->flags & IORESOURCE_MEM) {
+ 		if (start < BIOS_END)
+-			start = BIOS_END;
++			start = res->end;	/* fail; no space */
+ 	}
+ 	return start;
+ }
+
+--
+To unsubscribe from this list: send the line "unsubscribe linux-pci" in
+the body of a message to majordomo@vger.kernel.org
+More majordomo info at  http://vger.kernel.org/majordomo-info.html
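Working the new starting-point computation through the example above (the 0x800 alignment is an assumption for illustration; the patch only changes how the start is chosen):

/* start = round_down(res->end - size + 1, align) for the available range
 * [mem 0xbff00000-0xbfffffff] and a 0x800-byte request: */
resource_size_t start = round_down(0xbfffffffULL - 0x800 + 1, 0x800);
/* 0xbfffffff - 0x800 + 1 == 0xbffff800, which is already 0x800-aligned,
 * so start == 0xbffff800 and the resource lands at
 * [mem 0xbffff800-0xbfffffff] -- the end of the window, as described. */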
+The iomem_resource map reflects the available physical address space.
+We statically initialize the end to -1, i.e., 0xffffffff_ffffffff, but
+of course we can only use as much as the CPU can address.
+
+This patch updates the end based on the CPU capabilities, so we don't
+mistakenly allocate space that isn't usable, as we're likely to do when
+allocating from the top-down.
+
+Signed-off-by: Bjorn Helgaas <bjorn.helgaas@hp.com>
+---
+
+ arch/x86/kernel/setup.c |    1 +
+ 1 files changed, 1 insertions(+), 0 deletions(-)
+
+
+diff --git a/arch/x86/kernel/setup.c b/arch/x86/kernel/setup.c
+index c3a4fbb..922b5a1 100644
+--- a/arch/x86/kernel/setup.c
++++ b/arch/x86/kernel/setup.c
+@@ -788,6 +788,7 @@ void __init setup_arch(char **cmdline_p)
+ 
+ 	x86_init.oem.arch_setup();
+ 
++	iomem_resource.end = (1ULL << boot_cpu_data.x86_phys_bits) - 1;
+ 	setup_memory_map();
+ 	parse_setup_data();
+ 	/* update the e820_saved too */
+
+--
+To unsubscribe from this list: send the line "unsubscribe linux-pci" in
+the body of a message to majordomo@vger.kernel.org
+More majordomo info at  http://vger.kernel.org/majordomo-info.html
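For a concrete sense of the new bound (the 36-bit figure is only an illustrative assumption, not taken from the patch):

/* e.g. on a CPU reporting boot_cpu_data.x86_phys_bits == 36: */
iomem_resource.end = (1ULL << 36) - 1;	/* 0x0000000fffffffff, i.e. 64 GiB - 1 */
/* Top-down allocations are now bounded by what the CPU can actually
 * address rather than by the static 0xffffffffffffffff initializer. */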
+Request that allocate_resource() use available space from high addresses
+first, rather than the default of using low addresses first.
+
+The most common place this makes a difference is when we move or assign
+new PCI device resources.  Low addresses are generally scarce, so it's
+better to use high addresses when possible.  This follows Windows practice
+for PCI allocation.
+
+Reference: https://bugzilla.kernel.org/show_bug.cgi?id=16228#c42
+Signed-off-by: Bjorn Helgaas <bjorn.helgaas@hp.com>
+---
+
+ arch/x86/kernel/setup.c |    1 +
+ 1 files changed, 1 insertions(+), 0 deletions(-)
+
+
+diff --git a/arch/x86/kernel/setup.c b/arch/x86/kernel/setup.c
+index 922b5a1..0fe76df 100644
+--- a/arch/x86/kernel/setup.c
++++ b/arch/x86/kernel/setup.c
+@@ -788,6 +788,7 @@ void __init setup_arch(char **cmdline_p)
+ 
+ 	x86_init.oem.arch_setup();
+ 
++	resource_alloc_from_bottom = 0;
+ 	iomem_resource.end = (1ULL << boot_cpu_data.x86_phys_bits) - 1;
+ 	setup_memory_map();
+ 	parse_setup_data();
+
+--
+To unsubscribe from this list: send the line "unsubscribe linux-pci" in
+the body of a message to majordomo@vger.kernel.org
+More majordomo info at  http://vger.kernel.org/majordomo-info.html
\ No newline at end of file
diff --git a/runtime_pm_fixups.patch b/runtime_pm_fixups.patch
index 789115810..6315dc176 100644
--- a/runtime_pm_fixups.patch
+++ b/runtime_pm_fixups.patch
@@ -110,5 +110,5 @@ index 4de84ce..284f43c 100644
 -int acpi_disable_wakeup_device_power(struct acpi_device *dev);
 +int acpi_disable_wakeup_device_power(struct acpi_device *dev, int state);
  
- #ifdef CONFIG_PM_OPS
+ #ifdef CONFIG_PM_SLEEP
  int acpi_pm_device_sleep_state(struct device *, int *);
diff --git a/sources b/sources
index d52c071f5..df0066685 100644
--- a/sources
+++ b/sources
@@ -1,3 +1,3 @@
 61f3739a73afb6914cb007f37fb09b62  linux-2.6.36.tar.bz2
-a84cf559615b5168ec1d5591841601ed  patch-2.6.37-rc5.bz2
-dbc90858467e28b39539ad6d3415a956  patch-2.6.37-rc5-git2.bz2
+dd38a6caf08df2822f93541ee95aed7d  patch-2.6.36.1.bz2
+33b11b4b8fcd47601a0e1e51586c4b04  patch-2.6.36.2-rc1.bz2
diff --git a/wacom-01-add-fuzz-parameters-to-features.patch b/wacom-01-add-fuzz-parameters-to-features.patch
new file mode 100644
index 000000000..4162726d3
--- /dev/null
+++ b/wacom-01-add-fuzz-parameters-to-features.patch
@@ -0,0 +1,69 @@
+From: Henrik Rydberg <rydberg@euromail.se>
+Date: Sun, 5 Sep 2010 19:25:11 +0000 (-0700)
+Subject: Input: wacom - add fuzz parameters to features
+X-Git-Url: http://git.kernel.org/?p=linux%2Fkernel%2Fgit%2Fdtor%2Finput.git;a=commitdiff_plain;h=fed87e655a2c20468d628b37424af58287803afe
+
+Input: wacom - add fuzz parameters to features
+
+The signal-to-noise ratio varies between devices, but currently all
+devices are treated the same way. Add fuzz parameters to the feature
+struct, allowing for tailored treatment of devices.
+
+Signed-off-by: Henrik Rydberg <rydberg@euromail.se>
+Acked-by: Ping Cheng <pingc@wacom.com>
+Signed-off-by: Dmitry Torokhov <dtor@mail.ru>
+---
+
+diff --git a/drivers/input/tablet/wacom_sys.c b/drivers/input/tablet/wacom_sys.c
+index 42ba369..e510e4f 100644
+--- a/drivers/input/tablet/wacom_sys.c
++++ b/drivers/input/tablet/wacom_sys.c
+@@ -333,8 +333,12 @@ static int wacom_retrieve_hid_descriptor(struct usb_interface *intf,
+ 	struct usb_host_interface *interface = intf->cur_altsetting;
+ 	struct hid_descriptor *hid_desc;
+ 
+-	/* default device to penabled */
++	/* default features */
+ 	features->device_type = BTN_TOOL_PEN;
++	features->x_fuzz = 4;
++	features->y_fuzz = 4;
++	features->pressure_fuzz = 0;
++	features->distance_fuzz = 0;
+ 
+ 	/* only Tablet PCs need to retrieve the info */
+ 	if ((features->type != TABLETPC) && (features->type != TABLETPC2FG))
+diff --git a/drivers/input/tablet/wacom_wac.c b/drivers/input/tablet/wacom_wac.c
+index 6e29bad..6d7e164 100644
+--- a/drivers/input/tablet/wacom_wac.c
++++ b/drivers/input/tablet/wacom_wac.c
+@@ -951,9 +951,12 @@ void wacom_setup_input_capabilities(struct input_dev *input_dev,
+ 
+ 	__set_bit(BTN_TOUCH, input_dev->keybit);
+ 
+-	input_set_abs_params(input_dev, ABS_X, 0, features->x_max, 4, 0);
+-	input_set_abs_params(input_dev, ABS_Y, 0, features->y_max, 4, 0);
+-	input_set_abs_params(input_dev, ABS_PRESSURE, 0, features->pressure_max, 0, 0);
++	input_set_abs_params(input_dev, ABS_X, 0, features->x_max,
++			     features->x_fuzz, 0);
++	input_set_abs_params(input_dev, ABS_Y, 0, features->y_max,
++			     features->y_fuzz, 0);
++	input_set_abs_params(input_dev, ABS_PRESSURE, 0, features->pressure_max,
++			     features->pressure_fuzz, 0);
+ 
+ 	__set_bit(ABS_MISC, input_dev->absbit);
+ 
+diff --git a/drivers/input/tablet/wacom_wac.h b/drivers/input/tablet/wacom_wac.h
+index 99e1a54..d769e9a 100644
+--- a/drivers/input/tablet/wacom_wac.h
++++ b/drivers/input/tablet/wacom_wac.h
+@@ -73,6 +73,10 @@ struct wacom_features {
+ 	int y_phy;
+ 	unsigned char unit;
+ 	unsigned char unitExpo;
++	int x_fuzz;
++	int y_fuzz;
++	int pressure_fuzz;
++	int distance_fuzz;
+ };
+ 
+ struct wacom_shared {
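The new fuzz fields only pay off once they are tailored per device; a hypothetical fragment showing the intent (the value 16 mirrors what a later patch in this series picks for the noisier Bamboo touch sensor):

/* Hypothetical tailoring: a noisy touch sensor gets a larger fuzz than the
 * default pen value of 4, so the input core filters more of the jitter. */
features->pressure_fuzz = 16;
input_set_abs_params(input_dev, ABS_PRESSURE, 0, features->pressure_max,
		     features->pressure_fuzz, 0);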
diff --git a/wacom-02-parse-the-bamboo-device-family.patch b/wacom-02-parse-the-bamboo-device-family.patch
new file mode 100644
index 000000000..f24d04d01
--- /dev/null
+++ b/wacom-02-parse-the-bamboo-device-family.patch
@@ -0,0 +1,122 @@
+From: Ping Cheng <pinglinux@gmail.com>
+Date: Sun, 5 Sep 2010 19:25:40 +0000 (-0700)
+Subject: Input: wacom - parse the Bamboo device family
+X-Git-Url: http://git.kernel.org/?p=linux%2Fkernel%2Fgit%2Fdtor%2Finput.git;a=commitdiff_plain;h=4a88081e739a41d6d70bace7e0a027f9054ab540
+
+Input: wacom - parse the Bamboo device family
+
+The Bamboo devices have multiple interfaces which need to be set up
+separately. Use the HID parsing mechanism to achieve that.
+
+Signed-off-by: Ping Cheng <pinglinux@gmail.com>
+Signed-off-by: Henrik Rydberg <rydberg@euromail.se>
+Signed-off-by: Dmitry Torokhov <dtor@mail.ru>
+---
+
+diff --git a/drivers/input/tablet/wacom_sys.c b/drivers/input/tablet/wacom_sys.c
+index e510e4f..98cba08 100644
+--- a/drivers/input/tablet/wacom_sys.c
++++ b/drivers/input/tablet/wacom_sys.c
+@@ -195,17 +195,30 @@ static int wacom_parse_hid(struct usb_interface *intf, struct hid_descriptor *hi
+ 							features->pktlen = WACOM_PKGLEN_TPC2FG;
+ 							features->device_type = BTN_TOOL_TRIPLETAP;
+ 						}
+-						features->x_max =
+-							get_unaligned_le16(&report[i + 3]);
+-						features->x_phy =
+-							get_unaligned_le16(&report[i + 6]);
+-						features->unit = report[i + 9];
+-						features->unitExpo = report[i + 11];
+-						i += 12;
++						if (features->type == BAMBOO_PT) {
++							/* need to reset back */
++							features->pktlen = WACOM_PKGLEN_BBTOUCH;
++							features->device_type = BTN_TOOL_TRIPLETAP;
++							features->x_phy =
++								get_unaligned_le16(&report[i + 5]);
++							features->x_max =
++								get_unaligned_le16(&report[i + 8]);
++							i += 15;
++						} else {
++							features->x_max =
++								get_unaligned_le16(&report[i + 3]);
++							features->x_phy =
++								get_unaligned_le16(&report[i + 6]);
++							features->unit = report[i + 9];
++							features->unitExpo = report[i + 11];
++							i += 12;
++						}
+ 					} else if (pen) {
+ 						/* penabled only accepts exact bytes of data */
+ 						if (features->type == TABLETPC2FG)
+ 							features->pktlen = WACOM_PKGLEN_GRAPHIRE;
++						if (features->type == BAMBOO_PT)
++							features->pktlen = WACOM_PKGLEN_BBFUN;
+ 						features->device_type = BTN_TOOL_PEN;
+ 						features->x_max =
+ 							get_unaligned_le16(&report[i + 3]);
+@@ -234,6 +247,15 @@ static int wacom_parse_hid(struct usb_interface *intf, struct hid_descriptor *hi
+ 							features->y_phy =
+ 								get_unaligned_le16(&report[i + 6]);
+ 							i += 7;
++						} else if (features->type == BAMBOO_PT) {
++							/* need to reset back */
++							features->pktlen = WACOM_PKGLEN_BBTOUCH;
++							features->device_type = BTN_TOOL_TRIPLETAP;
++							features->y_phy =
++								get_unaligned_le16(&report[i + 3]);
++							features->y_max =
++								get_unaligned_le16(&report[i + 6]);
++							i += 12;
+ 						} else {
+ 							features->y_max =
+ 								features->x_max;
+@@ -245,6 +267,8 @@ static int wacom_parse_hid(struct usb_interface *intf, struct hid_descriptor *hi
+ 						/* penabled only accepts exact bytes of data */
+ 						if (features->type == TABLETPC2FG)
+ 							features->pktlen = WACOM_PKGLEN_GRAPHIRE;
++						if (features->type == BAMBOO_PT)
++							features->pktlen = WACOM_PKGLEN_BBFUN;
+ 						features->device_type = BTN_TOOL_PEN;
+ 						features->y_max =
+ 							get_unaligned_le16(&report[i + 3]);
+@@ -341,7 +365,8 @@ static int wacom_retrieve_hid_descriptor(struct usb_interface *intf,
+ 	features->distance_fuzz = 0;
+ 
+ 	/* only Tablet PCs need to retrieve the info */
+-	if ((features->type != TABLETPC) && (features->type != TABLETPC2FG))
++	if ((features->type != TABLETPC) && (features->type != TABLETPC2FG) &&
++	    (features->type != BAMBOO_PT))
+ 		goto out;
+ 
+ 	if (usb_get_extra_descriptor(interface, HID_DEVICET_HID, &hid_desc)) {
+@@ -499,7 +524,8 @@ static int wacom_probe(struct usb_interface *intf, const struct usb_device_id *i
+ 
+ 	strlcpy(wacom_wac->name, features->name, sizeof(wacom_wac->name));
+ 
+-	if (features->type == TABLETPC || features->type == TABLETPC2FG) {
++	if (features->type == TABLETPC || features->type == TABLETPC2FG ||
++	    features->type == BAMBOO_PT) {
+ 		/* Append the device type to the name */
+ 		strlcat(wacom_wac->name,
+ 			features->device_type == BTN_TOOL_PEN ?
+diff --git a/drivers/input/tablet/wacom_wac.h b/drivers/input/tablet/wacom_wac.h
+index d769e9a..fb30895 100644
+--- a/drivers/input/tablet/wacom_wac.h
++++ b/drivers/input/tablet/wacom_wac.h
+@@ -21,6 +21,7 @@
+ #define WACOM_PKGLEN_INTUOS	10
+ #define WACOM_PKGLEN_TPC1FG	 5
+ #define WACOM_PKGLEN_TPC2FG	14
++#define WACOM_PKGLEN_BBTOUCH	20
+ 
+ /* device IDs */
+ #define STYLUS_DEVICE_ID	0x02
+@@ -44,6 +45,7 @@ enum {
+ 	PTU,
+ 	PL,
+ 	DTU,
++	BAMBOO_PT,
+ 	INTUOS,
+ 	INTUOS3S,
+ 	INTUOS3,
diff --git a/wacom-03-collect-device-quirks-into-single-function.patch b/wacom-03-collect-device-quirks-into-single-function.patch
new file mode 100644
index 000000000..7bf768770
--- /dev/null
+++ b/wacom-03-collect-device-quirks-into-single-function.patch
@@ -0,0 +1,107 @@
+From: Henrik Rydberg <rydberg@euromail.se>
+Date: Sun, 5 Sep 2010 19:26:16 +0000 (-0700)
+Subject: Input: wacom - collect device quirks into single function
+X-Git-Url: http://git.kernel.org/?p=linux%2Fkernel%2Fgit%2Fdtor%2Finput.git;a=commitdiff_plain;h=bc73dd39e78dd6e5b34cd938b7f037a8bc041bdd
+
+Input: wacom - collect device quirks into single function
+
+Collect device-specific code into a single function, and use quirks to
+flag specific behavior instead.
+
+Signed-off-by: Henrik Rydberg <rydberg@euromail.se>
+Acked-by: Ping Cheng <pingc@wacom.com>
+Signed-off-by: Dmitry Torokhov <dtor@mail.ru>
+---
+
+diff --git a/drivers/input/tablet/wacom.h b/drivers/input/tablet/wacom.h
+index 284dfaa..de5adb1 100644
+--- a/drivers/input/tablet/wacom.h
++++ b/drivers/input/tablet/wacom.h
+@@ -118,6 +118,7 @@ struct wacom {
+ extern const struct usb_device_id wacom_ids[];
+ 
+ void wacom_wac_irq(struct wacom_wac *wacom_wac, size_t len);
++void wacom_setup_device_quirks(struct wacom_features *features);
+ void wacom_setup_input_capabilities(struct input_dev *input_dev,
+ 				    struct wacom_wac *wacom_wac);
+ #endif
+diff --git a/drivers/input/tablet/wacom_sys.c b/drivers/input/tablet/wacom_sys.c
+index 98cba08..fc6fd53 100644
+--- a/drivers/input/tablet/wacom_sys.c
++++ b/drivers/input/tablet/wacom_sys.c
+@@ -381,12 +381,6 @@ static int wacom_retrieve_hid_descriptor(struct usb_interface *intf,
+ 	if (error)
+ 		goto out;
+ 
+-	/* touch device found but size is not defined. use default */
+-	if (features->device_type == BTN_TOOL_DOUBLETAP && !features->x_max) {
+-		features->x_max = 1023;
+-		features->y_max = 1023;
+-	}
+-
+  out:
+ 	return error;
+ }
+@@ -522,10 +516,11 @@ static int wacom_probe(struct usb_interface *intf, const struct usb_device_id *i
+ 	if (error)
+ 		goto fail2;
+ 
++	wacom_setup_device_quirks(features);
++
+ 	strlcpy(wacom_wac->name, features->name, sizeof(wacom_wac->name));
+ 
+-	if (features->type == TABLETPC || features->type == TABLETPC2FG ||
+-	    features->type == BAMBOO_PT) {
++	if (features->quirks & WACOM_QUIRK_MULTI_INPUT) {
+ 		/* Append the device type to the name */
+ 		strlcat(wacom_wac->name,
+ 			features->device_type == BTN_TOOL_PEN ?
+diff --git a/drivers/input/tablet/wacom_wac.c b/drivers/input/tablet/wacom_wac.c
+index 6d7e164..44b4a59 100644
+--- a/drivers/input/tablet/wacom_wac.c
++++ b/drivers/input/tablet/wacom_wac.c
+@@ -941,6 +941,22 @@ static void wacom_setup_intuos(struct wacom_wac *wacom_wac)
+ 	input_set_abs_params(input_dev, ABS_THROTTLE, -1023, 1023, 0, 0);
+ }
+ 
++
++void wacom_setup_device_quirks(struct wacom_features *features)
++{
++
++	/* touch device found but size is not defined. use default */
++	if (features->device_type == BTN_TOOL_DOUBLETAP && !features->x_max) {
++		features->x_max = 1023;
++		features->y_max = 1023;
++	}
++
++	/* these device have multiple inputs */
++	if (features->type == TABLETPC || features->type == TABLETPC2FG ||
++	    features->type == BAMBOO_PT)
++		features->quirks |= WACOM_QUIRK_MULTI_INPUT;
++}
++
+ void wacom_setup_input_capabilities(struct input_dev *input_dev,
+ 				    struct wacom_wac *wacom_wac)
+ {
+diff --git a/drivers/input/tablet/wacom_wac.h b/drivers/input/tablet/wacom_wac.h
+index fb30895..6a1ff10 100644
+--- a/drivers/input/tablet/wacom_wac.h
++++ b/drivers/input/tablet/wacom_wac.h
+@@ -38,6 +38,9 @@
+ #define WACOM_REPORT_TPC1FG		6
+ #define WACOM_REPORT_TPC2FG		13
+ 
++/* device quirks */
++#define WACOM_QUIRK_MULTI_INPUT		0x0001
++
+ enum {
+ 	PENPARTNER = 0,
+ 	GRAPHIRE,
+@@ -79,6 +82,7 @@ struct wacom_features {
+ 	int y_fuzz;
+ 	int pressure_fuzz;
+ 	int distance_fuzz;
++	unsigned quirks;
+ };
+ 
+ struct wacom_shared {
diff --git a/wacom-04-add-support-for-the-bamboo-touch-trackpad.patch b/wacom-04-add-support-for-the-bamboo-touch-trackpad.patch
new file mode 100644
index 000000000..f104b2ce3
--- /dev/null
+++ b/wacom-04-add-support-for-the-bamboo-touch-trackpad.patch
@@ -0,0 +1,172 @@
+From: Henrik Rydberg <rydberg@euromail.se>
+Date: Sun, 5 Sep 2010 19:53:16 +0000 (-0700)
+Subject: Input: wacom - add support for the Bamboo Touch trackpad
+X-Git-Url: http://git.kernel.org/?p=linux%2Fkernel%2Fgit%2Fdtor%2Finput.git;a=commitdiff_plain;h=cb734c03680eaaad64a20a666300eafd1ac260b2
+
+Input: wacom - add support for the Bamboo Touch trackpad
+
+Add support for the Bamboo Touch trackpad, and make it work with
+both the Synaptics X Driver and the Multitouch X Driver. The device
+uses MT slots internally, so the choice of protocol is a given.
+
+Signed-off-by: Henrik Rydberg <rydberg@euromail.se>
+Acked-by: Ping Cheng <pingc@wacom.com>
+Signed-off-by: Dmitry Torokhov <dtor@mail.ru>
+---
+
+diff --git a/drivers/input/tablet/wacom_wac.c b/drivers/input/tablet/wacom_wac.c
+index 44b4a59..4e9b1dd 100644
+--- a/drivers/input/tablet/wacom_wac.c
++++ b/drivers/input/tablet/wacom_wac.c
+@@ -855,6 +855,53 @@ static int wacom_tpc_irq(struct wacom_wac *wacom, size_t len)
+ 	return retval;
+ }
+ 
++static int wacom_bpt_irq(struct wacom_wac *wacom, size_t len)
++{
++	struct input_dev *input = wacom->input;
++	unsigned char *data = wacom->data;
++	int sp = 0, sx = 0, sy = 0, count = 0;
++	int i;
++
++	if (len != WACOM_PKGLEN_BBTOUCH)
++		return 0;
++
++	for (i = 0; i < 2; i++) {
++		int p = data[9 * i + 2];
++		input_mt_slot(input, i);
++		if (p) {
++			int x = get_unaligned_be16(&data[9 * i + 3]) & 0x7ff;
++			int y = get_unaligned_be16(&data[9 * i + 5]) & 0x7ff;
++			input_report_abs(input, ABS_MT_PRESSURE, p);
++			input_report_abs(input, ABS_MT_POSITION_X, x);
++			input_report_abs(input, ABS_MT_POSITION_Y, y);
++			if (wacom->id[i] < 0)
++				wacom->id[i] = wacom->trk_id++ & MAX_TRACKING_ID;
++			if (!count++)
++				sp = p, sx = x, sy = y;
++		} else {
++			wacom->id[i] = -1;
++		}
++		input_report_abs(input, ABS_MT_TRACKING_ID, wacom->id[i]);
++	}
++
++	input_report_key(input, BTN_TOUCH, count > 0);
++	input_report_key(input, BTN_TOOL_FINGER, count == 1);
++	input_report_key(input, BTN_TOOL_DOUBLETAP, count == 2);
++
++	input_report_abs(input, ABS_PRESSURE, sp);
++	input_report_abs(input, ABS_X, sx);
++	input_report_abs(input, ABS_Y, sy);
++
++	input_report_key(input, BTN_LEFT, (data[1] & 0x08) != 0);
++	input_report_key(input, BTN_FORWARD, (data[1] & 0x04) != 0);
++	input_report_key(input, BTN_BACK, (data[1] & 0x02) != 0);
++	input_report_key(input, BTN_RIGHT, (data[1] & 0x01) != 0);
++
++	input_sync(input);
++
++	return 0;
++}
++
+ void wacom_wac_irq(struct wacom_wac *wacom_wac, size_t len)
+ {
+ 	bool sync;
+@@ -900,6 +947,10 @@ void wacom_wac_irq(struct wacom_wac *wacom_wac, size_t len)
+ 		sync = wacom_tpc_irq(wacom_wac, len);
+ 		break;
+ 
++	case BAMBOO_PT:
++		sync = wacom_bpt_irq(wacom_wac, len);
++		break;
++
+ 	default:
+ 		sync = false;
+ 		break;
+@@ -955,6 +1006,13 @@ void wacom_setup_device_quirks(struct wacom_features *features)
+ 	if (features->type == TABLETPC || features->type == TABLETPC2FG ||
+ 	    features->type == BAMBOO_PT)
+ 		features->quirks |= WACOM_QUIRK_MULTI_INPUT;
++
++	/* quirks for bamboo touch */
++	if (features->type == BAMBOO_PT &&
++	    features->device_type == BTN_TOOL_TRIPLETAP) {
++		features->pressure_max = 256;
++		features->pressure_fuzz = 16;
++	}
+ }
+ 
+ void wacom_setup_input_capabilities(struct input_dev *input_dev,
+@@ -1095,6 +1153,33 @@ void wacom_setup_input_capabilities(struct input_dev *input_dev,
+ 	case PENPARTNER:
+ 		__set_bit(BTN_TOOL_RUBBER, input_dev->keybit);
+ 		break;
++
++	case BAMBOO_PT:
++		__clear_bit(ABS_MISC, input_dev->absbit);
++
++		if (features->device_type == BTN_TOOL_TRIPLETAP) {
++			__set_bit(BTN_LEFT, input_dev->keybit);
++			__set_bit(BTN_FORWARD, input_dev->keybit);
++			__set_bit(BTN_BACK, input_dev->keybit);
++			__set_bit(BTN_RIGHT, input_dev->keybit);
++
++			__set_bit(BTN_TOOL_FINGER, input_dev->keybit);
++			__set_bit(BTN_TOOL_DOUBLETAP, input_dev->keybit);
++
++			input_mt_create_slots(input_dev, 2);
++			input_set_abs_params(input_dev, ABS_MT_POSITION_X,
++					     0, features->x_max,
++					     features->x_fuzz, 0);
++			input_set_abs_params(input_dev, ABS_MT_POSITION_Y,
++					     0, features->y_max,
++					     features->y_fuzz, 0);
++			input_set_abs_params(input_dev, ABS_MT_PRESSURE,
++					     0, features->pressure_max,
++					     features->pressure_fuzz, 0);
++			input_set_abs_params(input_dev, ABS_MT_TRACKING_ID, 0,
++					     MAX_TRACKING_ID, 0, 0);
++		}
++		break;
+ 	}
+ }
+ 
+@@ -1232,6 +1317,8 @@ static const struct wacom_features wacom_features_0xE3 =
+ 	{ "Wacom ISDv4 E3",       WACOM_PKGLEN_TPC2FG,    26202, 16325,  255,  0, TABLETPC2FG };
+ static const struct wacom_features wacom_features_0x47 =
+ 	{ "Wacom Intuos2 6x8",    WACOM_PKGLEN_INTUOS,    20320, 16240, 1023, 31, INTUOS };
++static struct wacom_features wacom_features_0xD0 =
++	{ "Wacom Bamboo 2FG",     WACOM_PKGLEN_BBFUN,     14720,  9200, 1023, 63, BAMBOO_PT };
+ 
+ #define USB_DEVICE_WACOM(prod)					\
+ 	USB_DEVICE(USB_VENDOR_ID_WACOM, prod),			\
+@@ -1296,6 +1383,7 @@ const struct usb_device_id wacom_ids[] = {
+ 	{ USB_DEVICE_WACOM(0xC6) },
+ 	{ USB_DEVICE_WACOM(0xC7) },
+ 	{ USB_DEVICE_WACOM(0xCE) },
++	{ USB_DEVICE_WACOM(0xD0) },
+ 	{ USB_DEVICE_WACOM(0xF0) },
+ 	{ USB_DEVICE_WACOM(0xCC) },
+ 	{ USB_DEVICE_WACOM(0x90) },
+diff --git a/drivers/input/tablet/wacom_wac.h b/drivers/input/tablet/wacom_wac.h
+index 6a1ff10..a23d6a5 100644
+--- a/drivers/input/tablet/wacom_wac.h
++++ b/drivers/input/tablet/wacom_wac.h
+@@ -41,6 +41,9 @@
+ /* device quirks */
+ #define WACOM_QUIRK_MULTI_INPUT		0x0001
+ 
++/* largest reported tracking id */
++#define MAX_TRACKING_ID			0xfff
++
+ enum {
+ 	PENPARTNER = 0,
+ 	GRAPHIRE,
+@@ -96,6 +99,7 @@ struct wacom_wac {
+ 	int id[3];
+ 	__u32 serial[2];
+ 	int last_finger;
++	int trk_id;
+ 	struct wacom_features features;
+ 	struct wacom_shared *shared;
+ 	struct input_dev *input;
diff --git a/wacom-05-add-a-quirk-for-low-resolution-bamboo-devices.patch b/wacom-05-add-a-quirk-for-low-resolution-bamboo-devices.patch
new file mode 100644
index 000000000..1809ec03e
--- /dev/null
+++ b/wacom-05-add-a-quirk-for-low-resolution-bamboo-devices.patch
@@ -0,0 +1,69 @@
+From: Henrik Rydberg <rydberg@euromail.se>
+Date: Sun, 5 Sep 2010 19:57:13 +0000 (-0700)
+Subject: Input: wacom - add a quirk for low resolution Bamboo devices
+X-Git-Url: http://git.kernel.org/?p=linux%2Fkernel%2Fgit%2Fdtor%2Finput.git;a=commitdiff_plain;h=f4ccbef2886968ed409939531f6dd0474d53a12a
+
+Input: wacom - add a quirk for low resolution Bamboo devices
+
+The Bamboo Touch reports a sub-screen resolution of 480x320.  The
+signal-to-noise ratio is only about 100, so filtering is needed in
+order to reduce the jitter to a usable level. However, the low
+resolution leads to round-off errors in the EWMA filter, resulting in
+extremely jerky pointer motion. This patch explicitly sets a higher
+resolution for those devices, and tells this to the completion handler
+via a low-resolution quirk.
+
+Signed-off-by: Henrik Rydberg <rydberg@euromail.se>
+Acked-by: Ping Cheng <pingc@wacom.com>
+Signed-off-by: Dmitry Torokhov <dtor@mail.ru>
+---
+
+diff --git a/drivers/input/tablet/wacom_wac.c b/drivers/input/tablet/wacom_wac.c
+index 4e9b1dd..2f4411a 100644
+--- a/drivers/input/tablet/wacom_wac.c
++++ b/drivers/input/tablet/wacom_wac.c
+@@ -857,6 +857,7 @@ static int wacom_tpc_irq(struct wacom_wac *wacom, size_t len)
+ 
+ static int wacom_bpt_irq(struct wacom_wac *wacom, size_t len)
+ {
++	struct wacom_features *features = &wacom->features;
+ 	struct input_dev *input = wacom->input;
+ 	unsigned char *data = wacom->data;
+ 	int sp = 0, sx = 0, sy = 0, count = 0;
+@@ -871,6 +872,10 @@ static int wacom_bpt_irq(struct wacom_wac *wacom, size_t len)
+ 		if (p) {
+ 			int x = get_unaligned_be16(&data[9 * i + 3]) & 0x7ff;
+ 			int y = get_unaligned_be16(&data[9 * i + 5]) & 0x7ff;
++			if (features->quirks & WACOM_QUIRK_BBTOUCH_LOWRES) {
++				x <<= 5;
++				y <<= 5;
++			}
+ 			input_report_abs(input, ABS_MT_PRESSURE, p);
+ 			input_report_abs(input, ABS_MT_POSITION_X, x);
+ 			input_report_abs(input, ABS_MT_POSITION_Y, y);
+@@ -1010,8 +1015,13 @@ void wacom_setup_device_quirks(struct wacom_features *features)
+ 	/* quirks for bamboo touch */
+ 	if (features->type == BAMBOO_PT &&
+ 	    features->device_type == BTN_TOOL_TRIPLETAP) {
++		features->x_max <<= 5;
++		features->y_max <<= 5;
++		features->x_fuzz <<= 5;
++		features->y_fuzz <<= 5;
+ 		features->pressure_max = 256;
+ 		features->pressure_fuzz = 16;
++		features->quirks |= WACOM_QUIRK_BBTOUCH_LOWRES;
+ 	}
+ }
+ 
+diff --git a/drivers/input/tablet/wacom_wac.h b/drivers/input/tablet/wacom_wac.h
+index a23d6a5..00ca015 100644
+--- a/drivers/input/tablet/wacom_wac.h
++++ b/drivers/input/tablet/wacom_wac.h
+@@ -40,6 +40,7 @@
+ 
+ /* device quirks */
+ #define WACOM_QUIRK_MULTI_INPUT		0x0001
++#define WACOM_QUIRK_BBTOUCH_LOWRES	0x0002
+ 
+ /* largest reported tracking id */
+ #define MAX_TRACKING_ID			0xfff
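The shift-by-5 scaling, worked through with the 480x320 figure quoted above (an illustration, not additional code from the patch):

/* x_max: 480 << 5 == 15360, y_max: 320 << 5 == 10240.  Each raw sample in
 * the completion handler is shifted by the same 5 bits, so coordinates stay
 * consistent with the enlarged range while the input core's EWMA filter now
 * sees deltas large enough that round-off error no longer dominates. */
x <<= 5;	/* e.g. a raw x of 240 becomes 7680, still mid-screen */
y <<= 5;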
diff --git a/wacom-06-request-tablet-data-for-bamboo-pens.patch b/wacom-06-request-tablet-data-for-bamboo-pens.patch
new file mode 100644
index 000000000..8b31d4eab
--- /dev/null
+++ b/wacom-06-request-tablet-data-for-bamboo-pens.patch
@@ -0,0 +1,55 @@
+From: Chris Bagwell <chris@cnpbagwell.com>
+Date: Sun, 12 Sep 2010 07:08:40 +0000 (-0700)
+Subject: Input: wacom - request tablet data for Bamboo Pens
+X-Git-Url: http://git.kernel.org/?p=linux%2Fkernel%2Fgit%2Fdtor%2Finput.git;a=commitdiff_plain;h=3dc9f40de4dddf9147b80cf15be633189a2b70f4
+
+Input: wacom - request tablet data for Bamboo Pens
+
+Bamboo P&T devices need to use the second form of usb_set_report() to
+ask the hardware to report tablet data.
+
+With the previous addition of Bamboo Touch, BTN_TOOL_TRIPLETAP is now used
+for both the TABLETPC2FG and BAMBOO_PT types, so the check is narrowed to
+match type == TABLETPC2FG.
+
+This change makes the !TABLETPC2FG check in the else branch redundant, so
+it is dropped.
+
+Signed-off-by: Chris Bagwell <chris@cnpbagwell.com>
+Acked-by: Ping Cheng <pingc@wacom.com>
+Signed-off-by: Dmitry Torokhov <dtor@mail.ru>
+---
+
+diff --git a/drivers/input/tablet/wacom_sys.c b/drivers/input/tablet/wacom_sys.c
+index fc6fd53..1e3af29 100644
+--- a/drivers/input/tablet/wacom_sys.c
++++ b/drivers/input/tablet/wacom_sys.c
+@@ -319,8 +319,9 @@ static int wacom_query_tablet_data(struct usb_interface *intf, struct wacom_feat
+ 	if (!rep_data)
+ 		return error;
+ 
+-	/* ask to report tablet data if it is 2FGT or not a Tablet PC */
+-	if (features->device_type == BTN_TOOL_TRIPLETAP) {
++	/* ask to report tablet data if it is 2FGT Tablet PC or
++	 * not a Tablet PC */
++	if (features->type == TABLETPC2FG) {
+ 		do {
+ 			rep_data[0] = 3;
+ 			rep_data[1] = 4;
+@@ -332,7 +333,7 @@ static int wacom_query_tablet_data(struct usb_interface *intf, struct wacom_feat
+ 					WAC_HID_FEATURE_REPORT, report_id,
+ 					rep_data, 3);
+ 		} while ((error < 0 || rep_data[1] != 4) && limit++ < 5);
+-	} else if (features->type != TABLETPC && features->type != TABLETPC2FG) {
++	} else if (features->type != TABLETPC) {
+ 		do {
+ 			rep_data[0] = 2;
+ 			rep_data[1] = 2;
+@@ -364,7 +365,7 @@ static int wacom_retrieve_hid_descriptor(struct usb_interface *intf,
+ 	features->pressure_fuzz = 0;
+ 	features->distance_fuzz = 0;
+ 
+-	/* only Tablet PCs need to retrieve the info */
++	/* only Tablet PCs and Bamboo P&T need to retrieve the info */
+ 	if ((features->type != TABLETPC) && (features->type != TABLETPC2FG) &&
+ 	    (features->type != BAMBOO_PT))
+ 		goto out;
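The "second form" of usb_set_report() referred to in the commit message above is, at the USB level, an ordinary HID SET_REPORT(feature) control transfer carrying a different report id and payload. A rough sketch of one such request follows (assumptions: report id 2 / mode byte 2 as in the non-Tablet-PC branch of the hunk, and a bare usb_device handle rather than the driver's own wacom_set_report() helper):

#include <linux/usb.h>
#include <linux/hid.h>
#include <linux/slab.h>

#define WAC_HID_FEATURE_REPORT	0x03	/* HID report type "feature" on the wire */

/* Illustrative only: issue one SET_REPORT(feature) request asking the tablet
 * to switch reporting modes, as wacom_query_tablet_data() above does inside
 * a retry loop. */
static int example_set_feature_report(struct usb_device *udev, u16 ifnum)
{
	u8 *rep_data;
	int ret;

	rep_data = kmalloc(2, GFP_KERNEL);
	if (!rep_data)
		return -ENOMEM;

	rep_data[0] = 2;	/* report id */
	rep_data[1] = 2;	/* requested reporting mode */

	ret = usb_control_msg(udev, usb_sndctrlpipe(udev, 0),
			      HID_REQ_SET_REPORT,
			      USB_TYPE_CLASS | USB_RECIP_INTERFACE | USB_DIR_OUT,
			      (WAC_HID_FEATURE_REPORT << 8) | rep_data[0],
			      ifnum, rep_data, 2, USB_CTRL_SET_TIMEOUT);

	kfree(rep_data);
	return ret < 0 ? ret : 0;
}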
diff --git a/wacom-07-move-bamboo-touch-irq-to-its-own-function.patch b/wacom-07-move-bamboo-touch-irq-to-its-own-function.patch
new file mode 100644
index 000000000..7495ac390
--- /dev/null
+++ b/wacom-07-move-bamboo-touch-irq-to-its-own-function.patch
@@ -0,0 +1,52 @@
+From: Chris Bagwell <chris@cnpbagwell.com>
+Date: Sun, 12 Sep 2010 07:09:27 +0000 (-0700)
+Subject: Input: wacom - move Bamboo Touch irq to its own function
+X-Git-Url: http://git.kernel.org/?p=linux%2Fkernel%2Fgit%2Fdtor%2Finput.git;a=commitdiff_plain;h=e1d38e49ad97eec5024342e1244279b645e36688
+
+Input: wacom - move Bamboo Touch irq to its own function
+
+This is in preparation for pen support in the same irq handler.
+
+Signed-off-by: Chris Bagwell <chris@cnpbagwell.com>
+Acked-by: Ping Cheng <pingc@wacom.com>
+Signed-off-by: Dmitry Torokhov <dtor@mail.ru>
+---
+
+diff --git a/drivers/input/tablet/wacom_wac.c b/drivers/input/tablet/wacom_wac.c
+index 2f4411a..2f7ed9a 100644
+--- a/drivers/input/tablet/wacom_wac.c
++++ b/drivers/input/tablet/wacom_wac.c
+@@ -855,7 +855,7 @@ static int wacom_tpc_irq(struct wacom_wac *wacom, size_t len)
+ 	return retval;
+ }
+ 
+-static int wacom_bpt_irq(struct wacom_wac *wacom, size_t len)
++static int wacom_bpt_touch(struct wacom_wac *wacom)
+ {
+ 	struct wacom_features *features = &wacom->features;
+ 	struct input_dev *input = wacom->input;
+@@ -863,9 +863,6 @@ static int wacom_bpt_irq(struct wacom_wac *wacom, size_t len)
+ 	int sp = 0, sx = 0, sy = 0, count = 0;
+ 	int i;
+ 
+-	if (len != WACOM_PKGLEN_BBTOUCH)
+-		return 0;
+-
+ 	for (i = 0; i < 2; i++) {
+ 		int p = data[9 * i + 2];
+ 		input_mt_slot(input, i);
+@@ -907,6 +904,14 @@ static int wacom_bpt_irq(struct wacom_wac *wacom, size_t len)
+ 	return 0;
+ }
+ 
++static int wacom_bpt_irq(struct wacom_wac *wacom, size_t len)
++{
++	if (len == WACOM_PKGLEN_BBTOUCH)
++		return wacom_bpt_touch(wacom);
++
++	return 0;
++}
++
+ void wacom_wac_irq(struct wacom_wac *wacom_wac, size_t len)
+ {
+ 	bool sync;
diff --git a/wacom-08-add-support-for-bamboo-pen.patch b/wacom-08-add-support-for-bamboo-pen.patch
new file mode 100644
index 000000000..319bf6c53
--- /dev/null
+++ b/wacom-08-add-support-for-bamboo-pen.patch
@@ -0,0 +1,130 @@
+From: Chris Bagwell <chris@cnpbagwell.com>
+Date: Sun, 12 Sep 2010 07:11:35 +0000 (-0700)
+Subject: Input: wacom - add support for Bamboo Pen
+X-Git-Url: http://git.kernel.org/?p=linux%2Fkernel%2Fgit%2Fdtor%2Finput.git;a=commitdiff_plain;h=2aaacb153689dbe9064e4db7e9d00de0edfc1fa0
+
+Input: wacom - add support for Bamboo Pen
+
+This adds support for the pen on Bamboo Pen and Bamboo Pen&Touch devices.
+The touchpad is handled by the previous Bamboo Touch logic.
+
+Signed-off-by: Chris Bagwell <chris@cnpbagwell.com>
+Acked-by: Ping Cheng <pingc@wacom.com>
+Signed-off-by: Dmitry Torokhov <dtor@mail.ru>
+---
+
+diff --git a/drivers/input/tablet/wacom_wac.c b/drivers/input/tablet/wacom_wac.c
+index 2f7ed9a..536156b 100644
+--- a/drivers/input/tablet/wacom_wac.c
++++ b/drivers/input/tablet/wacom_wac.c
+@@ -904,10 +904,75 @@ static int wacom_bpt_touch(struct wacom_wac *wacom)
+ 	return 0;
+ }
+ 
++static int wacom_bpt_pen(struct wacom_wac *wacom)
++{
++	struct input_dev *input = wacom->input;
++	unsigned char *data = wacom->data;
++	int prox = 0, x = 0, y = 0, p = 0, d = 0, pen = 0, btn1 = 0, btn2 = 0;
++
++	/*
++	 * Similar to the Graphire protocol, data[1] & 0x20 is proximity and
++	 * data[1] & 0x18 is the tool ID.  Comparing against 0x30 is a safety
++	 * check that ignores the 2 unused tool IDs.
++	 */
++	prox = (data[1] & 0x30) == 0x30;
++
++	/*
++	 * All reports shared between PEN and RUBBER tool must be
++	 * forced to a known starting value (zero) when transitioning to
++	 * out-of-prox.
++	 *
++	 * If they are not reset, userspace will appear to lose events when a
++	 * new tool comes into proximity reporting the same values the previous tool sent.
++	 *
++	 * Hardware does report zero in most out-of-prox cases but not all.
++	 */
++	if (prox) {
++		if (!wacom->shared->stylus_in_proximity) {
++			if (data[1] & 0x08) {
++				wacom->tool[0] = BTN_TOOL_RUBBER;
++				wacom->id[0] = ERASER_DEVICE_ID;
++			} else {
++				wacom->tool[0] = BTN_TOOL_PEN;
++				wacom->id[0] = STYLUS_DEVICE_ID;
++			}
++			wacom->shared->stylus_in_proximity = true;
++		}
++		x = le16_to_cpup((__le16 *)&data[2]);
++		y = le16_to_cpup((__le16 *)&data[4]);
++		p = le16_to_cpup((__le16 *)&data[6]);
++		d = data[8];
++		pen = data[1] & 0x01;
++		btn1 = data[1] & 0x02;
++		btn2 = data[1] & 0x04;
++	}
++
++	input_report_key(input, BTN_TOUCH, pen);
++	input_report_key(input, BTN_STYLUS, btn1);
++	input_report_key(input, BTN_STYLUS2, btn2);
++
++	input_report_abs(input, ABS_X, x);
++	input_report_abs(input, ABS_Y, y);
++	input_report_abs(input, ABS_PRESSURE, p);
++	input_report_abs(input, ABS_DISTANCE, d);
++
++	if (!prox) {
++		wacom->id[0] = 0;
++		wacom->shared->stylus_in_proximity = false;
++	}
++
++	input_report_key(input, wacom->tool[0], prox); /* PEN or RUBBER */
++	input_report_abs(input, ABS_MISC, wacom->id[0]); /* TOOL ID */
++
++	return 1;
++}
++
+ static int wacom_bpt_irq(struct wacom_wac *wacom, size_t len)
+ {
+ 	if (len == WACOM_PKGLEN_BBTOUCH)
+ 		return wacom_bpt_touch(wacom);
++	else if (len == WACOM_PKGLEN_BBFUN)
++		return wacom_bpt_pen(wacom);
+ 
+ 	return 0;
+ }
+@@ -1193,6 +1258,11 @@ void wacom_setup_input_capabilities(struct input_dev *input_dev,
+ 					     features->pressure_fuzz, 0);
+ 			input_set_abs_params(input_dev, ABS_MT_TRACKING_ID, 0,
+ 					     MAX_TRACKING_ID, 0, 0);
++		} else if (features->device_type == BTN_TOOL_PEN) {
++			__set_bit(BTN_TOOL_RUBBER, input_dev->keybit);
++			__set_bit(BTN_TOOL_PEN, input_dev->keybit);
++			__set_bit(BTN_STYLUS, input_dev->keybit);
++			__set_bit(BTN_STYLUS2, input_dev->keybit);
+ 		}
+ 		break;
+ 	}
+@@ -1334,6 +1404,12 @@ static const struct wacom_features wacom_features_0x47 =
+ 	{ "Wacom Intuos2 6x8",    WACOM_PKGLEN_INTUOS,    20320, 16240, 1023, 31, INTUOS };
+ static struct wacom_features wacom_features_0xD0 =
+ 	{ "Wacom Bamboo 2FG",     WACOM_PKGLEN_BBFUN,     14720,  9200, 1023, 63, BAMBOO_PT };
++static struct wacom_features wacom_features_0xD1 =
++	{ "Wacom Bamboo 2FG 4x5", WACOM_PKGLEN_BBFUN,     14720,  9200, 1023, 63, BAMBOO_PT };
++static struct wacom_features wacom_features_0xD2 =
++	{ "Wacom Bamboo Craft",   WACOM_PKGLEN_BBFUN,     14720,  9200, 1023, 63, BAMBOO_PT };
++static struct wacom_features wacom_features_0xD3 =
++	{ "Wacom Bamboo 2FG 6x8", WACOM_PKGLEN_BBFUN,     21648, 13530, 1023, 63, BAMBOO_PT };
+ 
+ #define USB_DEVICE_WACOM(prod)					\
+ 	USB_DEVICE(USB_VENDOR_ID_WACOM, prod),			\
+@@ -1399,6 +1475,9 @@ const struct usb_device_id wacom_ids[] = {
+ 	{ USB_DEVICE_WACOM(0xC7) },
+ 	{ USB_DEVICE_WACOM(0xCE) },
+ 	{ USB_DEVICE_WACOM(0xD0) },
++	{ USB_DEVICE_WACOM(0xD1) },
++	{ USB_DEVICE_WACOM(0xD2) },
++	{ USB_DEVICE_WACOM(0xD3) },
+ 	{ USB_DEVICE_WACOM(0xF0) },
+ 	{ USB_DEVICE_WACOM(0xCC) },
+ 	{ USB_DEVICE_WACOM(0x90) },
diff --git a/wacom-09-disable-bamboo-touchpad-when-pen-is-being-used.patch b/wacom-09-disable-bamboo-touchpad-when-pen-is-being-used.patch
new file mode 100644
index 000000000..e14954031
--- /dev/null
+++ b/wacom-09-disable-bamboo-touchpad-when-pen-is-being-used.patch
@@ -0,0 +1,31 @@
+From: Chris Bagwell <chris@cnpbagwell.com>
+Date: Sun, 12 Sep 2010 07:12:28 +0000 (-0700)
+Subject: Input: wacom - disable Bamboo touchpad when pen is being used
+X-Git-Url: http://git.kernel.org/?p=linux%2Fkernel%2Fgit%2Fdtor%2Finput.git;a=commitdiff_plain;h=33d5f713a19b0f5cb93e0594f7206d2730cf39da
+
+Input: wacom - disable Bamboo touchpad when pen is being used
+
+Signed-off-by: Chris Bagwell <chris@cnpbagwell.com>
+Acked-by: Ping Cheng <pingc@wacom.com>
+Signed-off-by: Dmitry Torokhov <dtor@mail.ru>
+---
+
+diff --git a/drivers/input/tablet/wacom_wac.c b/drivers/input/tablet/wacom_wac.c
+index 536156b..e1b65ba 100644
+--- a/drivers/input/tablet/wacom_wac.c
++++ b/drivers/input/tablet/wacom_wac.c
+@@ -866,7 +866,13 @@ static int wacom_bpt_touch(struct wacom_wac *wacom)
+ 	for (i = 0; i < 2; i++) {
+ 		int p = data[9 * i + 2];
+ 		input_mt_slot(input, i);
+-		if (p) {
++		/*
++		 * Touch events need to be disabled while the stylus is
++		 * in proximity, because the user's hand is resting on the
++		 * touchpad and generating unwanted events.  The user still
++		 * expects the tablet buttons to keep working, though.
++		 */
++		if (p && !wacom->shared->stylus_in_proximity) {
+ 			int x = get_unaligned_be16(&data[9 * i + 3]) & 0x7ff;
+ 			int y = get_unaligned_be16(&data[9 * i + 5]) & 0x7ff;
+ 			if (features->quirks & WACOM_QUIRK_BBTOUCH_LOWRES) {
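The palm rejection added above relies on a single flag shared between the tablet's pen and touch interfaces, wacom->shared->stylus_in_proximity: the pen path raises it when a tool comes into proximity and clears it when the tool leaves, and the touch path simply suppresses contacts while it is set. A stripped-down sketch of that pattern with standalone types (not the driver's structures):

#include <stdbool.h>

/* Illustrative only: in the driver this state lives in a structure shared
 * by the pen and touch USB interfaces of the same tablet. */
struct shared_state {
	bool stylus_in_proximity;
};

/* Mirrors wacom_bpt_pen(): the flag tracks pen proximity. */
static void pen_proximity_event(struct shared_state *shared, bool in_prox)
{
	shared->stylus_in_proximity = in_prox;
}

/* Mirrors the check added to wacom_bpt_touch(): drop finger contacts while
 * the pen is in proximity so a resting hand does not generate events; the
 * tablet buttons are reported elsewhere and keep working. */
static bool touch_should_report(const struct shared_state *shared, int pressure)
{
	return pressure != 0 && !shared->stylus_in_proximity;
}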
diff --git a/xhci_hcd-suspend-resume.patch b/xhci_hcd-suspend-resume.patch
new file mode 100644
index 000000000..2f8816973
--- /dev/null
+++ b/xhci_hcd-suspend-resume.patch
@@ -0,0 +1,1289 @@
+commit 5e5563661948c57f72cc16b3a0cc5dc205ed4900
+Author: Andiry Xu <andiry.xu@amd.com>
+Date:   Thu Oct 14 07:23:06 2010 -0700
+
+    USB: xHCI: PCI power management implementation
+    
+    This patch implements PCI suspend/resume.
+    
+    Please refer to the xHCI spec for the suspend/resume operation.
+    
+    For S3, the CSS/CRS bits in USBCMD are used to save/restore the internal
+    state.  However, an error may occur while restoring the internal state;
+    in that case the HC internal state is invalid and the HC will be
+    re-initialized.
+    
+    Signed-off-by: Libin Yang <libin.yang@amd.com>
+    Signed-off-by: Dong Nguyen <dong.nguyen@amd.com>
+    Signed-off-by: Andiry Xu <andiry.xu@amd.com>
+    Signed-off-by: Sarah Sharp <sarah.a.sharp@linux.intel.com>
+    Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+commit 96520f33d383c9a3ba1ca571cac5fa75325728f5
+Author: Andiry Xu <andiry.xu@amd.com>
+Date:   Thu Oct 14 07:23:03 2010 -0700
+
+    USB: xHCI: bus power management implementation
+    
+    This patch implements the xHCI bus suspend/resume function hooks.
+    
+    Bus suspend/resume walks all the ports and suspends or resumes each
+    one as needed.
+    
+    If any port is in the middle of remote wakeup, bus suspend is aborted, as ehci/ohci do.
+    
+    Signed-off-by: Libin Yang <libin.yang@amd.com>
+    Signed-off-by: Crane Cai <crane.cai@amd.com>
+    Signed-off-by: Andiry Xu <andiry.xu@amd.com>
+    Signed-off-by: Sarah Sharp <sarah.a.sharp@linux.intel.com>
+    Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+commit 7b29198e193ab6f5e8bfcd48c59340b8c7689f5c
+Author: Andiry Xu <andiry.xu@amd.com>
+Date:   Thu Oct 14 07:23:00 2010 -0700
+
+    USB: xHCI: port remote wakeup implementation
+    
+    This commit implements port remote wakeup.
+    
+    When a port is in U3 state and resume signaling is detected from a device,
+    the port transitions to the Resume state, and the xHC generates a Port Status
+    Change Event.
+    
+    For a USB3 port, software writes '0' to the PLS field to complete the
+    resume signaling. For a USB2 port, resume must be signaled for at least
+    20ms, so the irq handler sets a timer for port remote wakeup and the
+    process is finished later in hub_control GetPortStatus.
+    
+    Some code is borrowed from the EHCI driver.
+    
+    Signed-off-by: Andiry Xu <andiry.xu@amd.com>
+    Signed-off-by: Sarah Sharp <sarah.a.sharp@linux.intel.com>
+    Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+commit 9ada0dec259dfe796a757ff2c9b63a05e6408e5f
+Author: Andiry Xu <andiry.xu@amd.com>
+Date:   Thu Oct 14 07:22:57 2010 -0700
+
+    USB: xHCI: port power management implementation
+    
+    Add software-triggered USB device suspend/resume function hooks.
+    Do port suspend & resume according to the xHCI spec.
+    
+    Port Suspend:
+    Stop all endpoints via Stop Endpoint Command with Suspend (SP) flag set.
+    Place individual ports into suspend mode by writing '3' to the Port Link
+    State (PLS) field of the PORTSC register. This can only be done when the
+    port is in the Enabled state, and the Port Link State Write Strobe (LWS)
+    bit must be set to '1' for the write to take effect.
+    Allocate an xhci_command and stash it in the xhci_virt_device to wait for
+    completion of the last Stop Endpoint Command.  Use the Suspend bit in the
+    TRB to indicate that the Stop Endpoint Command is for port suspend (based on Sarah's suggestion).
+    
+    Port Resume:
+    Write '0' to the PLS field; the device will transition to the running
+    state. Ring each endpoint's doorbell to restart it.
+    
+    Note: USB device remote wakeup needs another patch to implement. For
+    details of how the USB subsystem does power management, please see:
+        Documentation/usb/power-management.txt
+    
+    Signed-off-by: Crane Cai <crane.cai@amd.com>
+    Signed-off-by: Libin Yang <libin.yang@amd.com>
+    Signed-off-by: Andiry Xu <andiry.xu@amd.com>
+    Signed-off-by: Sarah Sharp <sarah.a.sharp@linux.intel.com>
+    Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+ drivers/usb/host/xhci-hub.c  |  424 +++++++++++++++++++++++++++++++++++++++++-
+ drivers/usb/host/xhci-mem.c  |    4 +
+ drivers/usb/host/xhci-pci.c  |   36 ++++-
+ drivers/usb/host/xhci-ring.c |  101 +++++++++-
+ drivers/usb/host/xhci.c      |  212 +++++++++++++++++++++-
+ drivers/usb/host/xhci.h      |   46 +++++-
+ 6 files changed, 805 insertions(+), 18 deletions(-)
+
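The port-suspend step described in the last commit message above (after the endpoints are stopped, move the link to U3 through PORTSC with the write strobe set) reduces to one read-modify-write of the port register. Below is a condensed sketch of what xhci_hub_control() does for SetPortFeature(PORT_SUSPEND) further down in this patch, using the helpers and bit definitions the patch introduces; locking, the Stop Endpoint Command and all error paths are omitted:

/* Sketch of the PLS write done for SetPortFeature(PORT_SUSPEND). */
static void example_put_port_in_u3(struct xhci_hcd *xhci, u32 __iomem *portsc)
{
	u32 temp = xhci_readl(xhci, portsc);

	/* Keep RO/RWS bits and avoid clearing RW1C change bits by accident. */
	temp = xhci_port_state_to_neutral(temp);
	temp &= ~PORT_PLS_MASK;			/* clear the current link state */
	temp |= PORT_LINK_STROBE | XDEV_U3;	/* PLS = U3 with LWS = '1' */
	xhci_writel(xhci, temp, portsc);
}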
+diff --git a/drivers/usb/host/xhci-hub.c b/drivers/usb/host/xhci-hub.c
+index a1a7a97..7f2f63c 100644
+--- a/drivers/usb/host/xhci-hub.c
++++ b/drivers/usb/host/xhci-hub.c
+@@ -24,6 +24,10 @@
+ 
+ #include "xhci.h"
+ 
++#define	PORT_WAKE_BITS	(PORT_WKOC_E | PORT_WKDISC_E | PORT_WKCONN_E)
++#define	PORT_RWC_BITS	(PORT_CSC | PORT_PEC | PORT_WRC | PORT_OCC | \
++			 PORT_RC | PORT_PLC | PORT_PE)
++
+ static void xhci_hub_descriptor(struct xhci_hcd *xhci,
+ 		struct usb_hub_descriptor *desc)
+ {
+@@ -123,12 +127,105 @@ static unsigned int xhci_port_speed(unsigned int port_status)
+  * writing a 0 clears the bit and writing a 1 sets the bit (RWS).
+  * For all other types (RW1S, RW1CS, RW, and RZ), writing a '0' has no effect.
+  */
+-static u32 xhci_port_state_to_neutral(u32 state)
++u32 xhci_port_state_to_neutral(u32 state)
+ {
+ 	/* Save read-only status and port state */
+ 	return (state & XHCI_PORT_RO) | (state & XHCI_PORT_RWS);
+ }
+ 
++/*
++ * find slot id based on port number.
++ */
++int xhci_find_slot_id_by_port(struct xhci_hcd *xhci, u16 port)
++{
++	int slot_id;
++	int i;
++
++	slot_id = 0;
++	for (i = 0; i < MAX_HC_SLOTS; i++) {
++		if (!xhci->devs[i])
++			continue;
++		if (xhci->devs[i]->port == port) {
++			slot_id = i;
++			break;
++		}
++	}
++
++	return slot_id;
++}
++
++/*
++ * Stop device
++ * It issues a Stop Endpoint Command for endpoints 0 to 30 and waits for
++ * the last command to complete.
++ * 'suspend' is set to 1 if the suspend bit needs to be set in the command.
++ */
++static int xhci_stop_device(struct xhci_hcd *xhci, int slot_id, int suspend)
++{
++	struct xhci_virt_device *virt_dev;
++	struct xhci_command *cmd;
++	unsigned long flags;
++	int timeleft;
++	int ret;
++	int i;
++
++	ret = 0;
++	virt_dev = xhci->devs[slot_id];
++	cmd = xhci_alloc_command(xhci, false, true, GFP_NOIO);
++	if (!cmd) {
++		xhci_dbg(xhci, "Couldn't allocate command structure.\n");
++		return -ENOMEM;
++	}
++
++	spin_lock_irqsave(&xhci->lock, flags);
++	for (i = LAST_EP_INDEX; i > 0; i--) {
++		if (virt_dev->eps[i].ring && virt_dev->eps[i].ring->dequeue)
++			xhci_queue_stop_endpoint(xhci, slot_id, i, suspend);
++	}
++	cmd->command_trb = xhci->cmd_ring->enqueue;
++	list_add_tail(&cmd->cmd_list, &virt_dev->cmd_list);
++	xhci_queue_stop_endpoint(xhci, slot_id, 0, suspend);
++	xhci_ring_cmd_db(xhci);
++	spin_unlock_irqrestore(&xhci->lock, flags);
++
++	/* Wait for last stop endpoint command to finish */
++	timeleft = wait_for_completion_interruptible_timeout(
++			cmd->completion,
++			USB_CTRL_SET_TIMEOUT);
++	if (timeleft <= 0) {
++		xhci_warn(xhci, "%s while waiting for stop endpoint command\n",
++				timeleft == 0 ? "Timeout" : "Signal");
++		spin_lock_irqsave(&xhci->lock, flags);
++		/* The timeout might have raced with the event ring handler, so
++		 * only delete from the list if the item isn't poisoned.
++		 */
++		if (cmd->cmd_list.next != LIST_POISON1)
++			list_del(&cmd->cmd_list);
++		spin_unlock_irqrestore(&xhci->lock, flags);
++		ret = -ETIME;
++		goto command_cleanup;
++	}
++
++command_cleanup:
++	xhci_free_command(xhci, cmd);
++	return ret;
++}
++
++/*
++ * Ring device: ring all of the device's endpoint doorbells unconditionally.
++ */
++void xhci_ring_device(struct xhci_hcd *xhci, int slot_id)
++{
++	int i;
++
++	for (i = 0; i < LAST_EP_INDEX + 1; i++)
++		if (xhci->devs[slot_id]->eps[i].ring &&
++		    xhci->devs[slot_id]->eps[i].ring->dequeue)
++			xhci_ring_ep_doorbell(xhci, slot_id, i, 0);
++
++	return;
++}
++
+ static void xhci_disable_port(struct xhci_hcd *xhci, u16 wIndex,
+ 		u32 __iomem *addr, u32 port_status)
+ {
+@@ -162,6 +259,10 @@ static void xhci_clear_port_change_bit(struct xhci_hcd *xhci, u16 wValue,
+ 		status = PORT_PEC;
+ 		port_change_bit = "enable/disable";
+ 		break;
++	case USB_PORT_FEAT_C_SUSPEND:
++		status = PORT_PLC;
++		port_change_bit = "suspend/resume";
++		break;
+ 	default:
+ 		/* Should never happen */
+ 		return;
+@@ -179,9 +280,10 @@ int xhci_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue,
+ 	struct xhci_hcd	*xhci = hcd_to_xhci(hcd);
+ 	int ports;
+ 	unsigned long flags;
+-	u32 temp, status;
++	u32 temp, temp1, status;
+ 	int retval = 0;
+ 	u32 __iomem *addr;
++	int slot_id;
+ 
+ 	ports = HCS_MAX_PORTS(xhci->hcs_params1);
+ 
+@@ -211,9 +313,49 @@ int xhci_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue,
+ 		if ((temp & PORT_OCC))
+ 			status |= USB_PORT_STAT_C_OVERCURRENT << 16;
+ 		/*
+-		 * FIXME ignoring suspend, reset, and USB 2.1/3.0 specific
++		 * FIXME ignoring reset and USB 2.1/3.0 specific
+ 		 * changes
+ 		 */
++		if ((temp & PORT_PLS_MASK) == XDEV_U3
++			&& (temp & PORT_POWER))
++			status |= 1 << USB_PORT_FEAT_SUSPEND;
++		if ((temp & PORT_PLS_MASK) == XDEV_RESUME) {
++			if ((temp & PORT_RESET) || !(temp & PORT_PE))
++				goto error;
++			if (!DEV_SUPERSPEED(temp) && time_after_eq(jiffies,
++						xhci->resume_done[wIndex])) {
++				xhci_dbg(xhci, "Resume USB2 port %d\n",
++					wIndex + 1);
++				xhci->resume_done[wIndex] = 0;
++				temp1 = xhci_port_state_to_neutral(temp);
++				temp1 &= ~PORT_PLS_MASK;
++				temp1 |= PORT_LINK_STROBE | XDEV_U0;
++				xhci_writel(xhci, temp1, addr);
++
++				xhci_dbg(xhci, "set port %d resume\n",
++					wIndex + 1);
++				slot_id = xhci_find_slot_id_by_port(xhci,
++								 wIndex + 1);
++				if (!slot_id) {
++					xhci_dbg(xhci, "slot_id is zero\n");
++					goto error;
++				}
++				xhci_ring_device(xhci, slot_id);
++				xhci->port_c_suspend[wIndex >> 5] |=
++						1 << (wIndex & 31);
++				xhci->suspended_ports[wIndex >> 5] &=
++						~(1 << (wIndex & 31));
++			}
++		}
++		if ((temp & PORT_PLS_MASK) == XDEV_U0
++			&& (temp & PORT_POWER)
++			&& (xhci->suspended_ports[wIndex >> 5] &
++			    (1 << (wIndex & 31)))) {
++			xhci->suspended_ports[wIndex >> 5] &=
++					~(1 << (wIndex & 31));
++			xhci->port_c_suspend[wIndex >> 5] |=
++					1 << (wIndex & 31);
++		}
+ 		if (temp & PORT_CONNECT) {
+ 			status |= USB_PORT_STAT_CONNECTION;
+ 			status |= xhci_port_speed(temp);
+@@ -226,6 +368,8 @@ int xhci_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue,
+ 			status |= USB_PORT_STAT_RESET;
+ 		if (temp & PORT_POWER)
+ 			status |= USB_PORT_STAT_POWER;
++		if (xhci->port_c_suspend[wIndex >> 5] & (1 << (wIndex & 31)))
++			status |= 1 << USB_PORT_FEAT_C_SUSPEND;
+ 		xhci_dbg(xhci, "Get port status returned 0x%x\n", status);
+ 		put_unaligned(cpu_to_le32(status), (__le32 *) buf);
+ 		break;
+@@ -238,6 +382,42 @@ int xhci_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue,
+ 		temp = xhci_readl(xhci, addr);
+ 		temp = xhci_port_state_to_neutral(temp);
+ 		switch (wValue) {
++		case USB_PORT_FEAT_SUSPEND:
++			temp = xhci_readl(xhci, addr);
++			/* Per the xHCI spec, software should not attempt to
++			 * suspend a port unless the port reports that it is
++			 * in the enabled (PED = '1', PLS < '3') state.
++			 */
++			if ((temp & PORT_PE) == 0 || (temp & PORT_RESET)
++				|| (temp & PORT_PLS_MASK) >= XDEV_U3) {
++				xhci_warn(xhci, "USB core suspending device "
++					  "not in U0/U1/U2.\n");
++				goto error;
++			}
++
++			slot_id = xhci_find_slot_id_by_port(xhci, wIndex + 1);
++			if (!slot_id) {
++				xhci_warn(xhci, "slot_id is zero\n");
++				goto error;
++			}
++			/* unlock to execute stop endpoint commands */
++			spin_unlock_irqrestore(&xhci->lock, flags);
++			xhci_stop_device(xhci, slot_id, 1);
++			spin_lock_irqsave(&xhci->lock, flags);
++
++			temp = xhci_port_state_to_neutral(temp);
++			temp &= ~PORT_PLS_MASK;
++			temp |= PORT_LINK_STROBE | XDEV_U3;
++			xhci_writel(xhci, temp, addr);
++
++			spin_unlock_irqrestore(&xhci->lock, flags);
++			msleep(10); /* wait for the device to enter U3 */
++			spin_lock_irqsave(&xhci->lock, flags);
++
++			temp = xhci_readl(xhci, addr);
++			xhci->suspended_ports[wIndex >> 5] |=
++					1 << (wIndex & (31));
++			break;
+ 		case USB_PORT_FEAT_POWER:
+ 			/*
+ 			 * Turn on ports, even if there isn't per-port switching.
+@@ -271,6 +451,52 @@ int xhci_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue,
+ 		temp = xhci_readl(xhci, addr);
+ 		temp = xhci_port_state_to_neutral(temp);
+ 		switch (wValue) {
++		case USB_PORT_FEAT_SUSPEND:
++			temp = xhci_readl(xhci, addr);
++			xhci_dbg(xhci, "clear USB_PORT_FEAT_SUSPEND\n");
++			xhci_dbg(xhci, "PORTSC %04x\n", temp);
++			if (temp & PORT_RESET)
++				goto error;
++			if (temp & XDEV_U3) {
++				if ((temp & PORT_PE) == 0)
++					goto error;
++				if (DEV_SUPERSPEED(temp)) {
++					temp = xhci_port_state_to_neutral(temp);
++					temp &= ~PORT_PLS_MASK;
++					temp |= PORT_LINK_STROBE | XDEV_U0;
++					xhci_writel(xhci, temp, addr);
++					xhci_readl(xhci, addr);
++				} else {
++					temp = xhci_port_state_to_neutral(temp);
++					temp &= ~PORT_PLS_MASK;
++					temp |= PORT_LINK_STROBE | XDEV_RESUME;
++					xhci_writel(xhci, temp, addr);
++
++					spin_unlock_irqrestore(&xhci->lock,
++							       flags);
++					msleep(20);
++					spin_lock_irqsave(&xhci->lock, flags);
++
++					temp = xhci_readl(xhci, addr);
++					temp = xhci_port_state_to_neutral(temp);
++					temp &= ~PORT_PLS_MASK;
++					temp |= PORT_LINK_STROBE | XDEV_U0;
++					xhci_writel(xhci, temp, addr);
++				}
++				xhci->port_c_suspend[wIndex >> 5] |=
++						1 << (wIndex & 31);
++			}
++
++			slot_id = xhci_find_slot_id_by_port(xhci, wIndex + 1);
++			if (!slot_id) {
++				xhci_dbg(xhci, "slot_id is zero\n");
++				goto error;
++			}
++			xhci_ring_device(xhci, slot_id);
++			break;
++		case USB_PORT_FEAT_C_SUSPEND:
++			xhci->port_c_suspend[wIndex >> 5] &=
++					~(1 << (wIndex & 31));
+ 		case USB_PORT_FEAT_C_RESET:
+ 		case USB_PORT_FEAT_C_CONNECTION:
+ 		case USB_PORT_FEAT_C_OVER_CURRENT:
+@@ -306,6 +532,7 @@ int xhci_hub_status_data(struct usb_hcd *hcd, char *buf)
+ {
+ 	unsigned long flags;
+ 	u32 temp, status;
++	u32 mask;
+ 	int i, retval;
+ 	struct xhci_hcd	*xhci = hcd_to_xhci(hcd);
+ 	int ports;
+@@ -318,13 +545,18 @@ int xhci_hub_status_data(struct usb_hcd *hcd, char *buf)
+ 	memset(buf, 0, retval);
+ 	status = 0;
+ 
++	mask = PORT_CSC | PORT_PEC | PORT_OCC;
++
+ 	spin_lock_irqsave(&xhci->lock, flags);
+ 	/* For each port, did anything change?  If so, set that bit in buf. */
+ 	for (i = 0; i < ports; i++) {
+ 		addr = &xhci->op_regs->port_status_base +
+ 			NUM_PORT_REGS*i;
+ 		temp = xhci_readl(xhci, addr);
+-		if (temp & (PORT_CSC | PORT_PEC | PORT_OCC)) {
++		if ((temp & mask) != 0 ||
++			(xhci->port_c_suspend[i >> 5] &	1 << (i & 31)) ||
++			(xhci->resume_done[i] && time_after_eq(
++			    jiffies, xhci->resume_done[i]))) {
+ 			buf[(i + 1) / 8] |= 1 << (i + 1) % 8;
+ 			status = 1;
+ 		}
+@@ -332,3 +564,187 @@ int xhci_hub_status_data(struct usb_hcd *hcd, char *buf)
+ 	spin_unlock_irqrestore(&xhci->lock, flags);
+ 	return status ? retval : 0;
+ }
++
++#ifdef CONFIG_PM
++
++int xhci_bus_suspend(struct usb_hcd *hcd)
++{
++	struct xhci_hcd	*xhci = hcd_to_xhci(hcd);
++	int port;
++	unsigned long flags;
++
++	xhci_dbg(xhci, "suspend root hub\n");
++
++	spin_lock_irqsave(&xhci->lock, flags);
++
++	if (hcd->self.root_hub->do_remote_wakeup) {
++		port = HCS_MAX_PORTS(xhci->hcs_params1);
++		while (port--) {
++			if (xhci->resume_done[port] != 0) {
++				spin_unlock_irqrestore(&xhci->lock, flags);
++				xhci_dbg(xhci, "suspend failed because "
++						"port %d is resuming\n",
++						port + 1);
++				return -EBUSY;
++			}
++		}
++	}
++
++	port = HCS_MAX_PORTS(xhci->hcs_params1);
++	xhci->bus_suspended = 0;
++	while (port--) {
++		/* suspend the port if the port is not suspended */
++		u32 __iomem *addr;
++		u32 t1, t2;
++		int slot_id;
++
++		addr = &xhci->op_regs->port_status_base +
++			NUM_PORT_REGS * (port & 0xff);
++		t1 = xhci_readl(xhci, addr);
++		t2 = xhci_port_state_to_neutral(t1);
++
++		if ((t1 & PORT_PE) && !(t1 & PORT_PLS_MASK)) {
++			xhci_dbg(xhci, "port %d not suspended\n", port);
++			slot_id = xhci_find_slot_id_by_port(xhci, port + 1);
++			if (slot_id) {
++				spin_unlock_irqrestore(&xhci->lock, flags);
++				xhci_stop_device(xhci, slot_id, 1);
++				spin_lock_irqsave(&xhci->lock, flags);
++			}
++			t2 &= ~PORT_PLS_MASK;
++			t2 |= PORT_LINK_STROBE | XDEV_U3;
++			set_bit(port, &xhci->bus_suspended);
++		}
++		if (hcd->self.root_hub->do_remote_wakeup) {
++			if (t1 & PORT_CONNECT) {
++				t2 |= PORT_WKOC_E | PORT_WKDISC_E;
++				t2 &= ~PORT_WKCONN_E;
++			} else {
++				t2 |= PORT_WKOC_E | PORT_WKCONN_E;
++				t2 &= ~PORT_WKDISC_E;
++			}
++		} else
++			t2 &= ~PORT_WAKE_BITS;
++
++		t1 = xhci_port_state_to_neutral(t1);
++		if (t1 != t2)
++			xhci_writel(xhci, t2, addr);
++
++		if (DEV_HIGHSPEED(t1)) {
++			/* enable remote wake up for USB 2.0 */
++			u32 __iomem *addr;
++			u32 tmp;
++
++			addr = &xhci->op_regs->port_power_base +
++				NUM_PORT_REGS * (port & 0xff);
++			tmp = xhci_readl(xhci, addr);
++			tmp |= PORT_RWE;
++			xhci_writel(xhci, tmp, addr);
++		}
++	}
++	hcd->state = HC_STATE_SUSPENDED;
++	xhci->next_statechange = jiffies + msecs_to_jiffies(10);
++	spin_unlock_irqrestore(&xhci->lock, flags);
++	return 0;
++}
++
++int xhci_bus_resume(struct usb_hcd *hcd)
++{
++	struct xhci_hcd	*xhci = hcd_to_xhci(hcd);
++	int port;
++	u32 temp;
++	unsigned long flags;
++
++	xhci_dbg(xhci, "resume root hub\n");
++
++	if (time_before(jiffies, xhci->next_statechange))
++		msleep(5);
++
++	spin_lock_irqsave(&xhci->lock, flags);
++	if (!HCD_HW_ACCESSIBLE(hcd)) {
++		spin_unlock_irqrestore(&xhci->lock, flags);
++		return -ESHUTDOWN;
++	}
++
++	/* delay the irqs */
++	temp = xhci_readl(xhci, &xhci->op_regs->command);
++	temp &= ~CMD_EIE;
++	xhci_writel(xhci, temp, &xhci->op_regs->command);
++
++	port = HCS_MAX_PORTS(xhci->hcs_params1);
++	while (port--) {
++		/* Check whether the port needs to be resumed; if so,
++		   resume it and disable remote wakeup */
++		u32 __iomem *addr;
++		u32 temp;
++		int slot_id;
++
++		addr = &xhci->op_regs->port_status_base +
++			NUM_PORT_REGS * (port & 0xff);
++		temp = xhci_readl(xhci, addr);
++		if (DEV_SUPERSPEED(temp))
++			temp &= ~(PORT_RWC_BITS | PORT_CEC | PORT_WAKE_BITS);
++		else
++			temp &= ~(PORT_RWC_BITS | PORT_WAKE_BITS);
++		if (test_bit(port, &xhci->bus_suspended) &&
++		    (temp & PORT_PLS_MASK)) {
++			if (DEV_SUPERSPEED(temp)) {
++				temp = xhci_port_state_to_neutral(temp);
++				temp &= ~PORT_PLS_MASK;
++				temp |= PORT_LINK_STROBE | XDEV_U0;
++				xhci_writel(xhci, temp, addr);
++			} else {
++				temp = xhci_port_state_to_neutral(temp);
++				temp &= ~PORT_PLS_MASK;
++				temp |= PORT_LINK_STROBE | XDEV_RESUME;
++				xhci_writel(xhci, temp, addr);
++
++				spin_unlock_irqrestore(&xhci->lock, flags);
++				msleep(20);
++				spin_lock_irqsave(&xhci->lock, flags);
++
++				temp = xhci_readl(xhci, addr);
++				temp = xhci_port_state_to_neutral(temp);
++				temp &= ~PORT_PLS_MASK;
++				temp |= PORT_LINK_STROBE | XDEV_U0;
++				xhci_writel(xhci, temp, addr);
++			}
++			slot_id = xhci_find_slot_id_by_port(xhci, port + 1);
++			if (slot_id)
++				xhci_ring_device(xhci, slot_id);
++		} else
++			xhci_writel(xhci, temp, addr);
++
++		if (DEV_HIGHSPEED(temp)) {
++			/* disable remote wake up for USB 2.0 */
++			u32 __iomem *addr;
++			u32 tmp;
++
++			addr = &xhci->op_regs->port_power_base +
++				NUM_PORT_REGS * (port & 0xff);
++			tmp = xhci_readl(xhci, addr);
++			tmp &= ~PORT_RWE;
++			xhci_writel(xhci, tmp, addr);
++		}
++	}
++
++	(void) xhci_readl(xhci, &xhci->op_regs->command);
++
++	xhci->next_statechange = jiffies + msecs_to_jiffies(5);
++	hcd->state = HC_STATE_RUNNING;
++	/* re-enable irqs */
++	temp = xhci_readl(xhci, &xhci->op_regs->command);
++	temp |= CMD_EIE;
++	xhci_writel(xhci, temp, &xhci->op_regs->command);
++	temp = xhci_readl(xhci, &xhci->op_regs->command);
++
++	spin_unlock_irqrestore(&xhci->lock, flags);
++	return 0;
++}
++
++#else
++
++#define	xhci_bus_suspend	NULL
++#define	xhci_bus_resume		NULL
++
++#endif
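One recurring idiom in the hub code above is the per-port bookkeeping in port_c_suspend[] and suspended_ports[]: each is an array of 32-bit words, and port index p lands in word p >> 5 (p / 32) at bit p & 31 (p % 32), so port index 40, for example, maps to word 1, bit 8. The same indexing written out as a pair of small helpers (illustrative, standalone types):

#include <stdbool.h>
#include <stdint.h>

/* word = port / 32, bit = port % 32 -- the same [p >> 5] / (p & 31)
 * arithmetic used for suspended_ports[] and port_c_suspend[] above. */
static inline void port_bitmap_set(uint32_t *map, unsigned int port)
{
	map[port >> 5] |= 1u << (port & 31);
}

static inline void port_bitmap_clear(uint32_t *map, unsigned int port)
{
	map[port >> 5] &= ~(1u << (port & 31));
}

static inline bool port_bitmap_test(const uint32_t *map, unsigned int port)
{
	return (map[port >> 5] >> (port & 31)) & 1u;
}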
+diff --git a/drivers/usb/host/xhci-mem.c b/drivers/usb/host/xhci-mem.c
+index 4e51343..cef8d81 100644
+--- a/drivers/usb/host/xhci-mem.c
++++ b/drivers/usb/host/xhci-mem.c
+@@ -866,6 +866,7 @@ int xhci_setup_addressable_virt_dev(struct xhci_hcd *xhci, struct usb_device *ud
+ 			top_dev = top_dev->parent)
+ 		/* Found device below root hub */;
+ 	slot_ctx->dev_info2 |= (u32) ROOT_HUB_PORT(top_dev->portnum);
++	dev->port = top_dev->portnum;
+ 	xhci_dbg(xhci, "Set root hub portnum to %d\n", top_dev->portnum);
+ 
+ 	/* Is this a LS/FS device under a HS hub? */
+@@ -1443,6 +1444,7 @@ void xhci_mem_cleanup(struct xhci_hcd *xhci)
+ 	scratchpad_free(xhci);
+ 	xhci->page_size = 0;
+ 	xhci->page_shift = 0;
++	xhci->bus_suspended = 0;
+ }
+ 
+ static int xhci_test_trb_in_td(struct xhci_hcd *xhci,
+@@ -1801,6 +1803,8 @@ int xhci_mem_init(struct xhci_hcd *xhci, gfp_t flags)
+ 	init_completion(&xhci->addr_dev);
+ 	for (i = 0; i < MAX_HC_SLOTS; ++i)
+ 		xhci->devs[i] = NULL;
++	for (i = 0; i < MAX_HC_PORTS; ++i)
++		xhci->resume_done[i] = 0;
+ 
+ 	if (scratchpad_alloc(xhci, flags))
+ 		goto fail;
+diff --git a/drivers/usb/host/xhci-pci.c b/drivers/usb/host/xhci-pci.c
+index f7efe02..e3a5924 100644
+--- a/drivers/usb/host/xhci-pci.c
++++ b/drivers/usb/host/xhci-pci.c
+@@ -116,6 +116,30 @@ static int xhci_pci_setup(struct usb_hcd *hcd)
+ 	return xhci_pci_reinit(xhci, pdev);
+ }
+ 
++#ifdef CONFIG_PM
++static int xhci_pci_suspend(struct usb_hcd *hcd, bool do_wakeup)
++{
++	struct xhci_hcd	*xhci = hcd_to_xhci(hcd);
++	int	retval = 0;
++
++	if (hcd->state != HC_STATE_SUSPENDED)
++		return -EINVAL;
++
++	retval = xhci_suspend(xhci);
++
++	return retval;
++}
++
++static int xhci_pci_resume(struct usb_hcd *hcd, bool hibernated)
++{
++	struct xhci_hcd		*xhci = hcd_to_xhci(hcd);
++	int			retval = 0;
++
++	retval = xhci_resume(xhci, hibernated);
++	return retval;
++}
++#endif /* CONFIG_PM */
++
+ static const struct hc_driver xhci_pci_hc_driver = {
+ 	.description =		hcd_name,
+ 	.product_desc =		"xHCI Host Controller",
+@@ -132,7 +156,10 @@ static const struct hc_driver xhci_pci_hc_driver = {
+ 	 */
+ 	.reset =		xhci_pci_setup,
+ 	.start =		xhci_run,
+-	/* suspend and resume implemented later */
++#ifdef CONFIG_PM
++	.pci_suspend =          xhci_pci_suspend,
++	.pci_resume =           xhci_pci_resume,
++#endif
+ 	.stop =			xhci_stop,
+ 	.shutdown =		xhci_shutdown,
+ 
+@@ -162,6 +189,8 @@ static const struct hc_driver xhci_pci_hc_driver = {
+ 	/* Root hub support */
+ 	.hub_control =		xhci_hub_control,
+ 	.hub_status_data =	xhci_hub_status_data,
++	.bus_suspend =		xhci_bus_suspend,
++	.bus_resume =		xhci_bus_resume,
+ };
+ 
+ /*-------------------------------------------------------------------------*/
+@@ -186,6 +215,11 @@ static struct pci_driver xhci_pci_driver = {
+ 	/* suspend and resume implemented later */
+ 
+ 	.shutdown = 	usb_hcd_pci_shutdown,
++#ifdef CONFIG_PM_SLEEP
++	.driver = {
++		.pm = &usb_hcd_pci_pm_ops
++	},
++#endif
+ };
+ 
+ int xhci_register_pci(void)
+diff --git a/drivers/usb/host/xhci-ring.c b/drivers/usb/host/xhci-ring.c
+index 48e60d1..9f3115e 100644
+--- a/drivers/usb/host/xhci-ring.c
++++ b/drivers/usb/host/xhci-ring.c
+@@ -68,6 +68,10 @@
+ #include <linux/slab.h>
+ #include "xhci.h"
+ 
++static int handle_cmd_in_cmd_wait_list(struct xhci_hcd *xhci,
++		struct xhci_virt_device *virt_dev,
++		struct xhci_event_cmd *event);
++
+ /*
+  * Returns zero if the TRB isn't in this segment, otherwise it returns the DMA
+  * address of the TRB.
+@@ -313,7 +317,7 @@ void xhci_ring_cmd_db(struct xhci_hcd *xhci)
+ 	xhci_readl(xhci, &xhci->dba->doorbell[0]);
+ }
+ 
+-static void ring_ep_doorbell(struct xhci_hcd *xhci,
++void xhci_ring_ep_doorbell(struct xhci_hcd *xhci,
+ 		unsigned int slot_id,
+ 		unsigned int ep_index,
+ 		unsigned int stream_id)
+@@ -353,7 +357,7 @@ static void ring_doorbell_for_active_rings(struct xhci_hcd *xhci,
+ 	/* A ring has pending URBs if its TD list is not empty */
+ 	if (!(ep->ep_state & EP_HAS_STREAMS)) {
+ 		if (!(list_empty(&ep->ring->td_list)))
+-			ring_ep_doorbell(xhci, slot_id, ep_index, 0);
++			xhci_ring_ep_doorbell(xhci, slot_id, ep_index, 0);
+ 		return;
+ 	}
+ 
+@@ -361,7 +365,8 @@ static void ring_doorbell_for_active_rings(struct xhci_hcd *xhci,
+ 			stream_id++) {
+ 		struct xhci_stream_info *stream_info = ep->stream_info;
+ 		if (!list_empty(&stream_info->stream_rings[stream_id]->td_list))
+-			ring_ep_doorbell(xhci, slot_id, ep_index, stream_id);
++			xhci_ring_ep_doorbell(xhci, slot_id, ep_index,
++						stream_id);
+ 	}
+ }
+ 
+@@ -626,10 +631,11 @@ static void xhci_giveback_urb_in_irq(struct xhci_hcd *xhci,
+  *     bit cleared) so that the HW will skip over them.
+  */
+ static void handle_stopped_endpoint(struct xhci_hcd *xhci,
+-		union xhci_trb *trb)
++		union xhci_trb *trb, struct xhci_event_cmd *event)
+ {
+ 	unsigned int slot_id;
+ 	unsigned int ep_index;
++	struct xhci_virt_device *virt_dev;
+ 	struct xhci_ring *ep_ring;
+ 	struct xhci_virt_ep *ep;
+ 	struct list_head *entry;
+@@ -638,6 +644,21 @@ static void handle_stopped_endpoint(struct xhci_hcd *xhci,
+ 
+ 	struct xhci_dequeue_state deq_state;
+ 
++	if (unlikely(TRB_TO_SUSPEND_PORT(
++			xhci->cmd_ring->dequeue->generic.field[3]))) {
++		slot_id = TRB_TO_SLOT_ID(
++			xhci->cmd_ring->dequeue->generic.field[3]);
++		virt_dev = xhci->devs[slot_id];
++		if (virt_dev)
++			handle_cmd_in_cmd_wait_list(xhci, virt_dev,
++				event);
++		else
++			xhci_warn(xhci, "Stop endpoint command "
++				"completion for disabled slot %u\n",
++				slot_id);
++		return;
++	}
++
+ 	memset(&deq_state, 0, sizeof(deq_state));
+ 	slot_id = TRB_TO_SLOT_ID(trb->generic.field[3]);
+ 	ep_index = TRB_TO_EP_INDEX(trb->generic.field[3]);
+@@ -1091,7 +1112,7 @@ bandwidth_change:
+ 		complete(&xhci->addr_dev);
+ 		break;
+ 	case TRB_TYPE(TRB_STOP_RING):
+-		handle_stopped_endpoint(xhci, xhci->cmd_ring->dequeue);
++		handle_stopped_endpoint(xhci, xhci->cmd_ring->dequeue, event);
+ 		break;
+ 	case TRB_TYPE(TRB_SET_DEQ):
+ 		handle_set_deq_completion(xhci, event, xhci->cmd_ring->dequeue);
+@@ -1144,17 +1165,72 @@ static void handle_vendor_event(struct xhci_hcd *xhci,
+ static void handle_port_status(struct xhci_hcd *xhci,
+ 		union xhci_trb *event)
+ {
++	struct usb_hcd *hcd = xhci_to_hcd(xhci);
+ 	u32 port_id;
++	u32 temp, temp1;
++	u32 __iomem *addr;
++	int ports;
++	int slot_id;
+ 
+ 	/* Port status change events always have a successful completion code */
+ 	if (GET_COMP_CODE(event->generic.field[2]) != COMP_SUCCESS) {
+ 		xhci_warn(xhci, "WARN: xHC returned failed port status event\n");
+ 		xhci->error_bitmask |= 1 << 8;
+ 	}
+-	/* FIXME: core doesn't care about all port link state changes yet */
+ 	port_id = GET_PORT_ID(event->generic.field[0]);
+ 	xhci_dbg(xhci, "Port Status Change Event for port %d\n", port_id);
+ 
++	ports = HCS_MAX_PORTS(xhci->hcs_params1);
++	if ((port_id <= 0) || (port_id > ports)) {
++		xhci_warn(xhci, "Invalid port id %d\n", port_id);
++		goto cleanup;
++	}
++
++	addr = &xhci->op_regs->port_status_base + NUM_PORT_REGS * (port_id - 1);
++	temp = xhci_readl(xhci, addr);
++	if ((temp & PORT_CONNECT) && (hcd->state == HC_STATE_SUSPENDED)) {
++		xhci_dbg(xhci, "resume root hub\n");
++		usb_hcd_resume_root_hub(hcd);
++	}
++
++	if ((temp & PORT_PLC) && (temp & PORT_PLS_MASK) == XDEV_RESUME) {
++		xhci_dbg(xhci, "port resume event for port %d\n", port_id);
++
++		temp1 = xhci_readl(xhci, &xhci->op_regs->command);
++		if (!(temp1 & CMD_RUN)) {
++			xhci_warn(xhci, "xHC is not running.\n");
++			goto cleanup;
++		}
++
++		if (DEV_SUPERSPEED(temp)) {
++			xhci_dbg(xhci, "resume SS port %d\n", port_id);
++			temp = xhci_port_state_to_neutral(temp);
++			temp &= ~PORT_PLS_MASK;
++			temp |= PORT_LINK_STROBE | XDEV_U0;
++			xhci_writel(xhci, temp, addr);
++			slot_id = xhci_find_slot_id_by_port(xhci, port_id);
++			if (!slot_id) {
++				xhci_dbg(xhci, "slot_id is zero\n");
++				goto cleanup;
++			}
++			xhci_ring_device(xhci, slot_id);
++			xhci_dbg(xhci, "resume SS port %d finished\n", port_id);
++			/* Clear PORT_PLC */
++			temp = xhci_readl(xhci, addr);
++			temp = xhci_port_state_to_neutral(temp);
++			temp |= PORT_PLC;
++			xhci_writel(xhci, temp, addr);
++		} else {
++			xhci_dbg(xhci, "resume HS port %d\n", port_id);
++			xhci->resume_done[port_id - 1] = jiffies +
++				msecs_to_jiffies(20);
++			mod_timer(&hcd->rh_timer,
++				  xhci->resume_done[port_id - 1]);
++			/* Do the rest in GetPortStatus */
++		}
++	}
++
++cleanup:
+ 	/* Update event ring dequeue pointer before dropping the lock */
+ 	inc_deq(xhci, xhci->event_ring, true);
+ 
+@@ -2347,7 +2423,7 @@ static void giveback_first_trb(struct xhci_hcd *xhci, int slot_id,
+ 	 */
+ 	wmb();
+ 	start_trb->field[3] |= start_cycle;
+-	ring_ep_doorbell(xhci, slot_id, ep_index, stream_id);
++	xhci_ring_ep_doorbell(xhci, slot_id, ep_index, stream_id);
+ }
+ 
+ /*
+@@ -2931,7 +3007,7 @@ static int xhci_queue_isoc_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
+ 	wmb();
+ 	start_trb->field[3] |= start_cycle;
+ 
+-	ring_ep_doorbell(xhci, slot_id, ep_index, urb->stream_id);
++	xhci_ring_ep_doorbell(xhci, slot_id, ep_index, urb->stream_id);
+ 	return 0;
+ }
+ 
+@@ -3108,15 +3184,20 @@ int xhci_queue_evaluate_context(struct xhci_hcd *xhci, dma_addr_t in_ctx_ptr,
+ 			false);
+ }
+ 
++/*
++ * Suspend is set to indicate "Stop Endpoint Command" is being issued to stop
++ * activity on an endpoint that is about to be suspended.
++ */
+ int xhci_queue_stop_endpoint(struct xhci_hcd *xhci, int slot_id,
+-		unsigned int ep_index)
++		unsigned int ep_index, int suspend)
+ {
+ 	u32 trb_slot_id = SLOT_ID_FOR_TRB(slot_id);
+ 	u32 trb_ep_index = EP_ID_FOR_TRB(ep_index);
+ 	u32 type = TRB_TYPE(TRB_STOP_RING);
++	u32 trb_suspend = SUSPEND_PORT_FOR_TRB(suspend);
+ 
+ 	return queue_command(xhci, 0, 0, 0,
+-			trb_slot_id | trb_ep_index | type, false);
++			trb_slot_id | trb_ep_index | type | trb_suspend, false);
+ }
+ 
+ /* Set Transfer Ring Dequeue Pointer command.
+diff --git a/drivers/usb/host/xhci.c b/drivers/usb/host/xhci.c
+index d5c550e..34f1b3b 100644
+--- a/drivers/usb/host/xhci.c
++++ b/drivers/usb/host/xhci.c
+@@ -551,6 +551,216 @@ void xhci_shutdown(struct usb_hcd *hcd)
+ 		    xhci_readl(xhci, &xhci->op_regs->status));
+ }
+ 
++static void xhci_save_registers(struct xhci_hcd *xhci)
++{
++	xhci->s3.command = xhci_readl(xhci, &xhci->op_regs->command);
++	xhci->s3.dev_nt = xhci_readl(xhci, &xhci->op_regs->dev_notification);
++	xhci->s3.dcbaa_ptr = xhci_read_64(xhci, &xhci->op_regs->dcbaa_ptr);
++	xhci->s3.config_reg = xhci_readl(xhci, &xhci->op_regs->config_reg);
++	xhci->s3.irq_pending = xhci_readl(xhci, &xhci->ir_set->irq_pending);
++	xhci->s3.irq_control = xhci_readl(xhci, &xhci->ir_set->irq_control);
++	xhci->s3.erst_size = xhci_readl(xhci, &xhci->ir_set->erst_size);
++	xhci->s3.erst_base = xhci_read_64(xhci, &xhci->ir_set->erst_base);
++	xhci->s3.erst_dequeue = xhci_read_64(xhci, &xhci->ir_set->erst_dequeue);
++}
++
++static void xhci_restore_registers(struct xhci_hcd *xhci)
++{
++	xhci_writel(xhci, xhci->s3.command, &xhci->op_regs->command);
++	xhci_writel(xhci, xhci->s3.dev_nt, &xhci->op_regs->dev_notification);
++	xhci_write_64(xhci, xhci->s3.dcbaa_ptr, &xhci->op_regs->dcbaa_ptr);
++	xhci_writel(xhci, xhci->s3.config_reg, &xhci->op_regs->config_reg);
++	xhci_writel(xhci, xhci->s3.irq_pending, &xhci->ir_set->irq_pending);
++	xhci_writel(xhci, xhci->s3.irq_control, &xhci->ir_set->irq_control);
++	xhci_writel(xhci, xhci->s3.erst_size, &xhci->ir_set->erst_size);
++	xhci_write_64(xhci, xhci->s3.erst_base, &xhci->ir_set->erst_base);
++}
++
++/*
++ * Stop HC (not bus-specific)
++ *
++ * This is called when the machine transitions into S3/S4 mode.
++ *
++ */
++int xhci_suspend(struct xhci_hcd *xhci)
++{
++	int			rc = 0;
++	struct usb_hcd		*hcd = xhci_to_hcd(xhci);
++	u32			command;
++
++	spin_lock_irq(&xhci->lock);
++	clear_bit(HCD_FLAG_HW_ACCESSIBLE, &hcd->flags);
++	/* step 1: stop endpoint */
++	/* skipped, assuming port suspend has already been done */
++
++	/* step 2: clear Run/Stop bit */
++	command = xhci_readl(xhci, &xhci->op_regs->command);
++	command &= ~CMD_RUN;
++	xhci_writel(xhci, command, &xhci->op_regs->command);
++	if (handshake(xhci, &xhci->op_regs->status,
++		      STS_HALT, STS_HALT, 100*100)) {
++		xhci_warn(xhci, "WARN: xHC CMD_RUN timeout\n");
++		spin_unlock_irq(&xhci->lock);
++		return -ETIMEDOUT;
++	}
++
++	/* step 3: save registers */
++	xhci_save_registers(xhci);
++
++	/* step 4: set CSS flag */
++	command = xhci_readl(xhci, &xhci->op_regs->command);
++	command |= CMD_CSS;
++	xhci_writel(xhci, command, &xhci->op_regs->command);
++	if (handshake(xhci, &xhci->op_regs->status, STS_SAVE, 0, 10*100)) {
++		xhci_warn(xhci, "WARN: xHC CMD_CSS timeout\n");
++		spin_unlock_irq(&xhci->lock);
++		return -ETIMEDOUT;
++	}
++	/* step 5: remove core well power */
++	xhci_cleanup_msix(xhci);
++	spin_unlock_irq(&xhci->lock);
++
++	return rc;
++}
++
++/*
++ * start xHC (not bus-specific)
++ *
++ * This is called when the machine resumes from S3/S4 mode.
++ *
++ */
++int xhci_resume(struct xhci_hcd *xhci, bool hibernated)
++{
++	u32			command, temp = 0;
++	struct usb_hcd		*hcd = xhci_to_hcd(xhci);
++	struct pci_dev		*pdev = to_pci_dev(hcd->self.controller);
++	u64	val_64;
++	int	old_state, retval;
++
++	old_state = hcd->state;
++	if (time_before(jiffies, xhci->next_statechange))
++		msleep(100);
++
++	spin_lock_irq(&xhci->lock);
++
++	if (!hibernated) {
++		/* step 1: restore register */
++		xhci_restore_registers(xhci);
++		/* step 2: initialize command ring buffer */
++		val_64 = xhci_read_64(xhci, &xhci->op_regs->cmd_ring);
++		val_64 = (val_64 & (u64) CMD_RING_RSVD_BITS) |
++			 (xhci_trb_virt_to_dma(xhci->cmd_ring->deq_seg,
++					       xhci->cmd_ring->dequeue) &
++			 (u64) ~CMD_RING_RSVD_BITS) |
++			 xhci->cmd_ring->cycle_state;
++		xhci_dbg(xhci, "// Setting command ring address to 0x%llx\n",
++				(long unsigned long) val_64);
++		xhci_write_64(xhci, val_64, &xhci->op_regs->cmd_ring);
++		/* step 3: restore state and start state*/
++		/* step 3: set CRS flag */
++		command = xhci_readl(xhci, &xhci->op_regs->command);
++		command |= CMD_CRS;
++		xhci_writel(xhci, command, &xhci->op_regs->command);
++		if (handshake(xhci, &xhci->op_regs->status,
++			      STS_RESTORE, 0, 10*100)) {
++			xhci_dbg(xhci, "WARN: xHC CMD_CRS timeout\n");
++			spin_unlock_irq(&xhci->lock);
++			return -ETIMEDOUT;
++		}
++		temp = xhci_readl(xhci, &xhci->op_regs->status);
++	}
++
++	/* If restore operation fails, re-initialize the HC during resume */
++	if ((temp & STS_SRE) || hibernated) {
++		usb_root_hub_lost_power(hcd->self.root_hub);
++
++		xhci_dbg(xhci, "Stop HCD\n");
++		xhci_halt(xhci);
++		xhci_reset(xhci);
++		if (hibernated)
++			xhci_cleanup_msix(xhci);
++		spin_unlock_irq(&xhci->lock);
++
++#ifdef CONFIG_USB_XHCI_HCD_DEBUGGING
++		/* Tell the event ring poll function not to reschedule */
++		xhci->zombie = 1;
++		del_timer_sync(&xhci->event_ring_timer);
++#endif
++
++		xhci_dbg(xhci, "// Disabling event ring interrupts\n");
++		temp = xhci_readl(xhci, &xhci->op_regs->status);
++		xhci_writel(xhci, temp & ~STS_EINT, &xhci->op_regs->status);
++		temp = xhci_readl(xhci, &xhci->ir_set->irq_pending);
++		xhci_writel(xhci, ER_IRQ_DISABLE(temp),
++				&xhci->ir_set->irq_pending);
++		xhci_print_ir_set(xhci, xhci->ir_set, 0);
++
++		xhci_dbg(xhci, "cleaning up memory\n");
++		xhci_mem_cleanup(xhci);
++		xhci_dbg(xhci, "xhci_stop completed - status = %x\n",
++			    xhci_readl(xhci, &xhci->op_regs->status));
++
++		xhci_dbg(xhci, "Initialize the HCD\n");
++		retval = xhci_init(hcd);
++		if (retval)
++			return retval;
++
++		xhci_dbg(xhci, "Start the HCD\n");
++		retval = xhci_run(hcd);
++		if (!retval)
++			set_bit(HCD_FLAG_HW_ACCESSIBLE, &hcd->flags);
++		hcd->state = HC_STATE_SUSPENDED;
++		return retval;
++	}
++
++	/* Re-setup MSI-X */
++	if (hcd->irq)
++		free_irq(hcd->irq, hcd);
++	hcd->irq = -1;
++
++	retval = xhci_setup_msix(xhci);
++	if (retval)
++		/* fall back to msi*/
++		retval = xhci_setup_msi(xhci);
++
++	if (retval) {
++		/* fall back to legacy interrupt*/
++		retval = request_irq(pdev->irq, &usb_hcd_irq, IRQF_SHARED,
++					hcd->irq_descr, hcd);
++		if (retval) {
++			xhci_err(xhci, "request interrupt %d failed\n",
++					pdev->irq);
++			return retval;
++		}
++		hcd->irq = pdev->irq;
++	}
++
++	/* step 4: set Run/Stop bit */
++	command = xhci_readl(xhci, &xhci->op_regs->command);
++	command |= CMD_RUN;
++	xhci_writel(xhci, command, &xhci->op_regs->command);
++	handshake(xhci, &xhci->op_regs->status, STS_HALT,
++		  0, 250 * 1000);
++
++	/* step 5: walk topology and initialize portsc,
++	 * portpmsc and portli
++	 */
++	/* this is done in bus_resume */
++
++	/* step 6: restart each of the previously
++	 * Running endpoints by ringing their doorbells
++	 */
++
++	set_bit(HCD_FLAG_HW_ACCESSIBLE, &hcd->flags);
++	if (!hibernated)
++		hcd->state = old_state;
++	else
++		hcd->state = HC_STATE_SUSPENDED;
++
++	spin_unlock_irq(&xhci->lock);
++	return 0;
++}
++
+ /*-------------------------------------------------------------------------*/
+ 
+ /**
+@@ -956,7 +1166,7 @@ int xhci_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status)
+ 		ep->stop_cmd_timer.expires = jiffies +
+ 			XHCI_STOP_EP_CMD_TIMEOUT * HZ;
+ 		add_timer(&ep->stop_cmd_timer);
+-		xhci_queue_stop_endpoint(xhci, urb->dev->slot_id, ep_index);
++		xhci_queue_stop_endpoint(xhci, urb->dev->slot_id, ep_index, 0);
+ 		xhci_ring_cmd_db(xhci);
+ 	}
+ done:
+diff --git a/drivers/usb/host/xhci.h b/drivers/usb/host/xhci.h
+index 34a60d9..b6d8033 100644
+--- a/drivers/usb/host/xhci.h
++++ b/drivers/usb/host/xhci.h
+@@ -191,7 +191,7 @@ struct xhci_op_regs {
+ /* bits 4:6 are reserved (and should be preserved on writes). */
+ /* light reset (port status stays unchanged) - reset completed when this is 0 */
+ #define CMD_LRESET	(1 << 7)
+-/* FIXME: ignoring host controller save/restore state for now. */
++/* host controller save/restore state. */
+ #define CMD_CSS		(1 << 8)
+ #define CMD_CRS		(1 << 9)
+ /* Enable Wrap Event - '1' means xHC generates an event when MFINDEX wraps. */
+@@ -269,6 +269,10 @@ struct xhci_op_regs {
+  * A read gives the current link PM state of the port,
+  * a write with Link State Write Strobe set sets the link state.
+  */
++#define PORT_PLS_MASK	(0xf << 5)
++#define XDEV_U0		(0x0 << 5)
++#define XDEV_U3		(0x3 << 5)
++#define XDEV_RESUME	(0xf << 5)
+ /* true: port has power (see HCC_PPC) */
+ #define PORT_POWER	(1 << 9)
+ /* bits 10:13 indicate device speed:
+@@ -353,6 +357,8 @@ struct xhci_op_regs {
+ #define PORT_U2_TIMEOUT(p)	(((p) & 0xff) << 8)
+ /* Bits 24:31 for port testing */
+ 
++/* USB2 Protocol PORTSPMSC */
++#define PORT_RWE	(1 << 0x3)
+ 
+ /**
+  * struct xhci_intr_reg - Interrupt Register Set
+@@ -510,6 +516,7 @@ struct xhci_slot_ctx {
+ #define MAX_EXIT	(0xffff)
+ /* Root hub port number that is needed to access the USB device */
+ #define ROOT_HUB_PORT(p)	(((p) & 0xff) << 16)
++#define DEVINFO_TO_ROOT_HUB_PORT(p)	(((p) >> 16) & 0xff)
+ /* Maximum number of ports under a hub device */
+ #define XHCI_MAX_PORTS(p)	(((p) & 0xff) << 24)
+ 
+@@ -751,6 +758,7 @@ struct xhci_virt_device {
+ 	/* Status of the last command issued for this device */
+ 	u32				cmd_status;
+ 	struct list_head		cmd_list;
++	u8				port;
+ };
+ 
+ 
+@@ -881,6 +889,10 @@ struct xhci_event_cmd {
+ #define TRB_TO_EP_INDEX(p)		((((p) & (0x1f << 16)) >> 16) - 1)
+ #define	EP_ID_FOR_TRB(p)		((((p) + 1) & 0x1f) << 16)
+ 
++#define SUSPEND_PORT_FOR_TRB(p)		(((p) & 1) << 23)
++#define TRB_TO_SUSPEND_PORT(p)		(((p) & (1 << 23)) >> 23)
++#define LAST_EP_INDEX			30
++
+ /* Set TR Dequeue Pointer command TRB fields */
+ #define TRB_TO_STREAM_ID(p)		((((p) & (0xffff << 16)) >> 16))
+ #define STREAM_ID_FOR_TRB(p)		((((p)) & 0xffff) << 16)
+@@ -1115,6 +1127,17 @@ struct urb_priv {
+ #define XHCI_STOP_EP_CMD_TIMEOUT	5
+ /* XXX: Make these module parameters */
+ 
++struct s3_save {
++	u32	command;
++	u32	dev_nt;
++	u64	dcbaa_ptr;
++	u32	config_reg;
++	u32	irq_pending;
++	u32	irq_control;
++	u32	erst_size;
++	u64	erst_base;
++	u64	erst_dequeue;
++};
+ 
+ /* There is one ehci_hci structure per controller */
+ struct xhci_hcd {
+@@ -1178,6 +1201,12 @@ struct xhci_hcd {
+ #endif
+ 	/* Host controller watchdog timer structures */
+ 	unsigned int		xhc_state;
++
++	unsigned long		bus_suspended;
++	unsigned long		next_statechange;
++
++	u32			command;
++	struct s3_save		s3;
+ /* Host controller is dying - not responding to commands. "I'm not dead yet!"
+  *
+  * xHC interrupts have been disabled and a watchdog timer will (or has already)
+@@ -1199,6 +1228,10 @@ struct xhci_hcd {
+	/* Array of pointers to USB 2.0 PORTSC registers */
+	u32 __iomem		**usb2_ports;
+	unsigned int		num_usb2_ports;
++	u32			port_c_suspend[8];	/* port suspend change*/
++	u32			suspended_ports[8];	/* which ports are
++							   suspended */
++	unsigned long		resume_done[MAX_HC_PORTS];
+ };
+ 
+ /* For testing purposes */
+@@ -1369,6 +1402,8 @@ int xhci_init(struct usb_hcd *hcd);
+ int xhci_run(struct usb_hcd *hcd);
+ void xhci_stop(struct usb_hcd *hcd);
+ void xhci_shutdown(struct usb_hcd *hcd);
++int xhci_suspend(struct xhci_hcd *xhci);
++int xhci_resume(struct xhci_hcd *xhci, bool hibernated);
+ int xhci_get_frame(struct usb_hcd *hcd);
+ irqreturn_t xhci_irq(struct usb_hcd *hcd);
+ irqreturn_t xhci_msi_irq(int irq, struct usb_hcd *hcd);
+@@ -1406,7 +1441,7 @@ int xhci_queue_address_device(struct xhci_hcd *xhci, dma_addr_t in_ctx_ptr,
+ int xhci_queue_vendor_command(struct xhci_hcd *xhci,
+ 		u32 field1, u32 field2, u32 field3, u32 field4);
+ int xhci_queue_stop_endpoint(struct xhci_hcd *xhci, int slot_id,
+-		unsigned int ep_index);
++		unsigned int ep_index, int suspend);
+ int xhci_queue_ctrl_tx(struct xhci_hcd *xhci, gfp_t mem_flags, struct urb *urb,
+ 		int slot_id, unsigned int ep_index);
+ int xhci_queue_bulk_tx(struct xhci_hcd *xhci, gfp_t mem_flags, struct urb *urb,
+@@ -1436,11 +1471,18 @@ void xhci_queue_config_ep_quirk(struct xhci_hcd *xhci,
+ 		unsigned int slot_id, unsigned int ep_index,
+ 		struct xhci_dequeue_state *deq_state);
+ void xhci_stop_endpoint_command_watchdog(unsigned long arg);
++void xhci_ring_ep_doorbell(struct xhci_hcd *xhci, unsigned int slot_id,
++		unsigned int ep_index, unsigned int stream_id);
+ 
+ /* xHCI roothub code */
+ int xhci_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue, u16 wIndex,
+ 		char *buf, u16 wLength);
+ int xhci_hub_status_data(struct usb_hcd *hcd, char *buf);
++int xhci_bus_suspend(struct usb_hcd *hcd);
++int xhci_bus_resume(struct usb_hcd *hcd);
++u32 xhci_port_state_to_neutral(u32 state);
++int xhci_find_slot_id_by_port(struct xhci_hcd *xhci, u16 port);
++void xhci_ring_device(struct xhci_hcd *xhci, int slot_id);
+ 
+ /* xHCI contexts */
+ struct xhci_input_control_ctx *xhci_get_input_control_ctx(struct xhci_hcd *xhci, struct xhci_container_ctx *ctx);