Merge branch 'button-cleanup' into release
diff --git a/Documentation/acpi/apei/output_format.txt b/Documentation/acpi/apei/output_format.txt
index 9146952..0c49c19 100644
--- a/Documentation/acpi/apei/output_format.txt
+++ b/Documentation/acpi/apei/output_format.txt
@@ -92,6 +92,11 @@
 class_code: <integer>]
 [serial number: <integer>, <integer>]
 [bridge: secondary_status: <integer>, control: <integer>]
+[aer_status: <integer>, aer_mask: <integer>
+<aer status string>
+[aer_uncor_severity: <integer>]
+aer_layer=<aer layer string>, aer_agent=<aer agent string>
+aer_tlp_header: <integer> <integer> <integer> <integer>]
 
 <pcie port type string>* := PCIe end point | legacy PCI end point | \
 unknown | unknown | root port | upstream switch port | \
@@ -99,6 +104,26 @@
 PCI/PCI-X to PCIe bridge | root complex integrated endpoint device | \
 root complex event collector
 
+if section severity is fatal or recoverable
+<aer status string># :=
+unknown | unknown | unknown | unknown | Data Link Protocol | \
+unknown | unknown | unknown | unknown | unknown | unknown | unknown | \
+Poisoned TLP | Flow Control Protocol | Completion Timeout | \
+Completer Abort | Unexpected Completion | Receiver Overflow | \
+Malformed TLP | ECRC | Unsupported Request
+else
+<aer status string># :=
+Receiver Error | unknown | unknown | unknown | unknown | unknown | \
+Bad TLP | Bad DLLP | RELAY_NUM Rollover | unknown | unknown | unknown | \
+Replay Timer Timeout | Advisory Non-Fatal
+fi
+
+<aer layer string> :=
+Physical Layer | Data Link Layer | Transaction Layer
+
+<aer agent string> :=
+Receiver ID | Requester ID | Completer ID | Transmitter ID
+
 Where, [] designate corresponding content is optional
 
 All <field string> description with * has the following format:
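
The <aer status string> grammar above is a bit-position decode of aer_status: each set bit selects the name at that position, and the table is chosen by section severity. A minimal user-space sketch of the fatal/recoverable decode, assuming the bit layout exactly as listed (names only, "unknown" bits skipped):

#include <stdio.h>

/* Sketch only: table transcribed from the fatal/recoverable list above;
 * NULL entries correspond to the "unknown" bit positions. */
static const char *aer_uncor_strings[] = {
	NULL, NULL, NULL, NULL,			/* bits 0-3: unknown */
	"Data Link Protocol",			/* bit 4 */
	NULL, NULL, NULL, NULL, NULL, NULL, NULL, /* bits 5-11: unknown */
	"Poisoned TLP", "Flow Control Protocol", "Completion Timeout",
	"Completer Abort", "Unexpected Completion", "Receiver Overflow",
	"Malformed TLP", "ECRC", "Unsupported Request", /* bits 12-20 */
};

static void print_aer_status(unsigned int status)
{
	unsigned int i;

	for (i = 0; i < sizeof(aer_uncor_strings) / sizeof(aer_uncor_strings[0]); i++)
		if ((status & (1u << i)) && aer_uncor_strings[i])
			printf("%s\n", aer_uncor_strings[i]);
}

int main(void)
{
	print_aer_status(0x00041000);	/* Poisoned TLP + Malformed TLP */
	return 0;
}
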
diff --git a/MAINTAINERS b/MAINTAINERS
index 6f99e12..8afba63 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -1692,6 +1692,13 @@
 S:	Supported
 F:	scripts/checkpatch.pl
 
+CHINESE DOCUMENTATION
+M:	Harry Wei <harryxiyou@gmail.com>
+L:	xiyoulinuxkernelgroup@googlegroups.com
+L:	linux-kernel@zh-kernel.org (moderated for non-subscribers)
+S:	Maintained
+F:	Documentation/zh_CN/
+
 CISCO VIC ETHERNET NIC DRIVER
 M:	Vasanthy Kolluri <vkolluri@cisco.com>
 M:	Roopa Prabhu <roprabhu@cisco.com>
@@ -5266,7 +5273,7 @@
 F:	drivers/net/wireless/rtl818x/rtl8180/
 
 RTL8187 WIRELESS DRIVER
-M:	Herton Ronaldo Krzesinski <herton@mandriva.com.br>
+M:	Herton Ronaldo Krzesinski <herton@canonical.com>
 M:	Hin-Tak Leung <htl10@users.sourceforge.net>
 M:	Larry Finger <Larry.Finger@lwfinger.net>
 L:	linux-wireless@vger.kernel.org
@@ -6104,7 +6111,7 @@
 F:	security/tomoyo/
 
 TOPSTAR LAPTOP EXTRAS DRIVER
-M:	Herton Ronaldo Krzesinski <herton@mandriva.com.br>
+M:	Herton Ronaldo Krzesinski <herton@canonical.com>
 L:	platform-driver-x86@vger.kernel.org
 S:	Maintained
 F:	drivers/platform/x86/topstar-laptop.c
diff --git a/Makefile b/Makefile
index 26d7d82..2f7d922 100644
--- a/Makefile
+++ b/Makefile
@@ -1,7 +1,7 @@
 VERSION = 2
 PATCHLEVEL = 6
 SUBLEVEL = 38
-EXTRAVERSION = -rc6
+EXTRAVERSION = -rc7
 NAME = Flesh-Eating Bats with Fangs
 
 # *DOCUMENTATION*
diff --git a/arch/arm/mach-omap2/clkt_dpll.c b/arch/arm/mach-omap2/clkt_dpll.c
index 337392c..acb7ae5 100644
--- a/arch/arm/mach-omap2/clkt_dpll.c
+++ b/arch/arm/mach-omap2/clkt_dpll.c
@@ -77,7 +77,7 @@
 	dd = clk->dpll_data;
 
 	/* DPLL divider must result in a valid jitter correction val */
-	fint = clk->parent->rate / (n + 1);
+	fint = clk->parent->rate / n;
 	if (fint < DPLL_FINT_BAND1_MIN) {
 
 		pr_debug("rejecting n=%d due to Fint failure, "
diff --git a/arch/arm/mach-omap2/mailbox.c b/arch/arm/mach-omap2/mailbox.c
index 394413d..0a585df 100644
--- a/arch/arm/mach-omap2/mailbox.c
+++ b/arch/arm/mach-omap2/mailbox.c
@@ -334,7 +334,7 @@
 	.priv	= &omap2_mbox_iva_priv,
 };
 
-struct omap_mbox *omap2_mboxes[] = { &mbox_iva_info, &mbox_dsp_info, NULL };
+struct omap_mbox *omap2_mboxes[] = { &mbox_dsp_info, &mbox_iva_info, NULL };
 #endif
 
 #if defined(CONFIG_ARCH_OMAP4)
diff --git a/arch/arm/mach-omap2/mux.c b/arch/arm/mach-omap2/mux.c
index 98148b6..6c84659 100644
--- a/arch/arm/mach-omap2/mux.c
+++ b/arch/arm/mach-omap2/mux.c
@@ -605,7 +605,7 @@
 	list_for_each_entry(e, &partition->muxmodes, node) {
 		struct omap_mux *m = &e->mux;
 
-		(void)debugfs_create_file(m->muxnames[0], S_IWUGO, mux_dbg_dir,
+		(void)debugfs_create_file(m->muxnames[0], S_IWUSR, mux_dbg_dir,
 					  m, &omap_mux_dbg_signal_fops);
 	}
 }
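
This S_IWUGO -> S_IWUSR change (and the matching ones in pm-debug.c and smartreflex.c below) removes group/other write permission from debugfs control files, so only root can write them; files that also pass S_IRUGO stay world-readable. A hedged sketch of the resulting idiom, with hypothetical name, data pointer and fops:

#include <linux/debugfs.h>
#include <linux/stat.h>

/* Sketch only: my_dir, my_option and my_option_fops are placeholders. */
static u32 my_option;

static void my_debugfs_init(struct dentry *my_dir,
			    const struct file_operations *my_option_fops)
{
	/* S_IRUGO | S_IWUSR: readable by everyone, writable by owner only. */
	(void)debugfs_create_file("my_option", S_IRUGO | S_IWUSR, my_dir,
				  &my_option, my_option_fops);
}
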
diff --git a/arch/arm/mach-omap2/pm-debug.c b/arch/arm/mach-omap2/pm-debug.c
index 125f565..a5a83b3 100644
--- a/arch/arm/mach-omap2/pm-debug.c
+++ b/arch/arm/mach-omap2/pm-debug.c
@@ -637,14 +637,14 @@
 
 		}
 
-	(void) debugfs_create_file("enable_off_mode", S_IRUGO | S_IWUGO, d,
+	(void) debugfs_create_file("enable_off_mode", S_IRUGO | S_IWUSR, d,
 				   &enable_off_mode, &pm_dbg_option_fops);
-	(void) debugfs_create_file("sleep_while_idle", S_IRUGO | S_IWUGO, d,
+	(void) debugfs_create_file("sleep_while_idle", S_IRUGO | S_IWUSR, d,
 				   &sleep_while_idle, &pm_dbg_option_fops);
-	(void) debugfs_create_file("wakeup_timer_seconds", S_IRUGO | S_IWUGO, d,
+	(void) debugfs_create_file("wakeup_timer_seconds", S_IRUGO | S_IWUSR, d,
 				   &wakeup_timer_seconds, &pm_dbg_option_fops);
 	(void) debugfs_create_file("wakeup_timer_milliseconds",
-			S_IRUGO | S_IWUGO, d, &wakeup_timer_milliseconds,
+			S_IRUGO | S_IWUSR, d, &wakeup_timer_milliseconds,
 			&pm_dbg_option_fops);
 	pm_dbg_init_done = 1;
 
diff --git a/arch/arm/mach-omap2/prcm_mpu44xx.h b/arch/arm/mach-omap2/prcm_mpu44xx.h
index 729a644..3300ff6 100644
--- a/arch/arm/mach-omap2/prcm_mpu44xx.h
+++ b/arch/arm/mach-omap2/prcm_mpu44xx.h
@@ -38,8 +38,8 @@
 #define OMAP4430_PRCM_MPU_CPU1_INST		0x0800
 
 /* PRCM_MPU clockdomain register offsets (from instance start) */
-#define OMAP4430_PRCM_MPU_CPU0_MPU_CDOFFS	0x0000
-#define OMAP4430_PRCM_MPU_CPU1_MPU_CDOFFS	0x0000
+#define OMAP4430_PRCM_MPU_CPU0_MPU_CDOFFS	0x0018
+#define OMAP4430_PRCM_MPU_CPU1_MPU_CDOFFS	0x0018
 
 
 /*
diff --git a/arch/arm/mach-omap2/smartreflex.c b/arch/arm/mach-omap2/smartreflex.c
index c37e823..95ac336 100644
--- a/arch/arm/mach-omap2/smartreflex.c
+++ b/arch/arm/mach-omap2/smartreflex.c
@@ -900,7 +900,7 @@
 		return PTR_ERR(dbg_dir);
 	}
 
-	(void) debugfs_create_file("autocomp", S_IRUGO | S_IWUGO, dbg_dir,
+	(void) debugfs_create_file("autocomp", S_IRUGO | S_IWUSR, dbg_dir,
 				(void *)sr_info, &pm_sr_fops);
 	(void) debugfs_create_x32("errweight", S_IRUGO, dbg_dir,
 			&sr_info->err_weight);
@@ -939,7 +939,7 @@
 		strcpy(name, "volt_");
 		sprintf(volt_name, "%d", volt_data[i].volt_nominal);
 		strcat(name, volt_name);
-		(void) debugfs_create_x32(name, S_IRUGO | S_IWUGO, nvalue_dir,
+		(void) debugfs_create_x32(name, S_IRUGO | S_IWUSR, nvalue_dir,
 				&(sr_info->nvalue_table[i].nvalue));
 	}
 
diff --git a/arch/arm/mach-omap2/timer-gp.c b/arch/arm/mach-omap2/timer-gp.c
index 7b7c268..0fc550e 100644
--- a/arch/arm/mach-omap2/timer-gp.c
+++ b/arch/arm/mach-omap2/timer-gp.c
@@ -39,6 +39,7 @@
 #include <asm/mach/time.h>
 #include <plat/dmtimer.h>
 #include <asm/localtimer.h>
+#include <asm/sched_clock.h>
 
 #include "timer-gp.h"
 
@@ -190,6 +191,7 @@
 /*
  * clocksource
  */
+static DEFINE_CLOCK_DATA(cd);
 static struct omap_dm_timer *gpt_clocksource;
 static cycle_t clocksource_read_cycles(struct clocksource *cs)
 {
@@ -204,6 +206,15 @@
 	.flags		= CLOCK_SOURCE_IS_CONTINUOUS,
 };
 
+static void notrace dmtimer_update_sched_clock(void)
+{
+	u32 cyc;
+
+	cyc = omap_dm_timer_read_counter(gpt_clocksource);
+
+	update_sched_clock(&cd, cyc, (u32)~0);
+}
+
 /* Setup free-running counter for clocksource */
 static void __init omap2_gp_clocksource_init(void)
 {
@@ -224,6 +235,8 @@
 
 	omap_dm_timer_set_load_start(gpt, 1, 0);
 
+	init_sched_clock(&cd, dmtimer_update_sched_clock, 32, tick_rate);
+
 	if (clocksource_register_hz(&clocksource_gpt, tick_rate))
 		printk(err2, clocksource_gpt.name);
 }
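
The hunk above plugs the free-running GP timer into this kernel's sched_clock machinery (DEFINE_CLOCK_DATA / update_sched_clock / init_sched_clock), so sched_clock() is driven by the same counter as the clocksource. A condensed sketch of that registration pattern, with a hypothetical read_counter() standing in for omap_dm_timer_read_counter():

#include <linux/init.h>
#include <linux/types.h>
#include <asm/sched_clock.h>

static DEFINE_CLOCK_DATA(cd);

u32 read_counter(void);	/* hypothetical: current value of a 32-bit up-counter */

static void notrace my_update_sched_clock(void)
{
	/* Feed the latest counter value; (u32)~0 is the 32-bit counter mask. */
	update_sched_clock(&cd, read_counter(), (u32)~0);
}

static void __init my_sched_clock_init(unsigned long tick_rate)
{
	/* Register a 32-bit counter running at tick_rate Hz. */
	init_sched_clock(&cd, my_update_sched_clock, 32, tick_rate);
}
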
diff --git a/arch/arm/plat-omap/mailbox.c b/arch/arm/plat-omap/mailbox.c
index 459b319..49d3208 100644
--- a/arch/arm/plat-omap/mailbox.c
+++ b/arch/arm/plat-omap/mailbox.c
@@ -322,15 +322,18 @@
 
 struct omap_mbox *omap_mbox_get(const char *name, struct notifier_block *nb)
 {
-	struct omap_mbox *mbox;
-	int ret;
+	struct omap_mbox *_mbox, *mbox = NULL;
+	int i, ret;
 
 	if (!mboxes)
 		return ERR_PTR(-EINVAL);
 
-	for (mbox = *mboxes; mbox; mbox++)
-		if (!strcmp(mbox->name, name))
+	for (i = 0; (_mbox = mboxes[i]); i++) {
+		if (!strcmp(_mbox->name, name)) {
+			mbox = _mbox;
 			break;
+		}
+	}
 
 	if (!mbox)
 		return ERR_PTR(-ENOENT);
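
The omap_mbox_get() fix stops incrementing a struct omap_mbox pointer across what is actually an array of pointers (omap2_mboxes[] above is a NULL-terminated struct omap_mbox *[]) and indexes the array instead. A standalone sketch of the corrected lookup, with hypothetical item names:

#include <stddef.h>
#include <string.h>

struct item {
	const char *name;
};

static struct item a = { "dsp" }, b = { "iva" };
static struct item *items[] = { &a, &b, NULL };	/* NULL-terminated, like omap2_mboxes[] */

static struct item *item_get(const char *name)
{
	struct item *it;
	int i;

	/* Walk the array of pointers, not the pointed-to objects. */
	for (i = 0; (it = items[i]); i++)
		if (!strcmp(it->name, name))
			return it;

	return NULL;
}
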
diff --git a/arch/x86/include/asm/acpi.h b/arch/x86/include/asm/acpi.h
index aa92684..ef14da1 100644
--- a/arch/x86/include/asm/acpi.h
+++ b/arch/x86/include/asm/acpi.h
@@ -88,6 +88,7 @@
 extern int acpi_pci_disabled;
 extern int acpi_skip_timer_override;
 extern int acpi_use_timer_override;
+extern int acpi_fix_pin2_polarity;
 
 extern u8 acpi_sci_flags;
 extern int acpi_sci_override_gsi;
diff --git a/arch/x86/include/asm/smpboot_hooks.h b/arch/x86/include/asm/smpboot_hooks.h
index 6c22bf3..725b778 100644
--- a/arch/x86/include/asm/smpboot_hooks.h
+++ b/arch/x86/include/asm/smpboot_hooks.h
@@ -34,7 +34,7 @@
 	 */
 	CMOS_WRITE(0, 0xf);
 
-	*((volatile long *)phys_to_virt(apic->trampoline_phys_low)) = 0;
+	*((volatile u32 *)phys_to_virt(apic->trampoline_phys_low)) = 0;
 }
 
 static inline void __init smpboot_setup_io_apic(void)
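
The smpboot_hooks.h change matters on x86-64: a store through "volatile long *" is 8 bytes wide there, so clearing the 4-byte BIOS reset vector at trampoline_phys_low also clobbered the 4 bytes after it. Casting to a fixed-width u32 keeps the store at 4 bytes on both 32- and 64-bit kernels. A small user-space illustration of the store width, assuming an LP64 target:

#include <stdio.h>
#include <string.h>

int main(void)
{
	unsigned int words[3];

	memset(words, 0xff, sizeof(words));

	/* 4-byte store, as after the fix; a "volatile long *" store would be
	 * 8 bytes on LP64 and would also zero words[1]. */
	*(volatile unsigned int *)&words[0] = 0;

	printf("words[1] = 0x%x\n", words[1]);	/* still 0xffffffff */
	return 0;
}
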
diff --git a/arch/x86/kernel/acpi/boot.c b/arch/x86/kernel/acpi/boot.c
index b3a7113..3e6e2d6 100644
--- a/arch/x86/kernel/acpi/boot.c
+++ b/arch/x86/kernel/acpi/boot.c
@@ -72,6 +72,7 @@
 int acpi_sci_override_gsi __initdata;
 int acpi_skip_timer_override __initdata;
 int acpi_use_timer_override __initdata;
+int acpi_fix_pin2_polarity __initdata;
 
 #ifdef CONFIG_X86_LOCAL_APIC
 static u64 acpi_lapic_addr __initdata = APIC_DEFAULT_PHYS_BASE;
@@ -415,10 +416,15 @@
 		return 0;
 	}
 
-	if (acpi_skip_timer_override &&
-	    intsrc->source_irq == 0 && intsrc->global_irq == 2) {
-		printk(PREFIX "BIOS IRQ0 pin2 override ignored.\n");
-		return 0;
+	if (intsrc->source_irq == 0 && intsrc->global_irq == 2) {
+		if (acpi_skip_timer_override) {
+			printk(PREFIX "BIOS IRQ0 pin2 override ignored.\n");
+			return 0;
+		}
+		if (acpi_fix_pin2_polarity && (intsrc->inti_flags & ACPI_MADT_POLARITY_MASK)) {
+			intsrc->inti_flags &= ~ACPI_MADT_POLARITY_MASK;
+			printk(PREFIX "BIOS IRQ0 pin2 override: forcing polarity to high active.\n");
+		}
 	}
 
 	mp_override_legacy_irq(intsrc->source_irq,
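
The boot.c change adds a second way to handle a BIOS IRQ0->pin2 override: rather than dropping it (acpi_skip_timer_override), acpi_fix_pin2_polarity only clears the MADT polarity flags, so the override falls back to the bus-default polarity, which for ISA IRQ0 is active high. A hedged sketch of that flag fixup on a hypothetical flags word (mask value assumed, not taken from the ACPI headers):

#include <stdio.h>

#define MY_POLARITY_MASK 0x0003	/* hypothetical: low two bits carry polarity */

static unsigned short fix_pin2_polarity(unsigned short inti_flags)
{
	/* A cleared polarity field means "conforms to bus default",
	 * i.e. active high for ISA interrupts. */
	if (inti_flags & MY_POLARITY_MASK)
		inti_flags &= ~MY_POLARITY_MASK;
	return inti_flags;
}

int main(void)
{
	printf("0x%x\n", fix_pin2_polarity(0x000f));	/* prints 0xc */
	return 0;
}
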
diff --git a/arch/x86/kernel/apb_timer.c b/arch/x86/kernel/apb_timer.c
index 51ef31a..51d4e16 100644
--- a/arch/x86/kernel/apb_timer.c
+++ b/arch/x86/kernel/apb_timer.c
@@ -284,7 +284,7 @@
 	memcpy(&adev->evt, &apbt_clockevent, sizeof(struct clock_event_device));
 
 	if (mrst_timer_options == MRST_TIMER_LAPIC_APBT) {
-		apbt_clockevent.rating = APBT_CLOCKEVENT_RATING - 100;
+		adev->evt.rating = APBT_CLOCKEVENT_RATING - 100;
 		global_clock_event = &adev->evt;
 		printk(KERN_DEBUG "%s clockevent registered as global\n",
 		       global_clock_event->name);
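
The apb_timer.c fix lowers the rating on the per-device copy (adev->evt) instead of on the shared apbt_clockevent template, which had already been memcpy()'d into the device a few lines earlier; patching the template there was too late for this device and also mutated shared state. A tiny sketch of the copy-then-modify pattern, with hypothetical types:

#include <string.h>

struct clkevt {
	int rating;
};

static const struct clkevt template = { .rating = 150 };

struct dev_state {
	struct clkevt evt;
};

static void dev_init(struct dev_state *d, int demote)
{
	memcpy(&d->evt, &template, sizeof(d->evt));

	/* Adjust the copy the device will actually register, not the template. */
	if (demote)
		d->evt.rating = template.rating - 100;
}
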
diff --git a/arch/x86/kernel/cpu/mcheck/mce-apei.c b/arch/x86/kernel/cpu/mcheck/mce-apei.c
index 8209472..83930de 100644
--- a/arch/x86/kernel/cpu/mcheck/mce-apei.c
+++ b/arch/x86/kernel/cpu/mcheck/mce-apei.c
@@ -106,24 +106,34 @@
 ssize_t apei_read_mce(struct mce *m, u64 *record_id)
 {
 	struct cper_mce_record rcd;
-	ssize_t len;
+	int rc, pos;
 
-	len = erst_read_next(&rcd.hdr, sizeof(rcd));
-	if (len <= 0)
-		return len;
-	/* Can not skip other records in storage via ERST unless clear them */
-	else if (len != sizeof(rcd) ||
-		 uuid_le_cmp(rcd.hdr.creator_id, CPER_CREATOR_MCE)) {
-		if (printk_ratelimit())
-			pr_warning(
-			"MCE-APEI: Can not skip the unknown record in ERST");
-		return -EIO;
-	}
-
+	rc = erst_get_record_id_begin(&pos);
+	if (rc)
+		return rc;
+retry:
+	rc = erst_get_record_id_next(&pos, record_id);
+	if (rc)
+		goto out;
+	/* no more record */
+	if (*record_id == APEI_ERST_INVALID_RECORD_ID)
+		goto out;
+	rc = erst_read(*record_id, &rcd.hdr, sizeof(rcd));
+	/* someone else has cleared the record, try next one */
+	if (rc == -ENOENT)
+		goto retry;
+	else if (rc < 0)
+		goto out;
+	/* try to skip other type records in storage */
+	else if (rc != sizeof(rcd) ||
+		 uuid_le_cmp(rcd.hdr.creator_id, CPER_CREATOR_MCE))
+		goto retry;
 	memcpy(m, &rcd.mce, sizeof(*m));
-	*record_id = rcd.hdr.record_id;
+	rc = sizeof(*m);
+out:
+	erst_get_record_id_end();
 
-	return sizeof(*m);
+	return rc;
 }
 
 /* Check whether there is record in ERST */
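
The reworked apei_read_mce() above replaces erst_read_next() with the record-ID iteration API so it can skip records it does not own (other creators, or records cleared concurrently) instead of returning -EIO. A condensed sketch of that iteration protocol, with the per-record work left as a hypothetical handle_record(); the erst_* calls and APEI_ERST_INVALID_RECORD_ID are the ones used in the hunk, assumed to come from the APEI headers:

#include <linux/types.h>
#include <linux/errno.h>
#include <acpi/apei.h>

int handle_record(u64 record_id);	/* hypothetical */

static int scan_erst(void)
{
	int rc, pos;
	u64 record_id;

	rc = erst_get_record_id_begin(&pos);
	if (rc)
		return rc;

	for (;;) {
		rc = erst_get_record_id_next(&pos, &record_id);
		if (rc || record_id == APEI_ERST_INVALID_RECORD_ID)
			break;			/* error, or no more records */
		rc = handle_record(record_id);
		if (rc == -ENOENT)
			continue;		/* cleared by someone else, skip */
		if (rc < 0)
			break;
	}

	erst_get_record_id_end();
	return rc;
}
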
diff --git a/arch/x86/kernel/early-quirks.c b/arch/x86/kernel/early-quirks.c
index 76b8cd9..9efbdcc 100644
--- a/arch/x86/kernel/early-quirks.c
+++ b/arch/x86/kernel/early-quirks.c
@@ -143,15 +143,10 @@
 
 static u32 __init ati_sbx00_rev(int num, int slot, int func)
 {
-	u32 old, d;
+	u32 d;
 
-	d = read_pci_config(num, slot, func, 0x70);
-	old = d;
-	d &= ~(1<<8);
-	write_pci_config(num, slot, func, 0x70, d);
 	d = read_pci_config(num, slot, func, 0x8);
 	d &= 0xff;
-	write_pci_config(num, slot, func, 0x70, old);
 
 	return d;
 }
@@ -160,11 +155,14 @@
 {
 	u32 d, rev;
 
-	if (acpi_use_timer_override)
+	rev = ati_sbx00_rev(num, slot, func);
+	if (rev >= 0x40)
+		acpi_fix_pin2_polarity = 1;
+
+	if (rev > 0x13)
 		return;
 
-	rev = ati_sbx00_rev(num, slot, func);
-	if (rev > 0x13)
+	if (acpi_use_timer_override)
 		return;
 
 	/* check for IRQ0 interrupt swap */
diff --git a/arch/x86/kernel/reboot.c b/arch/x86/kernel/reboot.c
index fc7aae1..715037c 100644
--- a/arch/x86/kernel/reboot.c
+++ b/arch/x86/kernel/reboot.c
@@ -285,6 +285,14 @@
 			DMI_MATCH(DMI_BOARD_NAME, "P4S800"),
 		},
 	},
+	{	/* Handle problems with rebooting on VersaLogic Menlow boards */
+		.callback = set_bios_reboot,
+		.ident = "VersaLogic Menlow based board",
+		.matches = {
+			DMI_MATCH(DMI_BOARD_VENDOR, "VersaLogic Corporation"),
+			DMI_MATCH(DMI_BOARD_NAME, "VersaLogic Menlow board"),
+		},
+	},
 	{ }
 };
 
diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
index 54ce246..63fec15 100644
--- a/arch/x86/kvm/svm.c
+++ b/arch/x86/kvm/svm.c
@@ -2777,6 +2777,8 @@
 			kvm_register_write(&svm->vcpu, reg, val);
 	}
 
+	skip_emulated_instruction(&svm->vcpu);
+
 	return 1;
 }
 
diff --git a/block/genhd.c b/block/genhd.c
index 6a5b772..cbf1112 100644
--- a/block/genhd.c
+++ b/block/genhd.c
@@ -1355,7 +1355,7 @@
 	struct block_device *bdev = bdget_disk(disk, partno);
 	if (bdev) {
 		fsync_bdev(bdev);
-		res = __invalidate_device(bdev);
+		res = __invalidate_device(bdev, true);
 		bdput(bdev);
 	}
 	return res;
diff --git a/block/ioctl.c b/block/ioctl.c
index 9049d46..1124cd2 100644
--- a/block/ioctl.c
+++ b/block/ioctl.c
@@ -294,9 +294,11 @@
 			return -EINVAL;
 		if (get_user(n, (int __user *) arg))
 			return -EFAULT;
-		if (!(mode & FMODE_EXCL) &&
-		    blkdev_get(bdev, mode | FMODE_EXCL, &bdev) < 0)
-			return -EBUSY;
+		if (!(mode & FMODE_EXCL)) {
+			bdgrab(bdev);
+			if (blkdev_get(bdev, mode | FMODE_EXCL, &bdev) < 0)
+				return -EBUSY;
+		}
 		ret = set_blocksize(bdev, n);
 		if (!(mode & FMODE_EXCL))
 			blkdev_put(bdev, mode | FMODE_EXCL);
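
The BLKBSZSET change takes an extra reference with bdgrab() before the temporary exclusive open, because blkdev_get() consumes the bdev reference handed to it (dropping it itself on failure, or via the later blkdev_put() on success); without the extra grab the ioctl would lose the caller's reference. A hedged sketch of that open/use/put discipline, reusing the calls from the hunk around a hypothetical callback:

#include <linux/fs.h>
#include <linux/errno.h>

/* Sketch only: fn() is a placeholder for the work done under the claim. */
static int with_temp_exclusive(struct block_device *bdev, fmode_t mode,
			       int (*fn)(struct block_device *))
{
	int ret;

	if (!(mode & FMODE_EXCL)) {
		bdgrab(bdev);		/* blkdev_get() will consume this ref */
		if (blkdev_get(bdev, mode | FMODE_EXCL, &bdev) < 0)
			return -EBUSY;
	}

	ret = fn(bdev);

	if (!(mode & FMODE_EXCL))
		blkdev_put(bdev, mode | FMODE_EXCL);

	return ret;
}
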
diff --git a/drivers/acpi/acpica/Makefile b/drivers/acpi/acpica/Makefile
index eec2ead..a122471 100644
--- a/drivers/acpi/acpica/Makefile
+++ b/drivers/acpi/acpica/Makefile
@@ -10,7 +10,7 @@
 
 acpi-y := dsfield.o   dsmthdat.o  dsopcode.o  dswexec.o  dswscope.o \
 	 dsmethod.o  dsobject.o  dsutils.o   dswload.o  dswstate.o \
-	 dsinit.o
+	 dsinit.o dsargs.o dscontrol.o dswload2.o
 
 acpi-y += evevent.o  evregion.o  evsci.o    evxfevnt.o \
 	 evmisc.o   evrgnini.o  evxface.o  evxfregn.o \
@@ -45,4 +45,4 @@
 acpi-y += utalloc.o utdebug.o uteval.o utinit.o utmisc.o utxface.o \
 		utcopy.o utdelete.o utglobal.o utmath.o utobject.o \
 		utstate.o utmutex.o utobject.o utresrc.o utlock.o utids.o \
-		utosi.o utxferror.o
+		utosi.o utxferror.o utdecode.o
diff --git a/drivers/acpi/acpica/acdispat.h b/drivers/acpi/acpica/acdispat.h
index 666271b..2d1b7ff 100644
--- a/drivers/acpi/acpica/acdispat.h
+++ b/drivers/acpi/acpica/acdispat.h
@@ -48,7 +48,7 @@
 #define NAMEOF_ARG_NTE      "__A0"
 
 /*
- * dsopcode - support for late evaluation
+ * dsargs - execution of dynamic arguments for static objects
  */
 acpi_status
 acpi_ds_get_buffer_field_arguments(union acpi_operand_object *obj_desc);
@@ -62,6 +62,20 @@
 
 acpi_status acpi_ds_get_package_arguments(union acpi_operand_object *obj_desc);
 
+/*
+ * dscontrol - support for execution control opcodes
+ */
+acpi_status
+acpi_ds_exec_begin_control_op(struct acpi_walk_state *walk_state,
+			      union acpi_parse_object *op);
+
+acpi_status
+acpi_ds_exec_end_control_op(struct acpi_walk_state *walk_state,
+			    union acpi_parse_object *op);
+
+/*
+ * dsopcode - support for late operand evaluation
+ */
 acpi_status
 acpi_ds_eval_buffer_field_operands(struct acpi_walk_state *walk_state,
 				   union acpi_parse_object *op);
@@ -86,17 +100,6 @@
 acpi_status acpi_ds_initialize_region(acpi_handle obj_handle);
 
 /*
- * dsctrl - Parser/Interpreter interface, control stack routines
- */
-acpi_status
-acpi_ds_exec_begin_control_op(struct acpi_walk_state *walk_state,
-			      union acpi_parse_object *op);
-
-acpi_status
-acpi_ds_exec_end_control_op(struct acpi_walk_state *walk_state,
-			    union acpi_parse_object *op);
-
-/*
  * dsexec - Parser/Interpreter interface, method execution callbacks
  */
 acpi_status
@@ -136,23 +139,26 @@
 			   struct acpi_walk_state *walk_state);
 
 /*
- * dsload - Parser/Interpreter interface, namespace load callbacks
+ * dsload - Parser/Interpreter interface, pass 1 namespace load callbacks
  */
 acpi_status
+acpi_ds_init_callbacks(struct acpi_walk_state *walk_state, u32 pass_number);
+
+acpi_status
 acpi_ds_load1_begin_op(struct acpi_walk_state *walk_state,
 		       union acpi_parse_object **out_op);
 
 acpi_status acpi_ds_load1_end_op(struct acpi_walk_state *walk_state);
 
+/*
+ * dsload - Parser/Interpreter interface, pass 2 namespace load callbacks
+ */
 acpi_status
 acpi_ds_load2_begin_op(struct acpi_walk_state *walk_state,
 		       union acpi_parse_object **out_op);
 
 acpi_status acpi_ds_load2_end_op(struct acpi_walk_state *walk_state);
 
-acpi_status
-acpi_ds_init_callbacks(struct acpi_walk_state *walk_state, u32 pass_number);
-
 /*
  * dsmthdat - method data (locals/args)
  */
diff --git a/drivers/acpi/acpica/acglobal.h b/drivers/acpi/acpica/acglobal.h
index 82a1bd2..d69750b 100644
--- a/drivers/acpi/acpica/acglobal.h
+++ b/drivers/acpi/acpica/acglobal.h
@@ -273,6 +273,10 @@
 ACPI_EXTERN u8 acpi_gbl_last_owner_id_index;
 ACPI_EXTERN u8 acpi_gbl_next_owner_id_offset;
 
+/* Initialization sequencing */
+
+ACPI_EXTERN u8 acpi_gbl_reg_methods_executed;
+
 /* Misc */
 
 ACPI_EXTERN u32 acpi_gbl_original_mode;
diff --git a/drivers/acpi/acpica/aclocal.h b/drivers/acpi/acpica/aclocal.h
index 54784bb..c7f743c 100644
--- a/drivers/acpi/acpica/aclocal.h
+++ b/drivers/acpi/acpica/aclocal.h
@@ -89,25 +89,6 @@
 #define ACPI_MAX_MUTEX                  7
 #define ACPI_NUM_MUTEX                  ACPI_MAX_MUTEX+1
 
-#if defined(ACPI_DEBUG_OUTPUT) || defined(ACPI_DEBUGGER)
-#ifdef DEFINE_ACPI_GLOBALS
-
-/* Debug names for the mutexes above */
-
-static char *acpi_gbl_mutex_names[ACPI_NUM_MUTEX] = {
-	"ACPI_MTX_Interpreter",
-	"ACPI_MTX_Namespace",
-	"ACPI_MTX_Tables",
-	"ACPI_MTX_Events",
-	"ACPI_MTX_Caches",
-	"ACPI_MTX_Memory",
-	"ACPI_MTX_CommandComplete",
-	"ACPI_MTX_CommandReady"
-};
-
-#endif
-#endif
-
 /* Lock structure for reader/writer interfaces */
 
 struct acpi_rw_lock {
@@ -416,10 +397,15 @@
 	u8 originally_enabled;  /* True if GPE was originally enabled */
 };
 
+struct acpi_gpe_notify_object {
+	struct acpi_namespace_node *node;
+	struct acpi_gpe_notify_object *next;
+};
+
 union acpi_gpe_dispatch_info {
 	struct acpi_namespace_node *method_node;	/* Method node for this GPE level */
 	struct acpi_gpe_handler_info *handler;  /* Installed GPE handler */
-	struct acpi_namespace_node *device_node;        /* Parent _PRW device for implicit notify */
+	struct acpi_gpe_notify_object device;   /* List of _PRW devices for implicit notify */
 };
 
 /*
diff --git a/drivers/acpi/acpica/dsargs.c b/drivers/acpi/acpica/dsargs.c
new file mode 100644
index 0000000..8c7b997
--- /dev/null
+++ b/drivers/acpi/acpica/dsargs.c
@@ -0,0 +1,391 @@
+/******************************************************************************
+ *
+ * Module Name: dsargs - Support for execution of dynamic arguments for static
+ *                       objects (regions, fields, buffer fields, etc.)
+ *
+ *****************************************************************************/
+
+/*
+ * Copyright (C) 2000 - 2011, Intel Corp.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions, and the following disclaimer,
+ *    without modification.
+ * 2. Redistributions in binary form must reproduce at minimum a disclaimer
+ *    substantially similar to the "NO WARRANTY" disclaimer below
+ *    ("Disclaimer") and any redistribution must be conditioned upon
+ *    including a substantially similar Disclaimer requirement for further
+ *    binary redistribution.
+ * 3. Neither the names of the above-listed copyright holders nor the names
+ *    of any contributors may be used to endorse or promote products derived
+ *    from this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * NO WARRANTY
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+ * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
+ * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGES.
+ */
+
+#include <acpi/acpi.h>
+#include "accommon.h"
+#include "acparser.h"
+#include "amlcode.h"
+#include "acdispat.h"
+#include "acnamesp.h"
+
+#define _COMPONENT          ACPI_DISPATCHER
+ACPI_MODULE_NAME("dsargs")
+
+/* Local prototypes */
+static acpi_status
+acpi_ds_execute_arguments(struct acpi_namespace_node *node,
+			  struct acpi_namespace_node *scope_node,
+			  u32 aml_length, u8 *aml_start);
+
+/*******************************************************************************
+ *
+ * FUNCTION:    acpi_ds_execute_arguments
+ *
+ * PARAMETERS:  Node                - Object NS node
+ *              scope_node          - Parent NS node
+ *              aml_length          - Length of executable AML
+ *              aml_start           - Pointer to the AML
+ *
+ * RETURN:      Status.
+ *
+ * DESCRIPTION: Late (deferred) execution of region or field arguments
+ *
+ ******************************************************************************/
+
+static acpi_status
+acpi_ds_execute_arguments(struct acpi_namespace_node *node,
+			  struct acpi_namespace_node *scope_node,
+			  u32 aml_length, u8 *aml_start)
+{
+	acpi_status status;
+	union acpi_parse_object *op;
+	struct acpi_walk_state *walk_state;
+
+	ACPI_FUNCTION_TRACE(ds_execute_arguments);
+
+	/* Allocate a new parser op to be the root of the parsed tree */
+
+	op = acpi_ps_alloc_op(AML_INT_EVAL_SUBTREE_OP);
+	if (!op) {
+		return_ACPI_STATUS(AE_NO_MEMORY);
+	}
+
+	/* Save the Node for use in acpi_ps_parse_aml */
+
+	op->common.node = scope_node;
+
+	/* Create and initialize a new parser state */
+
+	walk_state = acpi_ds_create_walk_state(0, NULL, NULL, NULL);
+	if (!walk_state) {
+		status = AE_NO_MEMORY;
+		goto cleanup;
+	}
+
+	status = acpi_ds_init_aml_walk(walk_state, op, NULL, aml_start,
+				       aml_length, NULL, ACPI_IMODE_LOAD_PASS1);
+	if (ACPI_FAILURE(status)) {
+		acpi_ds_delete_walk_state(walk_state);
+		goto cleanup;
+	}
+
+	/* Mark this parse as a deferred opcode */
+
+	walk_state->parse_flags = ACPI_PARSE_DEFERRED_OP;
+	walk_state->deferred_node = node;
+
+	/* Pass1: Parse the entire declaration */
+
+	status = acpi_ps_parse_aml(walk_state);
+	if (ACPI_FAILURE(status)) {
+		goto cleanup;
+	}
+
+	/* Get and init the Op created above */
+
+	op->common.node = node;
+	acpi_ps_delete_parse_tree(op);
+
+	/* Evaluate the deferred arguments */
+
+	op = acpi_ps_alloc_op(AML_INT_EVAL_SUBTREE_OP);
+	if (!op) {
+		return_ACPI_STATUS(AE_NO_MEMORY);
+	}
+
+	op->common.node = scope_node;
+
+	/* Create and initialize a new parser state */
+
+	walk_state = acpi_ds_create_walk_state(0, NULL, NULL, NULL);
+	if (!walk_state) {
+		status = AE_NO_MEMORY;
+		goto cleanup;
+	}
+
+	/* Execute the opcode and arguments */
+
+	status = acpi_ds_init_aml_walk(walk_state, op, NULL, aml_start,
+				       aml_length, NULL, ACPI_IMODE_EXECUTE);
+	if (ACPI_FAILURE(status)) {
+		acpi_ds_delete_walk_state(walk_state);
+		goto cleanup;
+	}
+
+	/* Mark this execution as a deferred opcode */
+
+	walk_state->deferred_node = node;
+	status = acpi_ps_parse_aml(walk_state);
+
+      cleanup:
+	acpi_ps_delete_parse_tree(op);
+	return_ACPI_STATUS(status);
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION:    acpi_ds_get_buffer_field_arguments
+ *
+ * PARAMETERS:  obj_desc        - A valid buffer_field object
+ *
+ * RETURN:      Status.
+ *
+ * DESCRIPTION: Get buffer_field Buffer and Index. This implements the late
+ *              evaluation of these field attributes.
+ *
+ ******************************************************************************/
+
+acpi_status
+acpi_ds_get_buffer_field_arguments(union acpi_operand_object *obj_desc)
+{
+	union acpi_operand_object *extra_desc;
+	struct acpi_namespace_node *node;
+	acpi_status status;
+
+	ACPI_FUNCTION_TRACE_PTR(ds_get_buffer_field_arguments, obj_desc);
+
+	if (obj_desc->common.flags & AOPOBJ_DATA_VALID) {
+		return_ACPI_STATUS(AE_OK);
+	}
+
+	/* Get the AML pointer (method object) and buffer_field node */
+
+	extra_desc = acpi_ns_get_secondary_object(obj_desc);
+	node = obj_desc->buffer_field.node;
+
+	ACPI_DEBUG_EXEC(acpi_ut_display_init_pathname(ACPI_TYPE_BUFFER_FIELD,
+						      node, NULL));
+
+	ACPI_DEBUG_PRINT((ACPI_DB_EXEC, "[%4.4s] BufferField Arg Init\n",
+			  acpi_ut_get_node_name(node)));
+
+	/* Execute the AML code for the term_arg arguments */
+
+	status = acpi_ds_execute_arguments(node, node->parent,
+					   extra_desc->extra.aml_length,
+					   extra_desc->extra.aml_start);
+	return_ACPI_STATUS(status);
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION:    acpi_ds_get_bank_field_arguments
+ *
+ * PARAMETERS:  obj_desc        - A valid bank_field object
+ *
+ * RETURN:      Status.
+ *
+ * DESCRIPTION: Get bank_field bank_value. This implements the late
+ *              evaluation of these field attributes.
+ *
+ ******************************************************************************/
+
+acpi_status
+acpi_ds_get_bank_field_arguments(union acpi_operand_object *obj_desc)
+{
+	union acpi_operand_object *extra_desc;
+	struct acpi_namespace_node *node;
+	acpi_status status;
+
+	ACPI_FUNCTION_TRACE_PTR(ds_get_bank_field_arguments, obj_desc);
+
+	if (obj_desc->common.flags & AOPOBJ_DATA_VALID) {
+		return_ACPI_STATUS(AE_OK);
+	}
+
+	/* Get the AML pointer (method object) and bank_field node */
+
+	extra_desc = acpi_ns_get_secondary_object(obj_desc);
+	node = obj_desc->bank_field.node;
+
+	ACPI_DEBUG_EXEC(acpi_ut_display_init_pathname
+			(ACPI_TYPE_LOCAL_BANK_FIELD, node, NULL));
+
+	ACPI_DEBUG_PRINT((ACPI_DB_EXEC, "[%4.4s] BankField Arg Init\n",
+			  acpi_ut_get_node_name(node)));
+
+	/* Execute the AML code for the term_arg arguments */
+
+	status = acpi_ds_execute_arguments(node, node->parent,
+					   extra_desc->extra.aml_length,
+					   extra_desc->extra.aml_start);
+	return_ACPI_STATUS(status);
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION:    acpi_ds_get_buffer_arguments
+ *
+ * PARAMETERS:  obj_desc        - A valid Buffer object
+ *
+ * RETURN:      Status.
+ *
+ * DESCRIPTION: Get Buffer length and initializer byte list. This implements
+ *              the late evaluation of these attributes.
+ *
+ ******************************************************************************/
+
+acpi_status acpi_ds_get_buffer_arguments(union acpi_operand_object *obj_desc)
+{
+	struct acpi_namespace_node *node;
+	acpi_status status;
+
+	ACPI_FUNCTION_TRACE_PTR(ds_get_buffer_arguments, obj_desc);
+
+	if (obj_desc->common.flags & AOPOBJ_DATA_VALID) {
+		return_ACPI_STATUS(AE_OK);
+	}
+
+	/* Get the Buffer node */
+
+	node = obj_desc->buffer.node;
+	if (!node) {
+		ACPI_ERROR((AE_INFO,
+			    "No pointer back to namespace node in buffer object %p",
+			    obj_desc));
+		return_ACPI_STATUS(AE_AML_INTERNAL);
+	}
+
+	ACPI_DEBUG_PRINT((ACPI_DB_EXEC, "Buffer Arg Init\n"));
+
+	/* Execute the AML code for the term_arg arguments */
+
+	status = acpi_ds_execute_arguments(node, node,
+					   obj_desc->buffer.aml_length,
+					   obj_desc->buffer.aml_start);
+	return_ACPI_STATUS(status);
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION:    acpi_ds_get_package_arguments
+ *
+ * PARAMETERS:  obj_desc        - A valid Package object
+ *
+ * RETURN:      Status.
+ *
+ * DESCRIPTION: Get Package length and initializer byte list. This implements
+ *              the late evaluation of these attributes.
+ *
+ ******************************************************************************/
+
+acpi_status acpi_ds_get_package_arguments(union acpi_operand_object *obj_desc)
+{
+	struct acpi_namespace_node *node;
+	acpi_status status;
+
+	ACPI_FUNCTION_TRACE_PTR(ds_get_package_arguments, obj_desc);
+
+	if (obj_desc->common.flags & AOPOBJ_DATA_VALID) {
+		return_ACPI_STATUS(AE_OK);
+	}
+
+	/* Get the Package node */
+
+	node = obj_desc->package.node;
+	if (!node) {
+		ACPI_ERROR((AE_INFO,
+			    "No pointer back to namespace node in package %p",
+			    obj_desc));
+		return_ACPI_STATUS(AE_AML_INTERNAL);
+	}
+
+	ACPI_DEBUG_PRINT((ACPI_DB_EXEC, "Package Arg Init\n"));
+
+	/* Execute the AML code for the term_arg arguments */
+
+	status = acpi_ds_execute_arguments(node, node,
+					   obj_desc->package.aml_length,
+					   obj_desc->package.aml_start);
+	return_ACPI_STATUS(status);
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION:    acpi_ds_get_region_arguments
+ *
+ * PARAMETERS:  obj_desc        - A valid region object
+ *
+ * RETURN:      Status.
+ *
+ * DESCRIPTION: Get region address and length. This implements the late
+ *              evaluation of these region attributes.
+ *
+ ******************************************************************************/
+
+acpi_status acpi_ds_get_region_arguments(union acpi_operand_object *obj_desc)
+{
+	struct acpi_namespace_node *node;
+	acpi_status status;
+	union acpi_operand_object *extra_desc;
+
+	ACPI_FUNCTION_TRACE_PTR(ds_get_region_arguments, obj_desc);
+
+	if (obj_desc->region.flags & AOPOBJ_DATA_VALID) {
+		return_ACPI_STATUS(AE_OK);
+	}
+
+	extra_desc = acpi_ns_get_secondary_object(obj_desc);
+	if (!extra_desc) {
+		return_ACPI_STATUS(AE_NOT_EXIST);
+	}
+
+	/* Get the Region node */
+
+	node = obj_desc->region.node;
+
+	ACPI_DEBUG_EXEC(acpi_ut_display_init_pathname
+			(ACPI_TYPE_REGION, node, NULL));
+
+	ACPI_DEBUG_PRINT((ACPI_DB_EXEC, "[%4.4s] OpRegion Arg Init at AML %p\n",
+			  acpi_ut_get_node_name(node),
+			  extra_desc->extra.aml_start));
+
+	/* Execute the argument AML */
+
+	status = acpi_ds_execute_arguments(node, node->parent,
+					   extra_desc->extra.aml_length,
+					   extra_desc->extra.aml_start);
+	return_ACPI_STATUS(status);
+}
diff --git a/drivers/acpi/acpica/dscontrol.c b/drivers/acpi/acpica/dscontrol.c
new file mode 100644
index 0000000..26c49ff
--- /dev/null
+++ b/drivers/acpi/acpica/dscontrol.c
@@ -0,0 +1,410 @@
+/******************************************************************************
+ *
+ * Module Name: dscontrol - Support for execution control opcodes -
+ *                          if/else/while/return
+ *
+ *****************************************************************************/
+
+/*
+ * Copyright (C) 2000 - 2011, Intel Corp.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions, and the following disclaimer,
+ *    without modification.
+ * 2. Redistributions in binary form must reproduce at minimum a disclaimer
+ *    substantially similar to the "NO WARRANTY" disclaimer below
+ *    ("Disclaimer") and any redistribution must be conditioned upon
+ *    including a substantially similar Disclaimer requirement for further
+ *    binary redistribution.
+ * 3. Neither the names of the above-listed copyright holders nor the names
+ *    of any contributors may be used to endorse or promote products derived
+ *    from this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * NO WARRANTY
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+ * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
+ * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGES.
+ */
+
+#include <acpi/acpi.h>
+#include "accommon.h"
+#include "amlcode.h"
+#include "acdispat.h"
+#include "acinterp.h"
+
+#define _COMPONENT          ACPI_DISPATCHER
+ACPI_MODULE_NAME("dscontrol")
+
+/*******************************************************************************
+ *
+ * FUNCTION:    acpi_ds_exec_begin_control_op
+ *
+ * PARAMETERS:  walk_list       - The list that owns the walk stack
+ *              Op              - The control Op
+ *
+ * RETURN:      Status
+ *
+ * DESCRIPTION: Handles all control ops encountered during control method
+ *              execution.
+ *
+ ******************************************************************************/
+acpi_status
+acpi_ds_exec_begin_control_op(struct acpi_walk_state *walk_state,
+			      union acpi_parse_object *op)
+{
+	acpi_status status = AE_OK;
+	union acpi_generic_state *control_state;
+
+	ACPI_FUNCTION_NAME(ds_exec_begin_control_op);
+
+	ACPI_DEBUG_PRINT((ACPI_DB_DISPATCH, "Op=%p Opcode=%2.2X State=%p\n",
+			  op, op->common.aml_opcode, walk_state));
+
+	switch (op->common.aml_opcode) {
+	case AML_WHILE_OP:
+
+		/*
+		 * If this is an additional iteration of a while loop, continue.
+		 * There is no need to allocate a new control state.
+		 */
+		if (walk_state->control_state) {
+			if (walk_state->control_state->control.
+			    aml_predicate_start ==
+			    (walk_state->parser_state.aml - 1)) {
+
+				/* Reset the state to start-of-loop */
+
+				walk_state->control_state->common.state =
+				    ACPI_CONTROL_CONDITIONAL_EXECUTING;
+				break;
+			}
+		}
+
+		/*lint -fallthrough */
+
+	case AML_IF_OP:
+
+		/*
+		 * IF/WHILE: Create a new control state to manage these
+		 * constructs. We need to manage these as a stack, in order
+		 * to handle nesting.
+		 */
+		control_state = acpi_ut_create_control_state();
+		if (!control_state) {
+			status = AE_NO_MEMORY;
+			break;
+		}
+		/*
+		 * Save a pointer to the predicate for multiple executions
+		 * of a loop
+		 */
+		control_state->control.aml_predicate_start =
+		    walk_state->parser_state.aml - 1;
+		control_state->control.package_end =
+		    walk_state->parser_state.pkg_end;
+		control_state->control.opcode = op->common.aml_opcode;
+
+		/* Push the control state on this walk's control stack */
+
+		acpi_ut_push_generic_state(&walk_state->control_state,
+					   control_state);
+		break;
+
+	case AML_ELSE_OP:
+
+		/* Predicate is in the state object */
+		/* If predicate is true, the IF was executed, ignore ELSE part */
+
+		if (walk_state->last_predicate) {
+			status = AE_CTRL_TRUE;
+		}
+
+		break;
+
+	case AML_RETURN_OP:
+
+		break;
+
+	default:
+		break;
+	}
+
+	return (status);
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION:    acpi_ds_exec_end_control_op
+ *
+ * PARAMETERS:  walk_list       - The list that owns the walk stack
+ *              Op              - The control Op
+ *
+ * RETURN:      Status
+ *
+ * DESCRIPTION: Handles all control ops encountered during control method
+ *              execution.
+ *
+ ******************************************************************************/
+
+acpi_status
+acpi_ds_exec_end_control_op(struct acpi_walk_state * walk_state,
+			    union acpi_parse_object * op)
+{
+	acpi_status status = AE_OK;
+	union acpi_generic_state *control_state;
+
+	ACPI_FUNCTION_NAME(ds_exec_end_control_op);
+
+	switch (op->common.aml_opcode) {
+	case AML_IF_OP:
+
+		ACPI_DEBUG_PRINT((ACPI_DB_DISPATCH, "[IF_OP] Op=%p\n", op));
+
+		/*
+		 * Save the result of the predicate in case there is an
+		 * ELSE to come
+		 */
+		walk_state->last_predicate =
+		    (u8)walk_state->control_state->common.value;
+
+		/*
+		 * Pop the control state that was created at the start
+		 * of the IF and free it
+		 */
+		control_state =
+		    acpi_ut_pop_generic_state(&walk_state->control_state);
+		acpi_ut_delete_generic_state(control_state);
+		break;
+
+	case AML_ELSE_OP:
+
+		break;
+
+	case AML_WHILE_OP:
+
+		ACPI_DEBUG_PRINT((ACPI_DB_DISPATCH, "[WHILE_OP] Op=%p\n", op));
+
+		control_state = walk_state->control_state;
+		if (control_state->common.value) {
+
+			/* Predicate was true, the body of the loop was just executed */
+
+			/*
+			 * This loop counter mechanism allows the interpreter to escape
+			 * possibly infinite loops. This can occur in poorly written AML
+			 * when the hardware does not respond within a while loop and the
+			 * loop does not implement a timeout.
+			 */
+			control_state->control.loop_count++;
+			if (control_state->control.loop_count >
+			    ACPI_MAX_LOOP_ITERATIONS) {
+				status = AE_AML_INFINITE_LOOP;
+				break;
+			}
+
+			/*
+			 * Go back and evaluate the predicate and maybe execute the loop
+			 * another time
+			 */
+			status = AE_CTRL_PENDING;
+			walk_state->aml_last_while =
+			    control_state->control.aml_predicate_start;
+			break;
+		}
+
+		/* Predicate was false, terminate this while loop */
+
+		ACPI_DEBUG_PRINT((ACPI_DB_DISPATCH,
+				  "[WHILE_OP] termination! Op=%p\n", op));
+
+		/* Pop this control state and free it */
+
+		control_state =
+		    acpi_ut_pop_generic_state(&walk_state->control_state);
+		acpi_ut_delete_generic_state(control_state);
+		break;
+
+	case AML_RETURN_OP:
+
+		ACPI_DEBUG_PRINT((ACPI_DB_DISPATCH,
+				  "[RETURN_OP] Op=%p Arg=%p\n", op,
+				  op->common.value.arg));
+
+		/*
+		 * One optional operand -- the return value
+		 * It can be either an immediate operand or a result that
+		 * has been bubbled up the tree
+		 */
+		if (op->common.value.arg) {
+
+			/* Since we have a real Return(), delete any implicit return */
+
+			acpi_ds_clear_implicit_return(walk_state);
+
+			/* Return statement has an immediate operand */
+
+			status =
+			    acpi_ds_create_operands(walk_state,
+						    op->common.value.arg);
+			if (ACPI_FAILURE(status)) {
+				return (status);
+			}
+
+			/*
+			 * If value being returned is a Reference (such as
+			 * an arg or local), resolve it now because it may
+			 * cease to exist at the end of the method.
+			 */
+			status =
+			    acpi_ex_resolve_to_value(&walk_state->operands[0],
+						     walk_state);
+			if (ACPI_FAILURE(status)) {
+				return (status);
+			}
+
+			/*
+			 * Get the return value and save as the last result
+			 * value.  This is the only place where walk_state->return_desc
+			 * is set to anything other than zero!
+			 */
+			walk_state->return_desc = walk_state->operands[0];
+		} else if (walk_state->result_count) {
+
+			/* Since we have a real Return(), delete any implicit return */
+
+			acpi_ds_clear_implicit_return(walk_state);
+
+			/*
+			 * The return value has come from a previous calculation.
+			 *
+			 * If value being returned is a Reference (such as
+			 * an arg or local), resolve it now because it may
+			 * cease to exist at the end of the method.
+			 *
+			 * Allow references created by the Index operator to return
+			 * unchanged.
+			 */
+			if ((ACPI_GET_DESCRIPTOR_TYPE
+			     (walk_state->results->results.obj_desc[0]) ==
+			     ACPI_DESC_TYPE_OPERAND)
+			    && ((walk_state->results->results.obj_desc[0])->
+				common.type == ACPI_TYPE_LOCAL_REFERENCE)
+			    && ((walk_state->results->results.obj_desc[0])->
+				reference.class != ACPI_REFCLASS_INDEX)) {
+				status =
+				    acpi_ex_resolve_to_value(&walk_state->
+							     results->results.
+							     obj_desc[0],
+							     walk_state);
+				if (ACPI_FAILURE(status)) {
+					return (status);
+				}
+			}
+
+			walk_state->return_desc =
+			    walk_state->results->results.obj_desc[0];
+		} else {
+			/* No return operand */
+
+			if (walk_state->num_operands) {
+				acpi_ut_remove_reference(walk_state->
+							 operands[0]);
+			}
+
+			walk_state->operands[0] = NULL;
+			walk_state->num_operands = 0;
+			walk_state->return_desc = NULL;
+		}
+
+		ACPI_DEBUG_PRINT((ACPI_DB_DISPATCH,
+				  "Completed RETURN_OP State=%p, RetVal=%p\n",
+				  walk_state, walk_state->return_desc));
+
+		/* End the control method execution right now */
+
+		status = AE_CTRL_TERMINATE;
+		break;
+
+	case AML_NOOP_OP:
+
+		/* Just do nothing! */
+		break;
+
+	case AML_BREAK_POINT_OP:
+
+		/*
+		 * Set the single-step flag. This will cause the debugger (if present)
+		 * to break to the console within the AML debugger at the start of the
+		 * next AML instruction.
+		 */
+		ACPI_DEBUGGER_EXEC(acpi_gbl_cm_single_step = TRUE);
+		ACPI_DEBUGGER_EXEC(acpi_os_printf
+				   ("**break** Executed AML BreakPoint opcode\n"));
+
+		/* Call to the OSL in case OS wants a piece of the action */
+
+		status = acpi_os_signal(ACPI_SIGNAL_BREAKPOINT,
+					"Executed AML Breakpoint opcode");
+		break;
+
+	case AML_BREAK_OP:
+	case AML_CONTINUE_OP:	/* ACPI 2.0 */
+
+		/* Pop and delete control states until we find a while */
+
+		while (walk_state->control_state &&
+		       (walk_state->control_state->control.opcode !=
+			AML_WHILE_OP)) {
+			control_state =
+			    acpi_ut_pop_generic_state(&walk_state->
+						      control_state);
+			acpi_ut_delete_generic_state(control_state);
+		}
+
+		/* No while found? */
+
+		if (!walk_state->control_state) {
+			return (AE_AML_NO_WHILE);
+		}
+
+		/* Was: walk_state->aml_last_while = walk_state->control_state->Control.aml_predicate_start; */
+
+		walk_state->aml_last_while =
+		    walk_state->control_state->control.package_end;
+
+		/* Return status depending on opcode */
+
+		if (op->common.aml_opcode == AML_BREAK_OP) {
+			status = AE_CTRL_BREAK;
+		} else {
+			status = AE_CTRL_CONTINUE;
+		}
+		break;
+
+	default:
+
+		ACPI_ERROR((AE_INFO, "Unknown control opcode=0x%X Op=%p",
+			    op->common.aml_opcode, op));
+
+		status = AE_AML_BAD_OPCODE;
+		break;
+	}
+
+	return (status);
+}
diff --git a/drivers/acpi/acpica/dsopcode.c b/drivers/acpi/acpica/dsopcode.c
index bbecf29..c627a28 100644
--- a/drivers/acpi/acpica/dsopcode.c
+++ b/drivers/acpi/acpica/dsopcode.c
@@ -1,7 +1,6 @@
 /******************************************************************************
  *
- * Module Name: dsopcode - Dispatcher Op Region support and handling of
- *                         "control" opcodes
+ * Module Name: dsopcode - Dispatcher support for regions and fields
  *
  *****************************************************************************/
 
@@ -57,11 +56,6 @@
 
 /* Local prototypes */
 static acpi_status
-acpi_ds_execute_arguments(struct acpi_namespace_node *node,
-			  struct acpi_namespace_node *scope_node,
-			  u32 aml_length, u8 * aml_start);
-
-static acpi_status
 acpi_ds_init_buffer_field(u16 aml_opcode,
 			  union acpi_operand_object *obj_desc,
 			  union acpi_operand_object *buffer_desc,
@@ -71,361 +65,6 @@
 
 /*******************************************************************************
  *
- * FUNCTION:    acpi_ds_execute_arguments
- *
- * PARAMETERS:  Node                - Object NS node
- *              scope_node          - Parent NS node
- *              aml_length          - Length of executable AML
- *              aml_start           - Pointer to the AML
- *
- * RETURN:      Status.
- *
- * DESCRIPTION: Late (deferred) execution of region or field arguments
- *
- ******************************************************************************/
-
-static acpi_status
-acpi_ds_execute_arguments(struct acpi_namespace_node *node,
-			  struct acpi_namespace_node *scope_node,
-			  u32 aml_length, u8 * aml_start)
-{
-	acpi_status status;
-	union acpi_parse_object *op;
-	struct acpi_walk_state *walk_state;
-
-	ACPI_FUNCTION_TRACE(ds_execute_arguments);
-
-	/*
-	 * Allocate a new parser op to be the root of the parsed tree
-	 */
-	op = acpi_ps_alloc_op(AML_INT_EVAL_SUBTREE_OP);
-	if (!op) {
-		return_ACPI_STATUS(AE_NO_MEMORY);
-	}
-
-	/* Save the Node for use in acpi_ps_parse_aml */
-
-	op->common.node = scope_node;
-
-	/* Create and initialize a new parser state */
-
-	walk_state = acpi_ds_create_walk_state(0, NULL, NULL, NULL);
-	if (!walk_state) {
-		status = AE_NO_MEMORY;
-		goto cleanup;
-	}
-
-	status = acpi_ds_init_aml_walk(walk_state, op, NULL, aml_start,
-				       aml_length, NULL, ACPI_IMODE_LOAD_PASS1);
-	if (ACPI_FAILURE(status)) {
-		acpi_ds_delete_walk_state(walk_state);
-		goto cleanup;
-	}
-
-	/* Mark this parse as a deferred opcode */
-
-	walk_state->parse_flags = ACPI_PARSE_DEFERRED_OP;
-	walk_state->deferred_node = node;
-
-	/* Pass1: Parse the entire declaration */
-
-	status = acpi_ps_parse_aml(walk_state);
-	if (ACPI_FAILURE(status)) {
-		goto cleanup;
-	}
-
-	/* Get and init the Op created above */
-
-	op->common.node = node;
-	acpi_ps_delete_parse_tree(op);
-
-	/* Evaluate the deferred arguments */
-
-	op = acpi_ps_alloc_op(AML_INT_EVAL_SUBTREE_OP);
-	if (!op) {
-		return_ACPI_STATUS(AE_NO_MEMORY);
-	}
-
-	op->common.node = scope_node;
-
-	/* Create and initialize a new parser state */
-
-	walk_state = acpi_ds_create_walk_state(0, NULL, NULL, NULL);
-	if (!walk_state) {
-		status = AE_NO_MEMORY;
-		goto cleanup;
-	}
-
-	/* Execute the opcode and arguments */
-
-	status = acpi_ds_init_aml_walk(walk_state, op, NULL, aml_start,
-				       aml_length, NULL, ACPI_IMODE_EXECUTE);
-	if (ACPI_FAILURE(status)) {
-		acpi_ds_delete_walk_state(walk_state);
-		goto cleanup;
-	}
-
-	/* Mark this execution as a deferred opcode */
-
-	walk_state->deferred_node = node;
-	status = acpi_ps_parse_aml(walk_state);
-
-      cleanup:
-	acpi_ps_delete_parse_tree(op);
-	return_ACPI_STATUS(status);
-}
-
-/*******************************************************************************
- *
- * FUNCTION:    acpi_ds_get_buffer_field_arguments
- *
- * PARAMETERS:  obj_desc        - A valid buffer_field object
- *
- * RETURN:      Status.
- *
- * DESCRIPTION: Get buffer_field Buffer and Index. This implements the late
- *              evaluation of these field attributes.
- *
- ******************************************************************************/
-
-acpi_status
-acpi_ds_get_buffer_field_arguments(union acpi_operand_object *obj_desc)
-{
-	union acpi_operand_object *extra_desc;
-	struct acpi_namespace_node *node;
-	acpi_status status;
-
-	ACPI_FUNCTION_TRACE_PTR(ds_get_buffer_field_arguments, obj_desc);
-
-	if (obj_desc->common.flags & AOPOBJ_DATA_VALID) {
-		return_ACPI_STATUS(AE_OK);
-	}
-
-	/* Get the AML pointer (method object) and buffer_field node */
-
-	extra_desc = acpi_ns_get_secondary_object(obj_desc);
-	node = obj_desc->buffer_field.node;
-
-	ACPI_DEBUG_EXEC(acpi_ut_display_init_pathname
-			(ACPI_TYPE_BUFFER_FIELD, node, NULL));
-	ACPI_DEBUG_PRINT((ACPI_DB_EXEC, "[%4.4s] BufferField Arg Init\n",
-			  acpi_ut_get_node_name(node)));
-
-	/* Execute the AML code for the term_arg arguments */
-
-	status = acpi_ds_execute_arguments(node, node->parent,
-					   extra_desc->extra.aml_length,
-					   extra_desc->extra.aml_start);
-	return_ACPI_STATUS(status);
-}
-
-/*******************************************************************************
- *
- * FUNCTION:    acpi_ds_get_bank_field_arguments
- *
- * PARAMETERS:  obj_desc        - A valid bank_field object
- *
- * RETURN:      Status.
- *
- * DESCRIPTION: Get bank_field bank_value. This implements the late
- *              evaluation of these field attributes.
- *
- ******************************************************************************/
-
-acpi_status
-acpi_ds_get_bank_field_arguments(union acpi_operand_object *obj_desc)
-{
-	union acpi_operand_object *extra_desc;
-	struct acpi_namespace_node *node;
-	acpi_status status;
-
-	ACPI_FUNCTION_TRACE_PTR(ds_get_bank_field_arguments, obj_desc);
-
-	if (obj_desc->common.flags & AOPOBJ_DATA_VALID) {
-		return_ACPI_STATUS(AE_OK);
-	}
-
-	/* Get the AML pointer (method object) and bank_field node */
-
-	extra_desc = acpi_ns_get_secondary_object(obj_desc);
-	node = obj_desc->bank_field.node;
-
-	ACPI_DEBUG_EXEC(acpi_ut_display_init_pathname
-			(ACPI_TYPE_LOCAL_BANK_FIELD, node, NULL));
-	ACPI_DEBUG_PRINT((ACPI_DB_EXEC, "[%4.4s] BankField Arg Init\n",
-			  acpi_ut_get_node_name(node)));
-
-	/* Execute the AML code for the term_arg arguments */
-
-	status = acpi_ds_execute_arguments(node, node->parent,
-					   extra_desc->extra.aml_length,
-					   extra_desc->extra.aml_start);
-	return_ACPI_STATUS(status);
-}
-
-/*******************************************************************************
- *
- * FUNCTION:    acpi_ds_get_buffer_arguments
- *
- * PARAMETERS:  obj_desc        - A valid Buffer object
- *
- * RETURN:      Status.
- *
- * DESCRIPTION: Get Buffer length and initializer byte list.  This implements
- *              the late evaluation of these attributes.
- *
- ******************************************************************************/
-
-acpi_status acpi_ds_get_buffer_arguments(union acpi_operand_object *obj_desc)
-{
-	struct acpi_namespace_node *node;
-	acpi_status status;
-
-	ACPI_FUNCTION_TRACE_PTR(ds_get_buffer_arguments, obj_desc);
-
-	if (obj_desc->common.flags & AOPOBJ_DATA_VALID) {
-		return_ACPI_STATUS(AE_OK);
-	}
-
-	/* Get the Buffer node */
-
-	node = obj_desc->buffer.node;
-	if (!node) {
-		ACPI_ERROR((AE_INFO,
-			    "No pointer back to namespace node in buffer object %p",
-			    obj_desc));
-		return_ACPI_STATUS(AE_AML_INTERNAL);
-	}
-
-	ACPI_DEBUG_PRINT((ACPI_DB_EXEC, "Buffer Arg Init\n"));
-
-	/* Execute the AML code for the term_arg arguments */
-
-	status = acpi_ds_execute_arguments(node, node,
-					   obj_desc->buffer.aml_length,
-					   obj_desc->buffer.aml_start);
-	return_ACPI_STATUS(status);
-}
-
-/*******************************************************************************
- *
- * FUNCTION:    acpi_ds_get_package_arguments
- *
- * PARAMETERS:  obj_desc        - A valid Package object
- *
- * RETURN:      Status.
- *
- * DESCRIPTION: Get Package length and initializer byte list.  This implements
- *              the late evaluation of these attributes.
- *
- ******************************************************************************/
-
-acpi_status acpi_ds_get_package_arguments(union acpi_operand_object *obj_desc)
-{
-	struct acpi_namespace_node *node;
-	acpi_status status;
-
-	ACPI_FUNCTION_TRACE_PTR(ds_get_package_arguments, obj_desc);
-
-	if (obj_desc->common.flags & AOPOBJ_DATA_VALID) {
-		return_ACPI_STATUS(AE_OK);
-	}
-
-	/* Get the Package node */
-
-	node = obj_desc->package.node;
-	if (!node) {
-		ACPI_ERROR((AE_INFO,
-			    "No pointer back to namespace node in package %p",
-			    obj_desc));
-		return_ACPI_STATUS(AE_AML_INTERNAL);
-	}
-
-	ACPI_DEBUG_PRINT((ACPI_DB_EXEC, "Package Arg Init\n"));
-
-	/* Execute the AML code for the term_arg arguments */
-
-	status = acpi_ds_execute_arguments(node, node,
-					   obj_desc->package.aml_length,
-					   obj_desc->package.aml_start);
-	return_ACPI_STATUS(status);
-}
-
-/*****************************************************************************
- *
- * FUNCTION:    acpi_ds_get_region_arguments
- *
- * PARAMETERS:  obj_desc        - A valid region object
- *
- * RETURN:      Status.
- *
- * DESCRIPTION: Get region address and length.  This implements the late
- *              evaluation of these region attributes.
- *
- ****************************************************************************/
-
-acpi_status acpi_ds_get_region_arguments(union acpi_operand_object *obj_desc)
-{
-	struct acpi_namespace_node *node;
-	acpi_status status;
-	union acpi_operand_object *extra_desc;
-
-	ACPI_FUNCTION_TRACE_PTR(ds_get_region_arguments, obj_desc);
-
-	if (obj_desc->region.flags & AOPOBJ_DATA_VALID) {
-		return_ACPI_STATUS(AE_OK);
-	}
-
-	extra_desc = acpi_ns_get_secondary_object(obj_desc);
-	if (!extra_desc) {
-		return_ACPI_STATUS(AE_NOT_EXIST);
-	}
-
-	/* Get the Region node */
-
-	node = obj_desc->region.node;
-
-	ACPI_DEBUG_EXEC(acpi_ut_display_init_pathname
-			(ACPI_TYPE_REGION, node, NULL));
-
-	ACPI_DEBUG_PRINT((ACPI_DB_EXEC, "[%4.4s] OpRegion Arg Init at AML %p\n",
-			  acpi_ut_get_node_name(node),
-			  extra_desc->extra.aml_start));
-
-	/* Execute the argument AML */
-
-	status = acpi_ds_execute_arguments(node, node->parent,
-					   extra_desc->extra.aml_length,
-					   extra_desc->extra.aml_start);
-	if (ACPI_FAILURE(status)) {
-		return_ACPI_STATUS(status);
-	}
-
-	/* Validate the region address/length via the host OS */
-
-	status = acpi_os_validate_address(obj_desc->region.space_id,
-					  obj_desc->region.address,
-					  (acpi_size) obj_desc->region.length,
-					  acpi_ut_get_node_name(node));
-
-	if (ACPI_FAILURE(status)) {
-		/*
-		 * Invalid address/length. We will emit an error message and mark
-		 * the region as invalid, so that it will cause an additional error if
-		 * it is ever used. Then return AE_OK.
-		 */
-		ACPI_EXCEPTION((AE_INFO, status,
-				"During address validation of OpRegion [%4.4s]",
-				node->name.ascii));
-		obj_desc->common.flags |= AOPOBJ_INVALID;
-		status = AE_OK;
-	}
-
-	return_ACPI_STATUS(status);
-}
-
-/*******************************************************************************
- *
  * FUNCTION:    acpi_ds_initialize_region
  *
  * PARAMETERS:  obj_handle      - Region namespace node
@@ -826,8 +465,9 @@
  *
  * RETURN:      Status
  *
- * DESCRIPTION: Get region address and length
- *              Called from acpi_ds_exec_end_op during data_table_region parse tree walk
+ * DESCRIPTION: Get region address and length.
+ *              Called from acpi_ds_exec_end_op during data_table_region parse
+ *              tree walk.
  *
  ******************************************************************************/
 
@@ -1114,360 +754,3 @@
 	acpi_ut_remove_reference(operand_desc);
 	return_ACPI_STATUS(status);
 }
-
-/*******************************************************************************
- *
- * FUNCTION:    acpi_ds_exec_begin_control_op
- *
- * PARAMETERS:  walk_list       - The list that owns the walk stack
- *              Op              - The control Op
- *
- * RETURN:      Status
- *
- * DESCRIPTION: Handles all control ops encountered during control method
- *              execution.
- *
- ******************************************************************************/
-
-acpi_status
-acpi_ds_exec_begin_control_op(struct acpi_walk_state *walk_state,
-			      union acpi_parse_object *op)
-{
-	acpi_status status = AE_OK;
-	union acpi_generic_state *control_state;
-
-	ACPI_FUNCTION_NAME(ds_exec_begin_control_op);
-
-	ACPI_DEBUG_PRINT((ACPI_DB_DISPATCH, "Op=%p Opcode=%2.2X State=%p\n", op,
-			  op->common.aml_opcode, walk_state));
-
-	switch (op->common.aml_opcode) {
-	case AML_WHILE_OP:
-
-		/*
-		 * If this is an additional iteration of a while loop, continue.
-		 * There is no need to allocate a new control state.
-		 */
-		if (walk_state->control_state) {
-			if (walk_state->control_state->control.aml_predicate_start
-				== (walk_state->parser_state.aml - 1)) {
-
-				/* Reset the state to start-of-loop */
-
-				walk_state->control_state->common.state =
-				    ACPI_CONTROL_CONDITIONAL_EXECUTING;
-				break;
-			}
-		}
-
-		/*lint -fallthrough */
-
-	case AML_IF_OP:
-
-		/*
-		 * IF/WHILE: Create a new control state to manage these
-		 * constructs. We need to manage these as a stack, in order
-		 * to handle nesting.
-		 */
-		control_state = acpi_ut_create_control_state();
-		if (!control_state) {
-			status = AE_NO_MEMORY;
-			break;
-		}
-		/*
-		 * Save a pointer to the predicate for multiple executions
-		 * of a loop
-		 */
-		control_state->control.aml_predicate_start =
-		    walk_state->parser_state.aml - 1;
-		control_state->control.package_end =
-		    walk_state->parser_state.pkg_end;
-		control_state->control.opcode = op->common.aml_opcode;
-
-		/* Push the control state on this walk's control stack */
-
-		acpi_ut_push_generic_state(&walk_state->control_state,
-					   control_state);
-		break;
-
-	case AML_ELSE_OP:
-
-		/* Predicate is in the state object */
-		/* If predicate is true, the IF was executed, ignore ELSE part */
-
-		if (walk_state->last_predicate) {
-			status = AE_CTRL_TRUE;
-		}
-
-		break;
-
-	case AML_RETURN_OP:
-
-		break;
-
-	default:
-		break;
-	}
-
-	return (status);
-}
-
-/*******************************************************************************
- *
- * FUNCTION:    acpi_ds_exec_end_control_op
- *
- * PARAMETERS:  walk_list       - The list that owns the walk stack
- *              Op              - The control Op
- *
- * RETURN:      Status
- *
- * DESCRIPTION: Handles all control ops encountered during control method
- *              execution.
- *
- ******************************************************************************/
-
-acpi_status
-acpi_ds_exec_end_control_op(struct acpi_walk_state * walk_state,
-			    union acpi_parse_object * op)
-{
-	acpi_status status = AE_OK;
-	union acpi_generic_state *control_state;
-
-	ACPI_FUNCTION_NAME(ds_exec_end_control_op);
-
-	switch (op->common.aml_opcode) {
-	case AML_IF_OP:
-
-		ACPI_DEBUG_PRINT((ACPI_DB_DISPATCH, "[IF_OP] Op=%p\n", op));
-
-		/*
-		 * Save the result of the predicate in case there is an
-		 * ELSE to come
-		 */
-		walk_state->last_predicate =
-		    (u8) walk_state->control_state->common.value;
-
-		/*
-		 * Pop the control state that was created at the start
-		 * of the IF and free it
-		 */
-		control_state =
-		    acpi_ut_pop_generic_state(&walk_state->control_state);
-		acpi_ut_delete_generic_state(control_state);
-		break;
-
-	case AML_ELSE_OP:
-
-		break;
-
-	case AML_WHILE_OP:
-
-		ACPI_DEBUG_PRINT((ACPI_DB_DISPATCH, "[WHILE_OP] Op=%p\n", op));
-
-		control_state = walk_state->control_state;
-		if (control_state->common.value) {
-
-			/* Predicate was true, the body of the loop was just executed */
-
-			/*
-			 * This loop counter mechanism allows the interpreter to escape
-			 * possibly infinite loops. This can occur in poorly written AML
-			 * when the hardware does not respond within a while loop and the
-			 * loop does not implement a timeout.
-			 */
-			control_state->control.loop_count++;
-			if (control_state->control.loop_count >
-				ACPI_MAX_LOOP_ITERATIONS) {
-				status = AE_AML_INFINITE_LOOP;
-				break;
-			}
-
-			/*
-			 * Go back and evaluate the predicate and maybe execute the loop
-			 * another time
-			 */
-			status = AE_CTRL_PENDING;
-			walk_state->aml_last_while =
-			    control_state->control.aml_predicate_start;
-			break;
-		}
-
-		/* Predicate was false, terminate this while loop */
-
-		ACPI_DEBUG_PRINT((ACPI_DB_DISPATCH,
-				  "[WHILE_OP] termination! Op=%p\n", op));
-
-		/* Pop this control state and free it */
-
-		control_state =
-		    acpi_ut_pop_generic_state(&walk_state->control_state);
-		acpi_ut_delete_generic_state(control_state);
-		break;
-
-	case AML_RETURN_OP:
-
-		ACPI_DEBUG_PRINT((ACPI_DB_DISPATCH,
-				  "[RETURN_OP] Op=%p Arg=%p\n", op,
-				  op->common.value.arg));
-
-		/*
-		 * One optional operand -- the return value
-		 * It can be either an immediate operand or a result that
-		 * has been bubbled up the tree
-		 */
-		if (op->common.value.arg) {
-
-			/* Since we have a real Return(), delete any implicit return */
-
-			acpi_ds_clear_implicit_return(walk_state);
-
-			/* Return statement has an immediate operand */
-
-			status =
-			    acpi_ds_create_operands(walk_state,
-						    op->common.value.arg);
-			if (ACPI_FAILURE(status)) {
-				return (status);
-			}
-
-			/*
-			 * If value being returned is a Reference (such as
-			 * an arg or local), resolve it now because it may
-			 * cease to exist at the end of the method.
-			 */
-			status =
-			    acpi_ex_resolve_to_value(&walk_state->operands[0],
-						     walk_state);
-			if (ACPI_FAILURE(status)) {
-				return (status);
-			}
-
-			/*
-			 * Get the return value and save as the last result
-			 * value.  This is the only place where walk_state->return_desc
-			 * is set to anything other than zero!
-			 */
-			walk_state->return_desc = walk_state->operands[0];
-		} else if (walk_state->result_count) {
-
-			/* Since we have a real Return(), delete any implicit return */
-
-			acpi_ds_clear_implicit_return(walk_state);
-
-			/*
-			 * The return value has come from a previous calculation.
-			 *
-			 * If value being returned is a Reference (such as
-			 * an arg or local), resolve it now because it may
-			 * cease to exist at the end of the method.
-			 *
-			 * Allow references created by the Index operator to return unchanged.
-			 */
-			if ((ACPI_GET_DESCRIPTOR_TYPE
-			     (walk_state->results->results.obj_desc[0]) ==
-			     ACPI_DESC_TYPE_OPERAND)
-			    && ((walk_state->results->results.obj_desc[0])->
-				common.type == ACPI_TYPE_LOCAL_REFERENCE)
-			    && ((walk_state->results->results.obj_desc[0])->
-				reference.class != ACPI_REFCLASS_INDEX)) {
-				status =
-				    acpi_ex_resolve_to_value(&walk_state->
-							     results->results.
-							     obj_desc[0],
-							     walk_state);
-				if (ACPI_FAILURE(status)) {
-					return (status);
-				}
-			}
-
-			walk_state->return_desc =
-			    walk_state->results->results.obj_desc[0];
-		} else {
-			/* No return operand */
-
-			if (walk_state->num_operands) {
-				acpi_ut_remove_reference(walk_state->
-							 operands[0]);
-			}
-
-			walk_state->operands[0] = NULL;
-			walk_state->num_operands = 0;
-			walk_state->return_desc = NULL;
-		}
-
-		ACPI_DEBUG_PRINT((ACPI_DB_DISPATCH,
-				  "Completed RETURN_OP State=%p, RetVal=%p\n",
-				  walk_state, walk_state->return_desc));
-
-		/* End the control method execution right now */
-
-		status = AE_CTRL_TERMINATE;
-		break;
-
-	case AML_NOOP_OP:
-
-		/* Just do nothing! */
-		break;
-
-	case AML_BREAK_POINT_OP:
-
-		/*
-		 * Set the single-step flag. This will cause the debugger (if present)
-		 * to break to the console within the AML debugger at the start of the
-		 * next AML instruction.
-		 */
-		ACPI_DEBUGGER_EXEC(acpi_gbl_cm_single_step = TRUE);
-		ACPI_DEBUGGER_EXEC(acpi_os_printf
-				   ("**break** Executed AML BreakPoint opcode\n"));
-
-		/* Call to the OSL in case OS wants a piece of the action */
-
-		status = acpi_os_signal(ACPI_SIGNAL_BREAKPOINT,
-					"Executed AML Breakpoint opcode");
-		break;
-
-	case AML_BREAK_OP:
-	case AML_CONTINUE_OP:	/* ACPI 2.0 */
-
-		/* Pop and delete control states until we find a while */
-
-		while (walk_state->control_state &&
-		       (walk_state->control_state->control.opcode !=
-			AML_WHILE_OP)) {
-			control_state =
-			    acpi_ut_pop_generic_state(&walk_state->
-						      control_state);
-			acpi_ut_delete_generic_state(control_state);
-		}
-
-		/* No while found? */
-
-		if (!walk_state->control_state) {
-			return (AE_AML_NO_WHILE);
-		}
-
-		/* Was: walk_state->aml_last_while = walk_state->control_state->Control.aml_predicate_start; */
-
-		walk_state->aml_last_while =
-		    walk_state->control_state->control.package_end;
-
-		/* Return status depending on opcode */
-
-		if (op->common.aml_opcode == AML_BREAK_OP) {
-			status = AE_CTRL_BREAK;
-		} else {
-			status = AE_CTRL_CONTINUE;
-		}
-		break;
-
-	default:
-
-		ACPI_ERROR((AE_INFO, "Unknown control opcode=0x%X Op=%p",
-			    op->common.aml_opcode, op));
-
-		status = AE_AML_BAD_OPCODE;
-		break;
-	}
-
-	return (status);
-}
diff --git a/drivers/acpi/acpica/dswload.c b/drivers/acpi/acpica/dswload.c
index 52566ff..23a3b1a 100644
--- a/drivers/acpi/acpica/dswload.c
+++ b/drivers/acpi/acpica/dswload.c
@@ -1,6 +1,6 @@
 /******************************************************************************
  *
- * Module Name: dswload - Dispatcher namespace load callbacks
+ * Module Name: dswload - Dispatcher first pass namespace load callbacks
  *
  *****************************************************************************/
 
@@ -48,7 +48,6 @@
 #include "acdispat.h"
 #include "acinterp.h"
 #include "acnamesp.h"
-#include "acevents.h"
 
 #ifdef ACPI_ASL_COMPILER
 #include <acpi/acdisasm.h>
@@ -537,670 +536,3 @@
 
 	return_ACPI_STATUS(status);
 }
-
-/*******************************************************************************
- *
- * FUNCTION:    acpi_ds_load2_begin_op
- *
- * PARAMETERS:  walk_state      - Current state of the parse tree walk
- *              out_op          - Wher to return op if a new one is created
- *
- * RETURN:      Status
- *
- * DESCRIPTION: Descending callback used during the loading of ACPI tables.
- *
- ******************************************************************************/
-
-acpi_status
-acpi_ds_load2_begin_op(struct acpi_walk_state *walk_state,
-		       union acpi_parse_object **out_op)
-{
-	union acpi_parse_object *op;
-	struct acpi_namespace_node *node;
-	acpi_status status;
-	acpi_object_type object_type;
-	char *buffer_ptr;
-	u32 flags;
-
-	ACPI_FUNCTION_TRACE(ds_load2_begin_op);
-
-	op = walk_state->op;
-	ACPI_DEBUG_PRINT((ACPI_DB_DISPATCH, "Op=%p State=%p\n", op,
-			  walk_state));
-
-	if (op) {
-		if ((walk_state->control_state) &&
-		    (walk_state->control_state->common.state ==
-		     ACPI_CONTROL_CONDITIONAL_EXECUTING)) {
-
-			/* We are executing a while loop outside of a method */
-
-			status = acpi_ds_exec_begin_op(walk_state, out_op);
-			return_ACPI_STATUS(status);
-		}
-
-		/* We only care about Namespace opcodes here */
-
-		if ((!(walk_state->op_info->flags & AML_NSOPCODE) &&
-		     (walk_state->opcode != AML_INT_NAMEPATH_OP)) ||
-		    (!(walk_state->op_info->flags & AML_NAMED))) {
-			return_ACPI_STATUS(AE_OK);
-		}
-
-		/* Get the name we are going to enter or lookup in the namespace */
-
-		if (walk_state->opcode == AML_INT_NAMEPATH_OP) {
-
-			/* For Namepath op, get the path string */
-
-			buffer_ptr = op->common.value.string;
-			if (!buffer_ptr) {
-
-				/* No name, just exit */
-
-				return_ACPI_STATUS(AE_OK);
-			}
-		} else {
-			/* Get name from the op */
-
-			buffer_ptr = ACPI_CAST_PTR(char, &op->named.name);
-		}
-	} else {
-		/* Get the namestring from the raw AML */
-
-		buffer_ptr =
-		    acpi_ps_get_next_namestring(&walk_state->parser_state);
-	}
-
-	/* Map the opcode into an internal object type */
-
-	object_type = walk_state->op_info->object_type;
-
-	ACPI_DEBUG_PRINT((ACPI_DB_DISPATCH,
-			  "State=%p Op=%p Type=%X\n", walk_state, op,
-			  object_type));
-
-	switch (walk_state->opcode) {
-	case AML_FIELD_OP:
-	case AML_BANK_FIELD_OP:
-	case AML_INDEX_FIELD_OP:
-
-		node = NULL;
-		status = AE_OK;
-		break;
-
-	case AML_INT_NAMEPATH_OP:
-		/*
-		 * The name_path is an object reference to an existing object.
-		 * Don't enter the name into the namespace, but look it up
-		 * for use later.
-		 */
-		status =
-		    acpi_ns_lookup(walk_state->scope_info, buffer_ptr,
-				   object_type, ACPI_IMODE_EXECUTE,
-				   ACPI_NS_SEARCH_PARENT, walk_state, &(node));
-		break;
-
-	case AML_SCOPE_OP:
-
-		/* Special case for Scope(\) -> refers to the Root node */
-
-		if (op && (op->named.node == acpi_gbl_root_node)) {
-			node = op->named.node;
-
-			status =
-			    acpi_ds_scope_stack_push(node, object_type,
-						     walk_state);
-			if (ACPI_FAILURE(status)) {
-				return_ACPI_STATUS(status);
-			}
-		} else {
-			/*
-			 * The Path is an object reference to an existing object.
-			 * Don't enter the name into the namespace, but look it up
-			 * for use later.
-			 */
-			status =
-			    acpi_ns_lookup(walk_state->scope_info, buffer_ptr,
-					   object_type, ACPI_IMODE_EXECUTE,
-					   ACPI_NS_SEARCH_PARENT, walk_state,
-					   &(node));
-			if (ACPI_FAILURE(status)) {
-#ifdef ACPI_ASL_COMPILER
-				if (status == AE_NOT_FOUND) {
-					status = AE_OK;
-				} else {
-					ACPI_ERROR_NAMESPACE(buffer_ptr,
-							     status);
-				}
-#else
-				ACPI_ERROR_NAMESPACE(buffer_ptr, status);
-#endif
-				return_ACPI_STATUS(status);
-			}
-		}
-
-		/*
-		 * We must check to make sure that the target is
-		 * one of the opcodes that actually opens a scope
-		 */
-		switch (node->type) {
-		case ACPI_TYPE_ANY:
-		case ACPI_TYPE_LOCAL_SCOPE:	/* Scope */
-		case ACPI_TYPE_DEVICE:
-		case ACPI_TYPE_POWER:
-		case ACPI_TYPE_PROCESSOR:
-		case ACPI_TYPE_THERMAL:
-
-			/* These are acceptable types */
-			break;
-
-		case ACPI_TYPE_INTEGER:
-		case ACPI_TYPE_STRING:
-		case ACPI_TYPE_BUFFER:
-
-			/*
-			 * These types we will allow, but we will change the type.
-			 * This enables some existing code of the form:
-			 *
-			 *  Name (DEB, 0)
-			 *  Scope (DEB) { ... }
-			 */
-			ACPI_WARNING((AE_INFO,
-				      "Type override - [%4.4s] had invalid type (%s) "
-				      "for Scope operator, changed to type ANY\n",
-				      acpi_ut_get_node_name(node),
-				      acpi_ut_get_type_name(node->type)));
-
-			node->type = ACPI_TYPE_ANY;
-			walk_state->scope_info->common.value = ACPI_TYPE_ANY;
-			break;
-
-		default:
-
-			/* All other types are an error */
-
-			ACPI_ERROR((AE_INFO,
-				    "Invalid type (%s) for target of "
-				    "Scope operator [%4.4s] (Cannot override)",
-				    acpi_ut_get_type_name(node->type),
-				    acpi_ut_get_node_name(node)));
-
-			return (AE_AML_OPERAND_TYPE);
-		}
-		break;
-
-	default:
-
-		/* All other opcodes */
-
-		if (op && op->common.node) {
-
-			/* This op/node was previously entered into the namespace */
-
-			node = op->common.node;
-
-			if (acpi_ns_opens_scope(object_type)) {
-				status =
-				    acpi_ds_scope_stack_push(node, object_type,
-							     walk_state);
-				if (ACPI_FAILURE(status)) {
-					return_ACPI_STATUS(status);
-				}
-			}
-
-			return_ACPI_STATUS(AE_OK);
-		}
-
-		/*
-		 * Enter the named type into the internal namespace. We enter the name
-		 * as we go downward in the parse tree. Any necessary subobjects that
-		 * involve arguments to the opcode must be created as we go back up the
-		 * parse tree later.
-		 *
-		 * Note: Name may already exist if we are executing a deferred opcode.
-		 */
-		if (walk_state->deferred_node) {
-
-			/* This name is already in the namespace, get the node */
-
-			node = walk_state->deferred_node;
-			status = AE_OK;
-			break;
-		}
-
-		flags = ACPI_NS_NO_UPSEARCH;
-		if (walk_state->pass_number == ACPI_IMODE_EXECUTE) {
-
-			/* Execution mode, node cannot already exist, node is temporary */
-
-			flags |= ACPI_NS_ERROR_IF_FOUND;
-
-			if (!
-			    (walk_state->
-			     parse_flags & ACPI_PARSE_MODULE_LEVEL)) {
-				flags |= ACPI_NS_TEMPORARY;
-			}
-		}
-
-		/* Add new entry or lookup existing entry */
-
-		status =
-		    acpi_ns_lookup(walk_state->scope_info, buffer_ptr,
-				   object_type, ACPI_IMODE_LOAD_PASS2, flags,
-				   walk_state, &node);
-
-		if (ACPI_SUCCESS(status) && (flags & ACPI_NS_TEMPORARY)) {
-			ACPI_DEBUG_PRINT((ACPI_DB_DISPATCH,
-					  "***New Node [%4.4s] %p is temporary\n",
-					  acpi_ut_get_node_name(node), node));
-		}
-		break;
-	}
-
-	if (ACPI_FAILURE(status)) {
-		ACPI_ERROR_NAMESPACE(buffer_ptr, status);
-		return_ACPI_STATUS(status);
-	}
-
-	if (!op) {
-
-		/* Create a new op */
-
-		op = acpi_ps_alloc_op(walk_state->opcode);
-		if (!op) {
-			return_ACPI_STATUS(AE_NO_MEMORY);
-		}
-
-		/* Initialize the new op */
-
-		if (node) {
-			op->named.name = node->name.integer;
-		}
-		*out_op = op;
-	}
-
-	/*
-	 * Put the Node in the "op" object that the parser uses, so we
-	 * can get it again quickly when this scope is closed
-	 */
-	op->common.node = node;
-	return_ACPI_STATUS(status);
-}
-
-/*******************************************************************************
- *
- * FUNCTION:    acpi_ds_load2_end_op
- *
- * PARAMETERS:  walk_state      - Current state of the parse tree walk
- *
- * RETURN:      Status
- *
- * DESCRIPTION: Ascending callback used during the loading of the namespace,
- *              both control methods and everything else.
- *
- ******************************************************************************/
-
-acpi_status acpi_ds_load2_end_op(struct acpi_walk_state *walk_state)
-{
-	union acpi_parse_object *op;
-	acpi_status status = AE_OK;
-	acpi_object_type object_type;
-	struct acpi_namespace_node *node;
-	union acpi_parse_object *arg;
-	struct acpi_namespace_node *new_node;
-#ifndef ACPI_NO_METHOD_EXECUTION
-	u32 i;
-	u8 region_space;
-#endif
-
-	ACPI_FUNCTION_TRACE(ds_load2_end_op);
-
-	op = walk_state->op;
-	ACPI_DEBUG_PRINT((ACPI_DB_DISPATCH, "Opcode [%s] Op %p State %p\n",
-			  walk_state->op_info->name, op, walk_state));
-
-	/* Check if opcode had an associated namespace object */
-
-	if (!(walk_state->op_info->flags & AML_NSOBJECT)) {
-		return_ACPI_STATUS(AE_OK);
-	}
-
-	if (op->common.aml_opcode == AML_SCOPE_OP) {
-		ACPI_DEBUG_PRINT((ACPI_DB_DISPATCH,
-				  "Ending scope Op=%p State=%p\n", op,
-				  walk_state));
-	}
-
-	object_type = walk_state->op_info->object_type;
-
-	/*
-	 * Get the Node/name from the earlier lookup
-	 * (It was saved in the *op structure)
-	 */
-	node = op->common.node;
-
-	/*
-	 * Put the Node on the object stack (Contains the ACPI Name of
-	 * this object)
-	 */
-	walk_state->operands[0] = (void *)node;
-	walk_state->num_operands = 1;
-
-	/* Pop the scope stack */
-
-	if (acpi_ns_opens_scope(object_type) &&
-	    (op->common.aml_opcode != AML_INT_METHODCALL_OP)) {
-		ACPI_DEBUG_PRINT((ACPI_DB_DISPATCH,
-				  "(%s) Popping scope for Op %p\n",
-				  acpi_ut_get_type_name(object_type), op));
-
-		status = acpi_ds_scope_stack_pop(walk_state);
-		if (ACPI_FAILURE(status)) {
-			goto cleanup;
-		}
-	}
-
-	/*
-	 * Named operations are as follows:
-	 *
-	 * AML_ALIAS
-	 * AML_BANKFIELD
-	 * AML_CREATEBITFIELD
-	 * AML_CREATEBYTEFIELD
-	 * AML_CREATEDWORDFIELD
-	 * AML_CREATEFIELD
-	 * AML_CREATEQWORDFIELD
-	 * AML_CREATEWORDFIELD
-	 * AML_DATA_REGION
-	 * AML_DEVICE
-	 * AML_EVENT
-	 * AML_FIELD
-	 * AML_INDEXFIELD
-	 * AML_METHOD
-	 * AML_METHODCALL
-	 * AML_MUTEX
-	 * AML_NAME
-	 * AML_NAMEDFIELD
-	 * AML_OPREGION
-	 * AML_POWERRES
-	 * AML_PROCESSOR
-	 * AML_SCOPE
-	 * AML_THERMALZONE
-	 */
-
-	ACPI_DEBUG_PRINT((ACPI_DB_DISPATCH,
-			  "Create-Load [%s] State=%p Op=%p NamedObj=%p\n",
-			  acpi_ps_get_opcode_name(op->common.aml_opcode),
-			  walk_state, op, node));
-
-	/* Decode the opcode */
-
-	arg = op->common.value.arg;
-
-	switch (walk_state->op_info->type) {
-#ifndef ACPI_NO_METHOD_EXECUTION
-
-	case AML_TYPE_CREATE_FIELD:
-		/*
-		 * Create the field object, but the field buffer and index must
-		 * be evaluated later during the execution phase
-		 */
-		status = acpi_ds_create_buffer_field(op, walk_state);
-		break;
-
-	case AML_TYPE_NAMED_FIELD:
-		/*
-		 * If we are executing a method, initialize the field
-		 */
-		if (walk_state->method_node) {
-			status = acpi_ds_init_field_objects(op, walk_state);
-		}
-
-		switch (op->common.aml_opcode) {
-		case AML_INDEX_FIELD_OP:
-
-			status =
-			    acpi_ds_create_index_field(op,
-						       (acpi_handle) arg->
-						       common.node, walk_state);
-			break;
-
-		case AML_BANK_FIELD_OP:
-
-			status =
-			    acpi_ds_create_bank_field(op, arg->common.node,
-						      walk_state);
-			break;
-
-		case AML_FIELD_OP:
-
-			status =
-			    acpi_ds_create_field(op, arg->common.node,
-						 walk_state);
-			break;
-
-		default:
-			/* All NAMED_FIELD opcodes must be handled above */
-			break;
-		}
-		break;
-
-	case AML_TYPE_NAMED_SIMPLE:
-
-		status = acpi_ds_create_operands(walk_state, arg);
-		if (ACPI_FAILURE(status)) {
-			goto cleanup;
-		}
-
-		switch (op->common.aml_opcode) {
-		case AML_PROCESSOR_OP:
-
-			status = acpi_ex_create_processor(walk_state);
-			break;
-
-		case AML_POWER_RES_OP:
-
-			status = acpi_ex_create_power_resource(walk_state);
-			break;
-
-		case AML_MUTEX_OP:
-
-			status = acpi_ex_create_mutex(walk_state);
-			break;
-
-		case AML_EVENT_OP:
-
-			status = acpi_ex_create_event(walk_state);
-			break;
-
-		case AML_ALIAS_OP:
-
-			status = acpi_ex_create_alias(walk_state);
-			break;
-
-		default:
-			/* Unknown opcode */
-
-			status = AE_OK;
-			goto cleanup;
-		}
-
-		/* Delete operands */
-
-		for (i = 1; i < walk_state->num_operands; i++) {
-			acpi_ut_remove_reference(walk_state->operands[i]);
-			walk_state->operands[i] = NULL;
-		}
-
-		break;
-#endif				/* ACPI_NO_METHOD_EXECUTION */
-
-	case AML_TYPE_NAMED_COMPLEX:
-
-		switch (op->common.aml_opcode) {
-#ifndef ACPI_NO_METHOD_EXECUTION
-		case AML_REGION_OP:
-		case AML_DATA_REGION_OP:
-
-			if (op->common.aml_opcode == AML_REGION_OP) {
-				region_space = (acpi_adr_space_type)
-				    ((op->common.value.arg)->common.value.
-				     integer);
-			} else {
-				region_space = REGION_DATA_TABLE;
-			}
-
-			/*
-			 * The op_region is not fully parsed at this time. The only valid
-			 * argument is the space_id. (We must save the address of the
-			 * AML of the address and length operands)
-			 *
-			 * If we have a valid region, initialize it. The namespace is
-			 * unlocked at this point.
-			 *
-			 * Need to unlock interpreter if it is locked (if we are running
-			 * a control method), in order to allow _REG methods to be run
-			 * during acpi_ev_initialize_region.
-			 */
-			if (walk_state->method_node) {
-				/*
-				 * Executing a method: initialize the region and unlock
-				 * the interpreter
-				 */
-				status =
-				    acpi_ex_create_region(op->named.data,
-							  op->named.length,
-							  region_space,
-							  walk_state);
-				if (ACPI_FAILURE(status)) {
-					return (status);
-				}
-
-				acpi_ex_exit_interpreter();
-			}
-
-			status =
-			    acpi_ev_initialize_region
-			    (acpi_ns_get_attached_object(node), FALSE);
-			if (walk_state->method_node) {
-				acpi_ex_enter_interpreter();
-			}
-
-			if (ACPI_FAILURE(status)) {
-				/*
-				 *  If AE_NOT_EXIST is returned, it is not fatal
-				 *  because many regions get created before a handler
-				 *  is installed for said region.
-				 */
-				if (AE_NOT_EXIST == status) {
-					status = AE_OK;
-				}
-			}
-			break;
-
-		case AML_NAME_OP:
-
-			status = acpi_ds_create_node(walk_state, node, op);
-			break;
-
-		case AML_METHOD_OP:
-			/*
-			 * method_op pkg_length name_string method_flags term_list
-			 *
-			 * Note: We must create the method node/object pair as soon as we
-			 * see the method declaration. This allows later pass1 parsing
-			 * of invocations of the method (need to know the number of
-			 * arguments.)
-			 */
-			ACPI_DEBUG_PRINT((ACPI_DB_DISPATCH,
-					  "LOADING-Method: State=%p Op=%p NamedObj=%p\n",
-					  walk_state, op, op->named.node));
-
-			if (!acpi_ns_get_attached_object(op->named.node)) {
-				walk_state->operands[0] =
-				    ACPI_CAST_PTR(void, op->named.node);
-				walk_state->num_operands = 1;
-
-				status =
-				    acpi_ds_create_operands(walk_state,
-							    op->common.value.
-							    arg);
-				if (ACPI_SUCCESS(status)) {
-					status =
-					    acpi_ex_create_method(op->named.
-								  data,
-								  op->named.
-								  length,
-								  walk_state);
-				}
-				walk_state->operands[0] = NULL;
-				walk_state->num_operands = 0;
-
-				if (ACPI_FAILURE(status)) {
-					return_ACPI_STATUS(status);
-				}
-			}
-			break;
-
-#endif				/* ACPI_NO_METHOD_EXECUTION */
-
-		default:
-			/* All NAMED_COMPLEX opcodes must be handled above */
-			break;
-		}
-		break;
-
-	case AML_CLASS_INTERNAL:
-
-		/* case AML_INT_NAMEPATH_OP: */
-		break;
-
-	case AML_CLASS_METHOD_CALL:
-
-		ACPI_DEBUG_PRINT((ACPI_DB_DISPATCH,
-				  "RESOLVING-MethodCall: State=%p Op=%p NamedObj=%p\n",
-				  walk_state, op, node));
-
-		/*
-		 * Lookup the method name and save the Node
-		 */
-		status =
-		    acpi_ns_lookup(walk_state->scope_info,
-				   arg->common.value.string, ACPI_TYPE_ANY,
-				   ACPI_IMODE_LOAD_PASS2,
-				   ACPI_NS_SEARCH_PARENT |
-				   ACPI_NS_DONT_OPEN_SCOPE, walk_state,
-				   &(new_node));
-		if (ACPI_SUCCESS(status)) {
-			/*
-			 * Make sure that what we found is indeed a method
-			 * We didn't search for a method on purpose, to see if the name
-			 * would resolve
-			 */
-			if (new_node->type != ACPI_TYPE_METHOD) {
-				status = AE_AML_OPERAND_TYPE;
-			}
-
-			/* We could put the returned object (Node) on the object stack for
-			 * later, but for now, we will put it in the "op" object that the
-			 * parser uses, so we can get it again at the end of this scope
-			 */
-			op->common.node = new_node;
-		} else {
-			ACPI_ERROR_NAMESPACE(arg->common.value.string, status);
-		}
-		break;
-
-	default:
-		break;
-	}
-
-      cleanup:
-
-	/* Remove the Node pushed at the very beginning */
-
-	walk_state->operands[0] = NULL;
-	walk_state->num_operands = 0;
-	return_ACPI_STATUS(status);
-}
diff --git a/drivers/acpi/acpica/dswload2.c b/drivers/acpi/acpica/dswload2.c
new file mode 100644
index 0000000..4be4e92
--- /dev/null
+++ b/drivers/acpi/acpica/dswload2.c
@@ -0,0 +1,720 @@
+/******************************************************************************
+ *
+ * Module Name: dswload2 - Dispatcher second pass namespace load callbacks
+ *
+ *****************************************************************************/
+
+/*
+ * Copyright (C) 2000 - 2011, Intel Corp.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions, and the following disclaimer,
+ *    without modification.
+ * 2. Redistributions in binary form must reproduce at minimum a disclaimer
+ *    substantially similar to the "NO WARRANTY" disclaimer below
+ *    ("Disclaimer") and any redistribution must be conditioned upon
+ *    including a substantially similar Disclaimer requirement for further
+ *    binary redistribution.
+ * 3. Neither the names of the above-listed copyright holders nor the names
+ *    of any contributors may be used to endorse or promote products derived
+ *    from this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * NO WARRANTY
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+ * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
+ * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGES.
+ */
+
+#include <acpi/acpi.h>
+#include "accommon.h"
+#include "acparser.h"
+#include "amlcode.h"
+#include "acdispat.h"
+#include "acinterp.h"
+#include "acnamesp.h"
+#include "acevents.h"
+
+#define _COMPONENT          ACPI_DISPATCHER
+ACPI_MODULE_NAME("dswload2")
+
+/*******************************************************************************
+ *
+ * FUNCTION:    acpi_ds_load2_begin_op
+ *
+ * PARAMETERS:  walk_state      - Current state of the parse tree walk
+ *              out_op          - Where to return op if a new one is created
+ *
+ * RETURN:      Status
+ *
+ * DESCRIPTION: Descending callback used during the loading of ACPI tables.
+ *
+ ******************************************************************************/
+acpi_status
+acpi_ds_load2_begin_op(struct acpi_walk_state *walk_state,
+		       union acpi_parse_object **out_op)
+{
+	union acpi_parse_object *op;
+	struct acpi_namespace_node *node;
+	acpi_status status;
+	acpi_object_type object_type;
+	char *buffer_ptr;
+	u32 flags;
+
+	ACPI_FUNCTION_TRACE(ds_load2_begin_op);
+
+	op = walk_state->op;
+	ACPI_DEBUG_PRINT((ACPI_DB_DISPATCH, "Op=%p State=%p\n", op,
+			  walk_state));
+
+	if (op) {
+		if ((walk_state->control_state) &&
+		    (walk_state->control_state->common.state ==
+		     ACPI_CONTROL_CONDITIONAL_EXECUTING)) {
+
+			/* We are executing a while loop outside of a method */
+
+			status = acpi_ds_exec_begin_op(walk_state, out_op);
+			return_ACPI_STATUS(status);
+		}
+
+		/* We only care about Namespace opcodes here */
+
+		if ((!(walk_state->op_info->flags & AML_NSOPCODE) &&
+		     (walk_state->opcode != AML_INT_NAMEPATH_OP)) ||
+		    (!(walk_state->op_info->flags & AML_NAMED))) {
+			return_ACPI_STATUS(AE_OK);
+		}
+
+		/* Get the name we are going to enter or lookup in the namespace */
+
+		if (walk_state->opcode == AML_INT_NAMEPATH_OP) {
+
+			/* For Namepath op, get the path string */
+
+			buffer_ptr = op->common.value.string;
+			if (!buffer_ptr) {
+
+				/* No name, just exit */
+
+				return_ACPI_STATUS(AE_OK);
+			}
+		} else {
+			/* Get name from the op */
+
+			buffer_ptr = ACPI_CAST_PTR(char, &op->named.name);
+		}
+	} else {
+		/* Get the namestring from the raw AML */
+
+		buffer_ptr =
+		    acpi_ps_get_next_namestring(&walk_state->parser_state);
+	}
+
+	/* Map the opcode into an internal object type */
+
+	object_type = walk_state->op_info->object_type;
+
+	ACPI_DEBUG_PRINT((ACPI_DB_DISPATCH,
+			  "State=%p Op=%p Type=%X\n", walk_state, op,
+			  object_type));
+
+	switch (walk_state->opcode) {
+	case AML_FIELD_OP:
+	case AML_BANK_FIELD_OP:
+	case AML_INDEX_FIELD_OP:
+
+		node = NULL;
+		status = AE_OK;
+		break;
+
+	case AML_INT_NAMEPATH_OP:
+		/*
+		 * The name_path is an object reference to an existing object.
+		 * Don't enter the name into the namespace, but look it up
+		 * for use later.
+		 */
+		status =
+		    acpi_ns_lookup(walk_state->scope_info, buffer_ptr,
+				   object_type, ACPI_IMODE_EXECUTE,
+				   ACPI_NS_SEARCH_PARENT, walk_state, &(node));
+		break;
+
+	case AML_SCOPE_OP:
+
+		/* Special case for Scope(\) -> refers to the Root node */
+
+		if (op && (op->named.node == acpi_gbl_root_node)) {
+			node = op->named.node;
+
+			status =
+			    acpi_ds_scope_stack_push(node, object_type,
+						     walk_state);
+			if (ACPI_FAILURE(status)) {
+				return_ACPI_STATUS(status);
+			}
+		} else {
+			/*
+			 * The Path is an object reference to an existing object.
+			 * Don't enter the name into the namespace, but look it up
+			 * for use later.
+			 */
+			status =
+			    acpi_ns_lookup(walk_state->scope_info, buffer_ptr,
+					   object_type, ACPI_IMODE_EXECUTE,
+					   ACPI_NS_SEARCH_PARENT, walk_state,
+					   &(node));
+			if (ACPI_FAILURE(status)) {
+#ifdef ACPI_ASL_COMPILER
+				if (status == AE_NOT_FOUND) {
+					status = AE_OK;
+				} else {
+					ACPI_ERROR_NAMESPACE(buffer_ptr,
+							     status);
+				}
+#else
+				ACPI_ERROR_NAMESPACE(buffer_ptr, status);
+#endif
+				return_ACPI_STATUS(status);
+			}
+		}
+
+		/*
+		 * We must check to make sure that the target is
+		 * one of the opcodes that actually opens a scope
+		 */
+		switch (node->type) {
+		case ACPI_TYPE_ANY:
+		case ACPI_TYPE_LOCAL_SCOPE:	/* Scope */
+		case ACPI_TYPE_DEVICE:
+		case ACPI_TYPE_POWER:
+		case ACPI_TYPE_PROCESSOR:
+		case ACPI_TYPE_THERMAL:
+
+			/* These are acceptable types */
+			break;
+
+		case ACPI_TYPE_INTEGER:
+		case ACPI_TYPE_STRING:
+		case ACPI_TYPE_BUFFER:
+
+			/*
+			 * These types we will allow, but we will change the type.
+			 * This enables some existing code of the form:
+			 *
+			 *  Name (DEB, 0)
+			 *  Scope (DEB) { ... }
+			 */
+			ACPI_WARNING((AE_INFO,
+				      "Type override - [%4.4s] had invalid type (%s) "
+				      "for Scope operator, changed to type ANY\n",
+				      acpi_ut_get_node_name(node),
+				      acpi_ut_get_type_name(node->type)));
+
+			node->type = ACPI_TYPE_ANY;
+			walk_state->scope_info->common.value = ACPI_TYPE_ANY;
+			break;
+
+		default:
+
+			/* All other types are an error */
+
+			ACPI_ERROR((AE_INFO,
+				    "Invalid type (%s) for target of "
+				    "Scope operator [%4.4s] (Cannot override)",
+				    acpi_ut_get_type_name(node->type),
+				    acpi_ut_get_node_name(node)));
+
+			return (AE_AML_OPERAND_TYPE);
+		}
+		break;
+
+	default:
+
+		/* All other opcodes */
+
+		if (op && op->common.node) {
+
+			/* This op/node was previously entered into the namespace */
+
+			node = op->common.node;
+
+			if (acpi_ns_opens_scope(object_type)) {
+				status =
+				    acpi_ds_scope_stack_push(node, object_type,
+							     walk_state);
+				if (ACPI_FAILURE(status)) {
+					return_ACPI_STATUS(status);
+				}
+			}
+
+			return_ACPI_STATUS(AE_OK);
+		}
+
+		/*
+		 * Enter the named type into the internal namespace. We enter the name
+		 * as we go downward in the parse tree. Any necessary subobjects that
+		 * involve arguments to the opcode must be created as we go back up the
+		 * parse tree later.
+		 *
+		 * Note: Name may already exist if we are executing a deferred opcode.
+		 */
+		if (walk_state->deferred_node) {
+
+			/* This name is already in the namespace, get the node */
+
+			node = walk_state->deferred_node;
+			status = AE_OK;
+			break;
+		}
+
+		flags = ACPI_NS_NO_UPSEARCH;
+		if (walk_state->pass_number == ACPI_IMODE_EXECUTE) {
+
+			/* Execution mode, node cannot already exist, node is temporary */
+
+			flags |= ACPI_NS_ERROR_IF_FOUND;
+
+			if (!
+			    (walk_state->
+			     parse_flags & ACPI_PARSE_MODULE_LEVEL)) {
+				flags |= ACPI_NS_TEMPORARY;
+			}
+		}
+
+		/* Add new entry or lookup existing entry */
+
+		status =
+		    acpi_ns_lookup(walk_state->scope_info, buffer_ptr,
+				   object_type, ACPI_IMODE_LOAD_PASS2, flags,
+				   walk_state, &node);
+
+		if (ACPI_SUCCESS(status) && (flags & ACPI_NS_TEMPORARY)) {
+			ACPI_DEBUG_PRINT((ACPI_DB_DISPATCH,
+					  "***New Node [%4.4s] %p is temporary\n",
+					  acpi_ut_get_node_name(node), node));
+		}
+		break;
+	}
+
+	if (ACPI_FAILURE(status)) {
+		ACPI_ERROR_NAMESPACE(buffer_ptr, status);
+		return_ACPI_STATUS(status);
+	}
+
+	if (!op) {
+
+		/* Create a new op */
+
+		op = acpi_ps_alloc_op(walk_state->opcode);
+		if (!op) {
+			return_ACPI_STATUS(AE_NO_MEMORY);
+		}
+
+		/* Initialize the new op */
+
+		if (node) {
+			op->named.name = node->name.integer;
+		}
+		*out_op = op;
+	}
+
+	/*
+	 * Put the Node in the "op" object that the parser uses, so we
+	 * can get it again quickly when this scope is closed
+	 */
+	op->common.node = node;
+	return_ACPI_STATUS(status);
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION:    acpi_ds_load2_end_op
+ *
+ * PARAMETERS:  walk_state      - Current state of the parse tree walk
+ *
+ * RETURN:      Status
+ *
+ * DESCRIPTION: Ascending callback used during the loading of the namespace,
+ *              both control methods and everything else.
+ *
+ ******************************************************************************/
+
+acpi_status acpi_ds_load2_end_op(struct acpi_walk_state *walk_state)
+{
+	union acpi_parse_object *op;
+	acpi_status status = AE_OK;
+	acpi_object_type object_type;
+	struct acpi_namespace_node *node;
+	union acpi_parse_object *arg;
+	struct acpi_namespace_node *new_node;
+#ifndef ACPI_NO_METHOD_EXECUTION
+	u32 i;
+	u8 region_space;
+#endif
+
+	ACPI_FUNCTION_TRACE(ds_load2_end_op);
+
+	op = walk_state->op;
+	ACPI_DEBUG_PRINT((ACPI_DB_DISPATCH, "Opcode [%s] Op %p State %p\n",
+			  walk_state->op_info->name, op, walk_state));
+
+	/* Check if opcode had an associated namespace object */
+
+	if (!(walk_state->op_info->flags & AML_NSOBJECT)) {
+		return_ACPI_STATUS(AE_OK);
+	}
+
+	if (op->common.aml_opcode == AML_SCOPE_OP) {
+		ACPI_DEBUG_PRINT((ACPI_DB_DISPATCH,
+				  "Ending scope Op=%p State=%p\n", op,
+				  walk_state));
+	}
+
+	object_type = walk_state->op_info->object_type;
+
+	/*
+	 * Get the Node/name from the earlier lookup
+	 * (It was saved in the *op structure)
+	 */
+	node = op->common.node;
+
+	/*
+	 * Put the Node on the object stack (Contains the ACPI Name of
+	 * this object)
+	 */
+	walk_state->operands[0] = (void *)node;
+	walk_state->num_operands = 1;
+
+	/* Pop the scope stack */
+
+	if (acpi_ns_opens_scope(object_type) &&
+	    (op->common.aml_opcode != AML_INT_METHODCALL_OP)) {
+		ACPI_DEBUG_PRINT((ACPI_DB_DISPATCH,
+				  "(%s) Popping scope for Op %p\n",
+				  acpi_ut_get_type_name(object_type), op));
+
+		status = acpi_ds_scope_stack_pop(walk_state);
+		if (ACPI_FAILURE(status)) {
+			goto cleanup;
+		}
+	}
+
+	/*
+	 * Named operations are as follows:
+	 *
+	 * AML_ALIAS
+	 * AML_BANKFIELD
+	 * AML_CREATEBITFIELD
+	 * AML_CREATEBYTEFIELD
+	 * AML_CREATEDWORDFIELD
+	 * AML_CREATEFIELD
+	 * AML_CREATEQWORDFIELD
+	 * AML_CREATEWORDFIELD
+	 * AML_DATA_REGION
+	 * AML_DEVICE
+	 * AML_EVENT
+	 * AML_FIELD
+	 * AML_INDEXFIELD
+	 * AML_METHOD
+	 * AML_METHODCALL
+	 * AML_MUTEX
+	 * AML_NAME
+	 * AML_NAMEDFIELD
+	 * AML_OPREGION
+	 * AML_POWERRES
+	 * AML_PROCESSOR
+	 * AML_SCOPE
+	 * AML_THERMALZONE
+	 */
+
+	ACPI_DEBUG_PRINT((ACPI_DB_DISPATCH,
+			  "Create-Load [%s] State=%p Op=%p NamedObj=%p\n",
+			  acpi_ps_get_opcode_name(op->common.aml_opcode),
+			  walk_state, op, node));
+
+	/* Decode the opcode */
+
+	arg = op->common.value.arg;
+
+	switch (walk_state->op_info->type) {
+#ifndef ACPI_NO_METHOD_EXECUTION
+
+	case AML_TYPE_CREATE_FIELD:
+		/*
+		 * Create the field object, but the field buffer and index must
+		 * be evaluated later during the execution phase
+		 */
+		status = acpi_ds_create_buffer_field(op, walk_state);
+		break;
+
+	case AML_TYPE_NAMED_FIELD:
+		/*
+		 * If we are executing a method, initialize the field
+		 */
+		if (walk_state->method_node) {
+			status = acpi_ds_init_field_objects(op, walk_state);
+		}
+
+		switch (op->common.aml_opcode) {
+		case AML_INDEX_FIELD_OP:
+
+			status =
+			    acpi_ds_create_index_field(op,
+						       (acpi_handle) arg->
+						       common.node, walk_state);
+			break;
+
+		case AML_BANK_FIELD_OP:
+
+			status =
+			    acpi_ds_create_bank_field(op, arg->common.node,
+						      walk_state);
+			break;
+
+		case AML_FIELD_OP:
+
+			status =
+			    acpi_ds_create_field(op, arg->common.node,
+						 walk_state);
+			break;
+
+		default:
+			/* All NAMED_FIELD opcodes must be handled above */
+			break;
+		}
+		break;
+
+	case AML_TYPE_NAMED_SIMPLE:
+
+		status = acpi_ds_create_operands(walk_state, arg);
+		if (ACPI_FAILURE(status)) {
+			goto cleanup;
+		}
+
+		switch (op->common.aml_opcode) {
+		case AML_PROCESSOR_OP:
+
+			status = acpi_ex_create_processor(walk_state);
+			break;
+
+		case AML_POWER_RES_OP:
+
+			status = acpi_ex_create_power_resource(walk_state);
+			break;
+
+		case AML_MUTEX_OP:
+
+			status = acpi_ex_create_mutex(walk_state);
+			break;
+
+		case AML_EVENT_OP:
+
+			status = acpi_ex_create_event(walk_state);
+			break;
+
+		case AML_ALIAS_OP:
+
+			status = acpi_ex_create_alias(walk_state);
+			break;
+
+		default:
+			/* Unknown opcode */
+
+			status = AE_OK;
+			goto cleanup;
+		}
+
+		/* Delete operands */
+
+		for (i = 1; i < walk_state->num_operands; i++) {
+			acpi_ut_remove_reference(walk_state->operands[i]);
+			walk_state->operands[i] = NULL;
+		}
+
+		break;
+#endif				/* ACPI_NO_METHOD_EXECUTION */
+
+	case AML_TYPE_NAMED_COMPLEX:
+
+		switch (op->common.aml_opcode) {
+#ifndef ACPI_NO_METHOD_EXECUTION
+		case AML_REGION_OP:
+		case AML_DATA_REGION_OP:
+
+			if (op->common.aml_opcode == AML_REGION_OP) {
+				region_space = (acpi_adr_space_type)
+				    ((op->common.value.arg)->common.value.
+				     integer);
+			} else {
+				region_space = REGION_DATA_TABLE;
+			}
+
+			/*
+			 * The op_region is not fully parsed at this time. The only valid
+			 * argument is the space_id. (We must save the address of the
+			 * AML of the address and length operands)
+			 *
+			 * If we have a valid region, initialize it. The namespace is
+			 * unlocked at this point.
+			 *
+			 * Need to unlock interpreter if it is locked (if we are running
+			 * a control method), in order to allow _REG methods to be run
+			 * during acpi_ev_initialize_region.
+			 */
+			if (walk_state->method_node) {
+				/*
+				 * Executing a method: initialize the region and unlock
+				 * the interpreter
+				 */
+				status =
+				    acpi_ex_create_region(op->named.data,
+							  op->named.length,
+							  region_space,
+							  walk_state);
+				if (ACPI_FAILURE(status)) {
+					return (status);
+				}
+
+				acpi_ex_exit_interpreter();
+			}
+
+			status =
+			    acpi_ev_initialize_region
+			    (acpi_ns_get_attached_object(node), FALSE);
+			if (walk_state->method_node) {
+				acpi_ex_enter_interpreter();
+			}
+
+			if (ACPI_FAILURE(status)) {
+				/*
+				 *  If AE_NOT_EXIST is returned, it is not fatal
+				 *  because many regions get created before a handler
+				 *  is installed for said region.
+				 */
+				if (AE_NOT_EXIST == status) {
+					status = AE_OK;
+				}
+			}
+			break;
+
+		case AML_NAME_OP:
+
+			status = acpi_ds_create_node(walk_state, node, op);
+			break;
+
+		case AML_METHOD_OP:
+			/*
+			 * method_op pkg_length name_string method_flags term_list
+			 *
+			 * Note: We must create the method node/object pair as soon as we
+			 * see the method declaration. This allows later pass1 parsing
+			 * of invocations of the method (need to know the number of
+			 * arguments.)
+			 */
+			ACPI_DEBUG_PRINT((ACPI_DB_DISPATCH,
+					  "LOADING-Method: State=%p Op=%p NamedObj=%p\n",
+					  walk_state, op, op->named.node));
+
+			if (!acpi_ns_get_attached_object(op->named.node)) {
+				walk_state->operands[0] =
+				    ACPI_CAST_PTR(void, op->named.node);
+				walk_state->num_operands = 1;
+
+				status =
+				    acpi_ds_create_operands(walk_state,
+							    op->common.value.
+							    arg);
+				if (ACPI_SUCCESS(status)) {
+					status =
+					    acpi_ex_create_method(op->named.
+								  data,
+								  op->named.
+								  length,
+								  walk_state);
+				}
+				walk_state->operands[0] = NULL;
+				walk_state->num_operands = 0;
+
+				if (ACPI_FAILURE(status)) {
+					return_ACPI_STATUS(status);
+				}
+			}
+			break;
+
+#endif				/* ACPI_NO_METHOD_EXECUTION */
+
+		default:
+			/* All NAMED_COMPLEX opcodes must be handled above */
+			break;
+		}
+		break;
+
+	case AML_CLASS_INTERNAL:
+
+		/* case AML_INT_NAMEPATH_OP: */
+		break;
+
+	case AML_CLASS_METHOD_CALL:
+
+		ACPI_DEBUG_PRINT((ACPI_DB_DISPATCH,
+				  "RESOLVING-MethodCall: State=%p Op=%p NamedObj=%p\n",
+				  walk_state, op, node));
+
+		/*
+		 * Lookup the method name and save the Node
+		 */
+		status =
+		    acpi_ns_lookup(walk_state->scope_info,
+				   arg->common.value.string, ACPI_TYPE_ANY,
+				   ACPI_IMODE_LOAD_PASS2,
+				   ACPI_NS_SEARCH_PARENT |
+				   ACPI_NS_DONT_OPEN_SCOPE, walk_state,
+				   &(new_node));
+		if (ACPI_SUCCESS(status)) {
+			/*
+			 * Make sure that what we found is indeed a method
+			 * We didn't search for a method on purpose, to see if the name
+			 * would resolve
+			 */
+			if (new_node->type != ACPI_TYPE_METHOD) {
+				status = AE_AML_OPERAND_TYPE;
+			}
+
+			/* We could put the returned object (Node) on the object stack for
+			 * later, but for now, we will put it in the "op" object that the
+			 * parser uses, so we can get it again at the end of this scope
+			 */
+			op->common.node = new_node;
+		} else {
+			ACPI_ERROR_NAMESPACE(arg->common.value.string, status);
+		}
+		break;
+
+	default:
+		break;
+	}
+
+      cleanup:
+
+	/* Remove the Node pushed at the very beginning */
+
+	walk_state->operands[0] = NULL;
+	walk_state->num_operands = 0;
+	return_ACPI_STATUS(status);
+}
diff --git a/drivers/acpi/acpica/evgpe.c b/drivers/acpi/acpica/evgpe.c
index 14988a8..65c79ad 100644
--- a/drivers/acpi/acpica/evgpe.c
+++ b/drivers/acpi/acpica/evgpe.c
@@ -373,6 +373,15 @@
 
 			gpe_register_info = &gpe_block->register_info[i];
 
+			/*
+			 * Optimization: If there are no GPEs enabled within this
+			 * register, we can safely ignore the entire register.
+			 */
+			if (!(gpe_register_info->enable_for_run |
+			      gpe_register_info->enable_for_wake)) {
+				continue;
+			}
+
 			/* Read the Status Register */
 
 			status =
@@ -457,6 +466,7 @@
 	acpi_status status;
 	struct acpi_gpe_event_info *local_gpe_event_info;
 	struct acpi_evaluate_info *info;
+	struct acpi_gpe_notify_object *notify_object;
 
 	ACPI_FUNCTION_TRACE(ev_asynch_execute_gpe_method);
 
@@ -508,10 +518,18 @@
 		 * from this thread -- because handlers may in turn run other
 		 * control methods.
 		 */
-		status =
-		    acpi_ev_queue_notify_request(local_gpe_event_info->dispatch.
-						 device_node,
-						 ACPI_NOTIFY_DEVICE_WAKE);
+		status = acpi_ev_queue_notify_request(
+				local_gpe_event_info->dispatch.device.node,
+				ACPI_NOTIFY_DEVICE_WAKE);
+
+		notify_object = local_gpe_event_info->dispatch.device.next;
+		while (ACPI_SUCCESS(status) && notify_object) {
+			status = acpi_ev_queue_notify_request(
+					notify_object->node,
+					ACPI_NOTIFY_DEVICE_WAKE);
+			notify_object = notify_object->next;
+		}
+
 		break;
 
 	case ACPI_GPE_DISPATCH_METHOD:
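Editor's note: the evgpe.c hunk above switches implicit wake notification from a single dispatch.device_node to a small singly linked list (an inline dispatch.device.node plus chained acpi_gpe_notify_object entries), so one GPE can notify several wake devices. A minimal standalone sketch of the traversal pattern, using plain C stand-ins for the ACPICA types (every name below is illustrative, not the real structures):

    #include <stdio.h>

    /* Stand-ins for the ACPICA types used by the hunk above */
    struct notify_object {
            const char *node;              /* plays the role of a namespace node */
            struct notify_object *next;
    };

    struct device_dispatch {
            const char *node;              /* first (inline) wake device */
            struct notify_object *next;    /* additional wake devices, if any */
    };

    static int queue_notify(const char *node)
    {
            printf("queue DEVICE_WAKE notify for %s\n", node);
            return 0;                      /* 0 stands in for ACPI_SUCCESS */
    }

    /* Notify the inline device first, then walk the chain until a failure */
    static int notify_all_wake_devices(const struct device_dispatch *d)
    {
            const struct notify_object *n;
            int status = queue_notify(d->node);

            for (n = d->next; status == 0 && n; n = n->next)
                    status = queue_notify(n->node);

            return status;
    }

    int main(void)
    {
            struct notify_object extra = { "\\_SB.USB0", NULL };
            struct device_dispatch d = { "\\_SB.LID0", &extra };

            return notify_all_wake_devices(&d);
    }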
diff --git a/drivers/acpi/acpica/evregion.c b/drivers/acpi/acpica/evregion.c
index 785a5ee..bea7223 100644
--- a/drivers/acpi/acpica/evregion.c
+++ b/drivers/acpi/acpica/evregion.c
@@ -231,6 +231,8 @@
 		}
 	}
 
+	acpi_gbl_reg_methods_executed = TRUE;
+
 	(void)acpi_ut_release_mutex(ACPI_MTX_NAMESPACE);
 	return_ACPI_STATUS(status);
 }
diff --git a/drivers/acpi/acpica/evxfgpe.c b/drivers/acpi/acpica/evxfgpe.c
index 3b20a34..52aaff3 100644
--- a/drivers/acpi/acpica/evxfgpe.c
+++ b/drivers/acpi/acpica/evxfgpe.c
@@ -198,7 +198,9 @@
 	acpi_status status = AE_BAD_PARAMETER;
 	struct acpi_gpe_event_info *gpe_event_info;
 	struct acpi_namespace_node *device_node;
+	struct acpi_gpe_notify_object *notify_object;
 	acpi_cpu_flags flags;
+	u8 gpe_dispatch_mask;
 
 	ACPI_FUNCTION_TRACE(acpi_setup_gpe_for_wake);
 
@@ -221,27 +223,49 @@
 		goto unlock_and_exit;
 	}
 
+	if (wake_device == ACPI_ROOT_OBJECT) {
+		goto out;
+	}
+
 	/*
 	 * If there is no method or handler for this GPE, then the
 	 * wake_device will be notified whenever this GPE fires (aka
 	 * "implicit notify") Note: The GPE is assumed to be
 	 * level-triggered (for windows compatibility).
 	 */
-	if (((gpe_event_info->flags & ACPI_GPE_DISPATCH_MASK) ==
-	      ACPI_GPE_DISPATCH_NONE) && (wake_device != ACPI_ROOT_OBJECT)) {
-
-		/* Validate wake_device is of type Device */
-
-		device_node = ACPI_CAST_PTR(struct acpi_namespace_node,
-					    wake_device);
-		if (device_node->type != ACPI_TYPE_DEVICE) {
-			goto unlock_and_exit;
-		}
-		gpe_event_info->flags = (ACPI_GPE_DISPATCH_NOTIFY |
-					 ACPI_GPE_LEVEL_TRIGGERED);
-		gpe_event_info->dispatch.device_node = device_node;
+	gpe_dispatch_mask = gpe_event_info->flags & ACPI_GPE_DISPATCH_MASK;
+	if (gpe_dispatch_mask != ACPI_GPE_DISPATCH_NONE
+	    && gpe_dispatch_mask != ACPI_GPE_DISPATCH_NOTIFY) {
+		goto out;
 	}
 
+	/* Validate wake_device is of type Device */
+
+	device_node = ACPI_CAST_PTR(struct acpi_namespace_node, wake_device);
+	if (device_node->type != ACPI_TYPE_DEVICE) {
+		goto unlock_and_exit;
+	}
+
+	if (gpe_dispatch_mask == ACPI_GPE_DISPATCH_NONE) {
+		gpe_event_info->flags = (ACPI_GPE_DISPATCH_NOTIFY |
+					 ACPI_GPE_LEVEL_TRIGGERED);
+		gpe_event_info->dispatch.device.node = device_node;
+		gpe_event_info->dispatch.device.next = NULL;
+	} else {
+		/* There are multiple devices to notify implicitly. */
+
+		notify_object = ACPI_ALLOCATE_ZEROED(sizeof(*notify_object));
+		if (!notify_object) {
+			status = AE_NO_MEMORY;
+			goto unlock_and_exit;
+		}
+
+		notify_object->node = device_node;
+		notify_object->next = gpe_event_info->dispatch.device.next;
+		gpe_event_info->dispatch.device.next = notify_object;
+	}
+
+ out:
 	gpe_event_info->flags |= ACPI_GPE_CAN_WAKE;
 	status = AE_OK;
 
diff --git a/drivers/acpi/acpica/evxfregn.c b/drivers/acpi/acpica/evxfregn.c
index eb73867..c85c8c4 100644
--- a/drivers/acpi/acpica/evxfregn.c
+++ b/drivers/acpi/acpica/evxfregn.c
@@ -110,9 +110,39 @@
 		goto unlock_and_exit;
 	}
 
-	/* Run all _REG methods for this address space */
+	/*
+	 * For the default space IDs (those for which default region handlers
+	 * are installed), only execute the _REG methods if the global
+	 * initialization _REG methods have already been run (via
+	 * acpi_initialize_objects). In other words, defer execution of the
+	 * _REG methods for these space IDs until acpi_initialize_objects has
+	 * run. The handlers for the default spaces (mem/io/pci/table) must be
+	 * installed before any control methods (including _REG methods) can
+	 * run, and there is known BIOS code that depends on this.
+	 *
+	 * For all other space IDs, the _REG methods can safely be executed
+	 * immediately. This means that for IDs like embedded_controller, this
+	 * function should be called only after acpi_enable_subsystem.
+	 */
+	switch (space_id) {
+	case ACPI_ADR_SPACE_SYSTEM_MEMORY:
+	case ACPI_ADR_SPACE_SYSTEM_IO:
+	case ACPI_ADR_SPACE_PCI_CONFIG:
+	case ACPI_ADR_SPACE_DATA_TABLE:
 
-	status = acpi_ev_execute_reg_methods(node, space_id);
+		if (acpi_gbl_reg_methods_executed) {
+
+			/* Run all _REG methods for this address space */
+
+			status = acpi_ev_execute_reg_methods(node, space_id);
+		}
+		break;
+
+	default:
+
+		status = acpi_ev_execute_reg_methods(node, space_id);
+		break;
+	}
 
       unlock_and_exit:
 	(void)acpi_ut_release_mutex(ACPI_MTX_NAMESPACE);
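Editor's note: the switch above defers _REG execution for the four default address spaces until the global _REG pass has run, keyed off acpi_gbl_reg_methods_executed (set in the evregion.c hunk earlier in this patch). A standalone sketch of the same gating decision, with the flag and the space IDs reduced to illustrative plain C names:

    #include <stdio.h>
    #include <stdbool.h>

    /* Illustrative stand-ins for the ACPI_ADR_SPACE_* IDs named above */
    enum space_id {
            SPACE_SYSTEM_MEMORY,
            SPACE_SYSTEM_IO,
            SPACE_PCI_CONFIG,
            SPACE_DATA_TABLE,
            SPACE_EC,                       /* embedded controller: not a default space */
    };

    static bool reg_methods_executed;       /* acpi_gbl_reg_methods_executed */

    /*
     * Default spaces: run _REG only once the global _REG pass is done.
     * Every other space: run _REG immediately.
     */
    static bool should_run_reg_methods(enum space_id id)
    {
            switch (id) {
            case SPACE_SYSTEM_MEMORY:
            case SPACE_SYSTEM_IO:
            case SPACE_PCI_CONFIG:
            case SPACE_DATA_TABLE:
                    return reg_methods_executed;
            default:
                    return true;
            }
    }

    int main(void)
    {
            printf("EC handler before init: run _REG now? %d\n",
                   should_run_reg_methods(SPACE_EC));
            printf("SystemMemory handler before init: run _REG now? %d\n",
                   should_run_reg_methods(SPACE_SYSTEM_MEMORY));
            return 0;
    }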
diff --git a/drivers/acpi/acpica/exfldio.c b/drivers/acpi/acpica/exfldio.c
index 6c79c29..f915a7f 100644
--- a/drivers/acpi/acpica/exfldio.c
+++ b/drivers/acpi/acpica/exfldio.c
@@ -280,13 +280,13 @@
 	if (ACPI_FAILURE(status)) {
 		if (status == AE_NOT_IMPLEMENTED) {
 			ACPI_ERROR((AE_INFO,
-				    "Region %s(0x%X) not implemented",
+				    "Region %s (ID=%u) not implemented",
 				    acpi_ut_get_region_name(rgn_desc->region.
 							    space_id),
 				    rgn_desc->region.space_id));
 		} else if (status == AE_NOT_EXIST) {
 			ACPI_ERROR((AE_INFO,
-				    "Region %s(0x%X) has no handler",
+				    "Region %s (ID=%u) has no handler",
 				    acpi_ut_get_region_name(rgn_desc->region.
 							    space_id),
 				    rgn_desc->region.space_id));
diff --git a/drivers/acpi/acpica/tbfadt.c b/drivers/acpi/acpica/tbfadt.c
index 428d44e..6f5588e 100644
--- a/drivers/acpi/acpica/tbfadt.c
+++ b/drivers/acpi/acpica/tbfadt.c
@@ -384,8 +384,11 @@
 	 *
 	 * The ACPI 1.0 reserved fields that will be zeroed are the bytes located at
 	 * offset 45, 55, 95, and the word located at offset 109, 110.
+	 *
+	 * Note: The FADT revision value is unreliable. Only the length can be
+	 * trusted.
 	 */
-	if (acpi_gbl_FADT.header.revision < FADT2_REVISION_ID) {
+	if (acpi_gbl_FADT.header.length <= ACPI_FADT_V2_SIZE) {
 		acpi_gbl_FADT.preferred_profile = 0;
 		acpi_gbl_FADT.pstate_control = 0;
 		acpi_gbl_FADT.cst_control = 0;
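Editor's note: the tbfadt.c change above stops trusting the FADT revision field and instead decides from the reported table length whether the ACPI 1.0/2.0-only cleanup applies. A standalone sketch of that length-based decision; the structure layout and the 116-byte threshold are illustrative only (the real code compares against ACPI_FADT_V2_SIZE):

    #include <stdio.h>
    #include <stdint.h>

    /* Toy header: just enough fields to show the check */
    struct toy_table_header {
            uint8_t  revision;
            uint32_t length;
    };

    #define TOY_FADT_V2_SIZE 116U           /* illustrative threshold only */

    /*
     * The revision byte has been observed to disagree with the table body
     * shipped by firmware, so trust only the length when deciding whether
     * the post-V2 fields exist.
     */
    static int table_is_v2_or_older(const struct toy_table_header *h)
    {
            return h->length <= TOY_FADT_V2_SIZE;
    }

    int main(void)
    {
            struct toy_table_header h = { .revision = 3, .length = 116 };

            printf("zero the post-V2 fields: %s\n",
                   table_is_v2_or_older(&h) ? "yes" : "no");
            return 0;
    }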
diff --git a/drivers/acpi/acpica/utdecode.c b/drivers/acpi/acpica/utdecode.c
new file mode 100644
index 0000000..136a814
--- /dev/null
+++ b/drivers/acpi/acpica/utdecode.c
@@ -0,0 +1,548 @@
+/******************************************************************************
+ *
+ * Module Name: utdecode - Utility decoding routines (value-to-string)
+ *
+ *****************************************************************************/
+
+/*
+ * Copyright (C) 2000 - 2011, Intel Corp.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions, and the following disclaimer,
+ *    without modification.
+ * 2. Redistributions in binary form must reproduce at minimum a disclaimer
+ *    substantially similar to the "NO WARRANTY" disclaimer below
+ *    ("Disclaimer") and any redistribution must be conditioned upon
+ *    including a substantially similar Disclaimer requirement for further
+ *    binary redistribution.
+ * 3. Neither the names of the above-listed copyright holders nor the names
+ *    of any contributors may be used to endorse or promote products derived
+ *    from this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * NO WARRANTY
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+ * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
+ * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGES.
+ */
+
+#include <acpi/acpi.h>
+#include "accommon.h"
+#include "acnamesp.h"
+
+#define _COMPONENT          ACPI_UTILITIES
+ACPI_MODULE_NAME("utdecode")
+
+/*******************************************************************************
+ *
+ * FUNCTION:    acpi_format_exception
+ *
+ * PARAMETERS:  Status       - The acpi_status code to be formatted
+ *
+ * RETURN:      A string containing the exception text. A valid pointer is
+ *              always returned.
+ *
+ * DESCRIPTION: This function translates an ACPI exception into an ASCII string.
+ *              It is here instead of utxface.c so it is always present.
+ *
+ ******************************************************************************/
+const char *acpi_format_exception(acpi_status status)
+{
+	const char *exception = NULL;
+
+	ACPI_FUNCTION_ENTRY();
+
+	exception = acpi_ut_validate_exception(status);
+	if (!exception) {
+
+		/* Exception code was not recognized */
+
+		ACPI_ERROR((AE_INFO,
+			    "Unknown exception code: 0x%8.8X", status));
+
+		exception = "UNKNOWN_STATUS_CODE";
+	}
+
+	return (ACPI_CAST_PTR(const char, exception));
+}
+
+ACPI_EXPORT_SYMBOL(acpi_format_exception)
+
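Editor's note: because acpi_format_exception() is exported and always returns a valid pointer, callers can feed any acpi_status straight into a log message. A hedged fragment; the handle and the surrounding driver context are placeholders, while acpi_evaluate_object(), ACPI_FAILURE() and printk() are existing interfaces:

    acpi_status status;

    status = acpi_evaluate_object(handle, "_STA", NULL, NULL);
    if (ACPI_FAILURE(status))
            printk(KERN_WARNING "ACPI: _STA evaluation failed: %s\n",
                   acpi_format_exception(status));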
+/*
+ * Properties of the ACPI Object Types, both internal and external.
+ * The table is indexed by values of acpi_object_type
+ */
+const u8 acpi_gbl_ns_properties[ACPI_NUM_NS_TYPES] = {
+	ACPI_NS_NORMAL,		/* 00 Any              */
+	ACPI_NS_NORMAL,		/* 01 Number           */
+	ACPI_NS_NORMAL,		/* 02 String           */
+	ACPI_NS_NORMAL,		/* 03 Buffer           */
+	ACPI_NS_NORMAL,		/* 04 Package          */
+	ACPI_NS_NORMAL,		/* 05 field_unit       */
+	ACPI_NS_NEWSCOPE,	/* 06 Device           */
+	ACPI_NS_NORMAL,		/* 07 Event            */
+	ACPI_NS_NEWSCOPE,	/* 08 Method           */
+	ACPI_NS_NORMAL,		/* 09 Mutex            */
+	ACPI_NS_NORMAL,		/* 10 Region           */
+	ACPI_NS_NEWSCOPE,	/* 11 Power            */
+	ACPI_NS_NEWSCOPE,	/* 12 Processor        */
+	ACPI_NS_NEWSCOPE,	/* 13 Thermal          */
+	ACPI_NS_NORMAL,		/* 14 buffer_field     */
+	ACPI_NS_NORMAL,		/* 15 ddb_handle       */
+	ACPI_NS_NORMAL,		/* 16 Debug Object     */
+	ACPI_NS_NORMAL,		/* 17 def_field        */
+	ACPI_NS_NORMAL,		/* 18 bank_field       */
+	ACPI_NS_NORMAL,		/* 19 index_field      */
+	ACPI_NS_NORMAL,		/* 20 Reference        */
+	ACPI_NS_NORMAL,		/* 21 Alias            */
+	ACPI_NS_NORMAL,		/* 22 method_alias     */
+	ACPI_NS_NORMAL,		/* 23 Notify           */
+	ACPI_NS_NORMAL,		/* 24 Address Handler  */
+	ACPI_NS_NEWSCOPE | ACPI_NS_LOCAL,	/* 25 Resource Desc    */
+	ACPI_NS_NEWSCOPE | ACPI_NS_LOCAL,	/* 26 Resource Field   */
+	ACPI_NS_NEWSCOPE,	/* 27 Scope            */
+	ACPI_NS_NORMAL,		/* 28 Extra            */
+	ACPI_NS_NORMAL,		/* 29 Data             */
+	ACPI_NS_NORMAL		/* 30 Invalid          */
+};
+
+/*******************************************************************************
+ *
+ * FUNCTION:    acpi_ut_hex_to_ascii_char
+ *
+ * PARAMETERS:  Integer             - Contains the hex digit
+ *              Position            - bit position of the digit within the
+ *                                    integer (multiple of 4)
+ *
+ * RETURN:      The converted Ascii character
+ *
+ * DESCRIPTION: Convert a hex digit to an Ascii character
+ *
+ ******************************************************************************/
+
+/* Hex to ASCII conversion table */
+
+static const char acpi_gbl_hex_to_ascii[] = {
+	'0', '1', '2', '3', '4', '5', '6', '7',
+	'8', '9', 'A', 'B', 'C', 'D', 'E', 'F'
+};
+
+char acpi_ut_hex_to_ascii_char(u64 integer, u32 position)
+{
+
+	return (acpi_gbl_hex_to_ascii[(integer >> position) & 0xF]);
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION:    acpi_ut_get_region_name
+ *
+ * PARAMETERS:  Space ID            - ID for the region
+ *
+ * RETURN:      Decoded region space_id name
+ *
+ * DESCRIPTION: Translate a Space ID into a name string (Debug only)
+ *
+ ******************************************************************************/
+
+/* Region type decoding */
+
+const char *acpi_gbl_region_types[ACPI_NUM_PREDEFINED_REGIONS] = {
+	"SystemMemory",
+	"SystemIO",
+	"PCI_Config",
+	"EmbeddedControl",
+	"SMBus",
+	"SystemCMOS",
+	"PCIBARTarget",
+	"IPMI",
+	"DataTable"
+};
+
+char *acpi_ut_get_region_name(u8 space_id)
+{
+
+	if (space_id >= ACPI_USER_REGION_BEGIN) {
+		return ("UserDefinedRegion");
+	} else if (space_id == ACPI_ADR_SPACE_FIXED_HARDWARE) {
+		return ("FunctionalFixedHW");
+	} else if (space_id >= ACPI_NUM_PREDEFINED_REGIONS) {
+		return ("InvalidSpaceId");
+	}
+
+	return (ACPI_CAST_PTR(char, acpi_gbl_region_types[space_id]));
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION:    acpi_ut_get_event_name
+ *
+ * PARAMETERS:  event_id            - Fixed event ID
+ *
+ * RETURN:      Decoded event ID name
+ *
+ * DESCRIPTION: Translate an Event ID into a name string (Debug only)
+ *
+ ******************************************************************************/
+
+/* Event type decoding */
+
+static const char *acpi_gbl_event_types[ACPI_NUM_FIXED_EVENTS] = {
+	"PM_Timer",
+	"GlobalLock",
+	"PowerButton",
+	"SleepButton",
+	"RealTimeClock",
+};
+
+char *acpi_ut_get_event_name(u32 event_id)
+{
+
+	if (event_id > ACPI_EVENT_MAX) {
+		return ("InvalidEventID");
+	}
+
+	return (ACPI_CAST_PTR(char, acpi_gbl_event_types[event_id]));
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION:    acpi_ut_get_type_name
+ *
+ * PARAMETERS:  Type                - An ACPI object type
+ *
+ * RETURN:      Decoded ACPI object type name
+ *
+ * DESCRIPTION: Translate a Type ID into a name string (Debug only)
+ *
+ ******************************************************************************/
+
+/*
+ * Elements of acpi_gbl_ns_type_names below must match
+ * one-to-one with values of acpi_object_type
+ *
+ * The type ACPI_TYPE_ANY (Untyped) is used as a "don't care" when searching;
+ * when stored in a table it really means that we have thus far seen no
+ * evidence to indicate what type is actually going to be stored for this entry.
+ */
+static const char acpi_gbl_bad_type[] = "UNDEFINED";
+
+/* Printable names of the ACPI object types */
+
+static const char *acpi_gbl_ns_type_names[] = {
+	/* 00 */ "Untyped",
+	/* 01 */ "Integer",
+	/* 02 */ "String",
+	/* 03 */ "Buffer",
+	/* 04 */ "Package",
+	/* 05 */ "FieldUnit",
+	/* 06 */ "Device",
+	/* 07 */ "Event",
+	/* 08 */ "Method",
+	/* 09 */ "Mutex",
+	/* 10 */ "Region",
+	/* 11 */ "Power",
+	/* 12 */ "Processor",
+	/* 13 */ "Thermal",
+	/* 14 */ "BufferField",
+	/* 15 */ "DdbHandle",
+	/* 16 */ "DebugObject",
+	/* 17 */ "RegionField",
+	/* 18 */ "BankField",
+	/* 19 */ "IndexField",
+	/* 20 */ "Reference",
+	/* 21 */ "Alias",
+	/* 22 */ "MethodAlias",
+	/* 23 */ "Notify",
+	/* 24 */ "AddrHandler",
+	/* 25 */ "ResourceDesc",
+	/* 26 */ "ResourceFld",
+	/* 27 */ "Scope",
+	/* 28 */ "Extra",
+	/* 29 */ "Data",
+	/* 30 */ "Invalid"
+};
+
+char *acpi_ut_get_type_name(acpi_object_type type)
+{
+
+	if (type > ACPI_TYPE_INVALID) {
+		return (ACPI_CAST_PTR(char, acpi_gbl_bad_type));
+	}
+
+	return (ACPI_CAST_PTR(char, acpi_gbl_ns_type_names[type]));
+}
+
+char *acpi_ut_get_object_type_name(union acpi_operand_object *obj_desc)
+{
+
+	if (!obj_desc) {
+		return ("[NULL Object Descriptor]");
+	}
+
+	return (acpi_ut_get_type_name(obj_desc->common.type));
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION:    acpi_ut_get_node_name
+ *
+ * PARAMETERS:  Object               - A namespace node
+ *
+ * RETURN:      ASCII name of the node
+ *
+ * DESCRIPTION: Validate the node and return the node's ACPI name.
+ *
+ ******************************************************************************/
+
+char *acpi_ut_get_node_name(void *object)
+{
+	struct acpi_namespace_node *node = (struct acpi_namespace_node *)object;
+
+	/* Must return a string of exactly 4 characters == ACPI_NAME_SIZE */
+
+	if (!object) {
+		return ("NULL");
+	}
+
+	/* Check for Root node */
+
+	if ((object == ACPI_ROOT_OBJECT) || (object == acpi_gbl_root_node)) {
+		return ("\"\\\" ");
+	}
+
+	/* Descriptor must be a namespace node */
+
+	if (ACPI_GET_DESCRIPTOR_TYPE(node) != ACPI_DESC_TYPE_NAMED) {
+		return ("####");
+	}
+
+	/*
+	 * Ensure name is valid. The name was validated/repaired when the node
+	 * was created, but make sure it has not been corrupted.
+	 */
+	acpi_ut_repair_name(node->name.ascii);
+
+	/* Return the name */
+
+	return (node->name.ascii);
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION:    acpi_ut_get_descriptor_name
+ *
+ * PARAMETERS:  Object               - An ACPI object
+ *
+ * RETURN:      Decoded name of the descriptor type
+ *
+ * DESCRIPTION: Validate object and return the descriptor type
+ *
+ ******************************************************************************/
+
+/* Printable names of object descriptor types */
+
+static const char *acpi_gbl_desc_type_names[] = {
+	/* 00 */ "Not a Descriptor",
+	/* 01 */ "Cached",
+	/* 02 */ "State-Generic",
+	/* 03 */ "State-Update",
+	/* 04 */ "State-Package",
+	/* 05 */ "State-Control",
+	/* 06 */ "State-RootParseScope",
+	/* 07 */ "State-ParseScope",
+	/* 08 */ "State-WalkScope",
+	/* 09 */ "State-Result",
+	/* 10 */ "State-Notify",
+	/* 11 */ "State-Thread",
+	/* 12 */ "Walk",
+	/* 13 */ "Parser",
+	/* 14 */ "Operand",
+	/* 15 */ "Node"
+};
+
+char *acpi_ut_get_descriptor_name(void *object)
+{
+
+	if (!object) {
+		return ("NULL OBJECT");
+	}
+
+	if (ACPI_GET_DESCRIPTOR_TYPE(object) > ACPI_DESC_TYPE_MAX) {
+		return ("Not a Descriptor");
+	}
+
+	return (ACPI_CAST_PTR(char,
+			      acpi_gbl_desc_type_names[ACPI_GET_DESCRIPTOR_TYPE
+						       (object)]));
+
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION:    acpi_ut_get_reference_name
+ *
+ * PARAMETERS:  Object               - An ACPI reference object
+ *
+ * RETURN:      Decoded name of the type of reference
+ *
+ * DESCRIPTION: Decode a reference object sub-type to a string.
+ *
+ ******************************************************************************/
+
+/* Printable names of reference object sub-types */
+
+static const char *acpi_gbl_ref_class_names[] = {
+	/* 00 */ "Local",
+	/* 01 */ "Argument",
+	/* 02 */ "RefOf",
+	/* 03 */ "Index",
+	/* 04 */ "DdbHandle",
+	/* 05 */ "Named Object",
+	/* 06 */ "Debug"
+};
+
+const char *acpi_ut_get_reference_name(union acpi_operand_object *object)
+{
+
+	if (!object) {
+		return ("NULL Object");
+	}
+
+	if (ACPI_GET_DESCRIPTOR_TYPE(object) != ACPI_DESC_TYPE_OPERAND) {
+		return ("Not an Operand object");
+	}
+
+	if (object->common.type != ACPI_TYPE_LOCAL_REFERENCE) {
+		return ("Not a Reference object");
+	}
+
+	if (object->reference.class > ACPI_REFCLASS_MAX) {
+		return ("Unknown Reference class");
+	}
+
+	return (acpi_gbl_ref_class_names[object->reference.class]);
+}
+
+#if defined(ACPI_DEBUG_OUTPUT) || defined(ACPI_DEBUGGER)
+/*
+ * Strings and procedures used for debug only
+ */
+
+/*******************************************************************************
+ *
+ * FUNCTION:    acpi_ut_get_mutex_name
+ *
+ * PARAMETERS:  mutex_id        - The predefined ID for this mutex.
+ *
+ * RETURN:      Decoded name of the internal mutex
+ *
+ * DESCRIPTION: Translate a mutex ID into a name string (Debug only)
+ *
+ ******************************************************************************/
+
+/* Names for internal mutex objects, used for debug output */
+
+static char *acpi_gbl_mutex_names[ACPI_NUM_MUTEX] = {
+	"ACPI_MTX_Interpreter",
+	"ACPI_MTX_Namespace",
+	"ACPI_MTX_Tables",
+	"ACPI_MTX_Events",
+	"ACPI_MTX_Caches",
+	"ACPI_MTX_Memory",
+	"ACPI_MTX_CommandComplete",
+	"ACPI_MTX_CommandReady"
+};
+
+char *acpi_ut_get_mutex_name(u32 mutex_id)
+{
+
+	if (mutex_id > ACPI_MAX_MUTEX) {
+		return ("Invalid Mutex ID");
+	}
+
+	return (acpi_gbl_mutex_names[mutex_id]);
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION:    acpi_ut_get_notify_name
+ *
+ * PARAMETERS:  notify_value    - Value from the Notify() request
+ *
+ * RETURN:      Decoded name for the notify value
+ *
+ * DESCRIPTION: Translate a Notify Value to a notify namestring.
+ *
+ ******************************************************************************/
+
+/* Names for Notify() values, used for debug output */
+
+static const char *acpi_gbl_notify_value_names[] = {
+	"Bus Check",
+	"Device Check",
+	"Device Wake",
+	"Eject Request",
+	"Device Check Light",
+	"Frequency Mismatch",
+	"Bus Mode Mismatch",
+	"Power Fault",
+	"Capabilities Check",
+	"Device PLD Check",
+	"Reserved",
+	"System Locality Update"
+};
+
+const char *acpi_ut_get_notify_name(u32 notify_value)
+{
+
+	if (notify_value <= ACPI_NOTIFY_MAX) {
+		return (acpi_gbl_notify_value_names[notify_value]);
+	} else if (notify_value <= ACPI_MAX_SYS_NOTIFY) {
+		return ("Reserved");
+	} else {		/* Greater or equal to 0x80 */
+
+		return ("**Device Specific**");
+	}
+}
+#endif
+
+/*******************************************************************************
+ *
+ * FUNCTION:    acpi_ut_valid_object_type
+ *
+ * PARAMETERS:  Type            - Object type to be validated
+ *
+ * RETURN:      TRUE if valid object type, FALSE otherwise
+ *
+ * DESCRIPTION: Validate an object type
+ *
+ ******************************************************************************/
+
+u8 acpi_ut_valid_object_type(acpi_object_type type)
+{
+
+	if (type > ACPI_TYPE_LOCAL_MAX) {
+
+		/* Note: Assumes all TYPEs are contiguous (external/local) */
+
+		return (FALSE);
+	}
+
+	return (TRUE);
+}
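
The new utdecode.c gathers the decode helpers into one place, and each of them follows the same table-driven pattern: a static string table indexed by the numeric code, guarded by an explicit range check so an out-of-range value decodes to a fixed fallback string instead of reading past the table. A minimal standalone sketch of that pattern follows; sample_type_names, sample_decode and the fallback text are illustrative names, not part of the patch.

#include <stdio.h>

/* Illustrative string table, indexed by a numeric type code. */
static const char *sample_type_names[] = {
	/* 00 */ "Untyped",
	/* 01 */ "Integer",
	/* 02 */ "String",
	/* 03 */ "Buffer",
};

#define SAMPLE_TYPE_MAX \
	(sizeof(sample_type_names) / sizeof(sample_type_names[0]) - 1)

static const char *sample_decode(unsigned int type)
{
	/* Range check first, so bad codes never index past the table. */
	if (type > SAMPLE_TYPE_MAX)
		return "UNDEFINED";

	return sample_type_names[type];
}

int main(void)
{
	printf("%s\n", sample_decode(2));	/* String */
	printf("%s\n", sample_decode(99));	/* UNDEFINED */
	return 0;
}
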
diff --git a/drivers/acpi/acpica/utglobal.c b/drivers/acpi/acpica/utglobal.c
index 97dd9bb..833a38a 100644
--- a/drivers/acpi/acpica/utglobal.c
+++ b/drivers/acpi/acpica/utglobal.c
@@ -45,7 +45,6 @@
 
 #include <acpi/acpi.h>
 #include "accommon.h"
-#include "acnamesp.h"
 
 #define _COMPONENT          ACPI_UTILITIES
 ACPI_MODULE_NAME("utglobal")
@@ -107,43 +106,6 @@
 
 /*******************************************************************************
  *
- * FUNCTION:    acpi_format_exception
- *
- * PARAMETERS:  Status       - The acpi_status code to be formatted
- *
- * RETURN:      A string containing the exception text. A valid pointer is
- *              always returned.
- *
- * DESCRIPTION: This function translates an ACPI exception into an ASCII string
- *              It is here instead of utxface.c so it is always present.
- *
- ******************************************************************************/
-
-const char *acpi_format_exception(acpi_status status)
-{
-	const char *exception = NULL;
-
-	ACPI_FUNCTION_ENTRY();
-
-	exception = acpi_ut_validate_exception(status);
-	if (!exception) {
-
-		/* Exception code was not recognized */
-
-		ACPI_ERROR((AE_INFO,
-			    "Unknown exception code: 0x%8.8X", status));
-
-		exception = "UNKNOWN_STATUS_CODE";
-		dump_stack();
-	}
-
-	return (ACPI_CAST_PTR(const char, exception));
-}
-
-ACPI_EXPORT_SYMBOL(acpi_format_exception)
-
-/*******************************************************************************
- *
  * Namespace globals
  *
  ******************************************************************************/
@@ -177,71 +139,6 @@
 	{NULL, ACPI_TYPE_ANY, NULL}
 };
 
-/*
- * Properties of the ACPI Object Types, both internal and external.
- * The table is indexed by values of acpi_object_type
- */
-const u8 acpi_gbl_ns_properties[] = {
-	ACPI_NS_NORMAL,		/* 00 Any              */
-	ACPI_NS_NORMAL,		/* 01 Number           */
-	ACPI_NS_NORMAL,		/* 02 String           */
-	ACPI_NS_NORMAL,		/* 03 Buffer           */
-	ACPI_NS_NORMAL,		/* 04 Package          */
-	ACPI_NS_NORMAL,		/* 05 field_unit       */
-	ACPI_NS_NEWSCOPE,	/* 06 Device           */
-	ACPI_NS_NORMAL,		/* 07 Event            */
-	ACPI_NS_NEWSCOPE,	/* 08 Method           */
-	ACPI_NS_NORMAL,		/* 09 Mutex            */
-	ACPI_NS_NORMAL,		/* 10 Region           */
-	ACPI_NS_NEWSCOPE,	/* 11 Power            */
-	ACPI_NS_NEWSCOPE,	/* 12 Processor        */
-	ACPI_NS_NEWSCOPE,	/* 13 Thermal          */
-	ACPI_NS_NORMAL,		/* 14 buffer_field     */
-	ACPI_NS_NORMAL,		/* 15 ddb_handle       */
-	ACPI_NS_NORMAL,		/* 16 Debug Object     */
-	ACPI_NS_NORMAL,		/* 17 def_field        */
-	ACPI_NS_NORMAL,		/* 18 bank_field       */
-	ACPI_NS_NORMAL,		/* 19 index_field      */
-	ACPI_NS_NORMAL,		/* 20 Reference        */
-	ACPI_NS_NORMAL,		/* 21 Alias            */
-	ACPI_NS_NORMAL,		/* 22 method_alias     */
-	ACPI_NS_NORMAL,		/* 23 Notify           */
-	ACPI_NS_NORMAL,		/* 24 Address Handler  */
-	ACPI_NS_NEWSCOPE | ACPI_NS_LOCAL,	/* 25 Resource Desc    */
-	ACPI_NS_NEWSCOPE | ACPI_NS_LOCAL,	/* 26 Resource Field   */
-	ACPI_NS_NEWSCOPE,	/* 27 Scope            */
-	ACPI_NS_NORMAL,		/* 28 Extra            */
-	ACPI_NS_NORMAL,		/* 29 Data             */
-	ACPI_NS_NORMAL		/* 30 Invalid          */
-};
-
-/* Hex to ASCII conversion table */
-
-static const char acpi_gbl_hex_to_ascii[] = {
-	'0', '1', '2', '3', '4', '5', '6', '7',
-	'8', '9', 'A', 'B', 'C', 'D', 'E', 'F'
-};
-
-/*******************************************************************************
- *
- * FUNCTION:    acpi_ut_hex_to_ascii_char
- *
- * PARAMETERS:  Integer             - Contains the hex digit
- *              Position            - bit position of the digit within the
- *                                    integer (multiple of 4)
- *
- * RETURN:      The converted Ascii character
- *
- * DESCRIPTION: Convert a hex digit to an Ascii character
- *
- ******************************************************************************/
-
-char acpi_ut_hex_to_ascii_char(u64 integer, u32 position)
-{
-
-	return (acpi_gbl_hex_to_ascii[(integer >> position) & 0xF]);
-}
-
 /******************************************************************************
  *
  * Event and Hardware globals
@@ -341,386 +238,6 @@
 
 /*******************************************************************************
  *
- * FUNCTION:    acpi_ut_get_region_name
- *
- * PARAMETERS:  None.
- *
- * RETURN:      Status
- *
- * DESCRIPTION: Translate a Space ID into a name string (Debug only)
- *
- ******************************************************************************/
-
-/* Region type decoding */
-
-const char *acpi_gbl_region_types[ACPI_NUM_PREDEFINED_REGIONS] = {
-	"SystemMemory",
-	"SystemIO",
-	"PCI_Config",
-	"EmbeddedControl",
-	"SMBus",
-	"SystemCMOS",
-	"PCIBARTarget",
-	"IPMI",
-	"DataTable"
-};
-
-char *acpi_ut_get_region_name(u8 space_id)
-{
-
-	if (space_id >= ACPI_USER_REGION_BEGIN) {
-		return ("UserDefinedRegion");
-	} else if (space_id >= ACPI_NUM_PREDEFINED_REGIONS) {
-		return ("InvalidSpaceId");
-	}
-
-	return (ACPI_CAST_PTR(char, acpi_gbl_region_types[space_id]));
-}
-
-/*******************************************************************************
- *
- * FUNCTION:    acpi_ut_get_event_name
- *
- * PARAMETERS:  None.
- *
- * RETURN:      Status
- *
- * DESCRIPTION: Translate a Event ID into a name string (Debug only)
- *
- ******************************************************************************/
-
-/* Event type decoding */
-
-static const char *acpi_gbl_event_types[ACPI_NUM_FIXED_EVENTS] = {
-	"PM_Timer",
-	"GlobalLock",
-	"PowerButton",
-	"SleepButton",
-	"RealTimeClock",
-};
-
-char *acpi_ut_get_event_name(u32 event_id)
-{
-
-	if (event_id > ACPI_EVENT_MAX) {
-		return ("InvalidEventID");
-	}
-
-	return (ACPI_CAST_PTR(char, acpi_gbl_event_types[event_id]));
-}
-
-/*******************************************************************************
- *
- * FUNCTION:    acpi_ut_get_type_name
- *
- * PARAMETERS:  None.
- *
- * RETURN:      Status
- *
- * DESCRIPTION: Translate a Type ID into a name string (Debug only)
- *
- ******************************************************************************/
-
-/*
- * Elements of acpi_gbl_ns_type_names below must match
- * one-to-one with values of acpi_object_type
- *
- * The type ACPI_TYPE_ANY (Untyped) is used as a "don't care" when searching;
- * when stored in a table it really means that we have thus far seen no
- * evidence to indicate what type is actually going to be stored for this entry.
- */
-static const char acpi_gbl_bad_type[] = "UNDEFINED";
-
-/* Printable names of the ACPI object types */
-
-static const char *acpi_gbl_ns_type_names[] = {
-	/* 00 */ "Untyped",
-	/* 01 */ "Integer",
-	/* 02 */ "String",
-	/* 03 */ "Buffer",
-	/* 04 */ "Package",
-	/* 05 */ "FieldUnit",
-	/* 06 */ "Device",
-	/* 07 */ "Event",
-	/* 08 */ "Method",
-	/* 09 */ "Mutex",
-	/* 10 */ "Region",
-	/* 11 */ "Power",
-	/* 12 */ "Processor",
-	/* 13 */ "Thermal",
-	/* 14 */ "BufferField",
-	/* 15 */ "DdbHandle",
-	/* 16 */ "DebugObject",
-	/* 17 */ "RegionField",
-	/* 18 */ "BankField",
-	/* 19 */ "IndexField",
-	/* 20 */ "Reference",
-	/* 21 */ "Alias",
-	/* 22 */ "MethodAlias",
-	/* 23 */ "Notify",
-	/* 24 */ "AddrHandler",
-	/* 25 */ "ResourceDesc",
-	/* 26 */ "ResourceFld",
-	/* 27 */ "Scope",
-	/* 28 */ "Extra",
-	/* 29 */ "Data",
-	/* 30 */ "Invalid"
-};
-
-char *acpi_ut_get_type_name(acpi_object_type type)
-{
-
-	if (type > ACPI_TYPE_INVALID) {
-		return (ACPI_CAST_PTR(char, acpi_gbl_bad_type));
-	}
-
-	return (ACPI_CAST_PTR(char, acpi_gbl_ns_type_names[type]));
-}
-
-char *acpi_ut_get_object_type_name(union acpi_operand_object *obj_desc)
-{
-
-	if (!obj_desc) {
-		return ("[NULL Object Descriptor]");
-	}
-
-	return (acpi_ut_get_type_name(obj_desc->common.type));
-}
-
-/*******************************************************************************
- *
- * FUNCTION:    acpi_ut_get_node_name
- *
- * PARAMETERS:  Object               - A namespace node
- *
- * RETURN:      Pointer to a string
- *
- * DESCRIPTION: Validate the node and return the node's ACPI name.
- *
- ******************************************************************************/
-
-char *acpi_ut_get_node_name(void *object)
-{
-	struct acpi_namespace_node *node = (struct acpi_namespace_node *)object;
-
-	/* Must return a string of exactly 4 characters == ACPI_NAME_SIZE */
-
-	if (!object) {
-		return ("NULL");
-	}
-
-	/* Check for Root node */
-
-	if ((object == ACPI_ROOT_OBJECT) || (object == acpi_gbl_root_node)) {
-		return ("\"\\\" ");
-	}
-
-	/* Descriptor must be a namespace node */
-
-	if (ACPI_GET_DESCRIPTOR_TYPE(node) != ACPI_DESC_TYPE_NAMED) {
-		return ("####");
-	}
-
-	/* Name must be a valid ACPI name */
-
-	if (!acpi_ut_valid_acpi_name(node->name.integer)) {
-		node->name.integer = acpi_ut_repair_name(node->name.ascii);
-	}
-
-	/* Return the name */
-
-	return (node->name.ascii);
-}
-
-/*******************************************************************************
- *
- * FUNCTION:    acpi_ut_get_descriptor_name
- *
- * PARAMETERS:  Object               - An ACPI object
- *
- * RETURN:      Pointer to a string
- *
- * DESCRIPTION: Validate object and return the descriptor type
- *
- ******************************************************************************/
-
-/* Printable names of object descriptor types */
-
-static const char *acpi_gbl_desc_type_names[] = {
-	/* 00 */ "Invalid",
-	/* 01 */ "Cached",
-	/* 02 */ "State-Generic",
-	/* 03 */ "State-Update",
-	/* 04 */ "State-Package",
-	/* 05 */ "State-Control",
-	/* 06 */ "State-RootParseScope",
-	/* 07 */ "State-ParseScope",
-	/* 08 */ "State-WalkScope",
-	/* 09 */ "State-Result",
-	/* 10 */ "State-Notify",
-	/* 11 */ "State-Thread",
-	/* 12 */ "Walk",
-	/* 13 */ "Parser",
-	/* 14 */ "Operand",
-	/* 15 */ "Node"
-};
-
-char *acpi_ut_get_descriptor_name(void *object)
-{
-
-	if (!object) {
-		return ("NULL OBJECT");
-	}
-
-	if (ACPI_GET_DESCRIPTOR_TYPE(object) > ACPI_DESC_TYPE_MAX) {
-		return (ACPI_CAST_PTR(char, acpi_gbl_bad_type));
-	}
-
-	return (ACPI_CAST_PTR(char,
-			      acpi_gbl_desc_type_names[ACPI_GET_DESCRIPTOR_TYPE
-						       (object)]));
-
-}
-
-/*******************************************************************************
- *
- * FUNCTION:    acpi_ut_get_reference_name
- *
- * PARAMETERS:  Object               - An ACPI reference object
- *
- * RETURN:      Pointer to a string
- *
- * DESCRIPTION: Decode a reference object sub-type to a string.
- *
- ******************************************************************************/
-
-/* Printable names of reference object sub-types */
-
-static const char *acpi_gbl_ref_class_names[] = {
-	/* 00 */ "Local",
-	/* 01 */ "Argument",
-	/* 02 */ "RefOf",
-	/* 03 */ "Index",
-	/* 04 */ "DdbHandle",
-	/* 05 */ "Named Object",
-	/* 06 */ "Debug"
-};
-
-const char *acpi_ut_get_reference_name(union acpi_operand_object *object)
-{
-	if (!object)
-		return "NULL Object";
-
-	if (ACPI_GET_DESCRIPTOR_TYPE(object) != ACPI_DESC_TYPE_OPERAND)
-		return "Not an Operand object";
-
-	if (object->common.type != ACPI_TYPE_LOCAL_REFERENCE)
-		return "Not a Reference object";
-
-	if (object->reference.class > ACPI_REFCLASS_MAX)
-		return "Unknown Reference class";
-
-	return acpi_gbl_ref_class_names[object->reference.class];
-}
-
-#if defined(ACPI_DEBUG_OUTPUT) || defined(ACPI_DEBUGGER)
-/*
- * Strings and procedures used for debug only
- */
-
-/*******************************************************************************
- *
- * FUNCTION:    acpi_ut_get_mutex_name
- *
- * PARAMETERS:  mutex_id        - The predefined ID for this mutex.
- *
- * RETURN:      String containing the name of the mutex. Always returns a valid
- *              pointer.
- *
- * DESCRIPTION: Translate a mutex ID into a name string (Debug only)
- *
- ******************************************************************************/
-
-char *acpi_ut_get_mutex_name(u32 mutex_id)
-{
-
-	if (mutex_id > ACPI_MAX_MUTEX) {
-		return ("Invalid Mutex ID");
-	}
-
-	return (acpi_gbl_mutex_names[mutex_id]);
-}
-
-/*******************************************************************************
- *
- * FUNCTION:    acpi_ut_get_notify_name
- *
- * PARAMETERS:  notify_value    - Value from the Notify() request
- *
- * RETURN:      String corresponding to the Notify Value.
- *
- * DESCRIPTION: Translate a Notify Value to a notify namestring.
- *
- ******************************************************************************/
-
-/* Names for Notify() values, used for debug output */
-
-static const char *acpi_gbl_notify_value_names[] = {
-	"Bus Check",
-	"Device Check",
-	"Device Wake",
-	"Eject Request",
-	"Device Check Light",
-	"Frequency Mismatch",
-	"Bus Mode Mismatch",
-	"Power Fault",
-	"Capabilities Check",
-	"Device PLD Check",
-	"Reserved",
-	"System Locality Update"
-};
-
-const char *acpi_ut_get_notify_name(u32 notify_value)
-{
-
-	if (notify_value <= ACPI_NOTIFY_MAX) {
-		return (acpi_gbl_notify_value_names[notify_value]);
-	} else if (notify_value <= ACPI_MAX_SYS_NOTIFY) {
-		return ("Reserved");
-	} else {		/* Greater or equal to 0x80 */
-
-		return ("**Device Specific**");
-	}
-}
-#endif
-
-/*******************************************************************************
- *
- * FUNCTION:    acpi_ut_valid_object_type
- *
- * PARAMETERS:  Type            - Object type to be validated
- *
- * RETURN:      TRUE if valid object type, FALSE otherwise
- *
- * DESCRIPTION: Validate an object type
- *
- ******************************************************************************/
-
-u8 acpi_ut_valid_object_type(acpi_object_type type)
-{
-
-	if (type > ACPI_TYPE_LOCAL_MAX) {
-
-		/* Note: Assumes all TYPEs are contiguous (external/local) */
-
-		return (FALSE);
-	}
-
-	return (TRUE);
-}
-
-/*******************************************************************************
- *
  * FUNCTION:    acpi_ut_init_globals
  *
  * PARAMETERS:  None
@@ -806,6 +323,7 @@
 	acpi_gbl_db_output_flags = ACPI_DB_CONSOLE_OUTPUT;
 	acpi_gbl_osi_data = 0;
 	acpi_gbl_osi_mutex = NULL;
+	acpi_gbl_reg_methods_executed = FALSE;
 
 	/* Hardware oriented */
 
diff --git a/drivers/acpi/apei/Kconfig b/drivers/acpi/apei/Kconfig
index fca34cc..9ecf6fe 100644
--- a/drivers/acpi/apei/Kconfig
+++ b/drivers/acpi/apei/Kconfig
@@ -21,6 +21,13 @@
 	  by firmware to produce more valuable hardware error
 	  information for Linux.
 
+config ACPI_APEI_PCIEAER
+	bool "APEI PCIe AER logging/recovering support"
+	depends on ACPI_APEI && PCIEAER
+	help
+	  PCIe AER errors may be reported via the APEI firmware-first mode.
+	  Turn on this option to enable the corresponding support.
+
 config ACPI_APEI_EINJ
 	tristate "APEI Error INJection (EINJ)"
 	depends on ACPI_APEI && DEBUG_FS
diff --git a/drivers/acpi/apei/cper.c b/drivers/acpi/apei/cper.c
index 31464a0..5d41894 100644
--- a/drivers/acpi/apei/cper.c
+++ b/drivers/acpi/apei/cper.c
@@ -29,6 +29,7 @@
 #include <linux/time.h>
 #include <linux/cper.h>
 #include <linux/acpi.h>
+#include <linux/aer.h>
 
 /*
  * CPER record ID need to be unique even after reboot, because record
@@ -70,8 +71,8 @@
  * If the output length is longer than 80, multiple line will be
  * printed, with @pfx is printed at the beginning of each line.
  */
-static void cper_print_bits(const char *pfx, unsigned int bits,
-			    const char *strs[], unsigned int strs_size)
+void cper_print_bits(const char *pfx, unsigned int bits,
+		     const char *strs[], unsigned int strs_size)
 {
 	int i, len = 0;
 	const char *str;
@@ -81,6 +82,8 @@
 		if (!(bits & (1U << i)))
 			continue;
 		str = strs[i];
+		if (!str)
+			continue;
 		if (len && len + strlen(str) + 2 > 80) {
 			printk("%s\n", buf);
 			len = 0;
@@ -243,7 +246,8 @@
 	"root complex event collector",
 };
 
-static void cper_print_pcie(const char *pfx, const struct cper_sec_pcie *pcie)
+static void cper_print_pcie(const char *pfx, const struct cper_sec_pcie *pcie,
+			    const struct acpi_hest_generic_data *gdata)
 {
 	if (pcie->validation_bits & CPER_PCIE_VALID_PORT_TYPE)
 		printk("%s""port_type: %d, %s\n", pfx, pcie->port_type,
@@ -276,6 +280,12 @@
 		printk(
 	"%s""bridge: secondary_status: 0x%04x, control: 0x%04x\n",
 	pfx, pcie->bridge.secondary_status, pcie->bridge.control);
+#ifdef CONFIG_ACPI_APEI_PCIEAER
+	if (pcie->validation_bits & CPER_PCIE_VALID_AER_INFO) {
+		struct aer_capability_regs *aer_regs = (void *)pcie->aer_info;
+		cper_print_aer(pfx, gdata->error_severity, aer_regs);
+	}
+#endif
 }
 
 static const char *apei_estatus_section_flag_strs[] = {
@@ -322,7 +332,7 @@
 		struct cper_sec_pcie *pcie = (void *)(gdata + 1);
 		printk("%s""section_type: PCIe error\n", pfx);
 		if (gdata->error_data_length >= sizeof(*pcie))
-			cper_print_pcie(pfx, pcie);
+			cper_print_pcie(pfx, pcie, gdata);
 		else
 			goto err_section_too_small;
 	} else
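
cper_print_bits() is made non-static here so the AER decoding code can reuse it, and it learns to skip NULL entries so a sparse bit-name table can leave unknown bits unnamed. A userspace sketch of the same walk-the-set-bits-and-wrap-near-80-columns logic is below; print_bits and the sample table are illustrative stand-ins, not the kernel function itself.

#include <stdio.h>
#include <string.h>

/* Walk the set bits, skip unnamed ones, wrap the output near 80 columns. */
static void print_bits(const char *pfx, unsigned int bits,
		       const char *strs[], unsigned int strs_size)
{
	unsigned int i;
	int len = 0;
	char buf[84];

	for (i = 0; i < strs_size; i++) {
		const char *str = strs[i];

		if (!(bits & (1U << i)))
			continue;
		if (!str)			/* unnamed ("unknown") bit */
			continue;
		if (len && len + (int)strlen(str) + 2 > 80) {
			printf("%s\n", buf);
			len = 0;
		}
		if (!len)
			len = snprintf(buf, sizeof(buf), "%s%s", pfx, str);
		else
			len += snprintf(buf + len, sizeof(buf) - len,
					", %s", str);
	}
	if (len)
		printf("%s\n", buf);
}

int main(void)
{
	const char *names[] = { "Receiver Error", NULL, "Bad TLP", "Bad DLLP" };

	print_bits("aer: ", 0x0d, names, 4);	/* bits 0, 2 and 3 are set */
	return 0;
}
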
diff --git a/drivers/acpi/apei/erst-dbg.c b/drivers/acpi/apei/erst-dbg.c
index de73caf..a4cfb64 100644
--- a/drivers/acpi/apei/erst-dbg.c
+++ b/drivers/acpi/apei/erst-dbg.c
@@ -43,12 +43,27 @@
 
 static int erst_dbg_open(struct inode *inode, struct file *file)
 {
+	int rc, *pos;
+
 	if (erst_disable)
 		return -ENODEV;
 
+	pos = (int *)&file->private_data;
+
+	rc = erst_get_record_id_begin(pos);
+	if (rc)
+		return rc;
+
 	return nonseekable_open(inode, file);
 }
 
+static int erst_dbg_release(struct inode *inode, struct file *file)
+{
+	erst_get_record_id_end();
+
+	return 0;
+}
+
 static long erst_dbg_ioctl(struct file *f, unsigned int cmd, unsigned long arg)
 {
 	int rc;
@@ -79,18 +94,20 @@
 static ssize_t erst_dbg_read(struct file *filp, char __user *ubuf,
 			     size_t usize, loff_t *off)
 {
-	int rc;
+	int rc, *pos;
 	ssize_t len = 0;
 	u64 id;
 
-	if (*off != 0)
+	if (*off)
 		return -EINVAL;
 
 	if (mutex_lock_interruptible(&erst_dbg_mutex) != 0)
 		return -EINTR;
 
+	pos = (int *)&filp->private_data;
+
 retry_next:
-	rc = erst_get_next_record_id(&id);
+	rc = erst_get_record_id_next(pos, &id);
 	if (rc)
 		goto out;
 	/* no more record */
@@ -181,6 +198,7 @@
 static const struct file_operations erst_dbg_ops = {
 	.owner		= THIS_MODULE,
 	.open		= erst_dbg_open,
+	.release	= erst_dbg_release,
 	.read		= erst_dbg_read,
 	.write		= erst_dbg_write,
 	.unlocked_ioctl	= erst_dbg_ioctl,
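
The debugfs reader now brackets its reads with erst_get_record_id_begin()/erst_get_record_id_next()/erst_get_record_id_end(), keeping a small integer cursor in file->private_data so each open file walks the shared ID cache independently. A simplified userspace sketch of that begin/next/end contract follows; record_id_begin and friends are stand-ins, and the locking around the pin count is only hinted at in comments.

#include <stdio.h>

/* Illustrative ID store; the real code fills this lazily from firmware. */
static unsigned long long ids[] = { 101, 104, 109 };
static const int ids_len = 3;
static int refcount;			/* keeps the cache pinned while open */

static int record_id_begin(int *pos)
{
	refcount++;			/* the real code does this under a mutex */
	*pos = 0;
	return 0;
}

static int record_id_next(int *pos, unsigned long long *id)
{
	if (*pos >= ids_len) {
		*id = 0;		/* stand-in for APEI_ERST_INVALID_RECORD_ID */
		return 0;
	}
	*id = ids[(*pos)++];
	return 0;
}

static void record_id_end(void)
{
	refcount--;			/* last reader allows compaction */
}

int main(void)
{
	unsigned long long id;
	int pos;

	record_id_begin(&pos);		/* open(): pin the cache, reset cursor */
	while (record_id_next(&pos, &id) == 0 && id)
		printf("record %llu\n", id);
	record_id_end();		/* release(): unpin the cache */
	return 0;
}
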
diff --git a/drivers/acpi/apei/erst.c b/drivers/acpi/apei/erst.c
index cf6db6b7..8ff8c32 100644
--- a/drivers/acpi/apei/erst.c
+++ b/drivers/acpi/apei/erst.c
@@ -429,6 +429,22 @@
 }
 EXPORT_SYMBOL_GPL(erst_get_record_count);
 
+#define ERST_RECORD_ID_CACHE_SIZE_MIN	16
+#define ERST_RECORD_ID_CACHE_SIZE_MAX	1024
+
+struct erst_record_id_cache {
+	struct mutex lock;
+	u64 *entries;
+	int len;
+	int size;
+	int refcount;
+};
+
+static struct erst_record_id_cache erst_record_id_cache = {
+	.lock = __MUTEX_INITIALIZER(erst_record_id_cache.lock),
+	.refcount = 0,
+};
+
 static int __erst_get_next_record_id(u64 *record_id)
 {
 	struct apei_exec_context ctx;
@@ -443,26 +459,179 @@
 	return 0;
 }
 
+int erst_get_record_id_begin(int *pos)
+{
+	int rc;
+
+	if (erst_disable)
+		return -ENODEV;
+
+	rc = mutex_lock_interruptible(&erst_record_id_cache.lock);
+	if (rc)
+		return rc;
+	erst_record_id_cache.refcount++;
+	mutex_unlock(&erst_record_id_cache.lock);
+
+	*pos = 0;
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(erst_get_record_id_begin);
+
+/* erst_record_id_cache.lock must be held by caller */
+static int __erst_record_id_cache_add_one(void)
+{
+	u64 id, prev_id, first_id;
+	int i, rc;
+	u64 *entries;
+	unsigned long flags;
+
+	id = prev_id = first_id = APEI_ERST_INVALID_RECORD_ID;
+retry:
+	raw_spin_lock_irqsave(&erst_lock, flags);
+	rc = __erst_get_next_record_id(&id);
+	raw_spin_unlock_irqrestore(&erst_lock, flags);
+	if (rc == -ENOENT)
+		return 0;
+	if (rc)
+		return rc;
+	if (id == APEI_ERST_INVALID_RECORD_ID)
+		return 0;
+	/* stop if the ID did not advance or has looped back to the first one */
+	if (id == prev_id || id == first_id)
+		return 0;
+	if (first_id == APEI_ERST_INVALID_RECORD_ID)
+		first_id = id;
+	prev_id = id;
+
+	entries = erst_record_id_cache.entries;
+	for (i = 0; i < erst_record_id_cache.len; i++) {
+		if (entries[i] == id)
+			break;
+	}
+	/* record id already in cache, try next */
+	if (i < erst_record_id_cache.len)
+		goto retry;
+	if (erst_record_id_cache.len >= erst_record_id_cache.size) {
+		int new_size, alloc_size;
+		u64 *new_entries;
+
+		new_size = erst_record_id_cache.size * 2;
+		new_size = clamp_val(new_size, ERST_RECORD_ID_CACHE_SIZE_MIN,
+				     ERST_RECORD_ID_CACHE_SIZE_MAX);
+		if (new_size <= erst_record_id_cache.size) {
+			if (printk_ratelimit())
+				pr_warning(FW_WARN ERST_PFX
+					   "too many record IDs!\n");
+			return 0;
+		}
+		alloc_size = new_size * sizeof(entries[0]);
+		if (alloc_size < PAGE_SIZE)
+			new_entries = kmalloc(alloc_size, GFP_KERNEL);
+		else
+			new_entries = vmalloc(alloc_size);
+		if (!new_entries)
+			return -ENOMEM;
+		memcpy(new_entries, entries,
+		       erst_record_id_cache.len * sizeof(entries[0]));
+		if (erst_record_id_cache.size < PAGE_SIZE)
+			kfree(entries);
+		else
+			vfree(entries);
+		erst_record_id_cache.entries = entries = new_entries;
+		erst_record_id_cache.size = new_size;
+	}
+	entries[i] = id;
+	erst_record_id_cache.len++;
+
+	return 1;
+}
+
 /*
  * Get the record ID of an existing error record on the persistent
  * storage. If there is no error record on the persistent storage, the
  * returned record_id is APEI_ERST_INVALID_RECORD_ID.
  */
-int erst_get_next_record_id(u64 *record_id)
+int erst_get_record_id_next(int *pos, u64 *record_id)
 {
-	int rc;
-	unsigned long flags;
+	int rc = 0;
+	u64 *entries;
 
 	if (erst_disable)
 		return -ENODEV;
 
-	raw_spin_lock_irqsave(&erst_lock, flags);
-	rc = __erst_get_next_record_id(record_id);
-	raw_spin_unlock_irqrestore(&erst_lock, flags);
+	/* must be enclosed by erst_get_record_id_begin/end */
+	BUG_ON(!erst_record_id_cache.refcount);
+	BUG_ON(*pos < 0 || *pos > erst_record_id_cache.len);
+
+	mutex_lock(&erst_record_id_cache.lock);
+	entries = erst_record_id_cache.entries;
+	for (; *pos < erst_record_id_cache.len; (*pos)++)
+		if (entries[*pos] != APEI_ERST_INVALID_RECORD_ID)
+			break;
+	/* found next record id in cache */
+	if (*pos < erst_record_id_cache.len) {
+		*record_id = entries[*pos];
+		(*pos)++;
+		goto out_unlock;
+	}
+
+	/* Try to add one more record ID to cache */
+	rc = __erst_record_id_cache_add_one();
+	if (rc < 0)
+		goto out_unlock;
+	/* successfully add one new ID */
+	if (rc == 1) {
+		*record_id = erst_record_id_cache.entries[*pos];
+		(*pos)++;
+		rc = 0;
+	} else {
+		*pos = -1;
+		*record_id = APEI_ERST_INVALID_RECORD_ID;
+	}
+out_unlock:
+	mutex_unlock(&erst_record_id_cache.lock);
 
 	return rc;
 }
-EXPORT_SYMBOL_GPL(erst_get_next_record_id);
+EXPORT_SYMBOL_GPL(erst_get_record_id_next);
+
+/* erst_record_id_cache.lock must be held by caller */
+static void __erst_record_id_cache_compact(void)
+{
+	int i, wpos = 0;
+	u64 *entries;
+
+	if (erst_record_id_cache.refcount)
+		return;
+
+	entries = erst_record_id_cache.entries;
+	for (i = 0; i < erst_record_id_cache.len; i++) {
+		if (entries[i] == APEI_ERST_INVALID_RECORD_ID)
+			continue;
+		if (wpos != i)
+			memcpy(&entries[wpos], &entries[i], sizeof(entries[i]));
+		wpos++;
+	}
+	erst_record_id_cache.len = wpos;
+}
+
+void erst_get_record_id_end(void)
+{
+	/*
+	 * The caller should already have detected erst_disable != 0 via
+	 * the return value of erst_get_record_id_begin/next, so this
+	 * function must not be called when erst_disable != 0.
+	 */
+	BUG_ON(erst_disable);
+
+	mutex_lock(&erst_record_id_cache.lock);
+	erst_record_id_cache.refcount--;
+	BUG_ON(erst_record_id_cache.refcount < 0);
+	__erst_record_id_cache_compact();
+	mutex_unlock(&erst_record_id_cache.lock);
+}
+EXPORT_SYMBOL_GPL(erst_get_record_id_end);
 
 static int __erst_write_to_storage(u64 offset)
 {
@@ -703,56 +872,34 @@
 }
 EXPORT_SYMBOL_GPL(erst_read);
 
-/*
- * If return value > buflen, the buffer size is not big enough,
- * else if return value = 0, there is no more record to read,
- * else if return value < 0, something goes wrong,
- * else everything is OK, and return value is record length
- */
-ssize_t erst_read_next(struct cper_record_header *record, size_t buflen)
-{
-	int rc;
-	ssize_t len;
-	unsigned long flags;
-	u64 record_id;
-
-	if (erst_disable)
-		return -ENODEV;
-
-	raw_spin_lock_irqsave(&erst_lock, flags);
-	rc = __erst_get_next_record_id(&record_id);
-	if (rc) {
-		raw_spin_unlock_irqrestore(&erst_lock, flags);
-		return rc;
-	}
-	/* no more record */
-	if (record_id == APEI_ERST_INVALID_RECORD_ID) {
-		raw_spin_unlock_irqrestore(&erst_lock, flags);
-		return 0;
-	}
-
-	len = __erst_read(record_id, record, buflen);
-	raw_spin_unlock_irqrestore(&erst_lock, flags);
-
-	return len;
-}
-EXPORT_SYMBOL_GPL(erst_read_next);
-
 int erst_clear(u64 record_id)
 {
-	int rc;
+	int rc, i;
 	unsigned long flags;
+	u64 *entries;
 
 	if (erst_disable)
 		return -ENODEV;
 
+	rc = mutex_lock_interruptible(&erst_record_id_cache.lock);
+	if (rc)
+		return rc;
 	raw_spin_lock_irqsave(&erst_lock, flags);
 	if (erst_erange.attr & ERST_RANGE_NVRAM)
 		rc = __erst_clear_from_nvram(record_id);
 	else
 		rc = __erst_clear_from_storage(record_id);
 	raw_spin_unlock_irqrestore(&erst_lock, flags);
-
+	if (rc)
+		goto out;
+	entries = erst_record_id_cache.entries;
+	for (i = 0; i < erst_record_id_cache.len; i++) {
+		if (entries[i] == record_id)
+			entries[i] = APEI_ERST_INVALID_RECORD_ID;
+	}
+	__erst_record_id_cache_compact();
+out:
+	mutex_unlock(&erst_record_id_cache.lock);
 	return rc;
 }
 EXPORT_SYMBOL_GPL(erst_clear);
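
The record ID cache introduced above grows by doubling, clamped between ERST_RECORD_ID_CACHE_SIZE_MIN and _MAX, and erst_clear() marks cleared IDs invalid so a later compaction pass can squeeze them out once no reader holds the cache. A standalone sketch of just the growth-clamp and compaction steps follows; realloc() stands in for the kernel's kmalloc/vmalloc choice and the names are illustrative.

#include <stdio.h>
#include <stdlib.h>

#define CACHE_SIZE_MIN	16
#define CACHE_SIZE_MAX	1024
#define INVALID_ID	0xffffffffffffffffULL

/* Userspace analog of clamp_val(). */
static int clamp_int(int v, int lo, int hi)
{
	return v < lo ? lo : (v > hi ? hi : v);
}

struct id_cache {
	unsigned long long *entries;
	int len;
	int size;
};

/* Grow by doubling, clamped to [MIN, MAX]; refuse to grow past MAX. */
static int cache_grow(struct id_cache *c)
{
	int new_size = clamp_int(c->size * 2, CACHE_SIZE_MIN, CACHE_SIZE_MAX);
	unsigned long long *n;

	if (new_size <= c->size)
		return -1;		/* cache full: "too many record IDs" */
	n = realloc(c->entries, new_size * sizeof(*n));
	if (!n)
		return -1;
	c->entries = n;
	c->size = new_size;
	return 0;
}

/* Squeeze out entries that were marked invalid by a clear operation. */
static void cache_compact(struct id_cache *c)
{
	int i, wpos = 0;

	for (i = 0; i < c->len; i++)
		if (c->entries[i] != INVALID_ID)
			c->entries[wpos++] = c->entries[i];
	c->len = wpos;
}

int main(void)
{
	struct id_cache c = { NULL, 0, 0 };

	cache_grow(&c);
	c.entries[c.len++] = 7;
	c.entries[c.len++] = INVALID_ID;	/* pretend this record was cleared */
	c.entries[c.len++] = 9;
	cache_compact(&c);
	printf("len=%d first=%llu last=%llu\n", c.len,
	       c.entries[0], c.entries[c.len - 1]);
	free(c.entries);
	return 0;
}
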
diff --git a/drivers/acpi/debugfs.c b/drivers/acpi/debugfs.c
index 5df67f1..384f7ab 100644
--- a/drivers/acpi/debugfs.c
+++ b/drivers/acpi/debugfs.c
@@ -26,7 +26,9 @@
 			size_t count, loff_t *ppos)
 {
 	static char *buf;
-	static int uncopied_bytes;
+	static u32 max_size;
+	static u32 uncopied_bytes;
+
 	struct acpi_table_header table;
 	acpi_status status;
 
@@ -37,19 +39,24 @@
 		if (copy_from_user(&table, user_buf,
 				   sizeof(struct acpi_table_header)))
 			return -EFAULT;
-		uncopied_bytes = table.length;
-		buf = kzalloc(uncopied_bytes, GFP_KERNEL);
+		uncopied_bytes = max_size = table.length;
+		buf = kzalloc(max_size, GFP_KERNEL);
 		if (!buf)
 			return -ENOMEM;
 	}
 
-	if (uncopied_bytes < count) {
-		kfree(buf);
+	if (buf == NULL)
 		return -EINVAL;
-	}
+
+	if ((*ppos > max_size) ||
+	    (*ppos + count > max_size) ||
+	    (*ppos + count < count) ||
+	    (count > uncopied_bytes))
+		return -EINVAL;
 
 	if (copy_from_user(buf + (*ppos), user_buf, count)) {
 		kfree(buf);
+		buf = NULL;
 		return -EFAULT;
 	}
 
@@ -59,6 +66,7 @@
 	if (!uncopied_bytes) {
 		status = acpi_install_method(buf);
 		kfree(buf);
+		buf = NULL;
 		if (ACPI_FAILURE(status))
 			return -EINVAL;
 		add_taint(TAINT_OVERRIDDEN_ACPI_TABLE);
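
The custom_method write path above now validates the user-supplied offset and count against the table size before copying, including a clause that guards against unsigned wrap-around. A small sketch of that bounds check in isolation follows; write_ok and its parameters are illustrative names.

#include <stdio.h>

typedef unsigned int u32;

/* Validate a user-supplied (offset, count) pair against a fixed-size buffer. */
static int write_ok(u32 pos, u32 count, u32 max_size, u32 uncopied)
{
	if (pos > max_size)
		return 0;		/* starts past the end */
	if (pos + count > max_size)
		return 0;		/* runs past the end */
	if (pos + count < count)
		return 0;		/* unsigned wrap-around */
	if (count > uncopied)
		return 0;		/* more than we still expect */
	return 1;
}

int main(void)
{
	printf("%d\n", write_ok(0, 64, 128, 128));		/* 1: fits */
	printf("%d\n", write_ok(100, 64, 128, 128));		/* 0: runs past the end */
	printf("%d\n", write_ok(0xfffffff0u, 32, 128, 128));	/* 0: bogus offset */
	return 0;
}
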
diff --git a/drivers/acpi/nvs.c b/drivers/acpi/nvs.c
index fa5a1df..096787b 100644
--- a/drivers/acpi/nvs.c
+++ b/drivers/acpi/nvs.c
@@ -26,6 +26,7 @@
 	unsigned int size;
 	void *kaddr;
 	void *data;
+	bool unmap;
 	struct list_head node;
 };
 
@@ -44,6 +45,9 @@
 {
 	struct nvs_page *entry, *next;
 
+	pr_info("PM: Registering ACPI NVS region at %lx (%ld bytes)\n",
+		start, size);
+
 	while (size > 0) {
 		unsigned int nr_bytes;
 
@@ -81,7 +85,13 @@
 			free_page((unsigned long)entry->data);
 			entry->data = NULL;
 			if (entry->kaddr) {
-				iounmap(entry->kaddr);
+				if (entry->unmap) {
+					iounmap(entry->kaddr);
+					entry->unmap = false;
+				} else {
+					acpi_os_unmap_memory(entry->kaddr,
+							     entry->size);
+				}
 				entry->kaddr = NULL;
 			}
 		}
@@ -115,8 +125,14 @@
 
 	list_for_each_entry(entry, &nvs_list, node)
 		if (entry->data) {
-			entry->kaddr = acpi_os_ioremap(entry->phys_start,
-						    entry->size);
+			unsigned long phys = entry->phys_start;
+			unsigned int size = entry->size;
+
+			entry->kaddr = acpi_os_get_iomem(phys, size);
+			if (!entry->kaddr) {
+				entry->kaddr = acpi_os_ioremap(phys, size);
+				entry->unmap = !!entry->kaddr;
+			}
 			if (!entry->kaddr) {
 				suspend_nvs_free();
 				return -ENOMEM;
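
Each NVS page now records whether its kernel address came from a fresh acpi_os_ioremap() (to be released with iounmap()) or from the shared mapping cache via acpi_os_get_iomem() (to be released with acpi_os_unmap_memory()). A userspace sketch of that "remember how you mapped it" flag follows; shared_get/shared_put and private_map/private_unmap are stand-ins for the two acquisition paths.

#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

struct region {
	void *kaddr;
	bool unmap;	/* true: we created the mapping and must tear it down */
};

/* Stand-ins for the two acquisition paths and their matching release calls. */
static void *shared_get(void)        { return NULL; }	/* cache miss in this demo */
static void  shared_put(void *p)     { (void)p; }
static void *private_map(void)       { return malloc(64); }
static void  private_unmap(void *p)  { free(p); }

static int region_map(struct region *r)
{
	r->kaddr = shared_get();
	if (!r->kaddr) {
		r->kaddr = private_map();
		r->unmap = (r->kaddr != NULL);
	}
	return r->kaddr ? 0 : -1;
}

static void region_unmap(struct region *r)
{
	if (!r->kaddr)
		return;
	if (r->unmap)
		private_unmap(r->kaddr);	/* matches private_map() */
	else
		shared_put(r->kaddr);		/* matches shared_get() */
	r->kaddr = NULL;
	r->unmap = false;
}

int main(void)
{
	struct region r = { NULL, false };

	if (region_map(&r) == 0)
		printf("mapped via the %s path\n", r.unmap ? "private" : "shared");
	region_unmap(&r);
	return 0;
}
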
diff --git a/drivers/acpi/osl.c b/drivers/acpi/osl.c
index c90c76a..45c6ac8 100644
--- a/drivers/acpi/osl.c
+++ b/drivers/acpi/osl.c
@@ -76,7 +76,6 @@
 extern char line_buf[80];
 #endif				/*ENABLE_DEBUGGER */
 
-static unsigned int acpi_irq_irq;
 static acpi_osd_handler acpi_irq_handler;
 static void *acpi_irq_context;
 static struct workqueue_struct *kacpid_wq;
@@ -105,11 +104,11 @@
 	void __iomem *virt;
 	acpi_physical_address phys;
 	acpi_size size;
-	struct kref ref;
+	unsigned long refcount;
 };
 
 static LIST_HEAD(acpi_ioremaps);
-static DEFINE_SPINLOCK(acpi_ioremap_lock);
+static DEFINE_MUTEX(acpi_ioremap_lock);
 
 static void __init acpi_osi_setup_late(void);
 
@@ -285,6 +284,22 @@
 	return NULL;
 }
 
+void __iomem *acpi_os_get_iomem(acpi_physical_address phys, unsigned int size)
+{
+	struct acpi_ioremap *map;
+	void __iomem *virt = NULL;
+
+	mutex_lock(&acpi_ioremap_lock);
+	map = acpi_map_lookup(phys, size);
+	if (map) {
+		virt = map->virt + (phys - map->phys);
+		map->refcount++;
+	}
+	mutex_unlock(&acpi_ioremap_lock);
+	return virt;
+}
+EXPORT_SYMBOL_GPL(acpi_os_get_iomem);
+
 /* Must be called with 'acpi_ioremap_lock' or RCU read lock held. */
 static struct acpi_ioremap *
 acpi_map_lookup_virt(void __iomem *virt, acpi_size size)
@@ -302,8 +317,7 @@
 void __iomem *__init_refok
 acpi_os_map_memory(acpi_physical_address phys, acpi_size size)
 {
-	struct acpi_ioremap *map, *tmp_map;
-	unsigned long flags;
+	struct acpi_ioremap *map;
 	void __iomem *virt;
 	acpi_physical_address pg_off;
 	acpi_size pg_sz;
@@ -316,14 +330,25 @@
 	if (!acpi_gbl_permanent_mmap)
 		return __acpi_map_table((unsigned long)phys, size);
 
+	mutex_lock(&acpi_ioremap_lock);
+	/* Check if there's a suitable mapping already. */
+	map = acpi_map_lookup(phys, size);
+	if (map) {
+		map->refcount++;
+		goto out;
+	}
+
 	map = kzalloc(sizeof(*map), GFP_KERNEL);
-	if (!map)
+	if (!map) {
+		mutex_unlock(&acpi_ioremap_lock);
 		return NULL;
+	}
 
 	pg_off = round_down(phys, PAGE_SIZE);
 	pg_sz = round_up(phys + size, PAGE_SIZE) - pg_off;
 	virt = acpi_os_ioremap(pg_off, pg_sz);
 	if (!virt) {
+		mutex_unlock(&acpi_ioremap_lock);
 		kfree(map);
 		return NULL;
 	}
@@ -332,62 +357,51 @@
 	map->virt = virt;
 	map->phys = pg_off;
 	map->size = pg_sz;
-	kref_init(&map->ref);
+	map->refcount = 1;
 
-	spin_lock_irqsave(&acpi_ioremap_lock, flags);
-	/* Check if page has already been mapped. */
-	tmp_map = acpi_map_lookup(phys, size);
-	if (tmp_map) {
-		kref_get(&tmp_map->ref);
-		spin_unlock_irqrestore(&acpi_ioremap_lock, flags);
-		iounmap(map->virt);
-		kfree(map);
-		return tmp_map->virt + (phys - tmp_map->phys);
-	}
 	list_add_tail_rcu(&map->list, &acpi_ioremaps);
-	spin_unlock_irqrestore(&acpi_ioremap_lock, flags);
 
+ out:
+	mutex_unlock(&acpi_ioremap_lock);
 	return map->virt + (phys - map->phys);
 }
 EXPORT_SYMBOL_GPL(acpi_os_map_memory);
 
-static void acpi_kref_del_iomap(struct kref *ref)
+static void acpi_os_drop_map_ref(struct acpi_ioremap *map)
 {
-	struct acpi_ioremap *map;
+	if (!--map->refcount)
+		list_del_rcu(&map->list);
+}
 
-	map = container_of(ref, struct acpi_ioremap, ref);
-	list_del_rcu(&map->list);
+static void acpi_os_map_cleanup(struct acpi_ioremap *map)
+{
+	if (!map->refcount) {
+		synchronize_rcu();
+		iounmap(map->virt);
+		kfree(map);
+	}
 }
 
 void __ref acpi_os_unmap_memory(void __iomem *virt, acpi_size size)
 {
 	struct acpi_ioremap *map;
-	unsigned long flags;
-	int del;
 
 	if (!acpi_gbl_permanent_mmap) {
 		__acpi_unmap_table(virt, size);
 		return;
 	}
 
-	spin_lock_irqsave(&acpi_ioremap_lock, flags);
+	mutex_lock(&acpi_ioremap_lock);
 	map = acpi_map_lookup_virt(virt, size);
 	if (!map) {
-		spin_unlock_irqrestore(&acpi_ioremap_lock, flags);
-		printk(KERN_ERR PREFIX "%s: bad address %p\n", __func__, virt);
-		dump_stack();
+		mutex_unlock(&acpi_ioremap_lock);
+		WARN(true, PREFIX "%s: bad address %p\n", __func__, virt);
 		return;
 	}
+	acpi_os_drop_map_ref(map);
+	mutex_unlock(&acpi_ioremap_lock);
 
-	del = kref_put(&map->ref, acpi_kref_del_iomap);
-	spin_unlock_irqrestore(&acpi_ioremap_lock, flags);
-
-	if (!del)
-		return;
-
-	synchronize_rcu();
-	iounmap(map->virt);
-	kfree(map);
+	acpi_os_map_cleanup(map);
 }
 EXPORT_SYMBOL_GPL(acpi_os_unmap_memory);
 
@@ -397,7 +411,7 @@
 		__acpi_unmap_table(virt, size);
 }
 
-int acpi_os_map_generic_address(struct acpi_generic_address *addr)
+static int acpi_os_map_generic_address(struct acpi_generic_address *addr)
 {
 	void __iomem *virt;
 
@@ -413,13 +427,10 @@
 
 	return 0;
 }
-EXPORT_SYMBOL_GPL(acpi_os_map_generic_address);
 
-void acpi_os_unmap_generic_address(struct acpi_generic_address *addr)
+static void acpi_os_unmap_generic_address(struct acpi_generic_address *addr)
 {
-	void __iomem *virt;
-	unsigned long flags;
-	acpi_size size = addr->bit_width / 8;
+	struct acpi_ioremap *map;
 
 	if (addr->space_id != ACPI_ADR_SPACE_SYSTEM_MEMORY)
 		return;
@@ -427,13 +438,17 @@
 	if (!addr->address || !addr->bit_width)
 		return;
 
-	spin_lock_irqsave(&acpi_ioremap_lock, flags);
-	virt = acpi_map_vaddr_lookup(addr->address, size);
-	spin_unlock_irqrestore(&acpi_ioremap_lock, flags);
+	mutex_lock(&acpi_ioremap_lock);
+	map = acpi_map_lookup(addr->address, addr->bit_width / 8);
+	if (!map) {
+		mutex_unlock(&acpi_ioremap_lock);
+		return;
+	}
+	acpi_os_drop_map_ref(map);
+	mutex_unlock(&acpi_ioremap_lock);
 
-	acpi_os_unmap_memory(virt, size);
+	acpi_os_map_cleanup(map);
 }
-EXPORT_SYMBOL_GPL(acpi_os_unmap_generic_address);
 
 #ifdef ACPI_FUTURE_USAGE
 acpi_status
@@ -516,11 +531,15 @@
 	acpi_irq_stats_init();
 
 	/*
-	 * Ignore the GSI from the core, and use the value in our copy of the
-	 * FADT. It may not be the same if an interrupt source override exists
-	 * for the SCI.
+	 * ACPI interrupts other than the SCI in our copy of the FADT are
+	 * not supported.
 	 */
-	gsi = acpi_gbl_FADT.sci_interrupt;
+	if (gsi != acpi_gbl_FADT.sci_interrupt)
+		return AE_BAD_PARAMETER;
+
+	if (acpi_irq_handler)
+		return AE_ALREADY_ACQUIRED;
+
 	if (acpi_gsi_to_irq(gsi, &irq) < 0) {
 		printk(KERN_ERR PREFIX "SCI (ACPI GSI %d) not registered\n",
 		       gsi);
@@ -531,20 +550,20 @@
 	acpi_irq_context = context;
 	if (request_irq(irq, acpi_irq, IRQF_SHARED, "acpi", acpi_irq)) {
 		printk(KERN_ERR PREFIX "SCI (IRQ%d) allocation failed\n", irq);
+		acpi_irq_handler = NULL;
 		return AE_NOT_ACQUIRED;
 	}
-	acpi_irq_irq = irq;
 
 	return AE_OK;
 }
 
 acpi_status acpi_os_remove_interrupt_handler(u32 irq, acpi_osd_handler handler)
 {
-	if (irq) {
-		free_irq(irq, acpi_irq);
-		acpi_irq_handler = NULL;
-		acpi_irq_irq = 0;
-	}
+	if (irq != acpi_gbl_FADT.sci_interrupt)
+		return AE_BAD_PARAMETER;
+
+	free_irq(irq, acpi_irq);
+	acpi_irq_handler = NULL;
 
 	return AE_OK;
 }
@@ -1603,7 +1622,7 @@
 acpi_status acpi_os_terminate(void)
 {
 	if (acpi_irq_handler) {
-		acpi_os_remove_interrupt_handler(acpi_irq_irq,
+		acpi_os_remove_interrupt_handler(acpi_gbl_FADT.sci_interrupt,
 						 acpi_irq_handler);
 	}
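
The osl.c rework drops the kref and the spinlock in favour of a plain refcount protected by a mutex: lookups bump the count, the last put unlinks the entry while the lock is held, and the expensive iounmap()/kfree() runs only after the lock is released. A simplified userspace analogue of that scheme follows; the pthread mutex and malloc/free are stand-ins for the kernel primitives, and the names are illustrative.

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct mapping {
	unsigned long phys;
	void *virt;
	unsigned long refcount;
	struct mapping *next;
};

static pthread_mutex_t map_lock = PTHREAD_MUTEX_INITIALIZER;
static struct mapping *maps;

/* Reuse an existing mapping covering @phys, otherwise create a new one. */
static void *map_acquire(unsigned long phys)
{
	struct mapping *m;

	pthread_mutex_lock(&map_lock);
	for (m = maps; m; m = m->next) {
		if (m->phys == phys) {
			m->refcount++;
			pthread_mutex_unlock(&map_lock);
			return m->virt;
		}
	}
	m = calloc(1, sizeof(*m));
	if (!m) {
		pthread_mutex_unlock(&map_lock);
		return NULL;
	}
	m->phys = phys;
	m->virt = malloc(4096);		/* stand-in for ioremap() */
	m->refcount = 1;
	m->next = maps;
	maps = m;
	pthread_mutex_unlock(&map_lock);
	return m->virt;
}

/* Drop a reference; unlink under the lock, tear down outside it. */
static void map_release(unsigned long phys)
{
	struct mapping **pp, *victim = NULL;

	pthread_mutex_lock(&map_lock);
	for (pp = &maps; *pp; pp = &(*pp)->next) {
		if ((*pp)->phys != phys)
			continue;
		if (--(*pp)->refcount == 0) {
			victim = *pp;
			*pp = victim->next;
		}
		break;
	}
	pthread_mutex_unlock(&map_lock);

	if (victim) {			/* stand-in for iounmap() + kfree() */
		free(victim->virt);
		free(victim);
	}
}

int main(void)
{
	void *a = map_acquire(0x1000);
	void *b = map_acquire(0x1000);	/* same mapping, refcount is now 2 */

	printf("shared mapping: %s\n", a == b ? "yes" : "no");
	map_release(0x1000);
	map_release(0x1000);		/* last put frees the mapping */
	return 0;
}
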
 
diff --git a/drivers/block/floppy.c b/drivers/block/floppy.c
index b9ba04f..77fc76f 100644
--- a/drivers/block/floppy.c
+++ b/drivers/block/floppy.c
@@ -3281,7 +3281,7 @@
 			struct block_device *bdev = opened_bdev[cnt];
 			if (!bdev || ITYPE(drive_state[cnt].fd_device) != type)
 				continue;
-			__invalidate_device(bdev);
+			__invalidate_device(bdev, true);
 		}
 		mutex_unlock(&open_lock);
 	} else {
diff --git a/drivers/char/agp/intel-agp.h b/drivers/char/agp/intel-agp.h
index c195bfe..5feebe2 100644
--- a/drivers/char/agp/intel-agp.h
+++ b/drivers/char/agp/intel-agp.h
@@ -130,6 +130,7 @@
 #define INTEL_GMCH_GMS_STOLEN_352M	(0xd << 4)
 
 #define I915_IFPADDR    0x60
+#define I830_HIC        0x70
 
 /* Intel 965G registers */
 #define I965_MSAC 0x62
diff --git a/drivers/char/agp/intel-gtt.c b/drivers/char/agp/intel-gtt.c
index fab3d32..0d09b53 100644
--- a/drivers/char/agp/intel-gtt.c
+++ b/drivers/char/agp/intel-gtt.c
@@ -21,6 +21,7 @@
 #include <linux/kernel.h>
 #include <linux/pagemap.h>
 #include <linux/agp_backend.h>
+#include <linux/delay.h>
 #include <asm/smp.h>
 #include "agp.h"
 #include "intel-agp.h"
@@ -70,12 +71,8 @@
 	u32 __iomem *gtt;		/* I915G */
 	bool clear_fake_agp; /* on first access via agp, fill with scratch */
 	int num_dcache_entries;
-	union {
-		void __iomem *i9xx_flush_page;
-		void *i8xx_flush_page;
-	};
+	void __iomem *i9xx_flush_page;
 	char *i81x_gtt_table;
-	struct page *i8xx_page;
 	struct resource ifp_resource;
 	int resource_valid;
 	struct page *scratch_page;
@@ -722,28 +719,6 @@
 
 static void i830_cleanup(void)
 {
-	if (intel_private.i8xx_flush_page) {
-		kunmap(intel_private.i8xx_flush_page);
-		intel_private.i8xx_flush_page = NULL;
-	}
-
-	__free_page(intel_private.i8xx_page);
-	intel_private.i8xx_page = NULL;
-}
-
-static void intel_i830_setup_flush(void)
-{
-	/* return if we've already set the flush mechanism up */
-	if (intel_private.i8xx_page)
-		return;
-
-	intel_private.i8xx_page = alloc_page(GFP_KERNEL);
-	if (!intel_private.i8xx_page)
-		return;
-
-	intel_private.i8xx_flush_page = kmap(intel_private.i8xx_page);
-	if (!intel_private.i8xx_flush_page)
-		i830_cleanup();
 }
 
 /* The chipset_flush interface needs to get data that has already been
@@ -758,14 +733,27 @@
  */
 static void i830_chipset_flush(void)
 {
-	unsigned int *pg = intel_private.i8xx_flush_page;
+	unsigned long timeout = jiffies + msecs_to_jiffies(1000);
 
-	memset(pg, 0, 1024);
+	/* Forcibly evict everything from the CPU write buffers.
+	 * clflush appears to be insufficient.
+	 */
+	wbinvd_on_all_cpus();
 
-	if (cpu_has_clflush)
-		clflush_cache_range(pg, 1024);
-	else if (wbinvd_on_all_cpus() != 0)
-		printk(KERN_ERR "Timed out waiting for cache flush.\n");
+	/* So far we've only seen documentation for this magic bit on 855GM;
+	 * we hope it exists on the other gen2 chipsets as well...
+	 *
+	 * Also works as advertised on my 845G.
+	 */
+	writel(readl(intel_private.registers+I830_HIC) | (1<<31),
+	       intel_private.registers+I830_HIC);
+
+	while (readl(intel_private.registers+I830_HIC) & (1<<31)) {
+		if (time_after(jiffies, timeout))
+			break;
+
+		udelay(50);
+	}
 }
 
 static void i830_write_entry(dma_addr_t addr, unsigned int entry,
@@ -849,8 +837,6 @@
 
 	intel_private.gtt_bus_addr = reg_addr + I810_PTE_BASE;
 
-	intel_i830_setup_flush();
-
 	return 0;
 }
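
The replacement i830_chipset_flush() kicks the flush by setting bit 31 of the HIC register and then polls it with a bounded wait instead of relying on the old flush page. A userspace sketch of that poll-with-timeout shape follows; fake_hic, read_status and the one-second budget are illustrative, not the driver's actual interface.

#include <stdio.h>
#include <time.h>

#define STATUS_BIT	(1u << 31)

static volatile unsigned int fake_hic;	/* stand-in for the I830_HIC register */

static unsigned int read_status(void)
{
	/* A real register read; this pretend hardware finishes instantly. */
	fake_hic &= ~STATUS_BIT;
	return fake_hic;
}

static int flush_with_timeout(long timeout_ms)
{
	struct timespec start, now;

	fake_hic |= STATUS_BIT;		/* kick off the flush */
	clock_gettime(CLOCK_MONOTONIC, &start);

	while (read_status() & STATUS_BIT) {
		long elapsed_ms;

		clock_gettime(CLOCK_MONOTONIC, &now);
		elapsed_ms = (now.tv_sec - start.tv_sec) * 1000 +
			     (now.tv_nsec - start.tv_nsec) / 1000000;
		if (elapsed_ms > timeout_ms)
			return -1;	/* give up instead of hanging */

		/* roughly udelay(50) */
		nanosleep(&(struct timespec){ .tv_nsec = 50000 }, NULL);
	}
	return 0;
}

int main(void)
{
	printf("flush %s\n", flush_with_timeout(1000) ? "timed out" : "done");
	return 0;
}
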
 
diff --git a/drivers/char/tpm/tpm.c b/drivers/char/tpm/tpm.c
index 36e0fa1..1f46f1c 100644
--- a/drivers/char/tpm/tpm.c
+++ b/drivers/char/tpm/tpm.c
@@ -364,14 +364,12 @@
 		    tpm_protected_ordinal_duration[ordinal &
 						   TPM_PROTECTED_ORDINAL_MASK];
 
-	if (duration_idx != TPM_UNDEFINED) {
+	if (duration_idx != TPM_UNDEFINED)
 		duration = chip->vendor.duration[duration_idx];
-		/* if duration is 0, it's because chip->vendor.duration wasn't */
-		/* filled yet, so we set the lowest timeout just to give enough */
-		/* time for tpm_get_timeouts() to succeed */
-		return (duration <= 0 ? HZ : duration);
-	} else
+	if (duration <= 0)
 		return 2 * 60 * HZ;
+	else
+		return duration;
 }
 EXPORT_SYMBOL_GPL(tpm_calc_ordinal_duration);
 
diff --git a/drivers/gpu/drm/drm_irq.c b/drivers/gpu/drm/drm_irq.c
index 53120a7..28d1d3c 100644
--- a/drivers/gpu/drm/drm_irq.c
+++ b/drivers/gpu/drm/drm_irq.c
@@ -1012,7 +1012,8 @@
 		    struct drm_file *file_priv)
 {
 	struct drm_modeset_ctl *modeset = data;
-	int crtc, ret = 0;
+	int ret = 0;
+	unsigned int crtc;
 
 	/* If drm_vblank_init() hasn't been called yet, just no-op */
 	if (!dev->num_crtcs)
diff --git a/drivers/gpu/drm/i915/i915_gem_tiling.c b/drivers/gpu/drm/i915/i915_gem_tiling.c
index 22a32b9..79a04fd 100644
--- a/drivers/gpu/drm/i915/i915_gem_tiling.c
+++ b/drivers/gpu/drm/i915/i915_gem_tiling.c
@@ -184,7 +184,7 @@
 static bool
 i915_tiling_ok(struct drm_device *dev, int stride, int size, int tiling_mode)
 {
-	int tile_width;
+	int tile_width, tile_height;
 
 	/* Linear is always fine */
 	if (tiling_mode == I915_TILING_NONE)
@@ -215,6 +215,20 @@
 		}
 	}
 
+	if (IS_GEN2(dev) ||
+	    (tiling_mode == I915_TILING_Y && HAS_128_BYTE_Y_TILING(dev)))
+		tile_height = 32;
+	else
+		tile_height = 8;
+	/* i8xx is strange: it has 2 interleaved rows of tiles, so it needs an even
+	 * number of tile rows. */
+	if (IS_GEN2(dev))
+		tile_height *= 2;
+
+	/* Size needs to be aligned to a full tile row */
+	if (size & (tile_height * stride - 1))
+		return false;
+
 	/* 965+ just needs multiples of tile width */
 	if (INTEL_INFO(dev)->gen >= 4) {
 		if (stride & (tile_width - 1))
diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
index 97f946dc..8a9e08b 100644
--- a/drivers/gpu/drm/i915/i915_irq.c
+++ b/drivers/gpu/drm/i915/i915_irq.c
@@ -316,6 +316,8 @@
 	struct drm_mode_config *mode_config = &dev->mode_config;
 	struct intel_encoder *encoder;
 
+	DRM_DEBUG_KMS("running encoder hotplug functions\n");
+
 	list_for_each_entry(encoder, &mode_config->encoder_list, base.head)
 		if (encoder->hot_plug)
 			encoder->hot_plug(encoder);
@@ -1649,9 +1651,7 @@
 	} else {
 		hotplug_mask = SDE_CRT_HOTPLUG | SDE_PORTB_HOTPLUG |
 			       SDE_PORTC_HOTPLUG | SDE_PORTD_HOTPLUG;
-		hotplug_mask |= SDE_AUX_MASK | SDE_FDI_MASK | SDE_TRANS_MASK;
-		I915_WRITE(FDI_RXA_IMR, 0);
-		I915_WRITE(FDI_RXB_IMR, 0);
+		hotplug_mask |= SDE_AUX_MASK;
 	}
 
 	dev_priv->pch_irq_mask = ~hotplug_mask;
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index 3b00653..e79b25b 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -1630,19 +1630,19 @@
 		struct drm_i915_gem_object *obj = to_intel_framebuffer(old_fb)->obj;
 
 		wait_event(dev_priv->pending_flip_queue,
+			   atomic_read(&dev_priv->mm.wedged) ||
 			   atomic_read(&obj->pending_flip) == 0);
 
 		/* Big Hammer, we also need to ensure that any pending
 		 * MI_WAIT_FOR_EVENT inside a user batch buffer on the
 		 * current scanout is retired before unpinning the old
 		 * framebuffer.
+		 *
+		 * This should only fail upon a hung GPU, in which case we
+		 * can safely continue.
 		 */
 		ret = i915_gem_object_flush_gpu(obj, false);
-		if (ret) {
-			i915_gem_object_unpin(to_intel_framebuffer(crtc->fb)->obj);
-			mutex_unlock(&dev->struct_mutex);
-			return ret;
-		}
+		(void) ret;
 	}
 
 	ret = intel_pipe_set_base_atomic(crtc, crtc->fb, x, y,
@@ -2045,6 +2045,31 @@
 		   atomic_read(&obj->pending_flip) == 0);
 }
 
+static bool intel_crtc_driving_pch(struct drm_crtc *crtc)
+{
+	struct drm_device *dev = crtc->dev;
+	struct drm_mode_config *mode_config = &dev->mode_config;
+	struct intel_encoder *encoder;
+
+	/*
+	 * If there's a non-PCH eDP on this crtc, it must be DP_A, and that
+	 * must be driven by its own crtc; no sharing is possible.
+	 */
+	list_for_each_entry(encoder, &mode_config->encoder_list, base.head) {
+		if (encoder->base.crtc != crtc)
+			continue;
+
+		switch (encoder->type) {
+		case INTEL_OUTPUT_EDP:
+			if (!intel_encoder_is_pch_edp(&encoder->base))
+				return false;
+			continue;
+		}
+	}
+
+	return true;
+}
+
 static void ironlake_crtc_enable(struct drm_crtc *crtc)
 {
 	struct drm_device *dev = crtc->dev;
@@ -2053,6 +2078,7 @@
 	int pipe = intel_crtc->pipe;
 	int plane = intel_crtc->plane;
 	u32 reg, temp;
+	bool is_pch_port = false;
 
 	if (intel_crtc->active)
 		return;
@@ -2066,7 +2092,56 @@
 			I915_WRITE(PCH_LVDS, temp | LVDS_PORT_EN);
 	}
 
-	ironlake_fdi_enable(crtc);
+	is_pch_port = intel_crtc_driving_pch(crtc);
+
+	if (is_pch_port)
+		ironlake_fdi_enable(crtc);
+	else {
+		/* disable CPU FDI tx and PCH FDI rx */
+		reg = FDI_TX_CTL(pipe);
+		temp = I915_READ(reg);
+		I915_WRITE(reg, temp & ~FDI_TX_ENABLE);
+		POSTING_READ(reg);
+
+		reg = FDI_RX_CTL(pipe);
+		temp = I915_READ(reg);
+		temp &= ~(0x7 << 16);
+		temp |= (I915_READ(PIPECONF(pipe)) & PIPE_BPC_MASK) << 11;
+		I915_WRITE(reg, temp & ~FDI_RX_ENABLE);
+
+		POSTING_READ(reg);
+		udelay(100);
+
+		/* Ironlake workaround, disable clock pointer after downing FDI */
+		if (HAS_PCH_IBX(dev))
+			I915_WRITE(FDI_RX_CHICKEN(pipe),
+				   I915_READ(FDI_RX_CHICKEN(pipe) &
+					     ~FDI_RX_PHASE_SYNC_POINTER_ENABLE));
+
+		/* still set train pattern 1 */
+		reg = FDI_TX_CTL(pipe);
+		temp = I915_READ(reg);
+		temp &= ~FDI_LINK_TRAIN_NONE;
+		temp |= FDI_LINK_TRAIN_PATTERN_1;
+		I915_WRITE(reg, temp);
+
+		reg = FDI_RX_CTL(pipe);
+		temp = I915_READ(reg);
+		if (HAS_PCH_CPT(dev)) {
+			temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
+			temp |= FDI_LINK_TRAIN_PATTERN_1_CPT;
+		} else {
+			temp &= ~FDI_LINK_TRAIN_NONE;
+			temp |= FDI_LINK_TRAIN_PATTERN_1;
+		}
+		/* BPC in FDI rx is consistent with that in PIPECONF */
+		temp &= ~(0x07 << 16);
+		temp |= (I915_READ(PIPECONF(pipe)) & PIPE_BPC_MASK) << 11;
+		I915_WRITE(reg, temp);
+
+		POSTING_READ(reg);
+		udelay(100);
+	}
 
 	/* Enable panel fitting for LVDS */
 	if (dev_priv->pch_pf_size &&
@@ -2100,6 +2175,10 @@
 		intel_flush_display_plane(dev, plane);
 	}
 
+	/* Skip the PCH stuff if possible */
+	if (!is_pch_port)
+		goto done;
+
 	/* For PCH output, training FDI link */
 	if (IS_GEN6(dev))
 		gen6_fdi_link_train(crtc);
@@ -2184,7 +2263,7 @@
 	I915_WRITE(reg, temp | TRANS_ENABLE);
 	if (wait_for(I915_READ(reg) & TRANS_STATE_ENABLE, 100))
 		DRM_ERROR("failed to enable transcoder %d\n", pipe);
-
+done:
 	intel_crtc_load_lut(crtc);
 	intel_update_fbc(dev);
 	intel_crtc_update_cursor(crtc, true);
@@ -6496,7 +6575,7 @@
 		POSTING_READ(RSTDBYCTL);
 	}
 
-	ironlake_disable_rc6(dev);
+	ironlake_teardown_rc6(dev);
 }
 
 static int ironlake_setup_rc6(struct drm_device *dev)
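
The intel_crtc_driving_pch() check above decides whether the FDI/PCH path needs to be brought up at all: only CPU eDP (DP_A) bypasses the PCH. A minimal userspace C sketch of the same decision, with invented encoder/crtc types standing in for the drm structures:

#include <stdbool.h>
#include <stdio.h>

enum output_type { OUTPUT_LVDS, OUTPUT_EDP, OUTPUT_HDMI };

struct encoder { enum output_type type; int crtc; bool pch_edp; };

/* Sketch of the intel_crtc_driving_pch() test: a crtc needs the
 * PCH/FDI path unless the only output it drives is CPU eDP (DP_A),
 * which bypasses the PCH entirely. */
static bool crtc_driving_pch(const struct encoder *enc, int n, int crtc)
{
	int i;

	for (i = 0; i < n; i++) {
		if (enc[i].crtc != crtc)
			continue;
		if (enc[i].type == OUTPUT_EDP && !enc[i].pch_edp)
			return false;      /* CPU eDP: skip the PCH side */
	}
	return true;
}

int main(void)
{
	struct encoder enc[] = {
		{ OUTPUT_EDP,  0, false },   /* CPU eDP on crtc 0 */
		{ OUTPUT_LVDS, 1, false },   /* LVDS (always via PCH) on crtc 1 */
	};
	printf("crtc0 -> %s\n", crtc_driving_pch(enc, 2, 0) ? "PCH" : "no PCH");
	printf("crtc1 -> %s\n", crtc_driving_pch(enc, 2, 1) ? "PCH" : "no PCH");
	return 0;
}
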
diff --git a/drivers/gpu/drm/nouveau/nouveau_bo.c b/drivers/gpu/drm/nouveau/nouveau_bo.c
index d38a4d9..a521840 100644
--- a/drivers/gpu/drm/nouveau/nouveau_bo.c
+++ b/drivers/gpu/drm/nouveau/nouveau_bo.c
@@ -49,7 +49,10 @@
 		DRM_ERROR("bo %p still attached to GEM object\n", bo);
 
 	nv10_mem_put_tile_region(dev, nvbo->tile, NULL);
-	nouveau_vm_put(&nvbo->vma);
+	if (nvbo->vma.node) {
+		nouveau_vm_unmap(&nvbo->vma);
+		nouveau_vm_put(&nvbo->vma);
+	}
 	kfree(nvbo);
 }
 
diff --git a/drivers/hwmon/ad7414.c b/drivers/hwmon/ad7414.c
index 86d822a..d46c0c7 100644
--- a/drivers/hwmon/ad7414.c
+++ b/drivers/hwmon/ad7414.c
@@ -242,6 +242,7 @@
 	{ "ad7414", 0 },
 	{}
 };
+MODULE_DEVICE_TABLE(i2c, ad7414_id);
 
 static struct i2c_driver ad7414_driver = {
 	.driver = {
diff --git a/drivers/hwmon/adt7411.c b/drivers/hwmon/adt7411.c
index f13c843..5cc3e37 100644
--- a/drivers/hwmon/adt7411.c
+++ b/drivers/hwmon/adt7411.c
@@ -334,6 +334,7 @@
 	{ "adt7411", 0 },
 	{ }
 };
+MODULE_DEVICE_TABLE(i2c, adt7411_id);
 
 static struct i2c_driver adt7411_driver = {
 	.driver		= {
diff --git a/drivers/md/linear.c b/drivers/md/linear.c
index 8a2f767..0ed7f6b 100644
--- a/drivers/md/linear.c
+++ b/drivers/md/linear.c
@@ -216,7 +216,6 @@
 
 	if (md_check_no_bitmap(mddev))
 		return -EINVAL;
-	mddev->queue->queue_lock = &mddev->queue->__queue_lock;
 	conf = linear_conf(mddev, mddev->raid_disks);
 
 	if (!conf)
diff --git a/drivers/md/md.c b/drivers/md/md.c
index 0cc30ec..818313e 100644
--- a/drivers/md/md.c
+++ b/drivers/md/md.c
@@ -553,6 +553,9 @@
 {
 	mddev_t *mddev, *new = NULL;
 
+	if (unit && MAJOR(unit) != MD_MAJOR)
+		unit &= ~((1<<MdpMinorShift)-1);
+
  retry:
 	spin_lock(&all_mddevs_lock);
 
@@ -4138,10 +4141,10 @@
 	}
 
 	mddev->array_sectors = sectors;
-	set_capacity(mddev->gendisk, mddev->array_sectors);
-	if (mddev->pers)
+	if (mddev->pers) {
+		set_capacity(mddev->gendisk, mddev->array_sectors);
 		revalidate_disk(mddev->gendisk);
-
+	}
 	return len;
 }
 
@@ -4624,6 +4627,7 @@
 	}
 	set_capacity(mddev->gendisk, mddev->array_sectors);
 	revalidate_disk(mddev->gendisk);
+	mddev->changed = 1;
 	kobject_uevent(&disk_to_dev(mddev->gendisk)->kobj, KOBJ_CHANGE);
 out:
 	return err;
@@ -4712,6 +4716,7 @@
 	mddev->sync_speed_min = mddev->sync_speed_max = 0;
 	mddev->recovery = 0;
 	mddev->in_sync = 0;
+	mddev->changed = 0;
 	mddev->degraded = 0;
 	mddev->safemode = 0;
 	mddev->bitmap_info.offset = 0;
@@ -4827,6 +4832,7 @@
 
 		set_capacity(disk, 0);
 		mutex_unlock(&mddev->open_mutex);
+		mddev->changed = 1;
 		revalidate_disk(disk);
 
 		if (mddev->ro)
@@ -6011,7 +6017,7 @@
 	atomic_inc(&mddev->openers);
 	mutex_unlock(&mddev->open_mutex);
 
-	check_disk_size_change(mddev->gendisk, bdev);
+	check_disk_change(bdev);
  out:
 	return err;
 }
@@ -6026,6 +6032,21 @@
 
 	return 0;
 }
+
+static int md_media_changed(struct gendisk *disk)
+{
+	mddev_t *mddev = disk->private_data;
+
+	return mddev->changed;
+}
+
+static int md_revalidate(struct gendisk *disk)
+{
+	mddev_t *mddev = disk->private_data;
+
+	mddev->changed = 0;
+	return 0;
+}
 static const struct block_device_operations md_fops =
 {
 	.owner		= THIS_MODULE,
@@ -6036,6 +6057,8 @@
 	.compat_ioctl	= md_compat_ioctl,
 #endif
 	.getgeo		= md_getgeo,
+	.media_changed  = md_media_changed,
+	.revalidate_disk = md_revalidate,
 };
 
 static int md_thread(void * arg)
diff --git a/drivers/md/md.h b/drivers/md/md.h
index 7e90b85..12215d4 100644
--- a/drivers/md/md.h
+++ b/drivers/md/md.h
@@ -274,6 +274,8 @@
 	atomic_t			active;		/* general refcount */
 	atomic_t			openers;	/* number of active opens */
 
+	int				changed;	/* True if we might need to
+							 * reread partition info */
 	int				degraded;	/* whether md should consider
 							 * adding a spare
 							 */
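
The new mddev->changed flag plus the media_changed/revalidate_disk hooks let check_disk_change() rescan the partition table only when the array geometry actually changed. A minimal userspace sketch of that handshake, with hypothetical fake_* names standing in for the md structures:

/*
 * Userspace sketch (not kernel code) of the "changed" flag handshake:
 * writers set the flag when the array geometry changes, and the open
 * path rescans partitions once and clears it.
 */
#include <stdio.h>

struct fake_mddev {
	int changed;            /* true if partition info may be stale */
	long long array_sectors;
};

static int fake_media_changed(struct fake_mddev *m)
{
	return m->changed;      /* mirrors md_media_changed() */
}

static void fake_revalidate(struct fake_mddev *m)
{
	printf("rescanning partitions (size=%lld sectors)\n",
	       m->array_sectors);
	m->changed = 0;         /* mirrors md_revalidate() */
}

static void fake_resize(struct fake_mddev *m, long long sectors)
{
	m->array_sectors = sectors;
	m->changed = 1;         /* mirrors set_capacity() + mddev->changed = 1 */
}

static void fake_open(struct fake_mddev *m)
{
	/* mirrors check_disk_change(): rescan only when flagged */
	if (fake_media_changed(m))
		fake_revalidate(m);
	else
		printf("open: partition table still valid\n");
}

int main(void)
{
	struct fake_mddev m = { .changed = 0, .array_sectors = 1024 };

	fake_open(&m);          /* no rescan */
	fake_resize(&m, 4096);  /* marks the table stale */
	fake_open(&m);          /* triggers one rescan */
	fake_open(&m);          /* no rescan again */
	return 0;
}
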
diff --git a/drivers/md/multipath.c b/drivers/md/multipath.c
index 6d7ddf3..3a62d44 100644
--- a/drivers/md/multipath.c
+++ b/drivers/md/multipath.c
@@ -435,7 +435,6 @@
 	 * bookkeeping area. [whatever we allocate in multipath_run(),
 	 * should be freed in multipath_stop()]
 	 */
-	mddev->queue->queue_lock = &mddev->queue->__queue_lock;
 
 	conf = kzalloc(sizeof(multipath_conf_t), GFP_KERNEL);
 	mddev->private = conf;
diff --git a/drivers/md/raid0.c b/drivers/md/raid0.c
index 637a968..c0ac457 100644
--- a/drivers/md/raid0.c
+++ b/drivers/md/raid0.c
@@ -361,7 +361,6 @@
 	if (md_check_no_bitmap(mddev))
 		return -EINVAL;
 	blk_queue_max_hw_sectors(mddev->queue, mddev->chunk_sectors);
-	mddev->queue->queue_lock = &mddev->queue->__queue_lock;
 
 	/* if private is not null, we are here after takeover */
 	if (mddev->private == NULL) {
@@ -670,6 +669,7 @@
 	mddev->new_layout = 0;
 	mddev->new_chunk_sectors = 128; /* by default set chunk size to 64k */
 	mddev->delta_disks = 1 - mddev->raid_disks;
+	mddev->raid_disks = 1;
 	/* make sure it will be not marked as dirty */
 	mddev->recovery_cp = MaxSector;
 
diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
index a23ffa3..06cd712 100644
--- a/drivers/md/raid1.c
+++ b/drivers/md/raid1.c
@@ -593,7 +593,10 @@
 	if (conf->pending_bio_list.head) {
 		struct bio *bio;
 		bio = bio_list_get(&conf->pending_bio_list);
+		/* Only take the spinlock to quiet a warning */
+		spin_lock(conf->mddev->queue->queue_lock);
 		blk_remove_plug(conf->mddev->queue);
+		spin_unlock(conf->mddev->queue->queue_lock);
 		spin_unlock_irq(&conf->device_lock);
 		/* flush any pending bitmap writes to
 		 * disk before proceeding w/ I/O */
@@ -959,7 +962,7 @@
 		atomic_inc(&r1_bio->remaining);
 		spin_lock_irqsave(&conf->device_lock, flags);
 		bio_list_add(&conf->pending_bio_list, mbio);
-		blk_plug_device(mddev->queue);
+		blk_plug_device_unlocked(mddev->queue);
 		spin_unlock_irqrestore(&conf->device_lock, flags);
 	}
 	r1_bio_write_done(r1_bio, bio->bi_vcnt, behind_pages, behind_pages != NULL);
@@ -2021,7 +2024,6 @@
 	if (IS_ERR(conf))
 		return PTR_ERR(conf);
 
-	mddev->queue->queue_lock = &conf->device_lock;
 	list_for_each_entry(rdev, &mddev->disks, same_set) {
 		disk_stack_limits(mddev->gendisk, rdev->bdev,
 				  rdev->data_offset << 9);
diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c
index 3b607b2..747d061 100644
--- a/drivers/md/raid10.c
+++ b/drivers/md/raid10.c
@@ -662,7 +662,10 @@
 	if (conf->pending_bio_list.head) {
 		struct bio *bio;
 		bio = bio_list_get(&conf->pending_bio_list);
+		/* Spinlock only taken to quiet a warning */
+		spin_lock(conf->mddev->queue->queue_lock);
 		blk_remove_plug(conf->mddev->queue);
+		spin_unlock(conf->mddev->queue->queue_lock);
 		spin_unlock_irq(&conf->device_lock);
 		/* flush any pending bitmap writes to disk
 		 * before proceeding w/ I/O */
@@ -971,7 +974,7 @@
 		atomic_inc(&r10_bio->remaining);
 		spin_lock_irqsave(&conf->device_lock, flags);
 		bio_list_add(&conf->pending_bio_list, mbio);
-		blk_plug_device(mddev->queue);
+		blk_plug_device_unlocked(mddev->queue);
 		spin_unlock_irqrestore(&conf->device_lock, flags);
 	}
 
@@ -2304,8 +2307,6 @@
 	if (!conf)
 		goto out;
 
-	mddev->queue->queue_lock = &conf->device_lock;
-
 	mddev->thread = conf->thread;
 	conf->thread = NULL;
 
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
index 7028128..78536fd 100644
--- a/drivers/md/raid5.c
+++ b/drivers/md/raid5.c
@@ -5204,7 +5204,6 @@
 
 		mddev->queue->backing_dev_info.congested_data = mddev;
 		mddev->queue->backing_dev_info.congested_fn = raid5_congested;
-		mddev->queue->queue_lock = &conf->device_lock;
 		mddev->queue->unplug_fn = raid5_unplug_queue;
 
 		chunk_size = mddev->chunk_sectors << 9;
diff --git a/drivers/nfc/Kconfig b/drivers/nfc/Kconfig
index ffedfd4..ea15800 100644
--- a/drivers/nfc/Kconfig
+++ b/drivers/nfc/Kconfig
@@ -3,7 +3,7 @@
 #
 
 menuconfig NFC_DEVICES
-	bool "NFC devices"
+	bool "Near Field Communication (NFC) devices"
 	default n
 	---help---
 	  You'll have to say Y if your computer contains an NFC device that
diff --git a/drivers/nfc/pn544.c b/drivers/nfc/pn544.c
index bae6472..724f65d 100644
--- a/drivers/nfc/pn544.c
+++ b/drivers/nfc/pn544.c
@@ -60,7 +60,7 @@
 struct pn544_info {
 	struct miscdevice miscdev;
 	struct i2c_client *i2c_dev;
-	struct regulator_bulk_data regs[2];
+	struct regulator_bulk_data regs[3];
 
 	enum pn544_state state;
 	wait_queue_head_t read_wait;
@@ -74,6 +74,7 @@
 
 static const char reg_vdd_io[]	= "Vdd_IO";
 static const char reg_vbat[]	= "VBat";
+static const char reg_vsim[]	= "VSim";
 
 /* sysfs interface */
 static ssize_t pn544_test(struct device *dev,
@@ -740,6 +741,7 @@
 
 	info->regs[0].supply = reg_vdd_io;
 	info->regs[1].supply = reg_vbat;
+	info->regs[2].supply = reg_vsim;
 	r = regulator_bulk_get(&client->dev, ARRAY_SIZE(info->regs),
 				 info->regs);
 	if (r < 0)
diff --git a/drivers/pci/pcie/aer/aerdrv.h b/drivers/pci/pcie/aer/aerdrv.h
index 80c11d1..3eb7708 100644
--- a/drivers/pci/pcie/aer/aerdrv.h
+++ b/drivers/pci/pcie/aer/aerdrv.h
@@ -35,13 +35,6 @@
 					PCI_ERR_UNC_UNX_COMP|		\
 					PCI_ERR_UNC_MALF_TLP)
 
-struct header_log_regs {
-	unsigned int dw0;
-	unsigned int dw1;
-	unsigned int dw2;
-	unsigned int dw3;
-};
-
 #define AER_MAX_MULTI_ERR_DEVICES	5	/* Not likely to have more */
 struct aer_err_info {
 	struct pci_dev *dev[AER_MAX_MULTI_ERR_DEVICES];
@@ -59,7 +52,7 @@
 
 	unsigned int status;		/* COR/UNCOR Error Status */
 	unsigned int mask;		/* COR/UNCOR Error Mask */
-	struct header_log_regs tlp;	/* TLP Header */
+	struct aer_header_log_regs tlp;	/* TLP Header */
 };
 
 struct aer_err_source {
diff --git a/drivers/pci/pcie/aer/aerdrv_errprint.c b/drivers/pci/pcie/aer/aerdrv_errprint.c
index 9d3e4c8..b07a42e 100644
--- a/drivers/pci/pcie/aer/aerdrv_errprint.c
+++ b/drivers/pci/pcie/aer/aerdrv_errprint.c
@@ -19,6 +19,7 @@
 #include <linux/errno.h>
 #include <linux/pm.h>
 #include <linux/suspend.h>
+#include <linux/cper.h>
 
 #include "aerdrv.h"
 
@@ -57,66 +58,44 @@
 	(e & AER_DATA_LINK_LAYER_ERROR_MASK(t)) ? AER_DATA_LINK_LAYER_ERROR : \
 	AER_TRANSACTION_LAYER_ERROR)
 
-#define AER_PR(info, pdev, fmt, args...)				\
-	printk("%s%s %s: " fmt, (info->severity == AER_CORRECTABLE) ?	\
-		KERN_WARNING : KERN_ERR, dev_driver_string(&pdev->dev),	\
-		dev_name(&pdev->dev), ## args)
-
 /*
  * AER error strings
  */
-static char *aer_error_severity_string[] = {
+static const char *aer_error_severity_string[] = {
 	"Uncorrected (Non-Fatal)",
 	"Uncorrected (Fatal)",
 	"Corrected"
 };
 
-static char *aer_error_layer[] = {
+static const char *aer_error_layer[] = {
 	"Physical Layer",
 	"Data Link Layer",
 	"Transaction Layer"
 };
-static char *aer_correctable_error_string[] = {
-	"Receiver Error        ",	/* Bit Position 0	*/
+
+static const char *aer_correctable_error_string[] = {
+	"Receiver Error",		/* Bit Position 0	*/
 	NULL,
 	NULL,
 	NULL,
 	NULL,
 	NULL,
-	"Bad TLP               ",	/* Bit Position 6	*/
-	"Bad DLLP              ",	/* Bit Position 7	*/
-	"RELAY_NUM Rollover    ",	/* Bit Position 8	*/
+	"Bad TLP",			/* Bit Position 6	*/
+	"Bad DLLP",			/* Bit Position 7	*/
+	"RELAY_NUM Rollover",		/* Bit Position 8	*/
 	NULL,
 	NULL,
 	NULL,
-	"Replay Timer Timeout  ",	/* Bit Position 12	*/
-	"Advisory Non-Fatal    ",	/* Bit Position 13	*/
-	NULL,
-	NULL,
-	NULL,
-	NULL,
-	NULL,
-	NULL,
-	NULL,
-	NULL,
-	NULL,
-	NULL,
-	NULL,
-	NULL,
-	NULL,
-	NULL,
-	NULL,
-	NULL,
-	NULL,
-	NULL,
+	"Replay Timer Timeout",		/* Bit Position 12	*/
+	"Advisory Non-Fatal",		/* Bit Position 13	*/
 };
 
-static char *aer_uncorrectable_error_string[] = {
+static const char *aer_uncorrectable_error_string[] = {
 	NULL,
 	NULL,
 	NULL,
 	NULL,
-	"Data Link Protocol    ",	/* Bit Position 4	*/
+	"Data Link Protocol",		/* Bit Position 4	*/
 	NULL,
 	NULL,
 	NULL,
@@ -124,39 +103,29 @@
 	NULL,
 	NULL,
 	NULL,
-	"Poisoned TLP          ",	/* Bit Position 12	*/
-	"Flow Control Protocol ",	/* Bit Position 13	*/
-	"Completion Timeout    ",	/* Bit Position 14	*/
-	"Completer Abort       ",	/* Bit Position 15	*/
-	"Unexpected Completion ",	/* Bit Position 16	*/
-	"Receiver Overflow     ",	/* Bit Position 17	*/
-	"Malformed TLP         ",	/* Bit Position 18	*/
-	"ECRC                  ",	/* Bit Position 19	*/
-	"Unsupported Request   ",	/* Bit Position 20	*/
-	NULL,
-	NULL,
-	NULL,
-	NULL,
-	NULL,
-	NULL,
-	NULL,
-	NULL,
-	NULL,
-	NULL,
-	NULL,
+	"Poisoned TLP",			/* Bit Position 12	*/
+	"Flow Control Protocol",	/* Bit Position 13	*/
+	"Completion Timeout",		/* Bit Position 14	*/
+	"Completer Abort",		/* Bit Position 15	*/
+	"Unexpected Completion",	/* Bit Position 16	*/
+	"Receiver Overflow",		/* Bit Position 17	*/
+	"Malformed TLP",		/* Bit Position 18	*/
+	"ECRC",				/* Bit Position 19	*/
+	"Unsupported Request",		/* Bit Position 20	*/
 };
 
-static char *aer_agent_string[] = {
+static const char *aer_agent_string[] = {
 	"Receiver ID",
 	"Requester ID",
 	"Completer ID",
 	"Transmitter ID"
 };
 
-static void __aer_print_error(struct aer_err_info *info, struct pci_dev *dev)
+static void __aer_print_error(const char *prefix,
+			      struct aer_err_info *info)
 {
 	int i, status;
-	char *errmsg = NULL;
+	const char *errmsg = NULL;
 
 	status = (info->status & ~info->mask);
 
@@ -165,15 +134,17 @@
 			continue;
 
 		if (info->severity == AER_CORRECTABLE)
-			errmsg = aer_correctable_error_string[i];
+			errmsg = i < ARRAY_SIZE(aer_correctable_error_string) ?
+				aer_correctable_error_string[i] : NULL;
 		else
-			errmsg = aer_uncorrectable_error_string[i];
+			errmsg = i < ARRAY_SIZE(aer_uncorrectable_error_string) ?
+				aer_uncorrectable_error_string[i] : NULL;
 
 		if (errmsg)
-			AER_PR(info, dev, "   [%2d] %s%s\n", i, errmsg,
+			printk("%s""   [%2d] %-22s%s\n", prefix, i, errmsg,
 				info->first_error == i ? " (First)" : "");
 		else
-			AER_PR(info, dev, "   [%2d] Unknown Error Bit%s\n", i,
+			printk("%s""   [%2d] Unknown Error Bit%s\n", prefix, i,
 				info->first_error == i ? " (First)" : "");
 	}
 }
@@ -181,11 +152,15 @@
 void aer_print_error(struct pci_dev *dev, struct aer_err_info *info)
 {
 	int id = ((dev->bus->number << 8) | dev->devfn);
+	char prefix[44];
+
+	snprintf(prefix, sizeof(prefix), "%s%s %s: ",
+		 (info->severity == AER_CORRECTABLE) ? KERN_WARNING : KERN_ERR,
+		 dev_driver_string(&dev->dev), dev_name(&dev->dev));
 
 	if (info->status == 0) {
-		AER_PR(info, dev,
-			"PCIe Bus Error: severity=%s, type=Unaccessible, "
-			"id=%04x(Unregistered Agent ID)\n",
+		printk("%s""PCIe Bus Error: severity=%s, type=Unaccessible, "
+			"id=%04x(Unregistered Agent ID)\n", prefix,
 			aer_error_severity_string[info->severity], id);
 	} else {
 		int layer, agent;
@@ -193,23 +168,22 @@
 		layer = AER_GET_LAYER_ERROR(info->severity, info->status);
 		agent = AER_GET_AGENT(info->severity, info->status);
 
-		AER_PR(info, dev,
-			"PCIe Bus Error: severity=%s, type=%s, id=%04x(%s)\n",
-			aer_error_severity_string[info->severity],
+		printk("%s""PCIe Bus Error: severity=%s, type=%s, id=%04x(%s)\n",
+			prefix, aer_error_severity_string[info->severity],
 			aer_error_layer[layer], id, aer_agent_string[agent]);
 
-		AER_PR(info, dev,
-			"  device [%04x:%04x] error status/mask=%08x/%08x\n",
-			dev->vendor, dev->device, info->status, info->mask);
+		printk("%s""  device [%04x:%04x] error status/mask=%08x/%08x\n",
+			prefix, dev->vendor, dev->device,
+			info->status, info->mask);
 
-		__aer_print_error(info, dev);
+		__aer_print_error(prefix, info);
 
 		if (info->tlp_header_valid) {
 			unsigned char *tlp = (unsigned char *) &info->tlp;
-			AER_PR(info, dev, "  TLP Header:"
+			printk("%s""  TLP Header:"
 				" %02x%02x%02x%02x %02x%02x%02x%02x"
 				" %02x%02x%02x%02x %02x%02x%02x%02x\n",
-				*(tlp + 3), *(tlp + 2), *(tlp + 1), *tlp,
+				prefix, *(tlp + 3), *(tlp + 2), *(tlp + 1), *tlp,
 				*(tlp + 7), *(tlp + 6), *(tlp + 5), *(tlp + 4),
 				*(tlp + 11), *(tlp + 10), *(tlp + 9),
 				*(tlp + 8), *(tlp + 15), *(tlp + 14),
@@ -218,8 +192,8 @@
 	}
 
 	if (info->id && info->error_dev_num > 1 && info->id == id)
-		AER_PR(info, dev,
-			"  Error of this Agent(%04x) is reported first\n", id);
+		printk("%s""  Error of this Agent(%04x) is reported first\n",
+			prefix, id);
 }
 
 void aer_print_port_info(struct pci_dev *dev, struct aer_err_info *info)
@@ -228,3 +202,61 @@
 		info->multi_error_valid ? "Multiple " : "",
 		aer_error_severity_string[info->severity], info->id);
 }
+
+#ifdef CONFIG_ACPI_APEI_PCIEAER
+static int cper_severity_to_aer(int cper_severity)
+{
+	switch (cper_severity) {
+	case CPER_SEV_RECOVERABLE:
+		return AER_NONFATAL;
+	case CPER_SEV_FATAL:
+		return AER_FATAL;
+	default:
+		return AER_CORRECTABLE;
+	}
+}
+
+void cper_print_aer(const char *prefix, int cper_severity,
+		    struct aer_capability_regs *aer)
+{
+	int aer_severity, layer, agent, status_strs_size, tlp_header_valid = 0;
+	u32 status, mask;
+	const char **status_strs;
+
+	aer_severity = cper_severity_to_aer(cper_severity);
+	if (aer_severity == AER_CORRECTABLE) {
+		status = aer->cor_status;
+		mask = aer->cor_mask;
+		status_strs = aer_correctable_error_string;
+		status_strs_size = ARRAY_SIZE(aer_correctable_error_string);
+	} else {
+		status = aer->uncor_status;
+		mask = aer->uncor_mask;
+		status_strs = aer_uncorrectable_error_string;
+		status_strs_size = ARRAY_SIZE(aer_uncorrectable_error_string);
+		tlp_header_valid = status & AER_LOG_TLP_MASKS;
+	}
+	layer = AER_GET_LAYER_ERROR(aer_severity, status);
+	agent = AER_GET_AGENT(aer_severity, status);
+	printk("%s""aer_status: 0x%08x, aer_mask: 0x%08x\n",
+	       prefix, status, mask);
+	cper_print_bits(prefix, status, status_strs, status_strs_size);
+	printk("%s""aer_layer=%s, aer_agent=%s\n", prefix,
+	       aer_error_layer[layer], aer_agent_string[agent]);
+	if (aer_severity != AER_CORRECTABLE)
+		printk("%s""aer_uncor_severity: 0x%08x\n",
+		       prefix, aer->uncor_severity);
+	if (tlp_header_valid) {
+		const unsigned char *tlp;
+		tlp = (const unsigned char *)&aer->header_log;
+		printk("%s""aer_tlp_header:"
+			" %02x%02x%02x%02x %02x%02x%02x%02x"
+			" %02x%02x%02x%02x %02x%02x%02x%02x\n",
+			prefix, *(tlp + 3), *(tlp + 2), *(tlp + 1), *tlp,
+			*(tlp + 7), *(tlp + 6), *(tlp + 5), *(tlp + 4),
+			*(tlp + 11), *(tlp + 10), *(tlp + 9),
+			*(tlp + 8), *(tlp + 15), *(tlp + 14),
+			*(tlp + 13), *(tlp + 12));
+	}
+}
+#endif
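
Both __aer_print_error() and the new cper_print_aer() boil down to walking (status & ~mask) bit by bit and looking each set bit up in a bounded string table. A minimal userspace sketch of that decode loop, reusing only the correctable-error strings shown above:

#include <stdio.h>
#include <stdint.h>

#define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

/* Trimmed correctable-error strings, indexed by bit position (as above). */
static const char *cor_strings[] = {
	[0]  = "Receiver Error",
	[6]  = "Bad TLP",
	[7]  = "Bad DLLP",
	[8]  = "RELAY_NUM Rollover",
	[12] = "Replay Timer Timeout",
	[13] = "Advisory Non-Fatal",
};

/* Decode (status & ~mask) bit by bit, like __aer_print_error() above:
 * skip masked bits, fall back to "Unknown" past the end of the table. */
static void print_aer_bits(const char *prefix, uint32_t status, uint32_t mask)
{
	uint32_t live = status & ~mask;
	unsigned int i;

	for (i = 0; i < 32; i++) {
		if (!(live & (1u << i)))
			continue;
		if (i < ARRAY_SIZE(cor_strings) && cor_strings[i])
			printf("%s   [%2u] %s\n", prefix, i, cor_strings[i]);
		else
			printf("%s   [%2u] Unknown Error Bit\n", prefix, i);
	}
}

int main(void)
{
	/* e.g. Bad TLP + Replay Timer Timeout reported, Bad DLLP masked off */
	print_aer_bits("aer: ", (1u << 6) | (1u << 7) | (1u << 12), 1u << 7);
	return 0;
}
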
diff --git a/drivers/pps/kapi.c b/drivers/pps/kapi.c
index cba1b43..a4e8eb9 100644
--- a/drivers/pps/kapi.c
+++ b/drivers/pps/kapi.c
@@ -168,7 +168,7 @@
 {
 	unsigned long flags;
 	int captured = 0;
-	struct pps_ktime ts_real;
+	struct pps_ktime ts_real = { .sec = 0, .nsec = 0, .flags = 0 };
 
 	/* check event type */
 	BUG_ON((event & (PPS_CAPTUREASSERT | PPS_CAPTURECLEAR)) == 0);
diff --git a/drivers/rapidio/rio-sysfs.c b/drivers/rapidio/rio-sysfs.c
index 76b4185..1269fbd 100644
--- a/drivers/rapidio/rio-sysfs.c
+++ b/drivers/rapidio/rio-sysfs.c
@@ -77,9 +77,9 @@
 
 	/* Several chips lock up trying to read undefined config space */
 	if (capable(CAP_SYS_ADMIN))
-		size = 0x200000;
+		size = RIO_MAINT_SPACE_SZ;
 
-	if (off > size)
+	if (off >= size)
 		return 0;
 	if (off + count > size) {
 		size -= off;
@@ -147,10 +147,10 @@
 	loff_t init_off = off;
 	u8 *data = (u8 *) buf;
 
-	if (off > 0x200000)
+	if (off >= RIO_MAINT_SPACE_SZ)
 		return 0;
-	if (off + count > 0x200000) {
-		size = 0x200000 - off;
+	if (off + count > RIO_MAINT_SPACE_SZ) {
+		size = RIO_MAINT_SPACE_SZ - off;
 		count = size;
 	}
 
@@ -200,7 +200,7 @@
 		 .name = "config",
 		 .mode = S_IRUGO | S_IWUSR,
 		 },
-	.size = 0x200000,
+	.size = RIO_MAINT_SPACE_SZ,
 	.read = rio_read_config,
 	.write = rio_write_config,
 };
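
The rio-sysfs change replaces the magic 0x200000 with RIO_MAINT_SPACE_SZ and tightens the bound to off >= size, so an offset at or beyond the end of the maintenance space reads nothing. A small userspace sketch of the clamping; MAINT_SPACE_SZ here is a made-up stand-in, the real size comes from RIO_MAINT_SPACE_SZ:

#include <stdio.h>
#include <stddef.h>

#define MAINT_SPACE_SZ (2 * 1024 * 1024)   /* stand-in for RIO_MAINT_SPACE_SZ */

/* Clamp an (off, count) window to the maintenance space, as
 * rio_read_config()/rio_write_config() do after the fix. */
static size_t clamp_window(long long off, size_t count)
{
	if (off < 0 || off >= MAINT_SPACE_SZ)
		return 0;                          /* fully out of range */
	if ((long long)off + (long long)count > MAINT_SPACE_SZ)
		count = MAINT_SPACE_SZ - off;      /* trim the tail */
	return count;
}

int main(void)
{
	printf("%zu\n", clamp_window(MAINT_SPACE_SZ, 16));      /* 0 */
	printf("%zu\n", clamp_window(MAINT_SPACE_SZ - 8, 16));  /* 8 */
	return 0;
}
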
diff --git a/drivers/regulator/mc13xxx-regulator-core.c b/drivers/regulator/mc13xxx-regulator-core.c
index f53d31b..2bb5de1 100644
--- a/drivers/regulator/mc13xxx-regulator-core.c
+++ b/drivers/regulator/mc13xxx-regulator-core.c
@@ -174,7 +174,7 @@
 
 	dev_dbg(rdev_get_dev(rdev), "%s id: %d val: %d\n", __func__, id, val);
 
-	BUG_ON(val < 0 || val > mc13xxx_regulators[id].desc.n_voltages);
+	BUG_ON(val > mc13xxx_regulators[id].desc.n_voltages);
 
 	return mc13xxx_regulators[id].voltages[val];
 }
diff --git a/drivers/regulator/wm831x-dcdc.c b/drivers/regulator/wm831x-dcdc.c
index 8b0d2c4..06df898 100644
--- a/drivers/regulator/wm831x-dcdc.c
+++ b/drivers/regulator/wm831x-dcdc.c
@@ -120,6 +120,7 @@
 		return REGULATOR_MODE_IDLE;
 	default:
 		BUG();
+		return -EINVAL;
 	}
 }
 
diff --git a/drivers/rtc/rtc-at91sam9.c b/drivers/rtc/rtc-at91sam9.c
index c36749e..5469c52 100644
--- a/drivers/rtc/rtc-at91sam9.c
+++ b/drivers/rtc/rtc-at91sam9.c
@@ -309,7 +309,7 @@
 	.read_alarm	= at91_rtc_readalarm,
 	.set_alarm	= at91_rtc_setalarm,
 	.proc		= at91_rtc_proc,
-	.alarm_irq_enabled = at91_rtc_alarm_irq_enable,
+	.alarm_irq_enable = at91_rtc_alarm_irq_enable,
 };
 
 /*
diff --git a/drivers/rtc/rtc-ds3232.c b/drivers/rtc/rtc-ds3232.c
index 23a9ee1..9507354 100644
--- a/drivers/rtc/rtc-ds3232.c
+++ b/drivers/rtc/rtc-ds3232.c
@@ -1,7 +1,7 @@
 /*
  * RTC client/driver for the Maxim/Dallas DS3232 Real-Time Clock over I2C
  *
- * Copyright (C) 2009-2010 Freescale Semiconductor.
+ * Copyright (C) 2009-2011 Freescale Semiconductor.
  * Author: Jack Lan <jack.lan@freescale.com>
  *
  * This program is free software; you can redistribute  it and/or modify it
@@ -141,9 +141,11 @@
 		time->tm_hour = bcd2bin(hour);
 	}
 
-	time->tm_wday = bcd2bin(week);
+	/* Day of the week is 0~6 in Linux, but 1~7 in the RTC chip */
+	time->tm_wday = bcd2bin(week) - 1;
 	time->tm_mday = bcd2bin(day);
-	time->tm_mon = bcd2bin(month & 0x7F);
+	/* Linux tm_mon is 0~11, while the RTC chip month is 1~12 */
+	time->tm_mon = bcd2bin(month & 0x7F) - 1;
 	if (century)
 		add_century = 100;
 
@@ -162,9 +164,11 @@
 	buf[0] = bin2bcd(time->tm_sec);
 	buf[1] = bin2bcd(time->tm_min);
 	buf[2] = bin2bcd(time->tm_hour);
-	buf[3] = bin2bcd(time->tm_wday); /* Day of the week */
+	/* Day of the week is 0~6 in Linux, but 1~7 in the RTC chip */
+	buf[3] = bin2bcd(time->tm_wday + 1);
 	buf[4] = bin2bcd(time->tm_mday); /* Date */
-	buf[5] = bin2bcd(time->tm_mon);
+	/* Linux tm_mon is 0~11, while the RTC chip month is 1~12 */
+	buf[5] = bin2bcd(time->tm_mon + 1);
 	if (time->tm_year >= 100) {
 		buf[5] |= 0x80;
 		buf[6] = bin2bcd(time->tm_year - 100);
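
The DS3232 fix is purely a range translation: the chip keeps weekday 1~7 and month 1~12 in BCD, while the Linux tm fields use 0~6 and 0~11. A small, runnable sketch of the conversions applied in the read and set paths above:

#include <stdio.h>

/* Same conversions the RTC core provides as bcd2bin()/bin2bcd(). */
static unsigned bcd2bin(unsigned char val) { return (val & 0x0f) + (val >> 4) * 10; }
static unsigned char bin2bcd(unsigned val) { return (unsigned char)(((val / 10) << 4) | (val % 10)); }

int main(void)
{
	/* Chip registers: weekday 1..7, month 1..12, both BCD-coded. */
	unsigned char week_reg = 0x07, month_reg = 0x12;

	/* Read path: shift into the Linux ranges, as the patch now does. */
	int tm_wday = bcd2bin(week_reg) - 1;             /* 0..6  */
	int tm_mon  = bcd2bin(month_reg & 0x7F) - 1;     /* 0..11 */
	printf("tm_wday=%d tm_mon=%d\n", tm_wday, tm_mon);

	/* Set path: shift back and re-encode before writing the registers. */
	printf("week_reg=0x%02x month_reg=0x%02x\n",
	       bin2bcd(tm_wday + 1), bin2bcd(tm_mon + 1));
	return 0;
}
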
diff --git a/drivers/thermal/Kconfig b/drivers/thermal/Kconfig
index f7a5dba..bf7c687 100644
--- a/drivers/thermal/Kconfig
+++ b/drivers/thermal/Kconfig
@@ -4,7 +4,6 @@
 
 menuconfig THERMAL
 	tristate "Generic Thermal sysfs driver"
-	depends on NET
 	help
 	  Generic Thermal Sysfs driver offers a generic mechanism for
 	  thermal management. Usually it's made up of one or more thermal
diff --git a/drivers/thermal/thermal_sys.c b/drivers/thermal/thermal_sys.c
index 7d0e63c..713b7ea 100644
--- a/drivers/thermal/thermal_sys.c
+++ b/drivers/thermal/thermal_sys.c
@@ -62,20 +62,6 @@
 
 static unsigned int thermal_event_seqnum;
 
-static struct genl_family thermal_event_genl_family = {
-	.id = GENL_ID_GENERATE,
-	.name = THERMAL_GENL_FAMILY_NAME,
-	.version = THERMAL_GENL_VERSION,
-	.maxattr = THERMAL_GENL_ATTR_MAX,
-};
-
-static struct genl_multicast_group thermal_event_mcgrp = {
-	.name = THERMAL_GENL_MCAST_GROUP_NAME,
-};
-
-static int genetlink_init(void);
-static void genetlink_exit(void);
-
 static int get_idr(struct idr *idr, struct mutex *lock, int *id)
 {
 	int err;
@@ -1225,6 +1211,18 @@
 
 EXPORT_SYMBOL(thermal_zone_device_unregister);
 
+#ifdef CONFIG_NET
+static struct genl_family thermal_event_genl_family = {
+	.id = GENL_ID_GENERATE,
+	.name = THERMAL_GENL_FAMILY_NAME,
+	.version = THERMAL_GENL_VERSION,
+	.maxattr = THERMAL_GENL_ATTR_MAX,
+};
+
+static struct genl_multicast_group thermal_event_mcgrp = {
+	.name = THERMAL_GENL_MCAST_GROUP_NAME,
+};
+
 int generate_netlink_event(u32 orig, enum events event)
 {
 	struct sk_buff *skb;
@@ -1301,6 +1299,15 @@
 	return result;
 }
 
+static void genetlink_exit(void)
+{
+	genl_unregister_family(&thermal_event_genl_family);
+}
+#else /* !CONFIG_NET */
+static inline int genetlink_init(void) { return 0; }
+static inline void genetlink_exit(void) {}
+#endif /* !CONFIG_NET */
+
 static int __init thermal_init(void)
 {
 	int result = 0;
@@ -1316,11 +1323,6 @@
 	return result;
 }
 
-static void genetlink_exit(void)
-{
-	genl_unregister_family(&thermal_event_genl_family);
-}
-
 static void __exit thermal_exit(void)
 {
 	class_unregister(&thermal_class);
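
Dropping "depends on NET" works because the netlink pieces are now compiled only under CONFIG_NET, with inline no-op stubs otherwise, so thermal_init()/thermal_exit() call the same names either way. A minimal sketch of that stub pattern; USE_NET is a made-up stand-in for CONFIG_NET:

#include <stdio.h>

/*
 * Conditional-stub pattern: the real implementation exists only when
 * the facility is configured in, and inline no-ops keep every caller
 * free of #ifdefs.
 */
#ifdef USE_NET
static int genetlink_init(void)
{
	printf("registering netlink family\n");
	return 0;
}
static void genetlink_exit(void)
{
	printf("unregistering netlink family\n");
}
#else /* !USE_NET */
static inline int genetlink_init(void) { return 0; }
static inline void genetlink_exit(void) { }
#endif

int main(void)
{
	int ret = genetlink_init();   /* callers are identical either way */
	genetlink_exit();
	return ret;
}
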
diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c
index d041c68..0f299b7 100644
--- a/drivers/usb/core/hub.c
+++ b/drivers/usb/core/hub.c
@@ -2681,17 +2681,13 @@
 
 	mutex_lock(&usb_address0_mutex);
 
-	if (!udev->config && oldspeed == USB_SPEED_SUPER) {
-		/* Don't reset USB 3.0 devices during an initial setup */
-		usb_set_device_state(udev, USB_STATE_DEFAULT);
-	} else {
-		/* Reset the device; full speed may morph to high speed */
-		/* FIXME a USB 2.0 device may morph into SuperSpeed on reset. */
-		retval = hub_port_reset(hub, port1, udev, delay);
-		if (retval < 0)		/* error or disconnect */
-			goto fail;
-		/* success, speed is known */
-	}
+	/* Reset the device; full speed may morph to high speed */
+	/* FIXME a USB 2.0 device may morph into SuperSpeed on reset. */
+	retval = hub_port_reset(hub, port1, udev, delay);
+	if (retval < 0)		/* error or disconnect */
+		goto fail;
+	/* success, speed is known */
+
 	retval = -ENODEV;
 
 	if (oldspeed != USB_SPEED_UNKNOWN && oldspeed != udev->speed) {
diff --git a/drivers/usb/host/xhci-dbg.c b/drivers/usb/host/xhci-dbg.c
index fcbf4ab..0231814 100644
--- a/drivers/usb/host/xhci-dbg.c
+++ b/drivers/usb/host/xhci-dbg.c
@@ -169,9 +169,10 @@
 	}
 }
 
-void xhci_print_ir_set(struct xhci_hcd *xhci, struct xhci_intr_reg *ir_set, int set_num)
+void xhci_print_ir_set(struct xhci_hcd *xhci, int set_num)
 {
-	void *addr;
+	struct xhci_intr_reg __iomem *ir_set = &xhci->run_regs->ir_set[set_num];
+	void __iomem *addr;
 	u32 temp;
 	u64 temp_64;
 
@@ -449,7 +450,7 @@
 	}
 }
 
-void xhci_dbg_slot_ctx(struct xhci_hcd *xhci, struct xhci_container_ctx *ctx)
+static void xhci_dbg_slot_ctx(struct xhci_hcd *xhci, struct xhci_container_ctx *ctx)
 {
 	/* Fields are 32 bits wide, DMA addresses are in bytes */
 	int field_size = 32 / 8;
@@ -488,7 +489,7 @@
 		dbg_rsvd64(xhci, (u64 *)slot_ctx, dma);
 }
 
-void xhci_dbg_ep_ctx(struct xhci_hcd *xhci,
+static void xhci_dbg_ep_ctx(struct xhci_hcd *xhci,
 		     struct xhci_container_ctx *ctx,
 		     unsigned int last_ep)
 {
diff --git a/drivers/usb/host/xhci-mem.c b/drivers/usb/host/xhci-mem.c
index 1d0f45f..a953439 100644
--- a/drivers/usb/host/xhci-mem.c
+++ b/drivers/usb/host/xhci-mem.c
@@ -307,7 +307,7 @@
 
 /***************** Streams structures manipulation *************************/
 
-void xhci_free_stream_ctx(struct xhci_hcd *xhci,
+static void xhci_free_stream_ctx(struct xhci_hcd *xhci,
 		unsigned int num_stream_ctxs,
 		struct xhci_stream_ctx *stream_ctx, dma_addr_t dma)
 {
@@ -335,7 +335,7 @@
  * The stream context array must be a power of 2, and can be as small as
  * 64 bytes or as large as 1MB.
  */
-struct xhci_stream_ctx *xhci_alloc_stream_ctx(struct xhci_hcd *xhci,
+static struct xhci_stream_ctx *xhci_alloc_stream_ctx(struct xhci_hcd *xhci,
 		unsigned int num_stream_ctxs, dma_addr_t *dma,
 		gfp_t mem_flags)
 {
@@ -1900,11 +1900,11 @@
 	val &= DBOFF_MASK;
 	xhci_dbg(xhci, "// Doorbell array is located at offset 0x%x"
 			" from cap regs base addr\n", val);
-	xhci->dba = (void *) xhci->cap_regs + val;
+	xhci->dba = (void __iomem *) xhci->cap_regs + val;
 	xhci_dbg_regs(xhci);
 	xhci_print_run_regs(xhci);
 	/* Set ir_set to interrupt register set 0 */
-	xhci->ir_set = (void *) xhci->run_regs->ir_set;
+	xhci->ir_set = &xhci->run_regs->ir_set[0];
 
 	/*
 	 * Event ring setup: Allocate a normal ring, but also setup
@@ -1961,7 +1961,7 @@
 	/* Set the event ring dequeue address */
 	xhci_set_hc_event_deq(xhci);
 	xhci_dbg(xhci, "Wrote ERST address to ir_set 0.\n");
-	xhci_print_ir_set(xhci, xhci->ir_set, 0);
+	xhci_print_ir_set(xhci, 0);
 
 	/*
 	 * XXX: Might need to set the Interrupter Moderation Register to
diff --git a/drivers/usb/host/xhci-ring.c b/drivers/usb/host/xhci-ring.c
index 3e8211c..3289bf4 100644
--- a/drivers/usb/host/xhci-ring.c
+++ b/drivers/usb/host/xhci-ring.c
@@ -474,8 +474,11 @@
 	state->new_deq_seg = find_trb_seg(cur_td->start_seg,
 			dev->eps[ep_index].stopped_trb,
 			&state->new_cycle_state);
-	if (!state->new_deq_seg)
-		BUG();
+	if (!state->new_deq_seg) {
+		WARN_ON(1);
+		return;
+	}
+
 	/* Dig out the cycle state saved by the xHC during the stop ep cmd */
 	xhci_dbg(xhci, "Finding endpoint context\n");
 	ep_ctx = xhci_get_ep_ctx(xhci, dev->out_ctx, ep_index);
@@ -486,8 +489,10 @@
 	state->new_deq_seg = find_trb_seg(state->new_deq_seg,
 			state->new_deq_ptr,
 			&state->new_cycle_state);
-	if (!state->new_deq_seg)
-		BUG();
+	if (!state->new_deq_seg) {
+		WARN_ON(1);
+		return;
+	}
 
 	trb = &state->new_deq_ptr->generic;
 	if ((trb->field[3] & TRB_TYPE_BITMASK) == TRB_TYPE(TRB_LINK) &&
@@ -2363,12 +2368,13 @@
 
 		/* Scatter gather list entries may cross 64KB boundaries */
 		running_total = TRB_MAX_BUFF_SIZE -
-			(sg_dma_address(sg) & ((1 << TRB_MAX_BUFF_SHIFT) - 1));
+			(sg_dma_address(sg) & (TRB_MAX_BUFF_SIZE - 1));
+		running_total &= TRB_MAX_BUFF_SIZE - 1;
 		if (running_total != 0)
 			num_trbs++;
 
 		/* How many more 64KB chunks to transfer, how many more TRBs? */
-		while (running_total < sg_dma_len(sg)) {
+		while (running_total < sg_dma_len(sg) && running_total < temp) {
 			num_trbs++;
 			running_total += TRB_MAX_BUFF_SIZE;
 		}
@@ -2394,11 +2400,11 @@
 static void check_trb_math(struct urb *urb, int num_trbs, int running_total)
 {
 	if (num_trbs != 0)
-		dev_dbg(&urb->dev->dev, "%s - ep %#x - Miscalculated number of "
+		dev_err(&urb->dev->dev, "%s - ep %#x - Miscalculated number of "
 				"TRBs, %d left\n", __func__,
 				urb->ep->desc.bEndpointAddress, num_trbs);
 	if (running_total != urb->transfer_buffer_length)
-		dev_dbg(&urb->dev->dev, "%s - ep %#x - Miscalculated tx length, "
+		dev_err(&urb->dev->dev, "%s - ep %#x - Miscalculated tx length, "
 				"queued %#x (%d), asked for %#x (%d)\n",
 				__func__,
 				urb->ep->desc.bEndpointAddress,
@@ -2533,8 +2539,7 @@
 	sg = urb->sg;
 	addr = (u64) sg_dma_address(sg);
 	this_sg_len = sg_dma_len(sg);
-	trb_buff_len = TRB_MAX_BUFF_SIZE -
-		(addr & ((1 << TRB_MAX_BUFF_SHIFT) - 1));
+	trb_buff_len = TRB_MAX_BUFF_SIZE - (addr & (TRB_MAX_BUFF_SIZE - 1));
 	trb_buff_len = min_t(int, trb_buff_len, this_sg_len);
 	if (trb_buff_len > urb->transfer_buffer_length)
 		trb_buff_len = urb->transfer_buffer_length;
@@ -2572,7 +2577,7 @@
 				(unsigned int) (addr + TRB_MAX_BUFF_SIZE) & ~(TRB_MAX_BUFF_SIZE - 1),
 				(unsigned int) addr + trb_buff_len);
 		if (TRB_MAX_BUFF_SIZE -
-				(addr & ((1 << TRB_MAX_BUFF_SHIFT) - 1)) < trb_buff_len) {
+				(addr & (TRB_MAX_BUFF_SIZE - 1)) < trb_buff_len) {
 			xhci_warn(xhci, "WARN: sg dma xfer crosses 64KB boundaries!\n");
 			xhci_dbg(xhci, "Next boundary at %#x, end dma = %#x\n",
 					(unsigned int) (addr + TRB_MAX_BUFF_SIZE) & ~(TRB_MAX_BUFF_SIZE - 1),
@@ -2616,7 +2621,7 @@
 		}
 
 		trb_buff_len = TRB_MAX_BUFF_SIZE -
-			(addr & ((1 << TRB_MAX_BUFF_SHIFT) - 1));
+			(addr & (TRB_MAX_BUFF_SIZE - 1));
 		trb_buff_len = min_t(int, trb_buff_len, this_sg_len);
 		if (running_total + trb_buff_len > urb->transfer_buffer_length)
 			trb_buff_len =
@@ -2656,7 +2661,8 @@
 	num_trbs = 0;
 	/* How much data is (potentially) left before the 64KB boundary? */
 	running_total = TRB_MAX_BUFF_SIZE -
-		(urb->transfer_dma & ((1 << TRB_MAX_BUFF_SHIFT) - 1));
+		(urb->transfer_dma & (TRB_MAX_BUFF_SIZE - 1));
+	running_total &= TRB_MAX_BUFF_SIZE - 1;
 
 	/* If there's some data on this 64KB chunk, or we have to send a
 	 * zero-length transfer, we need at least one TRB
@@ -2700,8 +2706,8 @@
 	/* How much data is in the first TRB? */
 	addr = (u64) urb->transfer_dma;
 	trb_buff_len = TRB_MAX_BUFF_SIZE -
-		(urb->transfer_dma & ((1 << TRB_MAX_BUFF_SHIFT) - 1));
-	if (urb->transfer_buffer_length < trb_buff_len)
+		(urb->transfer_dma & (TRB_MAX_BUFF_SIZE - 1));
+	if (trb_buff_len > urb->transfer_buffer_length)
 		trb_buff_len = urb->transfer_buffer_length;
 
 	first_trb = true;
@@ -2879,8 +2885,8 @@
 	addr = (u64) (urb->transfer_dma + urb->iso_frame_desc[i].offset);
 	td_len = urb->iso_frame_desc[i].length;
 
-	running_total = TRB_MAX_BUFF_SIZE -
-			(addr & ((1 << TRB_MAX_BUFF_SHIFT) - 1));
+	running_total = TRB_MAX_BUFF_SIZE - (addr & (TRB_MAX_BUFF_SIZE - 1));
+	running_total &= TRB_MAX_BUFF_SIZE - 1;
 	if (running_total != 0)
 		num_trbs++;
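
The TRB math fixes all hinge on the distance-to-next-64KB-boundary expression: after masking with TRB_MAX_BUFF_SIZE - 1, a 64KB-aligned start contributes no partial first chunk, where the old form counted a full (and possibly nonexistent) 64KB TRB. A small runnable sketch of the corrected counting:

#include <stdio.h>
#include <stdint.h>

#define TRB_MAX_BUFF_SIZE (1 << 16)     /* 64KB per TRB */

/* Count 64KB-bounded TRBs for a buffer using the corrected math:
 * the extra mask makes "distance to next boundary" 0 for aligned
 * addresses instead of a full 64KB. */
static unsigned count_trbs(uint64_t addr, uint64_t len)
{
	uint64_t running_total;
	unsigned num_trbs = 0;

	running_total = TRB_MAX_BUFF_SIZE - (addr & (TRB_MAX_BUFF_SIZE - 1));
	running_total &= TRB_MAX_BUFF_SIZE - 1;    /* 0 if addr is aligned */
	if (running_total != 0)
		num_trbs++;                        /* partial first chunk */

	while (running_total < len) {
		num_trbs++;
		running_total += TRB_MAX_BUFF_SIZE;
	}
	return num_trbs;
}

int main(void)
{
	printf("%u\n", count_trbs(0x10000, 0x20000));  /* aligned: 2 TRBs */
	printf("%u\n", count_trbs(0x1F000, 0x20000));  /* crosses: 3 TRBs */
	return 0;
}
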
 
diff --git a/drivers/usb/host/xhci.c b/drivers/usb/host/xhci.c
index 34cf4e1..2083fc2 100644
--- a/drivers/usb/host/xhci.c
+++ b/drivers/usb/host/xhci.c
@@ -109,7 +109,7 @@
 /*
  * Set the run bit and wait for the host to be running.
  */
-int xhci_start(struct xhci_hcd *xhci)
+static int xhci_start(struct xhci_hcd *xhci)
 {
 	u32 temp;
 	int ret;
@@ -329,7 +329,7 @@
 
 
 #ifdef CONFIG_USB_XHCI_HCD_DEBUGGING
-void xhci_event_ring_work(unsigned long arg)
+static void xhci_event_ring_work(unsigned long arg)
 {
 	unsigned long flags;
 	int temp;
@@ -473,7 +473,7 @@
 			xhci->ir_set, (unsigned int) ER_IRQ_ENABLE(temp));
 	xhci_writel(xhci, ER_IRQ_ENABLE(temp),
 			&xhci->ir_set->irq_pending);
-	xhci_print_ir_set(xhci, xhci->ir_set, 0);
+	xhci_print_ir_set(xhci, 0);
 
 	if (NUM_TEST_NOOPS > 0)
 		doorbell = xhci_setup_one_noop(xhci);
@@ -528,7 +528,7 @@
 	temp = xhci_readl(xhci, &xhci->ir_set->irq_pending);
 	xhci_writel(xhci, ER_IRQ_DISABLE(temp),
 			&xhci->ir_set->irq_pending);
-	xhci_print_ir_set(xhci, xhci->ir_set, 0);
+	xhci_print_ir_set(xhci, 0);
 
 	xhci_dbg(xhci, "cleaning up memory\n");
 	xhci_mem_cleanup(xhci);
@@ -755,7 +755,7 @@
 		temp = xhci_readl(xhci, &xhci->ir_set->irq_pending);
 		xhci_writel(xhci, ER_IRQ_DISABLE(temp),
 				&xhci->ir_set->irq_pending);
-		xhci_print_ir_set(xhci, xhci->ir_set, 0);
+		xhci_print_ir_set(xhci, 0);
 
 		xhci_dbg(xhci, "cleaning up memory\n");
 		xhci_mem_cleanup(xhci);
@@ -857,7 +857,7 @@
 /* Returns 1 if the arguments are OK;
  * returns 0 this is a root hub; returns -EINVAL for NULL pointers.
  */
-int xhci_check_args(struct usb_hcd *hcd, struct usb_device *udev,
+static int xhci_check_args(struct usb_hcd *hcd, struct usb_device *udev,
 		struct usb_host_endpoint *ep, int check_ep, bool check_virt_dev,
 		const char *func) {
 	struct xhci_hcd	*xhci;
@@ -1693,7 +1693,7 @@
 	xhci_dbg_ctx(xhci, in_ctx, xhci_last_valid_endpoint(add_flags));
 }
 
-void xhci_setup_input_ctx_for_quirk(struct xhci_hcd *xhci,
+static void xhci_setup_input_ctx_for_quirk(struct xhci_hcd *xhci,
 		unsigned int slot_id, unsigned int ep_index,
 		struct xhci_dequeue_state *deq_state)
 {
diff --git a/drivers/usb/host/xhci.h b/drivers/usb/host/xhci.h
index 7f236fd..7f127df 100644
--- a/drivers/usb/host/xhci.h
+++ b/drivers/usb/host/xhci.h
@@ -1348,7 +1348,7 @@
 }
 
 /* xHCI debugging */
-void xhci_print_ir_set(struct xhci_hcd *xhci, struct xhci_intr_reg *ir_set, int set_num);
+void xhci_print_ir_set(struct xhci_hcd *xhci, int set_num);
 void xhci_print_registers(struct xhci_hcd *xhci);
 void xhci_dbg_regs(struct xhci_hcd *xhci);
 void xhci_print_run_regs(struct xhci_hcd *xhci);
diff --git a/drivers/usb/musb/musb_core.c b/drivers/usb/musb/musb_core.c
index 54a8bd1..c292d5c 100644
--- a/drivers/usb/musb/musb_core.c
+++ b/drivers/usb/musb/musb_core.c
@@ -1864,6 +1864,7 @@
 	INIT_LIST_HEAD(&musb->out_bulk);
 
 	hcd->uses_new_polling = 1;
+	hcd->has_tt = 1;
 
 	musb->vbuserr_retry = VBUSERR_RETRY_COUNT;
 	musb->a_wait_bcon = OTG_TIME_A_WAIT_BCON;
diff --git a/fs/afs/write.c b/fs/afs/write.c
index 15690bb..789b3af 100644
--- a/fs/afs/write.c
+++ b/fs/afs/write.c
@@ -140,6 +140,7 @@
 	candidate->first = candidate->last = index;
 	candidate->offset_first = from;
 	candidate->to_last = to;
+	INIT_LIST_HEAD(&candidate->link);
 	candidate->usage = 1;
 	candidate->state = AFS_WBACK_PENDING;
 	init_waitqueue_head(&candidate->waitq);
diff --git a/fs/aio.c b/fs/aio.c
index fc557a3..26869cd 100644
--- a/fs/aio.c
+++ b/fs/aio.c
@@ -239,15 +239,23 @@
 	call_rcu(&ctx->rcu_head, ctx_rcu_free);
 }
 
-#define get_ioctx(kioctx) do {						\
-	BUG_ON(atomic_read(&(kioctx)->users) <= 0);			\
-	atomic_inc(&(kioctx)->users);					\
-} while (0)
-#define put_ioctx(kioctx) do {						\
-	BUG_ON(atomic_read(&(kioctx)->users) <= 0);			\
-	if (unlikely(atomic_dec_and_test(&(kioctx)->users))) 		\
-		__put_ioctx(kioctx);					\
-} while (0)
+static inline void get_ioctx(struct kioctx *kioctx)
+{
+	BUG_ON(atomic_read(&kioctx->users) <= 0);
+	atomic_inc(&kioctx->users);
+}
+
+static inline int try_get_ioctx(struct kioctx *kioctx)
+{
+	return atomic_inc_not_zero(&kioctx->users);
+}
+
+static inline void put_ioctx(struct kioctx *kioctx)
+{
+	BUG_ON(atomic_read(&kioctx->users) <= 0);
+	if (unlikely(atomic_dec_and_test(&kioctx->users)))
+		__put_ioctx(kioctx);
+}
 
 /* ioctx_alloc
  *	Allocates and initializes an ioctx.  Returns an ERR_PTR if it failed.
@@ -601,8 +609,13 @@
 	rcu_read_lock();
 
 	hlist_for_each_entry_rcu(ctx, n, &mm->ioctx_list, list) {
-		if (ctx->user_id == ctx_id && !ctx->dead) {
-			get_ioctx(ctx);
+		/*
+		 * RCU protects us against accessing freed memory but
+		 * we have to be careful not to get a reference when the
+		 * reference count already dropped to 0 (ctx->dead test
+		 * is unreliable because of races).
+		 */
+		if (ctx->user_id == ctx_id && !ctx->dead && try_get_ioctx(ctx)){
 			ret = ctx;
 			break;
 		}
@@ -1629,6 +1642,23 @@
 		goto out_put_req;
 
 	spin_lock_irq(&ctx->ctx_lock);
+	/*
+	 * We could have raced with io_destroy() and are currently holding a
+	 * reference to ctx which should be destroyed. We cannot submit IO
+	 * since ctx gets freed as soon as io_submit() puts its reference.  The
+	 * check here is reliable: io_destroy() sets ctx->dead before waiting
+	 * for outstanding IO and the barrier between these two is realized by
+	 * unlock of mm->ioctx_lock and lock of ctx->ctx_lock.  Analogously we
+	 * increment ctx->reqs_active before checking for ctx->dead and the
+	 * barrier is realized by unlock and lock of ctx->ctx_lock. Thus if we
+	 * don't see ctx->dead set here, io_destroy() waits for our IO to
+	 * finish.
+	 */
+	if (ctx->dead) {
+		spin_unlock_irq(&ctx->ctx_lock);
+		ret = -EINVAL;
+		goto out_put_req;
+	}
 	aio_run_iocb(req);
 	if (!list_empty(&ctx->run_list)) {
 		/* drain the run list */
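
The lookup_ioctx() change only takes a reference via atomic_inc_not_zero(), so a kioctx whose user count has already dropped to zero (and is being freed under RCU) can never be revived by a racing io_submit(). A userspace sketch of that take-only-if-nonzero pattern using C11 atomics; the kernel helper itself is atomic_inc_not_zero():

#include <stdatomic.h>
#include <stdio.h>

/* Take a reference only if the count is still non-zero, so an object
 * already on its way to being freed is never resurrected. */
static int inc_not_zero(atomic_int *users)
{
	int old = atomic_load(users);

	while (old != 0) {
		if (atomic_compare_exchange_weak(users, &old, old + 1))
			return 1;      /* got a reference */
	}
	return 0;                      /* count was zero: lookup must fail */
}

int main(void)
{
	atomic_int live = 2, dying = 0;

	printf("live:  %d (users now %d)\n", inc_not_zero(&live),
	       atomic_load(&live));
	printf("dying: %d (users still %d)\n", inc_not_zero(&dying),
	       atomic_load(&dying));
	return 0;
}
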
diff --git a/fs/block_dev.c b/fs/block_dev.c
index 4fb8a34..8892870 100644
--- a/fs/block_dev.c
+++ b/fs/block_dev.c
@@ -873,6 +873,11 @@
 	ret = add_symlink(bdev->bd_part->holder_dir, &disk_to_dev(disk)->kobj);
 	if (ret)
 		goto out_del;
+	/*
+	 * bdev could be deleted beneath us which would implicitly destroy
+	 * the holder directory.  Hold on to it.
+	 */
+	kobject_get(bdev->bd_part->holder_dir);
 
 	list_add(&holder->list, &bdev->bd_holder_disks);
 	goto out_unlock;
@@ -909,6 +914,7 @@
 		del_symlink(disk->slave_dir, &part_to_dev(bdev->bd_part)->kobj);
 		del_symlink(bdev->bd_part->holder_dir,
 			    &disk_to_dev(disk)->kobj);
+		kobject_put(bdev->bd_part->holder_dir);
 		list_del_init(&holder->list);
 		kfree(holder);
 	}
@@ -922,14 +928,15 @@
  * flush_disk - invalidates all buffer-cache entries on a disk
  *
  * @bdev:      struct block device to be flushed
+ * @kill_dirty: flag to guide handling of dirty inodes
  *
  * Invalidates all buffer-cache entries on a disk. It should be called
  * when a disk has been changed -- either by a media change or online
  * resize.
  */
-static void flush_disk(struct block_device *bdev)
+static void flush_disk(struct block_device *bdev, bool kill_dirty)
 {
-	if (__invalidate_device(bdev)) {
+	if (__invalidate_device(bdev, kill_dirty)) {
 		char name[BDEVNAME_SIZE] = "";
 
 		if (bdev->bd_disk)
@@ -966,7 +973,7 @@
 		       "%s: detected capacity change from %lld to %lld\n",
 		       name, bdev_size, disk_size);
 		i_size_write(bdev->bd_inode, disk_size);
-		flush_disk(bdev);
+		flush_disk(bdev, false);
 	}
 }
 EXPORT_SYMBOL(check_disk_size_change);
@@ -1019,7 +1026,7 @@
 	if (!(events & DISK_EVENT_MEDIA_CHANGE))
 		return 0;
 
-	flush_disk(bdev);
+	flush_disk(bdev, true);
 	if (bdops->revalidate_disk)
 		bdops->revalidate_disk(bdev->bd_disk);
 	return 1;
@@ -1600,7 +1607,7 @@
 }
 EXPORT_SYMBOL(lookup_bdev);
 
-int __invalidate_device(struct block_device *bdev)
+int __invalidate_device(struct block_device *bdev, bool kill_dirty)
 {
 	struct super_block *sb = get_super(bdev);
 	int res = 0;
@@ -1613,7 +1620,7 @@
 		 * hold).
 		 */
 		shrink_dcache_sb(sb);
-		res = invalidate_inodes(sb);
+		res = invalidate_inodes(sb, kill_dirty);
 		drop_super(sb);
 	}
 	invalidate_bdev(bdev);
diff --git a/fs/btrfs/ctree.h b/fs/btrfs/ctree.h
index 2c98b3a..6f820fa 100644
--- a/fs/btrfs/ctree.h
+++ b/fs/btrfs/ctree.h
@@ -1254,6 +1254,7 @@
 #define BTRFS_MOUNT_SPACE_CACHE		(1 << 12)
 #define BTRFS_MOUNT_CLEAR_CACHE		(1 << 13)
 #define BTRFS_MOUNT_USER_SUBVOL_RM_ALLOWED (1 << 14)
+#define BTRFS_MOUNT_ENOSPC_DEBUG	 (1 << 15)
 
 #define btrfs_clear_opt(o, opt)		((o) &= ~BTRFS_MOUNT_##opt)
 #define btrfs_set_opt(o, opt)		((o) |= BTRFS_MOUNT_##opt)
@@ -2218,6 +2219,8 @@
 				   u64 start, u64 end);
 int btrfs_error_discard_extent(struct btrfs_root *root, u64 bytenr,
 			       u64 num_bytes);
+int btrfs_force_chunk_alloc(struct btrfs_trans_handle *trans,
+			    struct btrfs_root *root, u64 type);
 
 /* ctree.c */
 int btrfs_bin_search(struct extent_buffer *eb, struct btrfs_key *key,
diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c
index f3c96fc..588ff98 100644
--- a/fs/btrfs/extent-tree.c
+++ b/fs/btrfs/extent-tree.c
@@ -5376,7 +5376,7 @@
 			       num_bytes, data, 1);
 		goto again;
 	}
-	if (ret == -ENOSPC) {
+	if (ret == -ENOSPC && btrfs_test_opt(root, ENOSPC_DEBUG)) {
 		struct btrfs_space_info *sinfo;
 
 		sinfo = __find_space_info(root->fs_info, data);
@@ -8065,6 +8065,13 @@
 	return ret;
 }
 
+int btrfs_force_chunk_alloc(struct btrfs_trans_handle *trans,
+			    struct btrfs_root *root, u64 type)
+{
+	u64 alloc_flags = get_alloc_profile(root, type);
+	return do_chunk_alloc(trans, root, 2 * 1024 * 1024, alloc_flags, 1);
+}
+
 /*
  * helper to account the unused space of all the readonly block group in the
  * list. takes mirrors into account.
diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c
index 92ac519..fd3f172 100644
--- a/fs/btrfs/extent_io.c
+++ b/fs/btrfs/extent_io.c
@@ -1433,12 +1433,13 @@
  */
 u64 count_range_bits(struct extent_io_tree *tree,
 		     u64 *start, u64 search_end, u64 max_bytes,
-		     unsigned long bits)
+		     unsigned long bits, int contig)
 {
 	struct rb_node *node;
 	struct extent_state *state;
 	u64 cur_start = *start;
 	u64 total_bytes = 0;
+	u64 last = 0;
 	int found = 0;
 
 	if (search_end <= cur_start) {
@@ -1463,7 +1464,9 @@
 		state = rb_entry(node, struct extent_state, rb_node);
 		if (state->start > search_end)
 			break;
-		if (state->end >= cur_start && (state->state & bits)) {
+		if (contig && found && state->start > last + 1)
+			break;
+		if (state->end >= cur_start && (state->state & bits) == bits) {
 			total_bytes += min(search_end, state->end) + 1 -
 				       max(cur_start, state->start);
 			if (total_bytes >= max_bytes)
@@ -1472,6 +1475,9 @@
 				*start = state->start;
 				found = 1;
 			}
+			last = state->end;
+		} else if (contig && found) {
+			break;
 		}
 		node = rb_next(node);
 		if (!node)
@@ -2912,6 +2918,46 @@
 	return sector;
 }
 
+/*
+ * helper function for fiemap, which doesn't want to see any holes.
+ * This maps until we find something past 'last'
+ */
+static struct extent_map *get_extent_skip_holes(struct inode *inode,
+						u64 offset,
+						u64 last,
+						get_extent_t *get_extent)
+{
+	u64 sectorsize = BTRFS_I(inode)->root->sectorsize;
+	struct extent_map *em;
+	u64 len;
+
+	if (offset >= last)
+		return NULL;
+
+	while (1) {
+		len = last - offset;
+		if (len == 0)
+			break;
+		len = (len + sectorsize - 1) & ~(sectorsize - 1);
+		em = get_extent(inode, NULL, 0, offset, len, 0);
+		if (!em || IS_ERR(em))
+			return em;
+
+		/* if this isn't a hole return it */
+		if (!test_bit(EXTENT_FLAG_VACANCY, &em->flags) &&
+		    em->block_start != EXTENT_MAP_HOLE) {
+			return em;
+		}
+
+		/* this is a hole, advance to the next extent */
+		offset = extent_map_end(em);
+		free_extent_map(em);
+		if (offset >= last)
+			break;
+	}
+	return NULL;
+}
+
 int extent_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
 		__u64 start, __u64 len, get_extent_t *get_extent)
 {
@@ -2921,16 +2967,19 @@
 	u32 flags = 0;
 	u32 found_type;
 	u64 last;
+	u64 last_for_get_extent = 0;
 	u64 disko = 0;
+	u64 isize = i_size_read(inode);
 	struct btrfs_key found_key;
 	struct extent_map *em = NULL;
 	struct extent_state *cached_state = NULL;
 	struct btrfs_path *path;
 	struct btrfs_file_extent_item *item;
 	int end = 0;
-	u64 em_start = 0, em_len = 0;
+	u64 em_start = 0;
+	u64 em_len = 0;
+	u64 em_end = 0;
 	unsigned long emflags;
-	int hole = 0;
 
 	if (len == 0)
 		return -EINVAL;
@@ -2940,6 +2989,10 @@
 		return -ENOMEM;
 	path->leave_spinning = 1;
 
+	/*
+	 * lookup the last file extent.  We're not using i_size here
+	 * because there might be preallocation past i_size
+	 */
 	ret = btrfs_lookup_file_extent(NULL, BTRFS_I(inode)->root,
 				       path, inode->i_ino, -1, 0);
 	if (ret < 0) {
@@ -2953,18 +3006,38 @@
 	btrfs_item_key_to_cpu(path->nodes[0], &found_key, path->slots[0]);
 	found_type = btrfs_key_type(&found_key);
 
-	/* No extents, just return */
+	/* No extents, but there might be delalloc bits */
 	if (found_key.objectid != inode->i_ino ||
 	    found_type != BTRFS_EXTENT_DATA_KEY) {
-		btrfs_free_path(path);
-		return 0;
+		/* have to trust i_size as the end */
+		last = (u64)-1;
+		last_for_get_extent = isize;
+	} else {
+		/*
+		 * remember the start of the last extent.  There are a
+		 * bunch of different factors that go into the length of the
+		 * extent, so it's much less complex to remember where it started
+		 */
+		last = found_key.offset;
+		last_for_get_extent = last + 1;
 	}
-	last = found_key.offset;
 	btrfs_free_path(path);
 
+	/*
+	 * we might have some extents allocated but more delalloc past those
+	 * extents.  so, we trust isize unless the start of the last extent is
+	 * beyond isize
+	 */
+	if (last < isize) {
+		last = (u64)-1;
+		last_for_get_extent = isize;
+	}
+
 	lock_extent_bits(&BTRFS_I(inode)->io_tree, start, start + len, 0,
 			 &cached_state, GFP_NOFS);
-	em = get_extent(inode, NULL, 0, off, max - off, 0);
+
+	em = get_extent_skip_holes(inode, off, last_for_get_extent,
+				   get_extent);
 	if (!em)
 		goto out;
 	if (IS_ERR(em)) {
@@ -2973,19 +3046,14 @@
 	}
 
 	while (!end) {
-		hole = 0;
-		off = em->start + em->len;
+		off = extent_map_end(em);
 		if (off >= max)
 			end = 1;
 
-		if (em->block_start == EXTENT_MAP_HOLE) {
-			hole = 1;
-			goto next;
-		}
-
 		em_start = em->start;
 		em_len = em->len;
-
+		em_end = extent_map_end(em);
+		emflags = em->flags;
 		disko = 0;
 		flags = 0;
 
@@ -3004,37 +3072,29 @@
 		if (test_bit(EXTENT_FLAG_COMPRESSED, &em->flags))
 			flags |= FIEMAP_EXTENT_ENCODED;
 
-next:
-		emflags = em->flags;
 		free_extent_map(em);
 		em = NULL;
-		if (!end) {
-			em = get_extent(inode, NULL, 0, off, max - off, 0);
-			if (!em)
-				goto out;
-			if (IS_ERR(em)) {
-				ret = PTR_ERR(em);
-				goto out;
-			}
-			emflags = em->flags;
-		}
-
-		if (test_bit(EXTENT_FLAG_VACANCY, &emflags)) {
+		if ((em_start >= last) || em_len == (u64)-1 ||
+		   (last == (u64)-1 && isize <= em_end)) {
 			flags |= FIEMAP_EXTENT_LAST;
 			end = 1;
 		}
 
-		if (em_start == last) {
+		/* now scan forward to see if this is really the last extent. */
+		em = get_extent_skip_holes(inode, off, last_for_get_extent,
+					   get_extent);
+		if (IS_ERR(em)) {
+			ret = PTR_ERR(em);
+			goto out;
+		}
+		if (!em) {
 			flags |= FIEMAP_EXTENT_LAST;
 			end = 1;
 		}
-
-		if (!hole) {
-			ret = fiemap_fill_next_extent(fieinfo, em_start, disko,
-						em_len, flags);
-			if (ret)
-				goto out_free;
-		}
+		ret = fiemap_fill_next_extent(fieinfo, em_start, disko,
+					      em_len, flags);
+		if (ret)
+			goto out_free;
 	}
 out_free:
 	free_extent_map(em);
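
count_range_bits() gains a "contig" flag: once a matching range has been found, the walk stops at the first gap or at the first range missing the requested bits, so the fiemap path measures only the contiguous delalloc run starting at *start. A minimal userspace sketch of that behaviour over a flat array of ranges:

#include <stdio.h>

struct range { unsigned long long start, end; unsigned bits; };

/* Sum bytes in ranges carrying all requested bits; with "contig" set,
 * stop at the first gap after something was found instead of skipping
 * over it. */
static unsigned long long count_bits(const struct range *r, int n,
				     unsigned bits, int contig)
{
	unsigned long long total = 0, last = 0;
	int found = 0, i;

	for (i = 0; i < n; i++) {
		if (contig && found && r[i].start > last + 1)
			break;                      /* hole: contiguous run ends */
		if ((r[i].bits & bits) == bits) {
			total += r[i].end - r[i].start + 1;
			found = 1;
			last = r[i].end;
		} else if (contig && found) {
			break;                      /* bits dropped: run ends */
		}
	}
	return total;
}

int main(void)
{
	/* three flagged ranges with a hole before the last one */
	struct range r[] = {
		{ 0,     4095,  0x1 },
		{ 4096,  8191,  0x1 },
		{ 16384, 20479, 0x1 },
	};
	printf("any:    %llu\n", count_bits(r, 3, 0x1, 0));  /* 12288 */
	printf("contig: %llu\n", count_bits(r, 3, 0x1, 1));  /* 8192  */
	return 0;
}
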
diff --git a/fs/btrfs/extent_io.h b/fs/btrfs/extent_io.h
index 7083cfa..9318dfe 100644
--- a/fs/btrfs/extent_io.h
+++ b/fs/btrfs/extent_io.h
@@ -191,7 +191,7 @@
 
 u64 count_range_bits(struct extent_io_tree *tree,
 		     u64 *start, u64 search_end,
-		     u64 max_bytes, unsigned long bits);
+		     u64 max_bytes, unsigned long bits, int contig);
 
 void free_extent_state(struct extent_state *state);
 int test_range_bit(struct extent_io_tree *tree, u64 start, u64 end,
diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
index fb9bd78..0efdb65 100644
--- a/fs/btrfs/inode.c
+++ b/fs/btrfs/inode.c
@@ -1913,7 +1913,7 @@
 
 	private = 0;
 	if (count_range_bits(&BTRFS_I(inode)->io_failure_tree, &private,
-			     (u64)-1, 1, EXTENT_DIRTY)) {
+			     (u64)-1, 1, EXTENT_DIRTY, 0)) {
 		ret = get_state_private(&BTRFS_I(inode)->io_failure_tree,
 					start, &private_failure);
 		if (ret == 0) {
@@ -5280,6 +5280,128 @@
 	return em;
 }
 
+struct extent_map *btrfs_get_extent_fiemap(struct inode *inode, struct page *page,
+					   size_t pg_offset, u64 start, u64 len,
+					   int create)
+{
+	struct extent_map *em;
+	struct extent_map *hole_em = NULL;
+	u64 range_start = start;
+	u64 end;
+	u64 found;
+	u64 found_end;
+	int err = 0;
+
+	em = btrfs_get_extent(inode, page, pg_offset, start, len, create);
+	if (IS_ERR(em))
+		return em;
+	if (em) {
+		/*
+		 * if our em maps to a hole, there might
+		 * actually be delalloc bytes behind it
+		 */
+		if (em->block_start != EXTENT_MAP_HOLE)
+			return em;
+		else
+			hole_em = em;
+	}
+
+	/* check to see if we've wrapped (len == -1 or similar) */
+	end = start + len;
+	if (end < start)
+		end = (u64)-1;
+	else
+		end -= 1;
+
+	em = NULL;
+
+	/* ok, we didn't find anything, let's look for delalloc */
+	found = count_range_bits(&BTRFS_I(inode)->io_tree, &range_start,
+				 end, len, EXTENT_DELALLOC, 1);
+	found_end = range_start + found;
+	if (found_end < range_start)
+		found_end = (u64)-1;
+
+	/*
+	 * we didn't find anything useful, return
+	 * the original results from get_extent()
+	 */
+	if (range_start > end || found_end <= start) {
+		em = hole_em;
+		hole_em = NULL;
+		goto out;
+	}
+
+	/* adjust the range_start to make sure it doesn't
+	 * go backwards from the start they passed in
+	 */
+	range_start = max(start, range_start);
+	found = found_end - range_start;
+
+	if (found > 0) {
+		u64 hole_start = start;
+		u64 hole_len = len;
+
+		em = alloc_extent_map(GFP_NOFS);
+		if (!em) {
+			err = -ENOMEM;
+			goto out;
+		}
+		/*
+		 * when btrfs_get_extent can't find anything it
+		 * returns one huge hole
+		 *
+		 * make sure what it found really fits our range, and
+		 * adjust to make sure it is based on the start from
+		 * the caller
+		 */
+		if (hole_em) {
+			u64 calc_end = extent_map_end(hole_em);
+
+			if (calc_end <= start || (hole_em->start > end)) {
+				free_extent_map(hole_em);
+				hole_em = NULL;
+			} else {
+				hole_start = max(hole_em->start, start);
+				hole_len = calc_end - hole_start;
+			}
+		}
+		em->bdev = NULL;
+		if (hole_em && range_start > hole_start) {
+			/* our hole starts before our delalloc, so we
+			 * have to return just the parts of the hole
+			 * that go until the delalloc starts
+			 */
+			em->len = min(hole_len,
+				      range_start - hole_start);
+			em->start = hole_start;
+			em->orig_start = hole_start;
+			/*
+			 * don't adjust block start at all,
+			 * it is fixed at EXTENT_MAP_HOLE
+			 */
+			em->block_start = hole_em->block_start;
+			em->block_len = hole_len;
+		} else {
+			em->start = range_start;
+			em->len = found;
+			em->orig_start = range_start;
+			em->block_start = EXTENT_MAP_DELALLOC;
+			em->block_len = found;
+		}
+	} else if (hole_em) {
+		return hole_em;
+	}
+out:
+
+	free_extent_map(hole_em);
+	if (err) {
+		free_extent_map(em);
+		return ERR_PTR(err);
+	}
+	return em;
+}
+
 static struct extent_map *btrfs_new_extent_direct(struct inode *inode,
 						  u64 start, u64 len)
 {
@@ -6102,7 +6224,7 @@
 static int btrfs_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
 		__u64 start, __u64 len)
 {
-	return extent_fiemap(inode, fieinfo, start, len, btrfs_get_extent);
+	return extent_fiemap(inode, fieinfo, start, len, btrfs_get_extent_fiemap);
 }
 
 int btrfs_readpage(struct file *file, struct page *page)
diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c
index be2d4f6..5fdb2ab 100644
--- a/fs/btrfs/ioctl.c
+++ b/fs/btrfs/ioctl.c
@@ -1071,12 +1071,15 @@
 	if (copy_from_user(&flags, arg, sizeof(flags)))
 		return -EFAULT;
 
-	if (flags & ~BTRFS_SUBVOL_CREATE_ASYNC)
+	if (flags & BTRFS_SUBVOL_CREATE_ASYNC)
 		return -EINVAL;
 
 	if (flags & ~BTRFS_SUBVOL_RDONLY)
 		return -EOPNOTSUPP;
 
+	if (!is_owner_or_cap(inode))
+		return -EACCES;
+
 	down_write(&root->fs_info->subvol_sem);
 
 	/* nothing to do */
@@ -1097,7 +1100,7 @@
 		goto out_reset;
 	}
 
-	ret = btrfs_update_root(trans, root,
+	ret = btrfs_update_root(trans, root->fs_info->tree_root,
 				&root->root_key, &root->root_item);
 
 	btrfs_commit_transaction(trans, root);
diff --git a/fs/btrfs/lzo.c b/fs/btrfs/lzo.c
index cc9b450..a178f5e 100644
--- a/fs/btrfs/lzo.c
+++ b/fs/btrfs/lzo.c
@@ -280,6 +280,7 @@
 	unsigned long tot_out;
 	unsigned long tot_len;
 	char *buf;
+	bool may_late_unmap, need_unmap;
 
 	data_in = kmap(pages_in[0]);
 	tot_len = read_compress_length(data_in);
@@ -300,11 +301,13 @@
 
 		tot_in += in_len;
 		working_bytes = in_len;
+		may_late_unmap = need_unmap = false;
 
 		/* fast path: avoid using the working buffer */
 		if (in_page_bytes_left >= in_len) {
 			buf = data_in + in_offset;
 			bytes = in_len;
+			may_late_unmap = true;
 			goto cont;
 		}
 
@@ -329,14 +332,17 @@
 				if (working_bytes == 0 && tot_in >= tot_len)
 					break;
 
-				kunmap(pages_in[page_in_index]);
-				page_in_index++;
-				if (page_in_index >= total_pages_in) {
+				if (page_in_index + 1 >= total_pages_in) {
 					ret = -1;
-					data_in = NULL;
 					goto done;
 				}
-				data_in = kmap(pages_in[page_in_index]);
+
+				if (may_late_unmap)
+					need_unmap = true;
+				else
+					kunmap(pages_in[page_in_index]);
+
+				data_in = kmap(pages_in[++page_in_index]);
 
 				in_page_bytes_left = PAGE_CACHE_SIZE;
 				in_offset = 0;
@@ -346,6 +352,8 @@
 		out_len = lzo1x_worst_compress(PAGE_CACHE_SIZE);
 		ret = lzo1x_decompress_safe(buf, in_len, workspace->buf,
 					    &out_len);
+		if (need_unmap)
+			kunmap(pages_in[page_in_index - 1]);
 		if (ret != LZO_E_OK) {
 			printk(KERN_WARNING "btrfs decompress failed\n");
 			ret = -1;
@@ -363,8 +371,7 @@
 			break;
 	}
 done:
-	if (data_in)
-		kunmap(pages_in[page_in_index]);
+	kunmap(pages_in[page_in_index]);
 	return ret;
 }
 
diff --git a/fs/btrfs/relocation.c b/fs/btrfs/relocation.c
index 0825e4e..31ade58 100644
--- a/fs/btrfs/relocation.c
+++ b/fs/btrfs/relocation.c
@@ -3654,6 +3654,7 @@
 	u32 item_size;
 	int ret;
 	int err = 0;
+	int progress = 0;
 
 	path = btrfs_alloc_path();
 	if (!path)
@@ -3666,9 +3667,10 @@
 	}
 
 	while (1) {
+		progress++;
 		trans = btrfs_start_transaction(rc->extent_root, 0);
 		BUG_ON(IS_ERR(trans));
-
+restart:
 		if (update_backref_cache(trans, &rc->backref_cache)) {
 			btrfs_end_transaction(trans, rc->extent_root);
 			continue;
@@ -3781,6 +3783,15 @@
 			}
 		}
 	}
+	if (trans && progress && err == -ENOSPC) {
+		ret = btrfs_force_chunk_alloc(trans, rc->extent_root,
+					      rc->block_group->flags);
+		if (ret == 0) {
+			err = 0;
+			progress = 0;
+			goto restart;
+		}
+	}
 
 	btrfs_release_path(rc->extent_root, path);
 	clear_extent_bits(&rc->processed_blocks, 0, (u64)-1, EXTENT_DIRTY,
diff --git a/fs/btrfs/super.c b/fs/btrfs/super.c
index a004008..d39a989 100644
--- a/fs/btrfs/super.c
+++ b/fs/btrfs/super.c
@@ -155,7 +155,8 @@
 	Opt_nossd, Opt_ssd_spread, Opt_thread_pool, Opt_noacl, Opt_compress,
 	Opt_compress_type, Opt_compress_force, Opt_compress_force_type,
 	Opt_notreelog, Opt_ratio, Opt_flushoncommit, Opt_discard,
-	Opt_space_cache, Opt_clear_cache, Opt_user_subvol_rm_allowed, Opt_err,
+	Opt_space_cache, Opt_clear_cache, Opt_user_subvol_rm_allowed,
+	Opt_enospc_debug, Opt_err,
 };
 
 static match_table_t tokens = {
@@ -184,6 +185,7 @@
 	{Opt_space_cache, "space_cache"},
 	{Opt_clear_cache, "clear_cache"},
 	{Opt_user_subvol_rm_allowed, "user_subvol_rm_allowed"},
+	{Opt_enospc_debug, "enospc_debug"},
 	{Opt_err, NULL},
 };
 
@@ -358,6 +360,9 @@
 		case Opt_user_subvol_rm_allowed:
 			btrfs_set_opt(info->mount_opt, USER_SUBVOL_RM_ALLOWED);
 			break;
+		case Opt_enospc_debug:
+			btrfs_set_opt(info->mount_opt, ENOSPC_DEBUG);
+			break;
 		case Opt_err:
 			printk(KERN_INFO "btrfs: unrecognized mount option "
 			       "'%s'\n", p);
diff --git a/fs/btrfs/volumes.c b/fs/btrfs/volumes.c
index af7dbca..dd13eb8 100644
--- a/fs/btrfs/volumes.c
+++ b/fs/btrfs/volumes.c
@@ -1338,11 +1338,11 @@
 
 	ret = btrfs_shrink_device(device, 0);
 	if (ret)
-		goto error_brelse;
+		goto error_undo;
 
 	ret = btrfs_rm_dev_item(root->fs_info->chunk_root, device);
 	if (ret)
-		goto error_brelse;
+		goto error_undo;
 
 	device->in_fs_metadata = 0;
 
@@ -1416,6 +1416,13 @@
 	mutex_unlock(&root->fs_info->volume_mutex);
 	mutex_unlock(&uuid_mutex);
 	return ret;
+error_undo:
+	if (device->writeable) {
+		list_add(&device->dev_alloc_list,
+			 &root->fs_info->fs_devices->alloc_list);
+		root->fs_info->fs_devices->rw_devices++;
+	}
+	goto error_brelse;
 }
 
 /*
@@ -1633,7 +1640,7 @@
 	device->dev_root = root->fs_info->dev_root;
 	device->bdev = bdev;
 	device->in_fs_metadata = 1;
-	device->mode = 0;
+	device->mode = FMODE_EXCL;
 	set_blocksize(device->bdev, 4096);
 
 	if (seeding_dev) {
diff --git a/fs/eventpoll.c b/fs/eventpoll.c
index 267d0ad..4a09af9 100644
--- a/fs/eventpoll.c
+++ b/fs/eventpoll.c
@@ -63,6 +63,13 @@
  * cleanup path and it is also acquired by eventpoll_release_file()
  * if a file has been pushed inside an epoll set and it is then
 * close()d without a previous call to epoll_ctl(EPOLL_CTL_DEL).
+ * It is also acquired when inserting an epoll fd onto another epoll
+ * fd. We do this so that we walk the epoll tree and ensure that this
+ * insertion does not create a cycle of epoll file descriptors, which
+ * could lead to deadlock. We need a global mutex to prevent two
+ * simultaneous inserts (A into B and B into A) from racing and
+ * constructing a cycle without either insert observing that it is
+ * going to.
  * It is possible to drop the "ep->mtx" and to use the global
  * mutex "epmutex" (together with "ep->lock") to have it working,
  * but having "ep->mtx" will make the interface more scalable.
@@ -224,6 +231,9 @@
  */
 static DEFINE_MUTEX(epmutex);
 
+/* Used to check for epoll file descriptor inclusion loops */
+static struct nested_calls poll_loop_ncalls;
+
 /* Used for safe wake up implementation */
 static struct nested_calls poll_safewake_ncalls;
 
@@ -1198,6 +1208,62 @@
 	return res;
 }
 
+/**
+ * ep_loop_check_proc - Callback function to be passed to the @ep_call_nested()
+ *                      API, to verify that adding an epoll file inside another
+ *                      epoll structure does not violate the constraints in
+ *                      terms of closed loops or too deep chains (which can
+ *                      result in excessive stack usage).
+ *
+ * @priv: Pointer to the epoll file currently being checked.
+ * @cookie: Original cookie for this call. This is the top-of-the-chain epoll
+ *          data structure pointer.
+ * @call_nests: Current depth of the @ep_call_nested() call stack.
+ *
+ * Returns: Returns zero if adding the epoll @file inside current epoll
+ *          structure @ep does not violate the constraints, or -1 otherwise.
+ */
+static int ep_loop_check_proc(void *priv, void *cookie, int call_nests)
+{
+	int error = 0;
+	struct file *file = priv;
+	struct eventpoll *ep = file->private_data;
+	struct rb_node *rbp;
+	struct epitem *epi;
+
+	mutex_lock(&ep->mtx);
+	for (rbp = rb_first(&ep->rbr); rbp; rbp = rb_next(rbp)) {
+		epi = rb_entry(rbp, struct epitem, rbn);
+		if (unlikely(is_file_epoll(epi->ffd.file))) {
+			error = ep_call_nested(&poll_loop_ncalls, EP_MAX_NESTS,
+					       ep_loop_check_proc, epi->ffd.file,
+					       epi->ffd.file->private_data, current);
+			if (error != 0)
+				break;
+		}
+	}
+	mutex_unlock(&ep->mtx);
+
+	return error;
+}
+
+/**
+ * ep_loop_check - Performs a check to verify that adding an epoll file (@file)
+ *                 inside another epoll file (represented by @ep) does not create
+ *                 closed loops or too deep chains.
+ *
+ * @ep: Pointer to the epoll private data structure.
+ * @file: Pointer to the epoll file to be checked.
+ *
+ * Returns: Returns zero if adding the epoll @file inside current epoll
+ *          structure @ep does not violate the constraints, or -1 otherwise.
+ */
+static int ep_loop_check(struct eventpoll *ep, struct file *file)
+{
+	return ep_call_nested(&poll_loop_ncalls, EP_MAX_NESTS,
+			      ep_loop_check_proc, file, ep, current);
+}
+
 /*
  * Open an eventpoll file descriptor.
  */
@@ -1246,6 +1312,7 @@
 		struct epoll_event __user *, event)
 {
 	int error;
+	int did_lock_epmutex = 0;
 	struct file *file, *tfile;
 	struct eventpoll *ep;
 	struct epitem *epi;
@@ -1287,6 +1354,25 @@
 	 */
 	ep = file->private_data;
 
+	/*
+	 * When we insert an epoll file descriptor inside another epoll file
+	 * descriptor, there is the chance of creating closed loops, which are
+	 * better handled here than in more critical paths.
+	 *
+	 * We hold epmutex across the loop check and the insert in this case, in
+	 * order to prevent two separate inserts from racing and each doing the
+	 * insert "at the same time" such that ep_loop_check passes on both
+	 * before either one does the insert, thereby creating a cycle.
+	 */
+	if (unlikely(is_file_epoll(tfile) && op == EPOLL_CTL_ADD)) {
+		mutex_lock(&epmutex);
+		did_lock_epmutex = 1;
+		error = -ELOOP;
+		if (ep_loop_check(ep, tfile) != 0)
+			goto error_tgt_fput;
+	}
+
+
 	mutex_lock(&ep->mtx);
 
 	/*
@@ -1322,6 +1408,9 @@
 	mutex_unlock(&ep->mtx);
 
 error_tgt_fput:
+	if (unlikely(did_lock_epmutex))
+		mutex_unlock(&epmutex);
+
 	fput(tfile);
 error_fput:
 	fput(file);
@@ -1441,6 +1530,12 @@
 		EP_ITEM_COST;
 	BUG_ON(max_user_watches < 0);
 
+	/*
+	 * Initialize the structure used to perform epoll file descriptor
+	 * inclusion loops checks.
+	 */
+	ep_nested_calls_init(&poll_loop_ncalls);
+
 	/* Initialize the structure used to perform safe poll wait head wake ups */
 	ep_nested_calls_init(&poll_safewake_ncalls);
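The eventpoll.c hunks above add a cycle check on the EPOLL_CTL_ADD path. A minimal user-space sketch of the behaviour they introduce (not part of the patch; the error-reporting plumbing is illustrative only): inserting epoll fd B into A succeeds, while the reverse insert that would close the loop is expected to fail with ELOOP.

#include <sys/epoll.h>
#include <errno.h>
#include <stdio.h>
#include <string.h>

int main(void)
{
	int a = epoll_create1(0);
	int b = epoll_create1(0);
	struct epoll_event ev = { .events = EPOLLIN };

	ev.data.fd = b;
	if (epoll_ctl(a, EPOLL_CTL_ADD, b, &ev))	/* A watches B: allowed */
		perror("add B into A");

	ev.data.fd = a;
	if (epoll_ctl(b, EPOLL_CTL_ADD, a, &ev))	/* would close the loop A -> B -> A */
		printf("add A into B failed: %s (ELOOP expected)\n",
		       strerror(errno));
	return 0;
}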
 
diff --git a/fs/fuse/dir.c b/fs/fuse/dir.c
index bfed844..83543b5 100644
--- a/fs/fuse/dir.c
+++ b/fs/fuse/dir.c
@@ -1283,8 +1283,11 @@
 	if (err)
 		return err;
 
-	if ((attr->ia_valid & ATTR_OPEN) && fc->atomic_o_trunc)
-		return 0;
+	if (attr->ia_valid & ATTR_OPEN) {
+		if (fc->atomic_o_trunc)
+			return 0;
+		file = NULL;
+	}
 
 	if (attr->ia_valid & ATTR_SIZE)
 		is_truncate = true;
diff --git a/fs/fuse/file.c b/fs/fuse/file.c
index 95da1bc..9e0832d 100644
--- a/fs/fuse/file.c
+++ b/fs/fuse/file.c
@@ -86,18 +86,52 @@
 	return ff;
 }
 
-static void fuse_release_end(struct fuse_conn *fc, struct fuse_req *req)
+static void fuse_release_async(struct work_struct *work)
 {
-	path_put(&req->misc.release.path);
+	struct fuse_req *req;
+	struct fuse_conn *fc;
+	struct path path;
+
+	req = container_of(work, struct fuse_req, misc.release.work);
+	path = req->misc.release.path;
+	fc = get_fuse_conn(path.dentry->d_inode);
+
+	fuse_put_request(fc, req);
+	path_put(&path);
 }
 
-static void fuse_file_put(struct fuse_file *ff)
+static void fuse_release_end(struct fuse_conn *fc, struct fuse_req *req)
+{
+	if (fc->destroy_req) {
+		/*
+		 * If this is a fuseblk mount, then it's possible that
+		 * releasing the path will result in releasing the
+		 * super block and sending the DESTROY request.  If
+		 * the server is single threaded, this would hang.
+		 * For this reason do the path_put() in a separate
+		 * thread.
+		 */
+		atomic_inc(&req->count);
+		INIT_WORK(&req->misc.release.work, fuse_release_async);
+		schedule_work(&req->misc.release.work);
+	} else {
+		path_put(&req->misc.release.path);
+	}
+}
+
+static void fuse_file_put(struct fuse_file *ff, bool sync)
 {
 	if (atomic_dec_and_test(&ff->count)) {
 		struct fuse_req *req = ff->reserved_req;
 
-		req->end = fuse_release_end;
-		fuse_request_send_background(ff->fc, req);
+		if (sync) {
+			fuse_request_send(ff->fc, req);
+			path_put(&req->misc.release.path);
+			fuse_put_request(ff->fc, req);
+		} else {
+			req->end = fuse_release_end;
+			fuse_request_send_background(ff->fc, req);
+		}
 		kfree(ff);
 	}
 }
@@ -219,8 +253,12 @@
 	 * Normally this will send the RELEASE request, however if
 	 * some asynchronous READ or WRITE requests are outstanding,
 	 * the sending will be delayed.
+	 *
+	 * Make the release synchronous if this is a fuseblk mount;
+	 * synchronous RELEASE is allowed (and desirable) in this case
+	 * because the server can be trusted not to screw up.
 	 */
-	fuse_file_put(ff);
+	fuse_file_put(ff, ff->fc->destroy_req != NULL);
 }
 
 static int fuse_open(struct inode *inode, struct file *file)
@@ -558,7 +596,7 @@
 		page_cache_release(page);
 	}
 	if (req->ff)
-		fuse_file_put(req->ff);
+		fuse_file_put(req->ff, false);
 }
 
 static void fuse_send_readpages(struct fuse_req *req, struct file *file)
@@ -1137,7 +1175,7 @@
 static void fuse_writepage_free(struct fuse_conn *fc, struct fuse_req *req)
 {
 	__free_page(req->pages[0]);
-	fuse_file_put(req->ff);
+	fuse_file_put(req->ff, false);
 }
 
 static void fuse_writepage_finish(struct fuse_conn *fc, struct fuse_req *req)
diff --git a/fs/fuse/fuse_i.h b/fs/fuse/fuse_i.h
index ae5744a..d428694 100644
--- a/fs/fuse/fuse_i.h
+++ b/fs/fuse/fuse_i.h
@@ -21,6 +21,7 @@
 #include <linux/rwsem.h>
 #include <linux/rbtree.h>
 #include <linux/poll.h>
+#include <linux/workqueue.h>
 
 /** Max number of pages that can be used in a single read request */
 #define FUSE_MAX_PAGES_PER_REQ 32
@@ -262,7 +263,10 @@
 	/** Data for asynchronous requests */
 	union {
 		struct {
-			struct fuse_release_in in;
+			union {
+				struct fuse_release_in in;
+				struct work_struct work;
+			};
 			struct path path;
 		} release;
 		struct fuse_init_in init_in;
diff --git a/fs/inode.c b/fs/inode.c
index 9c2b795..0647d80 100644
--- a/fs/inode.c
+++ b/fs/inode.c
@@ -548,11 +548,14 @@
 /**
  * invalidate_inodes	- attempt to free all inodes on a superblock
  * @sb:		superblock to operate on
+ * @kill_dirty: flag to guide handling of dirty inodes
  *
  * Attempts to free all inodes for a given superblock.  If there were any
  * busy inodes return a non-zero value, else zero.
+ * If @kill_dirty is set, discard dirty inodes too; otherwise treat
+ * them as busy.
  */
-int invalidate_inodes(struct super_block *sb)
+int invalidate_inodes(struct super_block *sb, bool kill_dirty)
 {
 	int busy = 0;
 	struct inode *inode, *next;
@@ -564,6 +567,10 @@
 	list_for_each_entry_safe(inode, next, &sb->s_inodes, i_sb_list) {
 		if (inode->i_state & (I_NEW | I_FREEING | I_WILL_FREE))
 			continue;
+		if (inode->i_state & I_DIRTY && !kill_dirty) {
+			busy = 1;
+			continue;
+		}
 		if (atomic_read(&inode->i_count)) {
 			busy = 1;
 			continue;
diff --git a/fs/internal.h b/fs/internal.h
index 0663568..9b976b5 100644
--- a/fs/internal.h
+++ b/fs/internal.h
@@ -112,4 +112,4 @@
  */
 extern int get_nr_dirty_inodes(void);
 extern void evict_inodes(struct super_block *);
-extern int invalidate_inodes(struct super_block *);
+extern int invalidate_inodes(struct super_block *, bool);
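A minimal sketch of how the new @kill_dirty argument is meant to be used by callers. The wrapper function and its arguments are illustrative, not taken from the patch; only invalidate_inodes() itself comes from the hunks above.

/* Sketch only: assumes a struct super_block *sb obtained elsewhere. */
static int example_invalidate(struct super_block *sb, bool device_gone)
{
	/*
	 * Normal case: dirty inodes count as busy, so the caller can back
	 * off and let writeback run first.
	 */
	if (!device_gone)
		return invalidate_inodes(sb, false);

	/* Backing device is gone: no point keeping dirty inodes around. */
	return invalidate_inodes(sb, true);
}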
diff --git a/fs/namespace.c b/fs/namespace.c
index 7b0b953..d1edf26 100644
--- a/fs/namespace.c
+++ b/fs/namespace.c
@@ -1244,7 +1244,7 @@
 		 */
 		br_write_lock(vfsmount_lock);
 		if (mnt_get_count(mnt) != 2) {
-			br_write_lock(vfsmount_lock);
+			br_write_unlock(vfsmount_lock);
 			return -EBUSY;
 		}
 		br_write_unlock(vfsmount_lock);
diff --git a/fs/ocfs2/journal.h b/fs/ocfs2/journal.h
index 43e56b9..6180da1 100644
--- a/fs/ocfs2/journal.h
+++ b/fs/ocfs2/journal.h
@@ -405,9 +405,9 @@
 	       ocfs2_quota_trans_credits(sb);
 }
 
-/* data block for new dir/symlink, 2 for bitmap updates (bitmap fe +
- * bitmap block for the new bit) dx_root update for free list */
-#define OCFS2_DIR_LINK_ADDITIONAL_CREDITS (1 + 2 + 1)
+/* data block for new dir/symlink, allocation of directory block, dx_root
+ * update for free list */
+#define OCFS2_DIR_LINK_ADDITIONAL_CREDITS (1 + OCFS2_SUBALLOC_ALLOC + 1)
 
 static inline int ocfs2_add_dir_index_credits(struct super_block *sb)
 {
diff --git a/fs/ocfs2/refcounttree.c b/fs/ocfs2/refcounttree.c
index b5f9160..19ebc5a 100644
--- a/fs/ocfs2/refcounttree.c
+++ b/fs/ocfs2/refcounttree.c
@@ -3228,7 +3228,7 @@
 					u32 num_clusters, unsigned int e_flags)
 {
 	int ret, delete, index, credits =  0;
-	u32 new_bit, new_len;
+	u32 new_bit, new_len, orig_num_clusters;
 	unsigned int set_len;
 	struct ocfs2_super *osb = OCFS2_SB(sb);
 	handle_t *handle;
@@ -3261,6 +3261,8 @@
 		goto out;
 	}
 
+	orig_num_clusters = num_clusters;
+
 	while (num_clusters) {
 		ret = ocfs2_get_refcount_rec(ref_ci, context->ref_root_bh,
 					     p_cluster, num_clusters,
@@ -3348,7 +3350,8 @@
 	 * in write-back mode.
 	 */
 	if (context->get_clusters == ocfs2_di_get_clusters) {
-		ret = ocfs2_cow_sync_writeback(sb, context, cpos, num_clusters);
+		ret = ocfs2_cow_sync_writeback(sb, context, cpos,
+					       orig_num_clusters);
 		if (ret)
 			mlog_errno(ret);
 	}
diff --git a/fs/ocfs2/super.c b/fs/ocfs2/super.c
index 38f986d..36c423f 100644
--- a/fs/ocfs2/super.c
+++ b/fs/ocfs2/super.c
@@ -1316,7 +1316,7 @@
 			       struct mount_options *mopt,
 			       int is_remount)
 {
-	int status;
+	int status, user_stack = 0;
 	char *p;
 	u32 tmp;
 
@@ -1459,6 +1459,15 @@
 			memcpy(mopt->cluster_stack, args[0].from,
 			       OCFS2_STACK_LABEL_LEN);
 			mopt->cluster_stack[OCFS2_STACK_LABEL_LEN] = '\0';
+			/*
+			 * Open code the memcmp here as we don't have
+			 * an osb to pass to
+			 * ocfs2_userspace_stack().
+			 */
+			if (memcmp(mopt->cluster_stack,
+				   OCFS2_CLASSIC_CLUSTER_STACK,
+				   OCFS2_STACK_LABEL_LEN))
+				user_stack = 1;
 			break;
 		case Opt_inode64:
 			mopt->mount_opt |= OCFS2_MOUNT_INODE64;
@@ -1514,13 +1523,16 @@
 		}
 	}
 
-	/* Ensure only one heartbeat mode */
-	tmp = mopt->mount_opt & (OCFS2_MOUNT_HB_LOCAL | OCFS2_MOUNT_HB_GLOBAL |
-				 OCFS2_MOUNT_HB_NONE);
-	if (hweight32(tmp) != 1) {
-		mlog(ML_ERROR, "Invalid heartbeat mount options\n");
-		status = 0;
-		goto bail;
+	if (user_stack == 0) {
+		/* Ensure only one heartbeat mode */
+		tmp = mopt->mount_opt & (OCFS2_MOUNT_HB_LOCAL |
+					 OCFS2_MOUNT_HB_GLOBAL |
+					 OCFS2_MOUNT_HB_NONE);
+		if (hweight32(tmp) != 1) {
+			mlog(ML_ERROR, "Invalid heartbeat mount options\n");
+			status = 0;
+			goto bail;
+		}
 	}
 
 	status = 1;
diff --git a/fs/partitions/ldm.c b/fs/partitions/ldm.c
index 789c625..b10e354 100644
--- a/fs/partitions/ldm.c
+++ b/fs/partitions/ldm.c
@@ -251,6 +251,11 @@
 	}
 
 	vm->vblk_size     = get_unaligned_be32(data + 0x08);
+	if (vm->vblk_size == 0) {
+		ldm_error ("Illegal VBLK size");
+		return false;
+	}
+
 	vm->vblk_offset   = get_unaligned_be32(data + 0x0C);
 	vm->last_vblk_seq = get_unaligned_be32(data + 0x04);
 
diff --git a/include/acpi/acoutput.h b/include/acpi/acoutput.h
index ef1cef7..d7bd661 100644
--- a/include/acpi/acoutput.h
+++ b/include/acpi/acoutput.h
@@ -183,13 +183,19 @@
 
 #if defined (ACPI_DEBUG_OUTPUT) || !defined (ACPI_NO_ERROR_MESSAGES)
 /*
- * Module name is included in both debug and non-debug versions primarily for
- * error messages. The __FILE__ macro is not very useful for this, because it
- * often includes the entire pathname to the module
+ * The module name is used primarily for error and debug messages.
+ * The __FILE__ macro is not very useful for this, because it
+ * usually includes the entire pathname to the module, making the
+ * debug output difficult to read.
  */
 #define ACPI_MODULE_NAME(name)          static const char ACPI_UNUSED_VAR _acpi_module_name[] = name;
 #else
+/*
+ * For the no-debug and no-error-msg cases, we must at least define
+ * a null module name.
+ */
 #define ACPI_MODULE_NAME(name)
+#define _acpi_module_name ""
 #endif
 
 /*
diff --git a/include/acpi/acpixf.h b/include/acpi/acpixf.h
index e46ec95..ec90854 100644
--- a/include/acpi/acpixf.h
+++ b/include/acpi/acpixf.h
@@ -47,7 +47,7 @@
 
 /* Current ACPICA subsystem version in YYYYMMDD format */
 
-#define ACPI_CA_VERSION                 0x20110112
+#define ACPI_CA_VERSION                 0x20110211
 
 #include "actypes.h"
 #include "actbl.h"
diff --git a/include/acpi/actbl.h b/include/acpi/actbl.h
index 7e42bfe..d41c948 100644
--- a/include/acpi/actbl.h
+++ b/include/acpi/actbl.h
@@ -343,4 +343,20 @@
 #include <acpi/actbl1.h>
 #include <acpi/actbl2.h>
 
+/*
+ * Sizes of the various flavors of FADT. We need to look closely
+ * at the FADT length because the version number tells us essentially
+ * nothing, due to the many BIOS bugs where the version does not
+ * match the expected length. In other words, the length of the
+ * FADT is the bottom line as to what the version really is.
+ *
+ * For reference, the values below are as follows:
+ *     FADT V1  size: 0x74
+ *     FADT V2  size: 0x84
+ *     FADT V3+ size: 0xF4
+ */
+#define ACPI_FADT_V1_SIZE       (u32) (ACPI_FADT_OFFSET (flags) + 4)
+#define ACPI_FADT_V2_SIZE       (u32) (ACPI_FADT_OFFSET (reserved4[0]) + 3)
+#define ACPI_FADT_V3_SIZE       (u32) (sizeof (struct acpi_table_fadt))
+
 #endif				/* __ACTBL_H__ */
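As the comment above says, the table length rather than the revision field is what identifies the FADT flavor. A hypothetical helper using only the three size macros added in this hunk (the function name is illustrative):

static inline int fadt_flavor_from_length(u32 length)
{
	if (length >= ACPI_FADT_V3_SIZE)
		return 3;	/* V3 or later (0xF4 bytes or more) */
	if (length >= ACPI_FADT_V2_SIZE)
		return 2;	/* 0x84 bytes */
	if (length >= ACPI_FADT_V1_SIZE)
		return 1;	/* 0x74 bytes */
	return 0;		/* truncated or unrecognized */
}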
diff --git a/include/acpi/apei.h b/include/acpi/apei.h
index c4dbb13..e67b523 100644
--- a/include/acpi/apei.h
+++ b/include/acpi/apei.h
@@ -30,10 +30,11 @@
 
 int erst_write(const struct cper_record_header *record);
 ssize_t erst_get_record_count(void);
-int erst_get_next_record_id(u64 *record_id);
+int erst_get_record_id_begin(int *pos);
+int erst_get_record_id_next(int *pos, u64 *record_id);
+void erst_get_record_id_end(void);
 ssize_t erst_read(u64 record_id, struct cper_record_header *record,
 		  size_t buflen);
-ssize_t erst_read_next(struct cper_record_header *record, size_t buflen);
 int erst_clear(u64 record_id);
 
 #endif
diff --git a/include/asm-generic/pgtable.h b/include/asm-generic/pgtable.h
index 31b6188..b4bfe33 100644
--- a/include/asm-generic/pgtable.h
+++ b/include/asm-generic/pgtable.h
@@ -4,6 +4,8 @@
 #ifndef __ASSEMBLY__
 #ifdef CONFIG_MMU
 
+#include <linux/mm_types.h>
+
 #ifndef __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS
 extern int ptep_set_access_flags(struct vm_area_struct *vma,
 				 unsigned long address, pte_t *ptep,
diff --git a/include/drm/drmP.h b/include/drm/drmP.h
index fe29aad..348843b 100644
--- a/include/drm/drmP.h
+++ b/include/drm/drmP.h
@@ -1101,7 +1101,7 @@
 	struct platform_device *platformdev; /**< Platform device structure */
 
 	struct drm_sg_mem *sg;	/**< Scatter gather memory */
-	int num_crtcs;                  /**< Number of CRTCs on this device */
+	unsigned int num_crtcs;                  /**< Number of CRTCs on this device */
 	void *dev_private;		/**< device private data */
 	void *mm_private;
 	struct address_space *dev_mapping;
diff --git a/include/linux/acpi_io.h b/include/linux/acpi_io.h
index 7180013..4afd710 100644
--- a/include/linux/acpi_io.h
+++ b/include/linux/acpi_io.h
@@ -10,7 +10,6 @@
        return ioremap_cache(phys, size);
 }
 
-int acpi_os_map_generic_address(struct acpi_generic_address *addr);
-void acpi_os_unmap_generic_address(struct acpi_generic_address *addr);
+void __iomem *acpi_os_get_iomem(acpi_physical_address phys, unsigned int size);
 
 #endif
diff --git a/include/linux/aer.h b/include/linux/aer.h
index f7df1ee..8414de2 100644
--- a/include/linux/aer.h
+++ b/include/linux/aer.h
@@ -7,6 +7,28 @@
 #ifndef _AER_H_
 #define _AER_H_
 
+struct aer_header_log_regs {
+	unsigned int dw0;
+	unsigned int dw1;
+	unsigned int dw2;
+	unsigned int dw3;
+};
+
+struct aer_capability_regs {
+	u32 header;
+	u32 uncor_status;
+	u32 uncor_mask;
+	u32 uncor_severity;
+	u32 cor_status;
+	u32 cor_mask;
+	u32 cap_control;
+	struct aer_header_log_regs header_log;
+	u32 root_command;
+	u32 root_status;
+	u16 cor_err_source;
+	u16 uncor_err_source;
+};
+
 #if defined(CONFIG_PCIEAER)
 /* pci-e port driver needs this function to enable aer */
 extern int pci_enable_pcie_error_reporting(struct pci_dev *dev);
@@ -27,5 +49,7 @@
 }
 #endif
 
+extern void cper_print_aer(const char *prefix, int cper_severity,
+			   struct aer_capability_regs *aer);
 #endif //_AER_H_
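A minimal caller sketch for the helper declared above, assuming an APEI/GHES handler that has already located the raw AER register block inside a CPER PCIe error section (the function and variable names are placeholders, not from the patch):

static void example_report_aer(int cper_severity,
			       struct aer_capability_regs *regs)
{
	/* Pretty-print the AER registers with a fixed log prefix. */
	cper_print_aer("APEI", cper_severity, regs);
}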
 
diff --git a/include/linux/cper.h b/include/linux/cper.h
index 3104aaf..372a258 100644
--- a/include/linux/cper.h
+++ b/include/linux/cper.h
@@ -388,5 +388,7 @@
 #pragma pack()
 
 u64 cper_next_record_id(void);
+void cper_print_bits(const char *prefix, unsigned int bits,
+		     const char *strs[], unsigned int strs_size);
 
 #endif
diff --git a/include/linux/fs.h b/include/linux/fs.h
index 97d08d8..e38b50a4 100644
--- a/include/linux/fs.h
+++ b/include/linux/fs.h
@@ -2140,7 +2140,7 @@
 				   struct block_device *bdev);
 extern int revalidate_disk(struct gendisk *);
 extern int check_disk_change(struct block_device *);
-extern int __invalidate_device(struct block_device *);
+extern int __invalidate_device(struct block_device *, bool);
 extern int invalidate_partition(struct gendisk *, int);
 #endif
 unsigned long invalidate_mapping_pages(struct address_space *mapping,
diff --git a/include/linux/pm.h b/include/linux/pm.h
index dd9c7ab..21415cc 100644
--- a/include/linux/pm.h
+++ b/include/linux/pm.h
@@ -431,6 +431,8 @@
 	struct list_head	entry;
 	struct completion	completion;
 	struct wakeup_source	*wakeup;
+#else
+	unsigned int		should_wakeup:1;
 #endif
 #ifdef CONFIG_PM_RUNTIME
 	struct timer_list	suspend_timer;
diff --git a/include/linux/pm_wakeup.h b/include/linux/pm_wakeup.h
index 9cff00d..03a67db 100644
--- a/include/linux/pm_wakeup.h
+++ b/include/linux/pm_wakeup.h
@@ -109,11 +109,6 @@
 	return dev->power.can_wakeup;
 }
 
-static inline bool device_may_wakeup(struct device *dev)
-{
-	return false;
-}
-
 static inline struct wakeup_source *wakeup_source_create(const char *name)
 {
 	return NULL;
@@ -134,24 +129,32 @@
 
 static inline int device_wakeup_enable(struct device *dev)
 {
-	return -EINVAL;
+	dev->power.should_wakeup = true;
+	return 0;
 }
 
 static inline int device_wakeup_disable(struct device *dev)
 {
+	dev->power.should_wakeup = false;
+	return 0;
+}
+
+static inline int device_set_wakeup_enable(struct device *dev, bool enable)
+{
+	dev->power.should_wakeup = enable;
 	return 0;
 }
 
 static inline int device_init_wakeup(struct device *dev, bool val)
 {
-	dev->power.can_wakeup = val;
-	return val ? -EINVAL : 0;
+	device_set_wakeup_capable(dev, val);
+	device_set_wakeup_enable(dev, val);
+	return 0;
 }
 
-
-static inline int device_set_wakeup_enable(struct device *dev, bool enable)
+static inline bool device_may_wakeup(struct device *dev)
 {
-	return -EINVAL;
+	return dev->power.can_wakeup && dev->power.should_wakeup;
 }
 
 static inline void __pm_stay_awake(struct wakeup_source *ws) {}
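With the reworked !CONFIG_PM_SLEEP stubs above, enabling and disabling wakeup now succeeds and is tracked in the new should_wakeup bit, so a driver can use the same calls in both configurations. A hedged driver-side sketch (all names hypothetical):

static int example_probe(struct device *dev)
{
	/* Marks the device wakeup-capable and enables wakeup by default. */
	device_init_wakeup(dev, true);
	return 0;
}

static int example_suspend(struct device *dev)
{
	if (device_may_wakeup(dev)) {
		/* Arm the device's wakeup source here (IRQ, PME, etc.). */
	}
	return 0;
}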
diff --git a/include/linux/rio_regs.h b/include/linux/rio_regs.h
index d63dcba..9026b30 100644
--- a/include/linux/rio_regs.h
+++ b/include/linux/rio_regs.h
@@ -14,10 +14,12 @@
 #define LINUX_RIO_REGS_H
 
 /*
- * In RapidIO, each device has a 2MB configuration space that is
+ * In RapidIO, each device has a 16MB configuration space that is
  * accessed via maintenance transactions.  Portions of configuration
  * space are standardized and/or reserved.
  */
+#define RIO_MAINT_SPACE_SZ	0x1000000 /* 16MB of RapidIO maintenance space */
+
 #define RIO_DEV_ID_CAR		0x00	/* [I] Device Identity CAR */
 #define RIO_DEV_INFO_CAR	0x04	/* [I] Device Information CAR */
 #define RIO_ASM_ID_CAR		0x08	/* [I] Assembly Identity CAR */
diff --git a/include/linux/thermal.h b/include/linux/thermal.h
index 8651556..d3ec89f 100644
--- a/include/linux/thermal.h
+++ b/include/linux/thermal.h
@@ -172,6 +172,14 @@
 struct thermal_cooling_device *thermal_cooling_device_register(char *, void *,
 		const struct thermal_cooling_device_ops *);
 void thermal_cooling_device_unregister(struct thermal_cooling_device *);
+
+#ifdef CONFIG_NET
 extern int generate_netlink_event(u32 orig, enum events event);
+#else
+static inline int generate_netlink_event(u32 orig, enum events event)
+{
+	return 0;
+}
+#endif
 
 #endif /* __THERMAL_H__ */
diff --git a/kernel/time/tick-broadcast.c b/kernel/time/tick-broadcast.c
index 48b2761..a3b5aff 100644
--- a/kernel/time/tick-broadcast.c
+++ b/kernel/time/tick-broadcast.c
@@ -600,4 +600,14 @@
 	return tick_broadcast_device.mode == TICKDEV_MODE_ONESHOT;
 }
 
+/*
+ * Check whether the broadcast device supports oneshot.
+ */
+bool tick_broadcast_oneshot_available(void)
+{
+	struct clock_event_device *bc = tick_broadcast_device.evtdev;
+
+	return bc ? bc->features & CLOCK_EVT_FEAT_ONESHOT : false;
+}
+
 #endif
diff --git a/kernel/time/tick-common.c b/kernel/time/tick-common.c
index 051bc80..ed228ef 100644
--- a/kernel/time/tick-common.c
+++ b/kernel/time/tick-common.c
@@ -51,7 +51,11 @@
 {
 	struct clock_event_device *dev = __this_cpu_read(tick_cpu_device.evtdev);
 
-	return dev && (dev->features & CLOCK_EVT_FEAT_ONESHOT);
+	if (!dev || !(dev->features & CLOCK_EVT_FEAT_ONESHOT))
+		return 0;
+	if (!(dev->features & CLOCK_EVT_FEAT_C3STOP))
+		return 1;
+	return tick_broadcast_oneshot_available();
 }
 
 /*
diff --git a/kernel/time/tick-internal.h b/kernel/time/tick-internal.h
index 290eefb..f65d3a7 100644
--- a/kernel/time/tick-internal.h
+++ b/kernel/time/tick-internal.h
@@ -36,6 +36,7 @@
 extern int tick_resume_broadcast_oneshot(struct clock_event_device *bc);
 extern int tick_broadcast_oneshot_active(void);
 extern void tick_check_oneshot_broadcast(int cpu);
+bool tick_broadcast_oneshot_available(void);
 # else /* BROADCAST */
 static inline void tick_broadcast_setup_oneshot(struct clock_event_device *bc)
 {
@@ -46,6 +47,7 @@
 static inline void tick_shutdown_broadcast_oneshot(unsigned int *cpup) { }
 static inline int tick_broadcast_oneshot_active(void) { return 0; }
 static inline void tick_check_oneshot_broadcast(int cpu) { }
+static inline bool tick_broadcast_oneshot_available(void) { return true; }
 # endif /* !BROADCAST */
 
 #else /* !ONESHOT */
@@ -76,6 +78,7 @@
 	return 0;
 }
 static inline int tick_broadcast_oneshot_active(void) { return 0; }
+static inline bool tick_broadcast_oneshot_available(void) { return false; }
 #endif /* !TICK_ONESHOT */
 
 /*
diff --git a/lib/swiotlb.c b/lib/swiotlb.c
index c47bbe1..93ca08b 100644
--- a/lib/swiotlb.c
+++ b/lib/swiotlb.c
@@ -686,8 +686,10 @@
 	/*
 	 * Ensure that the address returned is DMA'ble
 	 */
-	if (!dma_capable(dev, dev_addr, size))
-		panic("map_single: bounce buffer is not DMA'ble");
+	if (!dma_capable(dev, dev_addr, size)) {
+		swiotlb_tbl_unmap_single(dev, map, size, dir);
+		dev_addr = swiotlb_virt_to_bus(dev, io_tlb_overflow_buffer);
+	}
 
 	return dev_addr;
 }
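With the hunk above, an address that cannot be bounced no longer panics the kernel: swiotlb unmaps the slot and hands back the overflow buffer address, which drivers are expected to catch through the usual dma_mapping_error() check. A sketch of the caller-side handling (device, buffer, and function names are illustrative):

#include <linux/dma-mapping.h>

static int example_map(struct device *dev, void *buf, size_t len,
		       dma_addr_t *out)
{
	dma_addr_t handle = dma_map_single(dev, buf, len, DMA_TO_DEVICE);

	if (dma_mapping_error(dev, handle))
		return -ENOMEM;		/* do not use the returned address */

	*out = handle;
	return 0;
}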
diff --git a/mm/mempolicy.c b/mm/mempolicy.c
index 368fc9d..49355a9 100644
--- a/mm/mempolicy.c
+++ b/mm/mempolicy.c
@@ -1830,7 +1830,7 @@
 	if (unlikely(pol->mode == MPOL_INTERLEAVE)) {
 		unsigned nid;
 
-		nid = interleave_nid(pol, vma, addr, PAGE_SHIFT);
+		nid = interleave_nid(pol, vma, addr, PAGE_SHIFT + order);
 		mpol_cond_put(pol);
 		page = alloc_page_interleave(gfp, order, nid);
 		put_mems_allowed();
diff --git a/mm/migrate.c b/mm/migrate.c
index 7661152..352de555 100644
--- a/mm/migrate.c
+++ b/mm/migrate.c
@@ -1287,14 +1287,14 @@
 		return -EPERM;
 
 	/* Find the mm_struct */
-	read_lock(&tasklist_lock);
+	rcu_read_lock();
 	task = pid ? find_task_by_vpid(pid) : current;
 	if (!task) {
-		read_unlock(&tasklist_lock);
+		rcu_read_unlock();
 		return -ESRCH;
 	}
 	mm = get_task_mm(task);
-	read_unlock(&tasklist_lock);
+	rcu_read_unlock();
 
 	if (!mm)
 		return -EINVAL;
diff --git a/mm/mremap.c b/mm/mremap.c
index 9925b63..1de98d4 100644
--- a/mm/mremap.c
+++ b/mm/mremap.c
@@ -94,9 +94,7 @@
 		 */
 		mapping = vma->vm_file->f_mapping;
 		spin_lock(&mapping->i_mmap_lock);
-		if (new_vma->vm_truncate_count &&
-		    new_vma->vm_truncate_count != vma->vm_truncate_count)
-			new_vma->vm_truncate_count = 0;
+		new_vma->vm_truncate_count = 0;
 	}
 
 	/*
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index a873e61..cdef1d4 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -5376,10 +5376,9 @@
 	for (found = 0, iter = 0; iter < pageblock_nr_pages; iter++) {
 		unsigned long check = pfn + iter;
 
-		if (!pfn_valid_within(check)) {
-			iter++;
+		if (!pfn_valid_within(check))
 			continue;
-		}
+
 		page = pfn_to_page(check);
 		if (!page_count(page)) {
 			if (PageBuddy(page))
diff --git a/mm/swapfile.c b/mm/swapfile.c
index 07a458d..0341c57 100644
--- a/mm/swapfile.c
+++ b/mm/swapfile.c
@@ -1940,7 +1940,7 @@
 
 	error = -EINVAL;
 	if (S_ISBLK(inode->i_mode)) {
-		bdev = I_BDEV(inode);
+		bdev = bdgrab(I_BDEV(inode));
 		error = blkdev_get(bdev, FMODE_READ | FMODE_WRITE | FMODE_EXCL,
 				   sys_swapon);
 		if (error < 0) {
diff --git a/mm/truncate.c b/mm/truncate.c
index 49feb46..d64296b 100644
--- a/mm/truncate.c
+++ b/mm/truncate.c
@@ -225,6 +225,7 @@
 	next = start;
 	while (next <= end &&
 	       pagevec_lookup(&pvec, mapping, next, PAGEVEC_SIZE)) {
+		mem_cgroup_uncharge_start();
 		for (i = 0; i < pagevec_count(&pvec); i++) {
 			struct page *page = pvec.pages[i];
 			pgoff_t page_index = page->index;
@@ -247,6 +248,7 @@
 			unlock_page(page);
 		}
 		pagevec_release(&pvec);
+		mem_cgroup_uncharge_end();
 		cond_resched();
 	}
 
diff --git a/mm/vmscan.c b/mm/vmscan.c
index 17497d0..6771ea7 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -1841,16 +1841,28 @@
 	if (!(sc->reclaim_mode & RECLAIM_MODE_COMPACTION))
 		return false;
 
-	/*
-	 * If we failed to reclaim and have scanned the full list, stop.
-	 * NOTE: Checking just nr_reclaimed would exit reclaim/compaction far
-	 *       faster but obviously would be less likely to succeed
-	 *       allocation. If this is desirable, use GFP_REPEAT to decide
-	 *       if both reclaimed and scanned should be checked or just
-	 *       reclaimed
-	 */
-	if (!nr_reclaimed && !nr_scanned)
-		return false;
+	/* Consider stopping depending on scan and reclaim activity */
+	if (sc->gfp_mask & __GFP_REPEAT) {
+		/*
+		 * For __GFP_REPEAT allocations, stop reclaiming if the
+		 * full LRU list has been scanned and we are still failing
+		 * to reclaim pages. This full LRU scan is potentially
+		 * expensive, but a __GFP_REPEAT caller really wants to succeed.
+		 */
+		if (!nr_reclaimed && !nr_scanned)
+			return false;
+	} else {
+		/*
+		 * For non-__GFP_REPEAT allocations which can presumably
+		 * fail without consequence, stop if we failed to reclaim
+		 * any pages from the last SWAP_CLUSTER_MAX number of
+		 * pages that were scanned. This will return to the
+		 * caller faster at the risk that reclaim/compaction and
+		 * the resulting allocation attempt fail.
+		 */
+		if (!nr_reclaimed)
+			return false;
+	}
 
 	/*
 	 * If we have not reclaimed enough pages for compaction and the
diff --git a/net/rxrpc/ar-key.c b/net/rxrpc/ar-key.c
index 5ee16f0..d763793 100644
--- a/net/rxrpc/ar-key.c
+++ b/net/rxrpc/ar-key.c
@@ -89,11 +89,11 @@
 		return ret;
 
 	plen -= sizeof(*token);
-	token = kmalloc(sizeof(*token), GFP_KERNEL);
+	token = kzalloc(sizeof(*token), GFP_KERNEL);
 	if (!token)
 		return -ENOMEM;
 
-	token->kad = kmalloc(plen, GFP_KERNEL);
+	token->kad = kzalloc(plen, GFP_KERNEL);
 	if (!token->kad) {
 		kfree(token);
 		return -ENOMEM;
@@ -731,10 +731,10 @@
 		goto error;
 
 	ret = -ENOMEM;
-	token = kmalloc(sizeof(*token), GFP_KERNEL);
+	token = kzalloc(sizeof(*token), GFP_KERNEL);
 	if (!token)
 		goto error;
-	token->kad = kmalloc(plen, GFP_KERNEL);
+	token->kad = kzalloc(plen, GFP_KERNEL);
 	if (!token->kad)
 		goto error_free;
 
diff --git a/sound/core/jack.c b/sound/core/jack.c
index 4902ae5..53b53e9 100644
--- a/sound/core/jack.c
+++ b/sound/core/jack.c
@@ -141,6 +141,7 @@
 
 fail_input:
 	input_free_device(jack->input_dev);
+	kfree(jack->id);
 	kfree(jack);
 	return err;
 }
diff --git a/sound/pci/hda/patch_conexant.c b/sound/pci/hda/patch_conexant.c
index dd7c5c1..4d5004e 100644
--- a/sound/pci/hda/patch_conexant.c
+++ b/sound/pci/hda/patch_conexant.c
@@ -3114,6 +3114,8 @@
 	SND_PCI_QUIRK(0x1028, 0x0401, "Dell Vostro 1014", CXT5066_DELL_VOSTRO),
 	SND_PCI_QUIRK(0x1028, 0x0402, "Dell Vostro", CXT5066_DELL_VOSTRO),
 	SND_PCI_QUIRK(0x1028, 0x0408, "Dell Inspiron One 19T", CXT5066_IDEAPAD),
+	SND_PCI_QUIRK(0x1028, 0x050f, "Dell Inspiron", CXT5066_IDEAPAD),
+	SND_PCI_QUIRK(0x1028, 0x0510, "Dell Vostro", CXT5066_IDEAPAD),
 	SND_PCI_QUIRK(0x103c, 0x360b, "HP G60", CXT5066_HP_LAPTOP),
 	SND_PCI_QUIRK(0x1043, 0x13f3, "Asus A52J", CXT5066_ASUS),
 	SND_PCI_QUIRK(0x1043, 0x1643, "Asus K52JU", CXT5066_ASUS),
@@ -3937,6 +3939,8 @@
 	  .patch = patch_cxt5066 },
 	{ .id = 0x14f15069, .name = "CX20585",
 	  .patch = patch_cxt5066 },
+	{ .id = 0x14f1506e, .name = "CX20590",
+	  .patch = patch_cxt5066 },
 	{ .id = 0x14f15097, .name = "CX20631",
 	  .patch = patch_conexant_auto },
 	{ .id = 0x14f15098, .name = "CX20632",
@@ -3963,6 +3967,7 @@
 MODULE_ALIAS("snd-hda-codec-id:14f15067");
 MODULE_ALIAS("snd-hda-codec-id:14f15068");
 MODULE_ALIAS("snd-hda-codec-id:14f15069");
+MODULE_ALIAS("snd-hda-codec-id:14f1506e");
 MODULE_ALIAS("snd-hda-codec-id:14f15097");
 MODULE_ALIAS("snd-hda-codec-id:14f15098");
 MODULE_ALIAS("snd-hda-codec-id:14f150a1");
diff --git a/sound/pci/hda/patch_sigmatel.c b/sound/pci/hda/patch_sigmatel.c
index 9ea48b4..bd7b123 100644
--- a/sound/pci/hda/patch_sigmatel.c
+++ b/sound/pci/hda/patch_sigmatel.c
@@ -586,7 +586,12 @@
 	0x0f, 0x10, 0x11, 0x1f, 0x20,
 };
 
-static hda_nid_t stac92hd88xxx_pin_nids[10] = {
+static hda_nid_t stac92hd87xxx_pin_nids[6] = {
+	0x0a, 0x0b, 0x0c, 0x0d,
+	0x0f, 0x11,
+};
+
+static hda_nid_t stac92hd88xxx_pin_nids[8] = {
 	0x0a, 0x0b, 0x0c, 0x0d,
 	0x0f, 0x11, 0x1f, 0x20,
 };
@@ -5430,12 +5435,13 @@
 	switch (codec->vendor_id) {
 	case 0x111d76d1:
 	case 0x111d76d9:
+	case 0x111d76e5:
 		spec->dmic_nids = stac92hd87b_dmic_nids;
 		spec->num_dmics = stac92xx_connected_ports(codec,
 				stac92hd87b_dmic_nids,
 				STAC92HD87B_NUM_DMICS);
-		spec->num_pins = ARRAY_SIZE(stac92hd88xxx_pin_nids);
-		spec->pin_nids = stac92hd88xxx_pin_nids;
+		spec->num_pins = ARRAY_SIZE(stac92hd87xxx_pin_nids);
+		spec->pin_nids = stac92hd87xxx_pin_nids;
 		spec->mono_nid = 0;
 		spec->num_pwrs = 0;
 		break;
@@ -5443,6 +5449,7 @@
 	case 0x111d7667:
 	case 0x111d7668:
 	case 0x111d7669:
+	case 0x111d76e3:
 		spec->num_dmics = stac92xx_connected_ports(codec,
 				stac92hd88xxx_dmic_nids,
 				STAC92HD88XXX_NUM_DMICS);
@@ -6387,6 +6394,8 @@
 	{ .id = 0x111d76cd, .name = "92HD89F2", .patch = patch_stac92hd73xx },
 	{ .id = 0x111d76ce, .name = "92HD89F1", .patch = patch_stac92hd73xx },
 	{ .id = 0x111d76e0, .name = "92HD91BXX", .patch = patch_stac92hd83xxx},
+	{ .id = 0x111d76e3, .name = "92HD98BXX", .patch = patch_stac92hd83xxx},
+	{ .id = 0x111d76e5, .name = "92HD99BXX", .patch = patch_stac92hd83xxx},
 	{ .id = 0x111d76e7, .name = "92HD90BXX", .patch = patch_stac92hd83xxx},
 	{} /* terminator */
 };
diff --git a/sound/pci/hda/patch_via.c b/sound/pci/hda/patch_via.c
index a76c326..63b0054 100644
--- a/sound/pci/hda/patch_via.c
+++ b/sound/pci/hda/patch_via.c
@@ -567,7 +567,7 @@
 		hda_nid_t nid = cfg->inputs[i].pin;
 		if (spec->smart51_enabled && is_smart51_pins(spec, nid))
 			ctl = PIN_OUT;
-		else if (i == AUTO_PIN_MIC)
+		else if (cfg->inputs[i].type == AUTO_PIN_MIC)
 			ctl = PIN_VREF50;
 		else
 			ctl = PIN_IN;
diff --git a/sound/soc/codecs/wm8903.h b/sound/soc/codecs/wm8903.h
index e8490f3..e3ec243 100644
--- a/sound/soc/codecs/wm8903.h
+++ b/sound/soc/codecs/wm8903.h
@@ -165,7 +165,7 @@
 
 #define WM8903_VMID_RES_50K                          2
 #define WM8903_VMID_RES_250K                         3
-#define WM8903_VMID_RES_5K                           4
+#define WM8903_VMID_RES_5K                           6
 
 /*
  * R8 (0x08) - Analogue DAC 0
diff --git a/sound/soc/codecs/wm8994.c b/sound/soc/codecs/wm8994.c
index a60b5db..ebaee5c 100644
--- a/sound/soc/codecs/wm8994.c
+++ b/sound/soc/codecs/wm8994.c
@@ -3000,11 +3000,10 @@
 		report |= SND_JACK_BTN_5;
 
 done:
-	snd_soc_jack_report(wm8994->micdet[0].jack,
+	snd_soc_jack_report(wm8994->micdet[0].jack, report,
 			    SND_JACK_BTN_0 | SND_JACK_BTN_1 | SND_JACK_BTN_2 |
 			    SND_JACK_BTN_3 | SND_JACK_BTN_4 | SND_JACK_BTN_5 |
-			    SND_JACK_MICROPHONE | SND_JACK_VIDEOOUT,
-			    report);
+			    SND_JACK_MICROPHONE | SND_JACK_VIDEOOUT);
 }
 
 /**
diff --git a/sound/soc/codecs/wm_hubs.c b/sound/soc/codecs/wm_hubs.c
index 613df5d..5168927 100644
--- a/sound/soc/codecs/wm_hubs.c
+++ b/sound/soc/codecs/wm_hubs.c
@@ -674,6 +674,9 @@
 };
 
 static const struct snd_soc_dapm_route analogue_routes[] = {
+	{ "MICBIAS1", NULL, "CLK_SYS" },
+	{ "MICBIAS2", NULL, "CLK_SYS" },
+
 	{ "IN1L PGA", "IN1LP Switch", "IN1LP" },
 	{ "IN1L PGA", "IN1LN Switch", "IN1LN" },
 
diff --git a/sound/soc/imx/eukrea-tlv320.c b/sound/soc/imx/eukrea-tlv320.c
index e20c9e1..1e9bcca 100644
--- a/sound/soc/imx/eukrea-tlv320.c
+++ b/sound/soc/imx/eukrea-tlv320.c
@@ -79,7 +79,7 @@
 	.name		= "tlv320aic23",
 	.stream_name	= "TLV320AIC23",
 	.codec_dai_name	= "tlv320aic23-hifi",
-	.platform_name	= "imx-pcm-audio.0",
+	.platform_name	= "imx-fiq-pcm-audio.0",
 	.codec_name	= "tlv320aic23-codec.0-001a",
 	.cpu_dai_name	= "imx-ssi.0",
 	.ops		= &eukrea_tlv320_snd_ops,
diff --git a/sound/soc/pxa/e740_wm9705.c b/sound/soc/pxa/e740_wm9705.c
index 28333e7..dc65650 100644
--- a/sound/soc/pxa/e740_wm9705.c
+++ b/sound/soc/pxa/e740_wm9705.c
@@ -117,7 +117,7 @@
 	{
 		.name = "AC97",
 		.stream_name = "AC97 HiFi",
-		.cpu_dai_name = "pxa-ac97.0",
+		.cpu_dai_name = "pxa2xx-ac97",
 		.codec_dai_name = "wm9705-hifi",
 		.platform_name = "pxa-pcm-audio",
 		.codec_name = "wm9705-codec",
@@ -126,7 +126,7 @@
 	{
 		.name = "AC97 Aux",
 		.stream_name = "AC97 Aux",
-		.cpu_dai_name = "pxa-ac97.1",
+		.cpu_dai_name = "pxa2xx-ac97-aux",
 		.codec_dai_name = "wm9705-aux",
 		.platform_name = "pxa-pcm-audio",
 		.codec_name = "wm9705-codec",
diff --git a/sound/soc/pxa/e750_wm9705.c b/sound/soc/pxa/e750_wm9705.c
index 01bf316..51897fc 100644
--- a/sound/soc/pxa/e750_wm9705.c
+++ b/sound/soc/pxa/e750_wm9705.c
@@ -99,7 +99,7 @@
 	{
 		.name = "AC97",
 		.stream_name = "AC97 HiFi",
-		.cpu_dai_name = "pxa-ac97.0",
+		.cpu_dai_name = "pxa2xx-ac97",
 		.codec_dai_name = "wm9705-hifi",
 		.platform_name = "pxa-pcm-audio",
 		.codec_name = "wm9705-codec",
@@ -109,7 +109,7 @@
 	{
 		.name = "AC97 Aux",
 		.stream_name = "AC97 Aux",
-		.cpu_dai_name = "pxa-ac97.1",
+		.cpu_dai_name = "pxa2xx-ac97-aux",
 		.codec_dai_name ="wm9705-aux",
 		.platform_name = "pxa-pcm-audio",
 		.codec_name = "wm9705-codec",
diff --git a/sound/soc/pxa/e800_wm9712.c b/sound/soc/pxa/e800_wm9712.c
index c6a37c6e..053ed20 100644
--- a/sound/soc/pxa/e800_wm9712.c
+++ b/sound/soc/pxa/e800_wm9712.c
@@ -89,7 +89,7 @@
 	{
 		.name = "AC97",
 		.stream_name = "AC97 HiFi",
-		.cpu_dai_name = "pxa-ac97.0",
+		.cpu_dai_name = "pxa2xx-ac97",
 		.codec_dai_name = "wm9712-hifi",
 		.platform_name = "pxa-pcm-audio",
 		.codec_name = "wm9712-codec",
@@ -98,7 +98,7 @@
 	{
 		.name = "AC97 Aux",
 		.stream_name = "AC97 Aux",
-		.cpu_dai_name = "pxa-ac97.1",
+		.cpu_dai_name = "pxa2xx-ac97-aux",
 		.codec_dai_name ="wm9712-aux",
 		.platform_name = "pxa-pcm-audio",
 		.codec_name = "wm9712-codec",
diff --git a/sound/soc/pxa/em-x270.c b/sound/soc/pxa/em-x270.c
index fc22e6e..b13a425 100644
--- a/sound/soc/pxa/em-x270.c
+++ b/sound/soc/pxa/em-x270.c
@@ -37,7 +37,7 @@
 	{
 		.name = "AC97",
 		.stream_name = "AC97 HiFi",
-		.cpu_dai_name = "pxa-ac97.0",
+		.cpu_dai_name = "pxa2xx-ac97",
 		.codec_dai_name = "wm9712-hifi",
 		.platform_name = "pxa-pcm-audio",
 		.codec_name = "wm9712-codec",
@@ -45,7 +45,7 @@
 	{
 		.name = "AC97 Aux",
 		.stream_name = "AC97 Aux",
-		.cpu_dai_name = "pxa-ac97.1",
+		.cpu_dai_name = "pxa2xx-ac97-aux",
 		.codec_dai_name ="wm9712-aux",
 		.platform_name = "pxa-pcm-audio",
 		.codec_name = "wm9712-codec",
diff --git a/sound/soc/pxa/mioa701_wm9713.c b/sound/soc/pxa/mioa701_wm9713.c
index 0d70fc8..38ca675 100644
--- a/sound/soc/pxa/mioa701_wm9713.c
+++ b/sound/soc/pxa/mioa701_wm9713.c
@@ -162,7 +162,7 @@
 	{
 		.name = "AC97",
 		.stream_name = "AC97 HiFi",
-		.cpu_dai_name = "pxa-ac97.0",
+		.cpu_dai_name = "pxa2xx-ac97",
 		.codec_dai_name = "wm9713-hifi",
 		.codec_name = "wm9713-codec",
 		.init = mioa701_wm9713_init,
@@ -172,7 +172,7 @@
 	{
 		.name = "AC97 Aux",
 		.stream_name = "AC97 Aux",
-		.cpu_dai_name = "pxa-ac97.1",
+		.cpu_dai_name = "pxa2xx-ac97-aux",
 		.codec_dai_name ="wm9713-aux",
 		.codec_name = "wm9713-codec",
 		.platform_name = "pxa-pcm-audio",
diff --git a/sound/soc/pxa/palm27x.c b/sound/soc/pxa/palm27x.c
index 857db96..504e400 100644
--- a/sound/soc/pxa/palm27x.c
+++ b/sound/soc/pxa/palm27x.c
@@ -132,7 +132,7 @@
 {
 	.name = "AC97 HiFi",
 	.stream_name = "AC97 HiFi",
-	.cpu_dai_name = "pxa-ac97.0",
+	.cpu_dai_name = "pxa2xx-ac97",
 	.codec_dai_name =  "wm9712-hifi",
 	.codec_name = "wm9712-codec",
 	.platform_name = "pxa-pcm-audio",
@@ -141,7 +141,7 @@
 {
 	.name = "AC97 Aux",
 	.stream_name = "AC97 Aux",
-	.cpu_dai_name = "pxa-ac97.1",
+	.cpu_dai_name = "pxa2xx-ac97-aux",
 	.codec_dai_name = "wm9712-aux",
 	.codec_name = "wm9712-codec",
 	.platform_name = "pxa-pcm-audio",
diff --git a/sound/soc/pxa/tosa.c b/sound/soc/pxa/tosa.c
index f75804e..4b6e5d6 100644
--- a/sound/soc/pxa/tosa.c
+++ b/sound/soc/pxa/tosa.c
@@ -219,7 +219,7 @@
 {
 	.name = "AC97",
 	.stream_name = "AC97 HiFi",
-	.cpu_dai_name = "pxa-ac97.0",
+	.cpu_dai_name = "pxa2xx-ac97",
 	.codec_dai_name = "wm9712-hifi",
 	.platform_name = "pxa-pcm-audio",
 	.codec_name = "wm9712-codec",
@@ -229,7 +229,7 @@
 {
 	.name = "AC97 Aux",
 	.stream_name = "AC97 Aux",
-	.cpu_dai_name = "pxa-ac97.1",
+	.cpu_dai_name = "pxa2xx-ac97-aux",
 	.codec_dai_name = "wm9712-aux",
 	.platform_name = "pxa-pcm-audio",
 	.codec_name = "wm9712-codec",
diff --git a/sound/soc/pxa/zylonite.c b/sound/soc/pxa/zylonite.c
index b222a7d..25bba10 100644
--- a/sound/soc/pxa/zylonite.c
+++ b/sound/soc/pxa/zylonite.c
@@ -166,7 +166,7 @@
 	.stream_name = "AC97 HiFi",
 	.codec_name = "wm9713-codec",
 	.platform_name = "pxa-pcm-audio",
-	.cpu_dai_name = "pxa-ac97.0",
+	.cpu_dai_name = "pxa2xx-ac97",
 	.codec_name = "wm9713-hifi",
 	.init = zylonite_wm9713_init,
 },
@@ -175,7 +175,7 @@
 	.stream_name = "AC97 Aux",
 	.codec_name = "wm9713-codec",
 	.platform_name = "pxa-pcm-audio",
-	.cpu_dai_name = "pxa-ac97.1",
+	.cpu_dai_name = "pxa2xx-ac97-aux",
 	.codec_name = "wm9713-aux",
 },
 {
diff --git a/sound/usb/card.c b/sound/usb/card.c
index 800f7cb..c0f8270b 100644
--- a/sound/usb/card.c
+++ b/sound/usb/card.c
@@ -323,6 +323,7 @@
 		return -ENOMEM;
 	}
 
+	mutex_init(&chip->shutdown_mutex);
 	chip->index = idx;
 	chip->dev = dev;
 	chip->card = card;
@@ -531,6 +532,7 @@
 	chip = ptr;
 	card = chip->card;
 	mutex_lock(&register_mutex);
+	mutex_lock(&chip->shutdown_mutex);
 	chip->shutdown = 1;
 	chip->num_interfaces--;
 	if (chip->num_interfaces <= 0) {
@@ -548,9 +550,11 @@
 			snd_usb_mixer_disconnect(p);
 		}
 		usb_chip[chip->index] = NULL;
+		mutex_unlock(&chip->shutdown_mutex);
 		mutex_unlock(&register_mutex);
 		snd_card_free_when_closed(card);
 	} else {
+		mutex_unlock(&chip->shutdown_mutex);
 		mutex_unlock(&register_mutex);
 	}
 }
diff --git a/sound/usb/pcm.c b/sound/usb/pcm.c
index 4132522..e3f6805 100644
--- a/sound/usb/pcm.c
+++ b/sound/usb/pcm.c
@@ -361,6 +361,7 @@
 	}
 
 	if (changed) {
+		mutex_lock(&subs->stream->chip->shutdown_mutex);
 		/* format changed */
 		snd_usb_release_substream_urbs(subs, 0);
 		/* influenced: period_bytes, channels, rate, format, */
@@ -368,6 +369,7 @@
 						  params_rate(hw_params),
 						  snd_pcm_format_physical_width(params_format(hw_params)) *
 							params_channels(hw_params));
+		mutex_unlock(&subs->stream->chip->shutdown_mutex);
 	}
 
 	return ret;
@@ -385,8 +387,9 @@
 	subs->cur_audiofmt = NULL;
 	subs->cur_rate = 0;
 	subs->period_bytes = 0;
-	if (!subs->stream->chip->shutdown)
-		snd_usb_release_substream_urbs(subs, 0);
+	mutex_lock(&subs->stream->chip->shutdown_mutex);
+	snd_usb_release_substream_urbs(subs, 0);
+	mutex_unlock(&subs->stream->chip->shutdown_mutex);
 	return snd_pcm_lib_free_vmalloc_buffer(substream);
 }
 
diff --git a/sound/usb/usbaudio.h b/sound/usb/usbaudio.h
index db3eb21..6e66fff 100644
--- a/sound/usb/usbaudio.h
+++ b/sound/usb/usbaudio.h
@@ -36,6 +36,7 @@
 	struct snd_card *card;
 	u32 usb_id;
 	int shutdown;
+	struct mutex shutdown_mutex;
 	unsigned int txfr_quirk:1; /* Subframe boundaries on transfers */
 	int num_interfaces;
 	int num_suspended_intf;
diff --git a/tools/perf/builtin-timechart.c b/tools/perf/builtin-timechart.c
index 746cf03..0ace786 100644
--- a/tools/perf/builtin-timechart.c
+++ b/tools/perf/builtin-timechart.c
@@ -264,9 +264,6 @@
 		c->start_time = start;
 	if (p->start_time == 0 || p->start_time > start)
 		p->start_time = start;
-
-	if (cpu > numcpus)
-		numcpus = cpu;
 }
 
 #define MAX_CPUS 4096
@@ -511,6 +508,9 @@
 		if (!event_str)
 			return 0;
 
+		if (sample->cpu > numcpus)
+			numcpus = sample->cpu;
+
 		if (strcmp(event_str, "power:cpu_idle") == 0) {
 			struct power_processor_entry *ppe = (void *)te;
 			if (ppe->state == (u32)PWR_EVENT_EXIT)
diff --git a/tools/perf/util/hist.c b/tools/perf/util/hist.c
index 32f4f1f..df51560 100644
--- a/tools/perf/util/hist.c
+++ b/tools/perf/util/hist.c
@@ -585,6 +585,7 @@
 {
 	struct sort_entry *se;
 	u64 period, total, period_sys, period_us, period_guest_sys, period_guest_us;
+	u64 nr_events;
 	const char *sep = symbol_conf.field_sep;
 	int ret;
 
@@ -593,6 +594,7 @@
 
 	if (pair_hists) {
 		period = self->pair ? self->pair->period : 0;
+		nr_events = self->pair ? self->pair->nr_events : 0;
 		total = pair_hists->stats.total_period;
 		period_sys = self->pair ? self->pair->period_sys : 0;
 		period_us = self->pair ? self->pair->period_us : 0;
@@ -600,6 +602,7 @@
 		period_guest_us = self->pair ? self->pair->period_guest_us : 0;
 	} else {
 		period = self->period;
+		nr_events = self->nr_events;
 		total = session_total;
 		period_sys = self->period_sys;
 		period_us = self->period_us;
@@ -640,9 +643,9 @@
 
 	if (symbol_conf.show_nr_samples) {
 		if (sep)
-			ret += snprintf(s + ret, size - ret, "%c%" PRIu64, *sep, period);
+			ret += snprintf(s + ret, size - ret, "%c%" PRIu64, *sep, nr_events);
 		else
-			ret += snprintf(s + ret, size - ret, "%11" PRIu64, period);
+			ret += snprintf(s + ret, size - ret, "%11" PRIu64, nr_events);
 	}
 
 	if (pair_hists) {
diff --git a/tools/perf/util/svghelper.c b/tools/perf/util/svghelper.c
index fb737fe..96c8660 100644
--- a/tools/perf/util/svghelper.c
+++ b/tools/perf/util/svghelper.c
@@ -456,9 +456,9 @@
 		return;
 
 	svg_legenda_box(0,	"Running", "sample");
-	svg_legenda_box(100,	"Idle","rect.c1");
-	svg_legenda_box(200,	"Deeper Idle", "rect.c3");
-	svg_legenda_box(350,	"Deepest Idle", "rect.c6");
+	svg_legenda_box(100,	"Idle","c1");
+	svg_legenda_box(200,	"Deeper Idle", "c3");
+	svg_legenda_box(350,	"Deepest Idle", "c6");
 	svg_legenda_box(550,	"Sleeping", "process2");
 	svg_legenda_box(650,	"Waiting for cpu", "waiting");
 	svg_legenda_box(800,	"Blocked on IO", "blocked");