Gentoo Logo
Gentoo Spaceship




Note: Due to technical difficulties, the Archives are currently not up to date. GMANE provides an alternative service for most mailing lists.
cf. bug 424647
List Archive: gentoo-commits
Navigation:
Lists: gentoo-commits: < Prev By Thread Next > < Prev By Date Next >
Headers:
To: gentoo-commits@g.o
From: "Mike Pagano (mpagano)" <mpagano@g.o>
Subject: linux-patches r2162 - genpatches-2.6/trunk/3.0
Date: Tue, 19 Jun 2012 00:00:27 +0000 (UTC)
Author: mpagano
Date: 2012-06-19 00:00:27 +0000 (Tue, 19 Jun 2012)
New Revision: 2162

Added:
   genpatches-2.6/trunk/3.0/1033_linux-3.0.34.patch
   genpatches-2.6/trunk/3.0/1034_linux-3.0.35.patch
Modified:
   genpatches-2.6/trunk/3.0/0000_README
Log:
Linux patches 3.0.34 and 3.0.35

Modified: genpatches-2.6/trunk/3.0/0000_README
===================================================================
--- genpatches-2.6/trunk/3.0/0000_README	2012-06-18 17:49:37 UTC (rev 2161)
+++ genpatches-2.6/trunk/3.0/0000_README	2012-06-19 00:00:27 UTC (rev 2162)
@@ -167,6 +167,14 @@
 From:   http://www.kernel.org
 Desc:   Linux 3.0.33
 
+Patch:  1033_linux-3.0.34.patch
+From:   http://www.kernel.org
+Desc:   Linux 3.0.34
+
+Patch:  1034_linux-3.0.35.patch
+From:   http://www.kernel.org
+Desc:   Linux 3.0.35
+
 Patch:  1800_fix-zcache-build.patch
 From:   http://bugs.gentoo.org/show_bug.cgi?id=376325
 Desc:   Fix zcache build error

Added: genpatches-2.6/trunk/3.0/1033_linux-3.0.34.patch
===================================================================
--- genpatches-2.6/trunk/3.0/1033_linux-3.0.34.patch	                        (rev 0)
+++ genpatches-2.6/trunk/3.0/1033_linux-3.0.34.patch	2012-06-19 00:00:27 UTC (rev 2162)
@@ -0,0 +1,1483 @@
+diff --git a/Makefile b/Makefile
+index c621c70..61ef485 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,6 +1,6 @@
+ VERSION = 3
+ PATCHLEVEL = 0
+-SUBLEVEL = 33
++SUBLEVEL = 34
+ EXTRAVERSION =
+ NAME = Sneaky Weasel
+ 
+diff --git a/arch/parisc/kernel/entry.S b/arch/parisc/kernel/entry.S
+index 5350342..07ef351 100644
+--- a/arch/parisc/kernel/entry.S
++++ b/arch/parisc/kernel/entry.S
+@@ -552,7 +552,7 @@
+ 	 * entry (identifying the physical page) and %r23 up with
+ 	 * the from tlb entry (or nothing if only a to entry---for
+ 	 * clear_user_page_asm) */
+-	.macro		do_alias	spc,tmp,tmp1,va,pte,prot,fault
++	.macro		do_alias	spc,tmp,tmp1,va,pte,prot,fault,patype
+ 	cmpib,COND(<>),n 0,\spc,\fault
+ 	ldil		L%(TMPALIAS_MAP_START),\tmp
+ #if defined(CONFIG_64BIT) && (TMPALIAS_MAP_START >= 0x80000000)
+@@ -581,11 +581,15 @@
+ 	 */
+ 	cmpiclr,=	0x01,\tmp,%r0
+ 	ldi		(_PAGE_DIRTY|_PAGE_READ|_PAGE_WRITE),\prot
+-#ifdef CONFIG_64BIT
++.ifc \patype,20
+ 	depd,z		\prot,8,7,\prot
+-#else
++.else
++.ifc \patype,11
+ 	depw,z		\prot,8,7,\prot
+-#endif
++.else
++	.error "undefined PA type to do_alias"
++.endif
++.endif
+ 	/*
+ 	 * OK, it is in the temp alias region, check whether "from" or "to".
+ 	 * Check "subtle" note in pacache.S re: r23/r26.
+@@ -1189,7 +1193,7 @@ dtlb_miss_20w:
+ 	nop
+ 
+ dtlb_check_alias_20w:
+-	do_alias	spc,t0,t1,va,pte,prot,dtlb_fault
++	do_alias	spc,t0,t1,va,pte,prot,dtlb_fault,20
+ 
+ 	idtlbt          pte,prot
+ 
+@@ -1213,7 +1217,7 @@ nadtlb_miss_20w:
+ 	nop
+ 
+ nadtlb_check_alias_20w:
+-	do_alias	spc,t0,t1,va,pte,prot,nadtlb_emulate
++	do_alias	spc,t0,t1,va,pte,prot,nadtlb_emulate,20
+ 
+ 	idtlbt          pte,prot
+ 
+@@ -1245,7 +1249,7 @@ dtlb_miss_11:
+ 	nop
+ 
+ dtlb_check_alias_11:
+-	do_alias	spc,t0,t1,va,pte,prot,dtlb_fault
++	do_alias	spc,t0,t1,va,pte,prot,dtlb_fault,11
+ 
+ 	idtlba          pte,(va)
+ 	idtlbp          prot,(va)
+@@ -1277,7 +1281,7 @@ nadtlb_miss_11:
+ 	nop
+ 
+ nadtlb_check_alias_11:
+-	do_alias	spc,t0,t1,va,pte,prot,nadtlb_emulate
++	do_alias	spc,t0,t1,va,pte,prot,nadtlb_emulate,11
+ 
+ 	idtlba          pte,(va)
+ 	idtlbp          prot,(va)
+@@ -1304,7 +1308,7 @@ dtlb_miss_20:
+ 	nop
+ 
+ dtlb_check_alias_20:
+-	do_alias	spc,t0,t1,va,pte,prot,dtlb_fault
++	do_alias	spc,t0,t1,va,pte,prot,dtlb_fault,20
+ 	
+ 	idtlbt          pte,prot
+ 
+@@ -1330,7 +1334,7 @@ nadtlb_miss_20:
+ 	nop
+ 
+ nadtlb_check_alias_20:
+-	do_alias	spc,t0,t1,va,pte,prot,nadtlb_emulate
++	do_alias	spc,t0,t1,va,pte,prot,nadtlb_emulate,20
+ 
+ 	idtlbt          pte,prot
+ 
+@@ -1457,7 +1461,7 @@ naitlb_miss_20w:
+ 	nop
+ 
+ naitlb_check_alias_20w:
+-	do_alias	spc,t0,t1,va,pte,prot,naitlb_fault
++	do_alias	spc,t0,t1,va,pte,prot,naitlb_fault,20
+ 
+ 	iitlbt		pte,prot
+ 
+@@ -1511,7 +1515,7 @@ naitlb_miss_11:
+ 	nop
+ 
+ naitlb_check_alias_11:
+-	do_alias	spc,t0,t1,va,pte,prot,itlb_fault
++	do_alias	spc,t0,t1,va,pte,prot,itlb_fault,11
+ 
+ 	iitlba          pte,(%sr0, va)
+ 	iitlbp          prot,(%sr0, va)
+@@ -1557,7 +1561,7 @@ naitlb_miss_20:
+ 	nop
+ 
+ naitlb_check_alias_20:
+-	do_alias	spc,t0,t1,va,pte,prot,naitlb_fault
++	do_alias	spc,t0,t1,va,pte,prot,naitlb_fault,20
+ 
+ 	iitlbt          pte,prot
+ 
+diff --git a/arch/parisc/kernel/vmlinux.lds.S b/arch/parisc/kernel/vmlinux.lds.S
+index fa6f2b8..64a9998 100644
+--- a/arch/parisc/kernel/vmlinux.lds.S
++++ b/arch/parisc/kernel/vmlinux.lds.S
+@@ -50,8 +50,10 @@ SECTIONS
+ 	. = KERNEL_BINARY_TEXT_START;
+ 
+ 	_text = .;		/* Text and read-only data */
+-	.text ALIGN(16) : {
++	.head ALIGN(16) : {
+ 		HEAD_TEXT
++	} = 0
++	.text ALIGN(16) : {
+ 		TEXT_TEXT
+ 		SCHED_TEXT
+ 		LOCK_TEXT
+@@ -65,7 +67,7 @@ SECTIONS
+ 		*(.fixup)
+ 		*(.lock.text)		/* out-of-line lock text */
+ 		*(.gnu.warning)
+-	} = 0
++	}
+ 	/* End of text section */
+ 	_etext = .;
+ 
+diff --git a/arch/x86/include/asm/pgtable-3level.h b/arch/x86/include/asm/pgtable-3level.h
+index effff47..43876f1 100644
+--- a/arch/x86/include/asm/pgtable-3level.h
++++ b/arch/x86/include/asm/pgtable-3level.h
+@@ -31,6 +31,56 @@ static inline void native_set_pte(pte_t *ptep, pte_t pte)
+ 	ptep->pte_low = pte.pte_low;
+ }
+ 
++#define pmd_read_atomic pmd_read_atomic
++/*
++ * pte_offset_map_lock on 32bit PAE kernels was reading the pmd_t with
++ * a "*pmdp" dereference done by gcc. Problem is, in certain places
++ * where pte_offset_map_lock is called, concurrent page faults are
++ * allowed, if the mmap_sem is hold for reading. An example is mincore
++ * vs page faults vs MADV_DONTNEED. On the page fault side
++ * pmd_populate rightfully does a set_64bit, but if we're reading the
++ * pmd_t with a "*pmdp" on the mincore side, a SMP race can happen
++ * because gcc will not read the 64bit of the pmd atomically. To fix
++ * this all places running pmd_offset_map_lock() while holding the
++ * mmap_sem in read mode, shall read the pmdp pointer using this
++ * function to know if the pmd is null nor not, and in turn to know if
++ * they can run pmd_offset_map_lock or pmd_trans_huge or other pmd
++ * operations.
++ *
++ * Without THP if the mmap_sem is hold for reading, the
++ * pmd can only transition from null to not null while pmd_read_atomic runs.
++ * So there's no need of literally reading it atomically.
++ *
++ * With THP if the mmap_sem is hold for reading, the pmd can become
++ * THP or null or point to a pte (and in turn become "stable") at any
++ * time under pmd_read_atomic, so it's mandatory to read it atomically
++ * with cmpxchg8b.
++ */
++#ifndef CONFIG_TRANSPARENT_HUGEPAGE
++static inline pmd_t pmd_read_atomic(pmd_t *pmdp)
++{
++	pmdval_t ret;
++	u32 *tmp = (u32 *)pmdp;
++
++	ret = (pmdval_t) (*tmp);
++	if (ret) {
++		/*
++		 * If the low part is null, we must not read the high part
++		 * or we can end up with a partial pmd.
++		 */
++		smp_rmb();
++		ret |= ((pmdval_t)*(tmp + 1)) << 32;
++	}
++
++	return (pmd_t) { ret };
++}
++#else /* CONFIG_TRANSPARENT_HUGEPAGE */
++static inline pmd_t pmd_read_atomic(pmd_t *pmdp)
++{
++	return (pmd_t) { atomic64_read((atomic64_t *)pmdp) };
++}
++#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
++
+ static inline void native_set_pte_atomic(pte_t *ptep, pte_t pte)
+ {
+ 	set_64bit((unsigned long long *)(ptep), native_pte_val(pte));
+diff --git a/drivers/acpi/battery.c b/drivers/acpi/battery.c
+index fcc13ac..d77c97d 100644
+--- a/drivers/acpi/battery.c
++++ b/drivers/acpi/battery.c
+@@ -635,11 +635,19 @@ static int acpi_battery_update(struct acpi_battery *battery)
+ 
+ static void acpi_battery_refresh(struct acpi_battery *battery)
+ {
++	int power_unit;
++
+ 	if (!battery->bat.dev)
+ 		return;
+ 
++	power_unit = battery->power_unit;
++
+ 	acpi_battery_get_info(battery);
+-	/* The battery may have changed its reporting units. */
++
++	if (power_unit == battery->power_unit)
++		return;
++
++	/* The battery has changed its reporting units. */
+ 	sysfs_remove_battery(battery);
+ 	sysfs_add_battery(battery);
+ }
+diff --git a/drivers/atm/solos-pci.c b/drivers/atm/solos-pci.c
+index 5d1d076..d452592 100644
+--- a/drivers/atm/solos-pci.c
++++ b/drivers/atm/solos-pci.c
+@@ -984,6 +984,7 @@ static uint32_t fpga_tx(struct solos_card *card)
+ 			} else if (skb && card->using_dma) {
+ 				SKB_CB(skb)->dma_addr = pci_map_single(card->dev, skb->data,
+ 								       skb->len, PCI_DMA_TODEVICE);
++				card->tx_skb[port] = skb;
+ 				iowrite32(SKB_CB(skb)->dma_addr,
+ 					  card->config_regs + TX_DMA_ADDR(port));
+ 			}
+@@ -1152,7 +1153,8 @@ static int fpga_probe(struct pci_dev *dev, const struct pci_device_id *id)
+ 		db_fpga_upgrade = db_firmware_upgrade = 0;
+ 	}
+ 
+-	if (card->fpga_version >= DMA_SUPPORTED){
++	if (card->fpga_version >= DMA_SUPPORTED) {
++		pci_set_master(dev);
+ 		card->using_dma = 1;
+ 	} else {
+ 		card->using_dma = 0;
+diff --git a/drivers/bluetooth/ath3k.c b/drivers/bluetooth/ath3k.c
+index a714c73..305e752 100644
+--- a/drivers/bluetooth/ath3k.c
++++ b/drivers/bluetooth/ath3k.c
+@@ -78,6 +78,9 @@ static struct usb_device_id ath3k_table[] = {
+ 	/* Atheros AR5BBU12 with sflash firmware */
+ 	{ USB_DEVICE(0x0489, 0xE02C) },
+ 
++	/* Atheros AR5BBU22 with sflash firmware */
++	{ USB_DEVICE(0x0489, 0xE03C) },
++
+ 	{ }	/* Terminating entry */
+ };
+ 
+@@ -94,6 +97,9 @@ static struct usb_device_id ath3k_blist_tbl[] = {
+ 	{ USB_DEVICE(0x13d3, 0x3375), .driver_info = BTUSB_ATH3012 },
+ 	{ USB_DEVICE(0x04ca, 0x3005), .driver_info = BTUSB_ATH3012 },
+ 
++	/* Atheros AR5BBU22 with sflash firmware */
++	{ USB_DEVICE(0x0489, 0xE03C), .driver_info = BTUSB_ATH3012 },
++
+ 	{ }	/* Terminating entry */
+ };
+ 
+diff --git a/drivers/bluetooth/btusb.c b/drivers/bluetooth/btusb.c
+index 8fbda40..f4585b9 100644
+--- a/drivers/bluetooth/btusb.c
++++ b/drivers/bluetooth/btusb.c
+@@ -61,7 +61,7 @@ static struct usb_device_id btusb_table[] = {
+ 	{ USB_DEVICE_INFO(0xe0, 0x01, 0x01) },
+ 
+ 	/* Broadcom SoftSailing reporting vendor specific */
+-	{ USB_DEVICE(0x05ac, 0x21e1) },
++	{ USB_DEVICE(0x0a5c, 0x21e1) },
+ 
+ 	/* Apple MacBookPro 7,1 */
+ 	{ USB_DEVICE(0x05ac, 0x8213) },
+@@ -100,6 +100,17 @@ static struct usb_device_id btusb_table[] = {
+ 	/* Canyon CN-BTU1 with HID interfaces */
+ 	{ USB_DEVICE(0x0c10, 0x0000) },
+ 
++	/* Broadcom BCM20702A0 */
++	{ USB_DEVICE(0x0489, 0xe042) },
++	{ USB_DEVICE(0x0a5c, 0x21e3) },
++	{ USB_DEVICE(0x0a5c, 0x21e6) },
++	{ USB_DEVICE(0x0a5c, 0x21e8) },
++	{ USB_DEVICE(0x0a5c, 0x21f3) },
++	{ USB_DEVICE(0x413c, 0x8197) },
++
++	/* Foxconn - Hon Hai */
++	{ USB_DEVICE(0x0489, 0xe033) },
++
+ 	{ }	/* Terminating entry */
+ };
+ 
+@@ -130,6 +141,9 @@ static struct usb_device_id blacklist_table[] = {
+ 	/* Atheros AR5BBU12 with sflash firmware */
+ 	{ USB_DEVICE(0x0489, 0xe02c), .driver_info = BTUSB_IGNORE },
+ 
++	/* Atheros AR5BBU12 with sflash firmware */
++	{ USB_DEVICE(0x0489, 0xe03c), .driver_info = BTUSB_ATH3012 },
++
+ 	/* Broadcom BCM2035 */
+ 	{ USB_DEVICE(0x0a5c, 0x2035), .driver_info = BTUSB_WRONG_SCO_MTU },
+ 	{ USB_DEVICE(0x0a5c, 0x200a), .driver_info = BTUSB_WRONG_SCO_MTU },
+diff --git a/drivers/gpu/drm/i915/intel_sdvo.c b/drivers/gpu/drm/i915/intel_sdvo.c
+index 06bc46e..c901060 100644
+--- a/drivers/gpu/drm/i915/intel_sdvo.c
++++ b/drivers/gpu/drm/i915/intel_sdvo.c
+@@ -762,10 +762,12 @@ static void intel_sdvo_get_dtd_from_mode(struct intel_sdvo_dtd *dtd,
+ 		((v_sync_len & 0x30) >> 4);
+ 
+ 	dtd->part2.dtd_flags = 0x18;
++	if (mode->flags & DRM_MODE_FLAG_INTERLACE)
++		dtd->part2.dtd_flags |= DTD_FLAG_INTERLACE;
+ 	if (mode->flags & DRM_MODE_FLAG_PHSYNC)
+-		dtd->part2.dtd_flags |= 0x2;
++		dtd->part2.dtd_flags |= DTD_FLAG_HSYNC_POSITIVE;
+ 	if (mode->flags & DRM_MODE_FLAG_PVSYNC)
+-		dtd->part2.dtd_flags |= 0x4;
++		dtd->part2.dtd_flags |= DTD_FLAG_VSYNC_POSITIVE;
+ 
+ 	dtd->part2.sdvo_flags = 0;
+ 	dtd->part2.v_sync_off_high = v_sync_offset & 0xc0;
+@@ -799,9 +801,11 @@ static void intel_sdvo_get_mode_from_dtd(struct drm_display_mode * mode,
+ 	mode->clock = dtd->part1.clock * 10;
+ 
+ 	mode->flags &= ~(DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC);
+-	if (dtd->part2.dtd_flags & 0x2)
++	if (dtd->part2.dtd_flags & DTD_FLAG_INTERLACE)
++		mode->flags |= DRM_MODE_FLAG_INTERLACE;
++	if (dtd->part2.dtd_flags & DTD_FLAG_HSYNC_POSITIVE)
+ 		mode->flags |= DRM_MODE_FLAG_PHSYNC;
+-	if (dtd->part2.dtd_flags & 0x4)
++	if (dtd->part2.dtd_flags & DTD_FLAG_VSYNC_POSITIVE)
+ 		mode->flags |= DRM_MODE_FLAG_PVSYNC;
+ }
+ 
+diff --git a/drivers/gpu/drm/i915/intel_sdvo_regs.h b/drivers/gpu/drm/i915/intel_sdvo_regs.h
+index 4f4e23b..c5c8ddf 100644
+--- a/drivers/gpu/drm/i915/intel_sdvo_regs.h
++++ b/drivers/gpu/drm/i915/intel_sdvo_regs.h
+@@ -61,6 +61,11 @@ struct intel_sdvo_caps {
+     u16 output_flags;
+ } __attribute__((packed));
+ 
++/* Note: SDVO detailed timing flags match EDID misc flags. */
++#define DTD_FLAG_HSYNC_POSITIVE (1 << 1)
++#define DTD_FLAG_VSYNC_POSITIVE (1 << 2)
++#define DTD_FLAG_INTERLACE	(1 << 7)
++
+ /** This matches the EDID DTD structure, more or less */
+ struct intel_sdvo_dtd {
+     struct {
+diff --git a/drivers/gpu/drm/i915/intel_tv.c b/drivers/gpu/drm/i915/intel_tv.c
+index f57b08b..2136e6b 100644
+--- a/drivers/gpu/drm/i915/intel_tv.c
++++ b/drivers/gpu/drm/i915/intel_tv.c
+@@ -1301,6 +1301,11 @@ intel_tv_detect_type (struct intel_tv *intel_tv,
+ 
+ 	I915_WRITE(TV_DAC, save_tv_dac & ~TVDAC_STATE_CHG_EN);
+ 	I915_WRITE(TV_CTL, save_tv_ctl);
++	POSTING_READ(TV_CTL);
++
++	/* For unknown reasons the hw barfs if we don't do this vblank wait. */
++	intel_wait_for_vblank(intel_tv->base.base.dev,
++			      to_intel_crtc(intel_tv->base.base.crtc)->pipe);
+ 
+ 	/* Restore interrupt config */
+ 	if (connector->polled & DRM_CONNECTOR_POLL_HPD) {
+diff --git a/drivers/gpu/drm/radeon/evergreen.c b/drivers/gpu/drm/radeon/evergreen.c
+index fe052c6..8846bad 100644
+--- a/drivers/gpu/drm/radeon/evergreen.c
++++ b/drivers/gpu/drm/radeon/evergreen.c
+@@ -926,6 +926,11 @@ int evergreen_pcie_gart_enable(struct radeon_device *rdev)
+ 		WREG32(MC_VM_MD_L1_TLB0_CNTL, tmp);
+ 		WREG32(MC_VM_MD_L1_TLB1_CNTL, tmp);
+ 		WREG32(MC_VM_MD_L1_TLB2_CNTL, tmp);
++		if ((rdev->family == CHIP_JUNIPER) ||
++		    (rdev->family == CHIP_CYPRESS) ||
++		    (rdev->family == CHIP_HEMLOCK) ||
++		    (rdev->family == CHIP_BARTS))
++			WREG32(MC_VM_MD_L1_TLB3_CNTL, tmp);
+ 	}
+ 	WREG32(MC_VM_MB_L1_TLB0_CNTL, tmp);
+ 	WREG32(MC_VM_MB_L1_TLB1_CNTL, tmp);
+@@ -2064,9 +2069,9 @@ static void evergreen_gpu_init(struct radeon_device *rdev)
+ 		WREG32(CC_SYS_RB_BACKEND_DISABLE, rb);
+ 		WREG32(GC_USER_RB_BACKEND_DISABLE, rb);
+ 		WREG32(CC_GC_SHADER_PIPE_CONFIG, sp);
+-        }
++	}
+ 
+-	grbm_gfx_index |= SE_BROADCAST_WRITES;
++	grbm_gfx_index = INSTANCE_BROADCAST_WRITES | SE_BROADCAST_WRITES;
+ 	WREG32(GRBM_GFX_INDEX, grbm_gfx_index);
+ 	WREG32(RLC_GFX_INDEX, grbm_gfx_index);
+ 
+diff --git a/drivers/gpu/drm/radeon/evergreend.h b/drivers/gpu/drm/radeon/evergreend.h
+index b7b2714..6078ae4 100644
+--- a/drivers/gpu/drm/radeon/evergreend.h
++++ b/drivers/gpu/drm/radeon/evergreend.h
+@@ -230,6 +230,7 @@
+ #define	MC_VM_MD_L1_TLB0_CNTL				0x2654
+ #define	MC_VM_MD_L1_TLB1_CNTL				0x2658
+ #define	MC_VM_MD_L1_TLB2_CNTL				0x265C
++#define	MC_VM_MD_L1_TLB3_CNTL				0x2698
+ 
+ #define	FUS_MC_VM_MD_L1_TLB0_CNTL			0x265C
+ #define	FUS_MC_VM_MD_L1_TLB1_CNTL			0x2660
+diff --git a/drivers/gpu/drm/radeon/radeon_atombios.c b/drivers/gpu/drm/radeon/radeon_atombios.c
+index a324564..ef6b426 100644
+--- a/drivers/gpu/drm/radeon/radeon_atombios.c
++++ b/drivers/gpu/drm/radeon/radeon_atombios.c
+@@ -480,7 +480,9 @@ static bool radeon_atom_apply_quirks(struct drm_device *dev,
+ 	 */
+ 	if ((dev->pdev->device == 0x9498) &&
+ 	    (dev->pdev->subsystem_vendor == 0x1682) &&
+-	    (dev->pdev->subsystem_device == 0x2452)) {
++	    (dev->pdev->subsystem_device == 0x2452) &&
++	    (i2c_bus->valid == false) &&
++	    !(supported_device & (ATOM_DEVICE_TV_SUPPORT | ATOM_DEVICE_CV_SUPPORT))) {
+ 		struct radeon_device *rdev = dev->dev_private;
+ 		*i2c_bus = radeon_lookup_i2c_gpio(rdev, 0x93);
+ 	}
+diff --git a/drivers/gpu/drm/radeon/rv770.c b/drivers/gpu/drm/radeon/rv770.c
+index 84cf82f..51d20aa 100644
+--- a/drivers/gpu/drm/radeon/rv770.c
++++ b/drivers/gpu/drm/radeon/rv770.c
+@@ -151,6 +151,8 @@ int rv770_pcie_gart_enable(struct radeon_device *rdev)
+ 	WREG32(MC_VM_MD_L1_TLB0_CNTL, tmp);
+ 	WREG32(MC_VM_MD_L1_TLB1_CNTL, tmp);
+ 	WREG32(MC_VM_MD_L1_TLB2_CNTL, tmp);
++	if (rdev->family == CHIP_RV740)
++		WREG32(MC_VM_MD_L1_TLB3_CNTL, tmp);
+ 	WREG32(MC_VM_MB_L1_TLB0_CNTL, tmp);
+ 	WREG32(MC_VM_MB_L1_TLB1_CNTL, tmp);
+ 	WREG32(MC_VM_MB_L1_TLB2_CNTL, tmp);
+diff --git a/drivers/gpu/drm/radeon/rv770d.h b/drivers/gpu/drm/radeon/rv770d.h
+index 79fa588..7538092 100644
+--- a/drivers/gpu/drm/radeon/rv770d.h
++++ b/drivers/gpu/drm/radeon/rv770d.h
+@@ -174,6 +174,7 @@
+ #define	MC_VM_MD_L1_TLB0_CNTL				0x2654
+ #define	MC_VM_MD_L1_TLB1_CNTL				0x2658
+ #define	MC_VM_MD_L1_TLB2_CNTL				0x265C
++#define	MC_VM_MD_L1_TLB3_CNTL				0x2698
+ #define	MC_VM_SYSTEM_APERTURE_DEFAULT_ADDR		0x203C
+ #define	MC_VM_SYSTEM_APERTURE_HIGH_ADDR			0x2038
+ #define	MC_VM_SYSTEM_APERTURE_LOW_ADDR			0x2034
+diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c
+index 81b6850..7632edb 100644
+--- a/drivers/gpu/drm/ttm/ttm_bo.c
++++ b/drivers/gpu/drm/ttm/ttm_bo.c
+@@ -1809,6 +1809,7 @@ static int ttm_bo_swapout(struct ttm_mem_shrink *shrink)
+ 			spin_unlock(&glob->lru_lock);
+ 			(void) ttm_bo_cleanup_refs(bo, false, false, false);
+ 			kref_put(&bo->list_kref, ttm_bo_release_list);
++			spin_lock(&glob->lru_lock);
+ 			continue;
+ 		}
+ 
+diff --git a/drivers/mtd/nand/nand_bbt.c b/drivers/mtd/nand/nand_bbt.c
+index ccbeaa1..cc81cd6 100644
+--- a/drivers/mtd/nand/nand_bbt.c
++++ b/drivers/mtd/nand/nand_bbt.c
+@@ -360,6 +360,7 @@ static int scan_read_raw_oob(struct mtd_info *mtd, uint8_t *buf, loff_t offs,
+ 
+ 		buf += mtd->oobsize + mtd->writesize;
+ 		len -= mtd->writesize;
++		offs += mtd->writesize;
+ 	}
+ 	return 0;
+ }
+diff --git a/drivers/net/macvlan.c b/drivers/net/macvlan.c
+index ab4723d..735f726 100644
+--- a/drivers/net/macvlan.c
++++ b/drivers/net/macvlan.c
+@@ -247,7 +247,7 @@ static int macvlan_queue_xmit(struct sk_buff *skb, struct net_device *dev)
+ 
+ xmit_world:
+ 	skb->ip_summed = ip_summed;
+-	skb_set_dev(skb, vlan->lowerdev);
++	skb->dev = vlan->lowerdev;
+ 	return dev_queue_xmit(skb);
+ }
+ 
+diff --git a/drivers/net/wireless/iwlwifi/iwl-agn-rs.c b/drivers/net/wireless/iwlwifi/iwl-agn-rs.c
+index 592b0cf..2aed7a0 100644
+--- a/drivers/net/wireless/iwlwifi/iwl-agn-rs.c
++++ b/drivers/net/wireless/iwlwifi/iwl-agn-rs.c
+@@ -878,6 +878,7 @@ static void rs_bt_update_lq(struct iwl_priv *priv, struct iwl_rxon_context *ctx,
+ 	if ((priv->bt_traffic_load != priv->last_bt_traffic_load) ||
+ 	    (priv->bt_full_concurrent != full_concurrent)) {
+ 		priv->bt_full_concurrent = full_concurrent;
++		priv->last_bt_traffic_load = priv->bt_traffic_load;
+ 
+ 		/* Update uCode's rate table. */
+ 		tbl = &(lq_sta->lq_info[lq_sta->active_tbl]);
+diff --git a/drivers/net/wireless/wl1251/sdio.c b/drivers/net/wireless/wl1251/sdio.c
+index 85a7101..4cf5c2e 100644
+--- a/drivers/net/wireless/wl1251/sdio.c
++++ b/drivers/net/wireless/wl1251/sdio.c
+@@ -259,6 +259,7 @@ static int wl1251_sdio_probe(struct sdio_func *func,
+ 	}
+ 
+ 	if (wl->irq) {
++		irq_set_status_flags(wl->irq, IRQ_NOAUTOEN);
+ 		ret = request_irq(wl->irq, wl1251_line_irq, 0, "wl1251", wl);
+ 		if (ret < 0) {
+ 			wl1251_error("request_irq() failed: %d", ret);
+@@ -266,7 +267,6 @@ static int wl1251_sdio_probe(struct sdio_func *func,
+ 		}
+ 
+ 		irq_set_irq_type(wl->irq, IRQ_TYPE_EDGE_RISING);
+-		disable_irq(wl->irq);
+ 
+ 		wl1251_sdio_ops.enable_irq = wl1251_enable_line_irq;
+ 		wl1251_sdio_ops.disable_irq = wl1251_disable_line_irq;
+diff --git a/drivers/net/wireless/wl1251/spi.c b/drivers/net/wireless/wl1251/spi.c
+index af6448c..49f3651 100644
+--- a/drivers/net/wireless/wl1251/spi.c
++++ b/drivers/net/wireless/wl1251/spi.c
+@@ -280,6 +280,7 @@ static int __devinit wl1251_spi_probe(struct spi_device *spi)
+ 
+ 	wl->use_eeprom = pdata->use_eeprom;
+ 
++	irq_set_status_flags(wl->irq, IRQ_NOAUTOEN);
+ 	ret = request_irq(wl->irq, wl1251_irq, 0, DRIVER_NAME, wl);
+ 	if (ret < 0) {
+ 		wl1251_error("request_irq() failed: %d", ret);
+@@ -288,8 +289,6 @@ static int __devinit wl1251_spi_probe(struct spi_device *spi)
+ 
+ 	irq_set_irq_type(wl->irq, IRQ_TYPE_EDGE_RISING);
+ 
+-	disable_irq(wl->irq);
+-
+ 	ret = wl1251_init_ieee80211(wl);
+ 	if (ret)
+ 		goto out_irq;
+diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
+index 72ab1e6..99fc45b 100644
+--- a/drivers/scsi/scsi_lib.c
++++ b/drivers/scsi/scsi_lib.c
+@@ -1380,16 +1380,19 @@ static int scsi_lld_busy(struct request_queue *q)
+ {
+ 	struct scsi_device *sdev = q->queuedata;
+ 	struct Scsi_Host *shost;
+-	struct scsi_target *starget;
+ 
+ 	if (!sdev)
+ 		return 0;
+ 
+ 	shost = sdev->host;
+-	starget = scsi_target(sdev);
+ 
+-	if (scsi_host_in_recovery(shost) || scsi_host_is_busy(shost) ||
+-	    scsi_target_is_busy(starget) || scsi_device_is_busy(sdev))
++	/*
++	 * Ignore host/starget busy state.
++	 * Since block layer does not have a concept of fairness across
++	 * multiple queues, congestion of host/starget needs to be handled
++	 * in SCSI layer.
++	 */
++	if (scsi_host_in_recovery(shost) || scsi_device_is_busy(sdev))
+ 		return 1;
+ 
+ 	return 0;
+diff --git a/drivers/scsi/scsi_wait_scan.c b/drivers/scsi/scsi_wait_scan.c
+index 74708fc..ae78148 100644
+--- a/drivers/scsi/scsi_wait_scan.c
++++ b/drivers/scsi/scsi_wait_scan.c
+@@ -12,7 +12,7 @@
+ 
+ #include <linux/module.h>
+ #include <linux/device.h>
+-#include <scsi/scsi_scan.h>
++#include "scsi_priv.h"
+ 
+ static int __init wait_scan_init(void)
+ {
+diff --git a/fs/cifs/cifsglob.h b/fs/cifs/cifsglob.h
+index 6255fa8..7cb9dd2 100644
+--- a/fs/cifs/cifsglob.h
++++ b/fs/cifs/cifsglob.h
+@@ -43,6 +43,7 @@
+ 
+ #define CIFS_MIN_RCV_POOL 4
+ 
++#define MAX_REOPEN_ATT	5 /* these many maximum attempts to reopen a file */
+ /*
+  * default attribute cache timeout (jiffies)
+  */
+diff --git a/fs/cifs/file.c b/fs/cifs/file.c
+index a9b4a24..9040cb0 100644
+--- a/fs/cifs/file.c
++++ b/fs/cifs/file.c
+@@ -973,10 +973,11 @@ struct cifsFileInfo *find_readable_file(struct cifsInodeInfo *cifs_inode,
+ struct cifsFileInfo *find_writable_file(struct cifsInodeInfo *cifs_inode,
+ 					bool fsuid_only)
+ {
+-	struct cifsFileInfo *open_file;
++	struct cifsFileInfo *open_file, *inv_file = NULL;
+ 	struct cifs_sb_info *cifs_sb;
+ 	bool any_available = false;
+ 	int rc;
++	unsigned int refind = 0;
+ 
+ 	/* Having a null inode here (because mapping->host was set to zero by
+ 	the VFS or MM) should not happen but we had reports of on oops (due to
+@@ -996,40 +997,25 @@ struct cifsFileInfo *find_writable_file(struct cifsInodeInfo *cifs_inode,
+ 
+ 	spin_lock(&cifs_file_list_lock);
+ refind_writable:
++	if (refind > MAX_REOPEN_ATT) {
++		spin_unlock(&cifs_file_list_lock);
++		return NULL;
++	}
+ 	list_for_each_entry(open_file, &cifs_inode->openFileList, flist) {
+ 		if (!any_available && open_file->pid != current->tgid)
+ 			continue;
+ 		if (fsuid_only && open_file->uid != current_fsuid())
+ 			continue;
+ 		if (OPEN_FMODE(open_file->f_flags) & FMODE_WRITE) {
+-			cifsFileInfo_get(open_file);
+-
+ 			if (!open_file->invalidHandle) {
+ 				/* found a good writable file */
++				cifsFileInfo_get(open_file);
+ 				spin_unlock(&cifs_file_list_lock);
+ 				return open_file;
++			} else {
++				if (!inv_file)
++					inv_file = open_file;
+ 			}
+-
+-			spin_unlock(&cifs_file_list_lock);
+-
+-			/* Had to unlock since following call can block */
+-			rc = cifs_reopen_file(open_file, false);
+-			if (!rc)
+-				return open_file;
+-
+-			/* if it fails, try another handle if possible */
+-			cFYI(1, "wp failed on reopen file");
+-			cifsFileInfo_put(open_file);
+-
+-			spin_lock(&cifs_file_list_lock);
+-
+-			/* else we simply continue to the next entry. Thus
+-			   we do not loop on reopen errors.  If we
+-			   can not reopen the file, for example if we
+-			   reconnected to a server with another client
+-			   racing to delete or lock the file we would not
+-			   make progress if we restarted before the beginning
+-			   of the loop here. */
+ 		}
+ 	}
+ 	/* couldn't find useable FH with same pid, try any available */
+@@ -1037,7 +1023,30 @@ refind_writable:
+ 		any_available = true;
+ 		goto refind_writable;
+ 	}
++
++	if (inv_file) {
++		any_available = false;
++		cifsFileInfo_get(inv_file);
++	}
++
+ 	spin_unlock(&cifs_file_list_lock);
++
++	if (inv_file) {
++		rc = cifs_reopen_file(inv_file, false);
++		if (!rc)
++			return inv_file;
++		else {
++			spin_lock(&cifs_file_list_lock);
++			list_move_tail(&inv_file->flist,
++					&cifs_inode->openFileList);
++			spin_unlock(&cifs_file_list_lock);
++			cifsFileInfo_put(inv_file);
++			spin_lock(&cifs_file_list_lock);
++			++refind;
++			goto refind_writable;
++		}
++	}
++
+ 	return NULL;
+ }
+ 
+diff --git a/fs/ext4/ioctl.c b/fs/ext4/ioctl.c
+index 808c554..4cbe1c2 100644
+--- a/fs/ext4/ioctl.c
++++ b/fs/ext4/ioctl.c
+@@ -35,7 +35,7 @@ long ext4_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
+ 		handle_t *handle = NULL;
+ 		int err, migrate = 0;
+ 		struct ext4_iloc iloc;
+-		unsigned int oldflags;
++		unsigned int oldflags, mask, i;
+ 		unsigned int jflag;
+ 
+ 		if (!inode_owner_or_capable(inode))
+@@ -112,9 +112,14 @@ long ext4_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
+ 		if (err)
+ 			goto flags_err;
+ 
+-		flags = flags & EXT4_FL_USER_MODIFIABLE;
+-		flags |= oldflags & ~EXT4_FL_USER_MODIFIABLE;
+-		ei->i_flags = flags;
++		for (i = 0, mask = 1; i < 32; i++, mask <<= 1) {
++			if (!(mask & EXT4_FL_USER_MODIFIABLE))
++				continue;
++			if (mask & flags)
++				ext4_set_inode_flag(inode, i);
++			else
++				ext4_clear_inode_flag(inode, i);
++		}
+ 
+ 		ext4_set_inode_flags(inode);
+ 		inode->i_ctime = ext4_current_time(inode);
+diff --git a/fs/ext4/mballoc.c b/fs/ext4/mballoc.c
+index 0f1be7f..b6adf68 100644
+--- a/fs/ext4/mballoc.c
++++ b/fs/ext4/mballoc.c
+@@ -2528,6 +2528,9 @@ int ext4_mb_release(struct super_block *sb)
+ 	struct ext4_sb_info *sbi = EXT4_SB(sb);
+ 	struct kmem_cache *cachep = get_groupinfo_cache(sb->s_blocksize_bits);
+ 
++	if (sbi->s_proc)
++		remove_proc_entry("mb_groups", sbi->s_proc);
++
+ 	if (sbi->s_group_info) {
+ 		for (i = 0; i < ngroups; i++) {
+ 			grinfo = ext4_get_group_info(sb, i);
+@@ -2575,8 +2578,6 @@ int ext4_mb_release(struct super_block *sb)
+ 	}
+ 
+ 	free_percpu(sbi->s_locality_groups);
+-	if (sbi->s_proc)
+-		remove_proc_entry("mb_groups", sbi->s_proc);
+ 
+ 	return 0;
+ }
+@@ -4583,6 +4584,7 @@ do_more:
+ 		 */
+ 		new_entry = kmem_cache_alloc(ext4_free_ext_cachep, GFP_NOFS);
+ 		if (!new_entry) {
++			ext4_mb_unload_buddy(&e4b);
+ 			err = -ENOMEM;
+ 			goto error_return;
+ 		}
+diff --git a/fs/ext4/super.c b/fs/ext4/super.c
+index df121b2..113b107 100644
+--- a/fs/ext4/super.c
++++ b/fs/ext4/super.c
+@@ -433,6 +433,7 @@ void __ext4_error(struct super_block *sb, const char *function,
+ 	printk(KERN_CRIT "EXT4-fs error (device %s): %s:%d: comm %s: %pV\n",
+ 	       sb->s_id, function, line, current->comm, &vaf);
+ 	va_end(args);
++	save_error_info(sb, function, line);
+ 
+ 	ext4_handle_error(sb);
+ }
+@@ -3618,7 +3619,8 @@ no_journal:
+ 		goto failed_mount4;
+ 	}
+ 
+-	ext4_setup_super(sb, es, sb->s_flags & MS_RDONLY);
++	if (ext4_setup_super(sb, es, sb->s_flags & MS_RDONLY))
++		sb->s_flags |= MS_RDONLY;
+ 
+ 	/* determine the minimum size of new large inodes, if present */
+ 	if (sbi->s_inode_size > EXT4_GOOD_OLD_INODE_SIZE) {
+diff --git a/fs/namespace.c b/fs/namespace.c
+index edc1c4a..b3d8f51 100644
+--- a/fs/namespace.c
++++ b/fs/namespace.c
+@@ -1244,8 +1244,9 @@ void umount_tree(struct vfsmount *mnt, int propagate, struct list_head *kill)
+ 		list_del_init(&p->mnt_expire);
+ 		list_del_init(&p->mnt_list);
+ 		__touch_mnt_namespace(p->mnt_ns);
++		if (p->mnt_ns)
++			__mnt_make_shortterm(p);
+ 		p->mnt_ns = NULL;
+-		__mnt_make_shortterm(p);
+ 		list_del_init(&p->mnt_child);
+ 		if (p->mnt_parent != p) {
+ 			p->mnt_parent->mnt_ghosts++;
+diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c
+index 30f6548..b7a7e5f 100644
+--- a/fs/nfs/nfs4proc.c
++++ b/fs/nfs/nfs4proc.c
+@@ -94,6 +94,8 @@ static int nfs4_map_errors(int err)
+ 	case -NFS4ERR_BADOWNER:
+ 	case -NFS4ERR_BADNAME:
+ 		return -EINVAL;
++	case -NFS4ERR_SHARE_DENIED:
++		return -EACCES;
+ 	default:
+ 		dprintk("%s could not handle NFSv4 error %d\n",
+ 				__func__, -err);
+diff --git a/include/asm-generic/pgtable.h b/include/asm-generic/pgtable.h
+index a03c098..831924a 100644
+--- a/include/asm-generic/pgtable.h
++++ b/include/asm-generic/pgtable.h
+@@ -445,6 +445,18 @@ static inline int pmd_write(pmd_t pmd)
+ #endif /* __HAVE_ARCH_PMD_WRITE */
+ #endif /* CONFIG_TRANSPARENT_HUGEPAGE */
+ 
++#ifndef pmd_read_atomic
++static inline pmd_t pmd_read_atomic(pmd_t *pmdp)
++{
++	/*
++	 * Depend on compiler for an atomic pmd read. NOTE: this is
++	 * only going to work, if the pmdval_t isn't larger than
++	 * an unsigned long.
++	 */
++	return *pmdp;
++}
++#endif
++
+ /*
+  * This function is meant to be used by sites walking pagetables with
+  * the mmap_sem hold in read mode to protect against MADV_DONTNEED and
+@@ -458,11 +470,17 @@ static inline int pmd_write(pmd_t pmd)
+  * undefined so behaving like if the pmd was none is safe (because it
+  * can return none anyway). The compiler level barrier() is critically
+  * important to compute the two checks atomically on the same pmdval.
++ *
++ * For 32bit kernels with a 64bit large pmd_t this automatically takes
++ * care of reading the pmd atomically to avoid SMP race conditions
++ * against pmd_populate() when the mmap_sem is hold for reading by the
++ * caller (a special atomic read not done by "gcc" as in the generic
++ * version above, is also needed when THP is disabled because the page
++ * fault can populate the pmd from under us).
+  */
+ static inline int pmd_none_or_trans_huge_or_clear_bad(pmd_t *pmd)
+ {
+-	/* depend on compiler for an atomic pmd read */
+-	pmd_t pmdval = *pmd;
++	pmd_t pmdval = pmd_read_atomic(pmd);
+ 	/*
+ 	 * The barrier will stabilize the pmdval in a register or on
+ 	 * the stack so that it will stop changing under the code.
+diff --git a/include/drm/drm_pciids.h b/include/drm/drm_pciids.h
+index 14b6cd0..4306811 100644
+--- a/include/drm/drm_pciids.h
++++ b/include/drm/drm_pciids.h
+@@ -181,6 +181,7 @@
+ 	{0x1002, 0x6747, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TURKS|RADEON_NEW_MEMMAP}, \
+ 	{0x1002, 0x6748, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TURKS|RADEON_NEW_MEMMAP}, \
+ 	{0x1002, 0x6749, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TURKS|RADEON_NEW_MEMMAP}, \
++	{0x1002, 0x674A, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TURKS|RADEON_NEW_MEMMAP}, \
+ 	{0x1002, 0x6750, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TURKS|RADEON_NEW_MEMMAP}, \
+ 	{0x1002, 0x6751, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TURKS|RADEON_NEW_MEMMAP}, \
+ 	{0x1002, 0x6758, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TURKS|RADEON_NEW_MEMMAP}, \
+@@ -198,6 +199,7 @@
+ 	{0x1002, 0x6767, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CAICOS|RADEON_NEW_MEMMAP}, \
+ 	{0x1002, 0x6768, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CAICOS|RADEON_NEW_MEMMAP}, \
+ 	{0x1002, 0x6770, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CAICOS|RADEON_NEW_MEMMAP}, \
++	{0x1002, 0x6771, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CAICOS|RADEON_NEW_MEMMAP}, \
+ 	{0x1002, 0x6772, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CAICOS|RADEON_NEW_MEMMAP}, \
+ 	{0x1002, 0x6778, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CAICOS|RADEON_NEW_MEMMAP}, \
+ 	{0x1002, 0x6779, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CAICOS|RADEON_NEW_MEMMAP}, \
+diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
+index 33b5968..c6d6d48 100644
+--- a/include/linux/netdevice.h
++++ b/include/linux/netdevice.h
+@@ -1453,15 +1453,6 @@ static inline bool netdev_uses_dsa_tags(struct net_device *dev)
+ 	return 0;
+ }
+ 
+-#ifndef CONFIG_NET_NS
+-static inline void skb_set_dev(struct sk_buff *skb, struct net_device *dev)
+-{
+-	skb->dev = dev;
+-}
+-#else /* CONFIG_NET_NS */
+-void skb_set_dev(struct sk_buff *skb, struct net_device *dev);
+-#endif
+-
+ static inline bool netdev_uses_trailer_tags(struct net_device *dev)
+ {
+ #ifdef CONFIG_NET_DSA_TAG_TRAILER
+diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
+index f13b52b..37b643b 100644
+--- a/include/linux/skbuff.h
++++ b/include/linux/skbuff.h
+@@ -1633,8 +1633,6 @@ static inline int __skb_cow(struct sk_buff *skb, unsigned int headroom,
+ {
+ 	int delta = 0;
+ 
+-	if (headroom < NET_SKB_PAD)
+-		headroom = NET_SKB_PAD;
+ 	if (headroom > skb_headroom(skb))
+ 		delta = headroom - skb_headroom(skb);
+ 
+diff --git a/include/net/dst.h b/include/net/dst.h
+index d020134..7907ff1 100644
+--- a/include/net/dst.h
++++ b/include/net/dst.h
+@@ -78,6 +78,7 @@ struct dst_entry {
+ #define DST_NOHASH		0x0008
+ #define DST_NOCACHE		0x0010
+ #define DST_NOCOUNT		0x0020
++#define DST_XFRM_TUNNEL		0x0100
+ 	union {
+ 		struct dst_entry	*next;
+ 		struct rtable __rcu	*rt_next;
+diff --git a/include/net/sctp/sctp.h b/include/net/sctp/sctp.h
+index b2c2366..f686066 100644
+--- a/include/net/sctp/sctp.h
++++ b/include/net/sctp/sctp.h
+@@ -700,4 +700,17 @@ static inline void sctp_v4_map_v6(union sctp_addr *addr)
+ 	addr->v6.sin6_addr.s6_addr32[2] = htonl(0x0000ffff);
+ }
+ 
++/* The cookie is always 0 since this is how it's used in the
++ * pmtu code.
++ */
++static inline struct dst_entry *sctp_transport_dst_check(struct sctp_transport *t)
++{
++	if (t->dst && !dst_check(t->dst, 0)) {
++		dst_release(t->dst);
++		t->dst = NULL;
++	}
++
++	return t->dst;
++}
++
+ #endif /* __net_sctp_h__ */
+diff --git a/mm/vmscan.c b/mm/vmscan.c
+index 6072d74..769935d 100644
+--- a/mm/vmscan.c
++++ b/mm/vmscan.c
+@@ -665,7 +665,7 @@ static enum page_references page_check_references(struct page *page,
+ 		return PAGEREF_RECLAIM;
+ 
+ 	if (referenced_ptes) {
+-		if (PageAnon(page))
++		if (PageSwapBacked(page))
+ 			return PAGEREF_ACTIVATE;
+ 		/*
+ 		 * All mapped pages start out with page table
+diff --git a/net/8021q/vlan_dev.c b/net/8021q/vlan_dev.c
+index 5b4f51d..d548456 100644
+--- a/net/8021q/vlan_dev.c
++++ b/net/8021q/vlan_dev.c
+@@ -154,7 +154,7 @@ static netdev_tx_t vlan_dev_hard_start_xmit(struct sk_buff *skb,
+ 		skb = __vlan_hwaccel_put_tag(skb, vlan_tci);
+ 	}
+ 
+-	skb_set_dev(skb, vlan_dev_info(dev)->real_dev);
++	skb->dev = vlan_dev_info(dev)->real_dev;
+ 	len = skb->len;
+ 	ret = dev_queue_xmit(skb);
+ 
+diff --git a/net/core/dev.c b/net/core/dev.c
+index 1e77897..a71eafc 100644
+--- a/net/core/dev.c
++++ b/net/core/dev.c
+@@ -1533,10 +1533,14 @@ int dev_forward_skb(struct net_device *dev, struct sk_buff *skb)
+ 		kfree_skb(skb);
+ 		return NET_RX_DROP;
+ 	}
+-	skb_set_dev(skb, dev);
++	skb->dev = dev;
++	skb_dst_drop(skb);
+ 	skb->tstamp.tv64 = 0;
+ 	skb->pkt_type = PACKET_HOST;
+ 	skb->protocol = eth_type_trans(skb, dev);
++	skb->mark = 0;
++	secpath_reset(skb);
++	nf_reset(skb);
+ 	return netif_rx(skb);
+ }
+ EXPORT_SYMBOL_GPL(dev_forward_skb);
+@@ -1791,36 +1795,6 @@ void netif_device_attach(struct net_device *dev)
+ }
+ EXPORT_SYMBOL(netif_device_attach);
+ 
+-/**
+- * skb_dev_set -- assign a new device to a buffer
+- * @skb: buffer for the new device
+- * @dev: network device
+- *
+- * If an skb is owned by a device already, we have to reset
+- * all data private to the namespace a device belongs to
+- * before assigning it a new device.
+- */
+-#ifdef CONFIG_NET_NS
+-void skb_set_dev(struct sk_buff *skb, struct net_device *dev)
+-{
+-	skb_dst_drop(skb);
+-	if (skb->dev && !net_eq(dev_net(skb->dev), dev_net(dev))) {
+-		secpath_reset(skb);
+-		nf_reset(skb);
+-		skb_init_secmark(skb);
+-		skb->mark = 0;
+-		skb->priority = 0;
+-		skb->nf_trace = 0;
+-		skb->ipvs_property = 0;
+-#ifdef CONFIG_NET_SCHED
+-		skb->tc_index = 0;
+-#endif
+-	}
+-	skb->dev = dev;
+-}
+-EXPORT_SYMBOL(skb_set_dev);
+-#endif /* CONFIG_NET_NS */
+-
+ /*
+  * Invalidate hardware checksum when packet is to be mangled, and
+  * complete checksum manually on outgoing path.
+diff --git a/net/core/pktgen.c b/net/core/pktgen.c
+index e35a6fb..c0e0f76 100644
+--- a/net/core/pktgen.c
++++ b/net/core/pktgen.c
+@@ -1932,7 +1932,7 @@ static int pktgen_device_event(struct notifier_block *unused,
+ {
+ 	struct net_device *dev = ptr;
+ 
+-	if (!net_eq(dev_net(dev), &init_net))
++	if (!net_eq(dev_net(dev), &init_net) || pktgen_exiting)
+ 		return NOTIFY_DONE;
+ 
+ 	/* It is OK that we do not hold the group lock right now,
+@@ -3755,12 +3755,18 @@ static void __exit pg_cleanup(void)
+ {
+ 	struct pktgen_thread *t;
+ 	struct list_head *q, *n;
++	LIST_HEAD(list);
+ 
+ 	/* Stop all interfaces & threads */
+ 	pktgen_exiting = true;
+ 
+-	list_for_each_safe(q, n, &pktgen_threads) {
++	mutex_lock(&pktgen_thread_lock);
++	list_splice_init(&pktgen_threads, &list);
++	mutex_unlock(&pktgen_thread_lock);
++
++	list_for_each_safe(q, n, &list) {
+ 		t = list_entry(q, struct pktgen_thread, th_list);
++		list_del(&t->th_list);
+ 		kthread_stop(t->tsk);
+ 		kfree(t);
+ 	}
+diff --git a/net/ipv4/esp4.c b/net/ipv4/esp4.c
+index a5b4134..530787b 100644
+--- a/net/ipv4/esp4.c
++++ b/net/ipv4/esp4.c
+@@ -457,28 +457,22 @@ static u32 esp4_get_mtu(struct xfrm_state *x, int mtu)
+ 	struct esp_data *esp = x->data;
+ 	u32 blksize = ALIGN(crypto_aead_blocksize(esp->aead), 4);
+ 	u32 align = max_t(u32, blksize, esp->padlen);
+-	u32 rem;
+-
+-	mtu -= x->props.header_len + crypto_aead_authsize(esp->aead);
+-	rem = mtu & (align - 1);
+-	mtu &= ~(align - 1);
++	unsigned int net_adj;
+ 
+ 	switch (x->props.mode) {
+-	case XFRM_MODE_TUNNEL:
+-		break;
+-	default:
+ 	case XFRM_MODE_TRANSPORT:
+-		/* The worst case */
+-		mtu -= blksize - 4;
+-		mtu += min_t(u32, blksize - 4, rem);
+-		break;
+ 	case XFRM_MODE_BEET:
+-		/* The worst case. */
+-		mtu += min_t(u32, IPV4_BEET_PHMAXLEN, rem);
++		net_adj = sizeof(struct iphdr);
+ 		break;
++	case XFRM_MODE_TUNNEL:
++		net_adj = 0;
++		break;
++	default:
++		BUG();
+ 	}
+ 
+-	return mtu - 2;
++	return ((mtu - x->props.header_len - crypto_aead_authsize(esp->aead) -
++		 net_adj) & ~(align - 1)) + (net_adj - 2);
+ }
+ 
+ static void esp4_err(struct sk_buff *skb, u32 info)
+diff --git a/net/ipv4/fib_semantics.c b/net/ipv4/fib_semantics.c
+index 33e2c35..7e454ba 100644
+--- a/net/ipv4/fib_semantics.c
++++ b/net/ipv4/fib_semantics.c
+@@ -142,6 +142,18 @@ const struct fib_prop fib_props[RTN_MAX + 1] = {
+ };
+ 
+ /* Release a nexthop info record */
++static void free_fib_info_rcu(struct rcu_head *head)
++{
++	struct fib_info *fi = container_of(head, struct fib_info, rcu);
++
++	change_nexthops(fi) {
++		if (nexthop_nh->nh_dev)
++			dev_put(nexthop_nh->nh_dev);
++	} endfor_nexthops(fi);
++
++	release_net(fi->fib_net);
++	kfree(fi);
++}
+ 
+ void free_fib_info(struct fib_info *fi)
+ {
+@@ -149,14 +161,8 @@ void free_fib_info(struct fib_info *fi)
+ 		pr_warning("Freeing alive fib_info %p\n", fi);
+ 		return;
+ 	}
+-	change_nexthops(fi) {
+-		if (nexthop_nh->nh_dev)
+-			dev_put(nexthop_nh->nh_dev);
+-		nexthop_nh->nh_dev = NULL;
+-	} endfor_nexthops(fi);
+ 	fib_info_cnt--;
+-	release_net(fi->fib_net);
+-	kfree_rcu(fi, rcu);
++	call_rcu(&fi->rcu, free_fib_info_rcu);
+ }
+ 
+ void fib_release_info(struct fib_info *fi)
+diff --git a/net/ipv4/fib_trie.c b/net/ipv4/fib_trie.c
+index 58c25ea..0d884eb 100644
+--- a/net/ipv4/fib_trie.c
++++ b/net/ipv4/fib_trie.c
+@@ -1371,6 +1371,8 @@ static int check_leaf(struct fib_table *tb, struct trie *t, struct leaf *l,
+ 
+ 			if (fa->fa_tos && fa->fa_tos != flp->flowi4_tos)
+ 				continue;
++			if (fi->fib_dead)
++				continue;
+ 			if (fa->fa_info->fib_scope < flp->flowi4_scope)
+ 				continue;
+ 			fib_alias_accessed(fa);
+diff --git a/net/ipv6/esp6.c b/net/ipv6/esp6.c
+index 1ac7938..65dd543 100644
+--- a/net/ipv6/esp6.c
++++ b/net/ipv6/esp6.c
+@@ -411,19 +411,15 @@ static u32 esp6_get_mtu(struct xfrm_state *x, int mtu)
+ 	struct esp_data *esp = x->data;
+ 	u32 blksize = ALIGN(crypto_aead_blocksize(esp->aead), 4);
+ 	u32 align = max_t(u32, blksize, esp->padlen);
+-	u32 rem;
++	unsigned int net_adj;
+ 
+-	mtu -= x->props.header_len + crypto_aead_authsize(esp->aead);
+-	rem = mtu & (align - 1);
+-	mtu &= ~(align - 1);
+-
+-	if (x->props.mode != XFRM_MODE_TUNNEL) {
+-		u32 padsize = ((blksize - 1) & 7) + 1;
+-		mtu -= blksize - padsize;
+-		mtu += min_t(u32, blksize - padsize, rem);
+-	}
++	if (x->props.mode != XFRM_MODE_TUNNEL)
++		net_adj = sizeof(struct ipv6hdr);
++	else
++		net_adj = 0;
+ 
+-	return mtu - 2;
++	return ((mtu - x->props.header_len - crypto_aead_authsize(esp->aead) -
++		 net_adj) & ~(align - 1)) + (net_adj - 2);
+ }
+ 
+ static void esp6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
+diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c
+index 9cbf176..ae9f6d4 100644
+--- a/net/ipv6/ip6_output.c
++++ b/net/ipv6/ip6_output.c
+@@ -1194,6 +1194,29 @@ static inline struct ipv6_rt_hdr *ip6_rthdr_dup(struct ipv6_rt_hdr *src,
+ 	return src ? kmemdup(src, (src->hdrlen + 1) * 8, gfp) : NULL;
+ }
+ 
++static void ip6_append_data_mtu(int *mtu,
++				int *maxfraglen,
++				unsigned int fragheaderlen,
++				struct sk_buff *skb,
++				struct rt6_info *rt)
++{
++	if (!(rt->dst.flags & DST_XFRM_TUNNEL)) {
++		if (skb == NULL) {
++			/* first fragment, reserve header_len */
++			*mtu = *mtu - rt->dst.header_len;
++
++		} else {
++			/*
++			 * this fragment is not first, the headers
++			 * space is regarded as data space.
++			 */
++			*mtu = dst_mtu(rt->dst.path);
++		}
++		*maxfraglen = ((*mtu - fragheaderlen) & ~7)
++			      + fragheaderlen - sizeof(struct frag_hdr);
++	}
++}
++
+ int ip6_append_data(struct sock *sk, int getfrag(void *from, char *to,
+ 	int offset, int len, int odd, struct sk_buff *skb),
+ 	void *from, int length, int transhdrlen,
+@@ -1203,7 +1226,7 @@ int ip6_append_data(struct sock *sk, int getfrag(void *from, char *to,
+ 	struct inet_sock *inet = inet_sk(sk);
+ 	struct ipv6_pinfo *np = inet6_sk(sk);
+ 	struct inet_cork *cork;
+-	struct sk_buff *skb;
++	struct sk_buff *skb, *skb_prev = NULL;
+ 	unsigned int maxfraglen, fragheaderlen;
+ 	int exthdrlen;
+ 	int hh_len;
+@@ -1260,8 +1283,12 @@ int ip6_append_data(struct sock *sk, int getfrag(void *from, char *to,
+ 		inet->cork.fl.u.ip6 = *fl6;
+ 		np->cork.hop_limit = hlimit;
+ 		np->cork.tclass = tclass;
+-		mtu = np->pmtudisc == IPV6_PMTUDISC_PROBE ?
+-		      rt->dst.dev->mtu : dst_mtu(rt->dst.path);
++		if (rt->dst.flags & DST_XFRM_TUNNEL)
++			mtu = np->pmtudisc == IPV6_PMTUDISC_PROBE ?
++			      rt->dst.dev->mtu : dst_mtu(&rt->dst);
++		else
++			mtu = np->pmtudisc == IPV6_PMTUDISC_PROBE ?
++			      rt->dst.dev->mtu : dst_mtu(rt->dst.path);
+ 		if (np->frag_size < mtu) {
+ 			if (np->frag_size)
+ 				mtu = np->frag_size;
+@@ -1356,38 +1383,43 @@ int ip6_append_data(struct sock *sk, int getfrag(void *from, char *to,
+ 			unsigned int fraglen;
+ 			unsigned int fraggap;
+ 			unsigned int alloclen;
+-			struct sk_buff *skb_prev;
+ alloc_new_skb:
+-			skb_prev = skb;
+-
+ 			/* There's no room in the current skb */
+-			if (skb_prev)
+-				fraggap = skb_prev->len - maxfraglen;
++			if (skb)
++				fraggap = skb->len - maxfraglen;
+ 			else
+ 				fraggap = 0;
++			/* update mtu and maxfraglen if necessary */
++			if (skb == NULL || skb_prev == NULL)
++				ip6_append_data_mtu(&mtu, &maxfraglen,
++						    fragheaderlen, skb, rt);
++
++			skb_prev = skb;
+ 
+ 			/*
+ 			 * If remaining data exceeds the mtu,
+ 			 * we know we need more fragment(s).
+ 			 */
+ 			datalen = length + fraggap;
+-			if (datalen > (cork->length <= mtu && !(cork->flags & IPCORK_ALLFRAG) ? mtu : maxfraglen) - fragheaderlen)
+-				datalen = maxfraglen - fragheaderlen;
+ 
+-			fraglen = datalen + fragheaderlen;
++			if (datalen > (cork->length <= mtu && !(cork->flags & IPCORK_ALLFRAG) ? mtu : maxfraglen) - fragheaderlen)
++				datalen = maxfraglen - fragheaderlen - rt->dst.trailer_len;
+ 			if ((flags & MSG_MORE) &&
+ 			    !(rt->dst.dev->features&NETIF_F_SG))
+ 				alloclen = mtu;
+ 			else
+ 				alloclen = datalen + fragheaderlen;
+ 
+-			/*
+-			 * The last fragment gets additional space at tail.
+-			 * Note: we overallocate on fragments with MSG_MODE
+-			 * because we have no idea if we're the last one.
+-			 */
+-			if (datalen == length + fraggap)
+-				alloclen += rt->dst.trailer_len;
++			if (datalen != length + fraggap) {
++				/*
++				 * this is not the last fragment, the trailer
++				 * space is regarded as data space.
++				 */
++				datalen += rt->dst.trailer_len;
++			}
++
++			alloclen += rt->dst.trailer_len;
++			fraglen = datalen + fragheaderlen;
+ 
+ 			/*
+ 			 * We just reserve space for fragment header.
+diff --git a/net/l2tp/l2tp_ip.c b/net/l2tp/l2tp_ip.c
+index ea52d02..78bc442 100644
+--- a/net/l2tp/l2tp_ip.c
++++ b/net/l2tp/l2tp_ip.c
+@@ -251,9 +251,16 @@ static int l2tp_ip_bind(struct sock *sk, struct sockaddr *uaddr, int addr_len)
+ {
+ 	struct inet_sock *inet = inet_sk(sk);
+ 	struct sockaddr_l2tpip *addr = (struct sockaddr_l2tpip *) uaddr;
+-	int ret = -EINVAL;
++	int ret;
+ 	int chk_addr_ret;
+ 
++	if (!sock_flag(sk, SOCK_ZAPPED))
++		return -EINVAL;
++	if (addr_len < sizeof(struct sockaddr_l2tpip))
++		return -EINVAL;
++	if (addr->l2tp_family != AF_INET)
++		return -EINVAL;
++
+ 	ret = -EADDRINUSE;
+ 	read_lock_bh(&l2tp_ip_lock);
+ 	if (__l2tp_ip_bind_lookup(&init_net, addr->l2tp_addr.s_addr, sk->sk_bound_dev_if, addr->l2tp_conn_id))
+@@ -283,6 +290,8 @@ static int l2tp_ip_bind(struct sock *sk, struct sockaddr *uaddr, int addr_len)
+ 	sk_del_node_init(sk);
+ 	write_unlock_bh(&l2tp_ip_lock);
+ 	ret = 0;
++	sock_reset_flag(sk, SOCK_ZAPPED);
++
+ out:
+ 	release_sock(sk);
+ 
+@@ -303,13 +312,14 @@ static int l2tp_ip_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len
+ 	__be32 saddr;
+ 	int oif, rc;
+ 
+-	rc = -EINVAL;
++	if (sock_flag(sk, SOCK_ZAPPED)) /* Must bind first - autobinding does not work */
++		return -EINVAL;
++
+ 	if (addr_len < sizeof(*lsa))
+-		goto out;
++		return -EINVAL;
+ 
+-	rc = -EAFNOSUPPORT;
+ 	if (lsa->l2tp_family != AF_INET)
+-		goto out;
++		return -EAFNOSUPPORT;
+ 
+ 	lock_sock(sk);
+ 
+@@ -363,6 +373,14 @@ out:
+ 	return rc;
+ }
+ 
++static int l2tp_ip_disconnect(struct sock *sk, int flags)
++{
++	if (sock_flag(sk, SOCK_ZAPPED))
++		return 0;
++
++	return udp_disconnect(sk, flags);
++}
++
+ static int l2tp_ip_getname(struct socket *sock, struct sockaddr *uaddr,
+ 			   int *uaddr_len, int peer)
+ {
+@@ -591,7 +609,7 @@ static struct proto l2tp_ip_prot = {
+ 	.close		   = l2tp_ip_close,
+ 	.bind		   = l2tp_ip_bind,
+ 	.connect	   = l2tp_ip_connect,
+-	.disconnect	   = udp_disconnect,
++	.disconnect	   = l2tp_ip_disconnect,
+ 	.ioctl		   = udp_ioctl,
+ 	.destroy	   = l2tp_ip_destroy_sock,
+ 	.setsockopt	   = ip_setsockopt,
+diff --git a/net/mac80211/util.c b/net/mac80211/util.c
+index 2124db8..11d9d49 100644
+--- a/net/mac80211/util.c
++++ b/net/mac80211/util.c
+@@ -1254,6 +1254,12 @@ int ieee80211_reconfig(struct ieee80211_local *local)
+ 		}
+ 	}
+ 
++	/* add back keys */
++	list_for_each_entry(sdata, &local->interfaces, list)
++		if (ieee80211_sdata_running(sdata))
++			ieee80211_enable_keys(sdata);
++
++ wake_up:
+ 	/*
+ 	 * Clear the WLAN_STA_BLOCK_BA flag so new aggregation
+ 	 * sessions can be established after a resume.
+@@ -1275,12 +1281,6 @@ int ieee80211_reconfig(struct ieee80211_local *local)
+ 		mutex_unlock(&local->sta_mtx);
+ 	}
+ 
+-	/* add back keys */
+-	list_for_each_entry(sdata, &local->interfaces, list)
+-		if (ieee80211_sdata_running(sdata))
+-			ieee80211_enable_keys(sdata);
+-
+- wake_up:
+ 	ieee80211_wake_queues_by_reason(hw,
+ 			IEEE80211_QUEUE_STOP_REASON_SUSPEND);
+ 
+diff --git a/net/sctp/output.c b/net/sctp/output.c
+index 817174e..8fc4dcd 100644
+--- a/net/sctp/output.c
++++ b/net/sctp/output.c
+@@ -377,9 +377,7 @@ int sctp_packet_transmit(struct sctp_packet *packet)
+ 	 */
+ 	skb_set_owner_w(nskb, sk);
+ 
+-	/* The 'obsolete' field of dst is set to 2 when a dst is freed. */
+-	if (!dst || (dst->obsolete > 1)) {
+-		dst_release(dst);
++	if (!sctp_transport_dst_check(tp)) {
+ 		sctp_transport_route(tp, NULL, sctp_sk(sk));
+ 		if (asoc && (asoc->param_flags & SPP_PMTUD_ENABLE)) {
+ 			sctp_assoc_sync_pmtu(asoc);
+diff --git a/net/sctp/transport.c b/net/sctp/transport.c
+index 394c57c..8da4481 100644
+--- a/net/sctp/transport.c
++++ b/net/sctp/transport.c
+@@ -226,23 +226,6 @@ void sctp_transport_pmtu(struct sctp_transport *transport, struct sock *sk)
+ 		transport->pathmtu = SCTP_DEFAULT_MAXSEGMENT;
+ }
+ 
+-/* this is a complete rip-off from __sk_dst_check
+- * the cookie is always 0 since this is how it's used in the
+- * pmtu code
+- */
+-static struct dst_entry *sctp_transport_dst_check(struct sctp_transport *t)
+-{
+-	struct dst_entry *dst = t->dst;
+-
+-	if (dst && dst->obsolete && dst->ops->check(dst, 0) == NULL) {
+-		dst_release(t->dst);
+-		t->dst = NULL;
+-		return NULL;
+-	}
+-
+-	return dst;
+-}
+-
+ void sctp_transport_update_pmtu(struct sctp_transport *t, u32 pmtu)
+ {
+ 	struct dst_entry *dst;
+diff --git a/net/xfrm/xfrm_policy.c b/net/xfrm/xfrm_policy.c
+index 7803eb6..0c0e40e 100644
+--- a/net/xfrm/xfrm_policy.c
++++ b/net/xfrm/xfrm_policy.c
+@@ -1917,6 +1917,9 @@ no_transform:
+ 	}
+ ok:
+ 	xfrm_pols_put(pols, drop_pols);
++	if (dst && dst->xfrm &&
++	    dst->xfrm->props.mode == XFRM_MODE_TUNNEL)
++		dst->flags |= DST_XFRM_TUNNEL;
+ 	return dst;
+ 
+ nopol:
+diff --git a/sound/usb/pcm.c b/sound/usb/pcm.c
+index b8dcbf4..506c0fa 100644
+--- a/sound/usb/pcm.c
++++ b/sound/usb/pcm.c
+@@ -670,6 +670,9 @@ static int snd_usb_pcm_check_knot(struct snd_pcm_runtime *runtime,
+ 	int count = 0, needs_knot = 0;
+ 	int err;
+ 
++	kfree(subs->rate_list.list);
++	subs->rate_list.list = NULL;
++
+ 	list_for_each_entry(fp, &subs->fmt_list, list) {
+ 		if (fp->rates & SNDRV_PCM_RATE_CONTINUOUS)
+ 			return 0;

Added: genpatches-2.6/trunk/3.0/1034_linux-3.0.35.patch
===================================================================
--- genpatches-2.6/trunk/3.0/1034_linux-3.0.35.patch	                        (rev 0)
+++ genpatches-2.6/trunk/3.0/1034_linux-3.0.35.patch	2012-06-19 00:00:27 UTC (rev 2162)
@@ -0,0 +1,811 @@
+diff --git a/Makefile b/Makefile
+index 61ef485..fe91076 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,6 +1,6 @@
+ VERSION = 3
+ PATCHLEVEL = 0
+-SUBLEVEL = 34
++SUBLEVEL = 35
+ EXTRAVERSION =
+ NAME = Sneaky Weasel
+ 
+diff --git a/arch/powerpc/kernel/module_32.c b/arch/powerpc/kernel/module_32.c
+index f832773..449a7e0 100644
+--- a/arch/powerpc/kernel/module_32.c
++++ b/arch/powerpc/kernel/module_32.c
+@@ -187,8 +187,8 @@ int apply_relocate(Elf32_Shdr *sechdrs,
+ 
+ static inline int entry_matches(struct ppc_plt_entry *entry, Elf32_Addr val)
+ {
+-	if (entry->jump[0] == 0x3d600000 + ((val + 0x8000) >> 16)
+-	    && entry->jump[1] == 0x396b0000 + (val & 0xffff))
++	if (entry->jump[0] == 0x3d800000 + ((val + 0x8000) >> 16)
++	    && entry->jump[1] == 0x398c0000 + (val & 0xffff))
+ 		return 1;
+ 	return 0;
+ }
+@@ -215,10 +215,9 @@ static uint32_t do_plt_call(void *location,
+ 		entry++;
+ 	}
+ 
+-	/* Stolen from Paul Mackerras as well... */
+-	entry->jump[0] = 0x3d600000+((val+0x8000)>>16);	/* lis r11,sym@ha */
+-	entry->jump[1] = 0x396b0000 + (val&0xffff);	/* addi r11,r11,sym@l*/
+-	entry->jump[2] = 0x7d6903a6;			/* mtctr r11 */
++	entry->jump[0] = 0x3d800000+((val+0x8000)>>16); /* lis r12,sym@ha */
++	entry->jump[1] = 0x398c0000 + (val&0xffff);     /* addi r12,r12,sym@l*/
++	entry->jump[2] = 0x7d8903a6;                    /* mtctr r12 */
+ 	entry->jump[3] = 0x4e800420;			/* bctr */
+ 
+ 	DEBUGP("Initialized plt for 0x%x at %p\n", val, entry);
+diff --git a/arch/x86/crypto/aesni-intel_asm.S b/arch/x86/crypto/aesni-intel_asm.S
+index be6d9e3..3470624 100644
+--- a/arch/x86/crypto/aesni-intel_asm.S
++++ b/arch/x86/crypto/aesni-intel_asm.S
+@@ -2460,10 +2460,12 @@ ENTRY(aesni_cbc_dec)
+ 	pxor IN3, STATE4
+ 	movaps IN4, IV
+ #else
+-	pxor (INP), STATE2
+-	pxor 0x10(INP), STATE3
+ 	pxor IN1, STATE4
+ 	movaps IN2, IV
++	movups (INP), IN1
++	pxor IN1, STATE2
++	movups 0x10(INP), IN2
++	pxor IN2, STATE3
+ #endif
+ 	movups STATE1, (OUTP)
+ 	movups STATE2, 0x10(OUTP)
+diff --git a/arch/x86/kernel/cpu/mcheck/mce_amd.c b/arch/x86/kernel/cpu/mcheck/mce_amd.c
+index bb0adad..dc4fb77 100644
+--- a/arch/x86/kernel/cpu/mcheck/mce_amd.c
++++ b/arch/x86/kernel/cpu/mcheck/mce_amd.c
+@@ -52,6 +52,7 @@ struct threshold_block {
+ 	unsigned int		cpu;
+ 	u32			address;
+ 	u16			interrupt_enable;
++	bool			interrupt_capable;
+ 	u16			threshold_limit;
+ 	struct kobject		kobj;
+ 	struct list_head	miscj;
+@@ -86,6 +87,21 @@ struct thresh_restart {
+ 	u16			old_limit;
+ };
+ 
++static bool lvt_interrupt_supported(unsigned int bank, u32 msr_high_bits)
++{
++	/*
++	 * bank 4 supports APIC LVT interrupts implicitly since forever.
++	 */
++	if (bank == 4)
++		return true;
++
++	/*
++	 * IntP: interrupt present; if this bit is set, the thresholding
++	 * bank can generate APIC LVT interrupts
++	 */
++	return msr_high_bits & BIT(28);
++}
++
+ static int lvt_off_valid(struct threshold_block *b, int apic, u32 lo, u32 hi)
+ {
+ 	int msr = (hi & MASK_LVTOFF_HI) >> 20;
+@@ -107,8 +123,10 @@ static int lvt_off_valid(struct threshold_block *b, int apic, u32 lo, u32 hi)
+ 	return 1;
+ };
+ 
+-/* must be called with correct cpu affinity */
+-/* Called via smp_call_function_single() */
++/*
++ * Called via smp_call_function_single(), must be called with correct
++ * cpu affinity.
++ */
+ static void threshold_restart_bank(void *_tr)
+ {
+ 	struct thresh_restart *tr = _tr;
+@@ -131,6 +149,12 @@ static void threshold_restart_bank(void *_tr)
+ 		    (new_count & THRESHOLD_MAX);
+ 	}
+ 
++	/* clear IntType */
++	hi &= ~MASK_INT_TYPE_HI;
++
++	if (!tr->b->interrupt_capable)
++		goto done;
++
+ 	if (tr->set_lvt_off) {
+ 		if (lvt_off_valid(tr->b, tr->lvt_off, lo, hi)) {
+ 			/* set new lvt offset */
+@@ -139,9 +163,10 @@ static void threshold_restart_bank(void *_tr)
+ 		}
+ 	}
+ 
+-	tr->b->interrupt_enable ?
+-	    (hi = (hi & ~MASK_INT_TYPE_HI) | INT_TYPE_APIC) :
+-	    (hi &= ~MASK_INT_TYPE_HI);
++	if (tr->b->interrupt_enable)
++		hi |= INT_TYPE_APIC;
++
++ done:
+ 
+ 	hi |= MASK_COUNT_EN_HI;
+ 	wrmsr(tr->b->address, lo, hi);
+@@ -206,14 +231,18 @@ void mce_amd_feature_init(struct cpuinfo_x86 *c)
+ 			if (shared_bank[bank] && c->cpu_core_id)
+ 				break;
+ #endif
+-			offset = setup_APIC_mce(offset,
+-						(high & MASK_LVTOFF_HI) >> 20);
+ 
+ 			memset(&b, 0, sizeof(b));
+-			b.cpu		= cpu;
+-			b.bank		= bank;
+-			b.block		= block;
+-			b.address	= address;
++			b.cpu			= cpu;
++			b.bank			= bank;
++			b.block			= block;
++			b.address		= address;
++			b.interrupt_capable	= lvt_interrupt_supported(bank, high);
++
++			if (b.interrupt_capable) {
++				int new = (high & MASK_LVTOFF_HI) >> 20;
++				offset  = setup_APIC_mce(offset, new);
++			}
+ 
+ 			mce_threshold_block_init(&b, offset);
+ 			mce_threshold_vector = amd_threshold_interrupt;
+@@ -313,6 +342,9 @@ store_interrupt_enable(struct threshold_block *b, const char *buf, size_t size)
+ 	struct thresh_restart tr;
+ 	unsigned long new;
+ 
++	if (!b->interrupt_capable)
++		return -EINVAL;
++
+ 	if (strict_strtoul(buf, 0, &new) < 0)
+ 		return -EINVAL;
+ 
+@@ -471,6 +503,7 @@ static __cpuinit int allocate_threshold_blocks(unsigned int cpu,
+ 	b->cpu			= cpu;
+ 	b->address		= address;
+ 	b->interrupt_enable	= 0;
++	b->interrupt_capable	= lvt_interrupt_supported(bank, high);
+ 	b->threshold_limit	= THRESHOLD_MAX;
+ 
+ 	INIT_LIST_HEAD(&b->miscj);
+diff --git a/drivers/acpi/video.c b/drivers/acpi/video.c
+index db39e9e..623a335 100644
+--- a/drivers/acpi/video.c
++++ b/drivers/acpi/video.c
+@@ -1732,6 +1732,7 @@ static int acpi_video_bus_remove(struct acpi_device *device, int type)
+ 
+ static int __init intel_opregion_present(void)
+ {
++	int i915 = 0;
+ #if defined(CONFIG_DRM_I915) || defined(CONFIG_DRM_I915_MODULE)
+ 	struct pci_dev *dev = NULL;
+ 	u32 address;
+@@ -1744,10 +1745,10 @@ static int __init intel_opregion_present(void)
+ 		pci_read_config_dword(dev, 0xfc, &address);
+ 		if (!address)
+ 			continue;
+-		return 1;
++		i915 = 1;
+ 	}
+ #endif
+-	return 0;
++	return i915;
+ }
+ 
+ int acpi_video_register(void)
+diff --git a/drivers/char/agp/intel-agp.c b/drivers/char/agp/intel-agp.c
+index b427711..58b49d1 100644
+--- a/drivers/char/agp/intel-agp.c
++++ b/drivers/char/agp/intel-agp.c
+@@ -897,6 +897,7 @@ static struct pci_device_id agp_intel_pci_table[] = {
+ 	ID(PCI_DEVICE_ID_INTEL_B43_HB),
+ 	ID(PCI_DEVICE_ID_INTEL_B43_1_HB),
+ 	ID(PCI_DEVICE_ID_INTEL_IRONLAKE_D_HB),
++	ID(PCI_DEVICE_ID_INTEL_IRONLAKE_D2_HB),
+ 	ID(PCI_DEVICE_ID_INTEL_IRONLAKE_M_HB),
+ 	ID(PCI_DEVICE_ID_INTEL_IRONLAKE_MA_HB),
+ 	ID(PCI_DEVICE_ID_INTEL_IRONLAKE_MC2_HB),
+diff --git a/drivers/char/agp/intel-agp.h b/drivers/char/agp/intel-agp.h
+index 5da67f1..6f24604 100644
+--- a/drivers/char/agp/intel-agp.h
++++ b/drivers/char/agp/intel-agp.h
+@@ -211,6 +211,7 @@
+ #define PCI_DEVICE_ID_INTEL_G41_HB          0x2E30
+ #define PCI_DEVICE_ID_INTEL_G41_IG          0x2E32
+ #define PCI_DEVICE_ID_INTEL_IRONLAKE_D_HB	    0x0040
++#define PCI_DEVICE_ID_INTEL_IRONLAKE_D2_HB	    0x0069
+ #define PCI_DEVICE_ID_INTEL_IRONLAKE_D_IG	    0x0042
+ #define PCI_DEVICE_ID_INTEL_IRONLAKE_M_HB	    0x0044
+ #define PCI_DEVICE_ID_INTEL_IRONLAKE_MA_HB	    0x0062
+diff --git a/drivers/hwmon/fam15h_power.c b/drivers/hwmon/fam15h_power.c
+index 9a4c3ab..e8e18ca 100644
+--- a/drivers/hwmon/fam15h_power.c
++++ b/drivers/hwmon/fam15h_power.c
+@@ -61,14 +61,14 @@ static ssize_t show_power(struct device *dev,
+ 				  REG_TDP_RUNNING_AVERAGE, &val);
+ 	running_avg_capture = (val >> 4) & 0x3fffff;
+ 	running_avg_capture = sign_extend32(running_avg_capture, 21);
+-	running_avg_range = val & 0xf;
++	running_avg_range = (val & 0xf) + 1;
+ 
+ 	pci_bus_read_config_dword(f4->bus, PCI_DEVFN(PCI_SLOT(f4->devfn), 5),
+ 				  REG_TDP_LIMIT3, &val);
+ 
+ 	tdp_limit = val >> 16;
+-	curr_pwr_watts = tdp_limit + data->base_tdp -
+-		(s32)(running_avg_capture >> (running_avg_range + 1));
++	curr_pwr_watts = (tdp_limit + data->base_tdp) << running_avg_range;
++	curr_pwr_watts -= running_avg_capture;
+ 	curr_pwr_watts *= data->tdp_to_watts;
+ 
+ 	/*
+@@ -78,7 +78,7 @@ static ssize_t show_power(struct device *dev,
+ 	 * scaling factor 1/(2^16).  For conversion we use
+ 	 * (10^6)/(2^16) = 15625/(2^10)
+ 	 */
+-	curr_pwr_watts = (curr_pwr_watts * 15625) >> 10;
++	curr_pwr_watts = (curr_pwr_watts * 15625) >> (10 + running_avg_range);
+ 	return sprintf(buf, "%u\n", (unsigned int) curr_pwr_watts);
+ }
+ static DEVICE_ATTR(power1_input, S_IRUGO, show_power, NULL);
+diff --git a/drivers/net/can/c_can/c_can.c b/drivers/net/can/c_can/c_can.c
+index 7e5cc0b..1f8a824 100644
+--- a/drivers/net/can/c_can/c_can.c
++++ b/drivers/net/can/c_can/c_can.c
+@@ -688,7 +688,7 @@ static int c_can_get_berr_counter(const struct net_device *dev,
+  *
+  * We iterate from priv->tx_echo to priv->tx_next and check if the
+  * packet has been transmitted, echo it back to the CAN framework.
+- * If we discover a not yet transmitted package, stop looking for more.
++ * If we discover a not yet transmitted packet, stop looking for more.
+  */
+ static void c_can_do_tx(struct net_device *dev)
+ {
+@@ -700,7 +700,7 @@ static void c_can_do_tx(struct net_device *dev)
+ 	for (/* nix */; (priv->tx_next - priv->tx_echo) > 0; priv->tx_echo++) {
+ 		msg_obj_no = get_tx_echo_msg_obj(priv);
+ 		val = c_can_read_reg32(priv, &priv->regs->txrqst1);
+-		if (!(val & (1 << msg_obj_no))) {
++		if (!(val & (1 << (msg_obj_no - 1)))) {
+ 			can_get_echo_skb(dev,
+ 					msg_obj_no - C_CAN_MSG_OBJ_TX_FIRST);
+ 			stats->tx_bytes += priv->read_reg(priv,
+@@ -708,6 +708,8 @@ static void c_can_do_tx(struct net_device *dev)
+ 					& IF_MCONT_DLC_MASK;
+ 			stats->tx_packets++;
+ 			c_can_inval_msg_object(dev, 0, msg_obj_no);
++		} else {
++			break;
+ 		}
+ 	}
+ 
+@@ -952,7 +954,7 @@ static int c_can_poll(struct napi_struct *napi, int quota)
+ 	struct net_device *dev = napi->dev;
+ 	struct c_can_priv *priv = netdev_priv(dev);
+ 
+-	irqstatus = priv->read_reg(priv, &priv->regs->interrupt);
++	irqstatus = priv->irqstatus;
+ 	if (!irqstatus)
+ 		goto end;
+ 
+@@ -1030,12 +1032,11 @@ end:
+ 
+ static irqreturn_t c_can_isr(int irq, void *dev_id)
+ {
+-	u16 irqstatus;
+ 	struct net_device *dev = (struct net_device *)dev_id;
+ 	struct c_can_priv *priv = netdev_priv(dev);
+ 
+-	irqstatus = priv->read_reg(priv, &priv->regs->interrupt);
+-	if (!irqstatus)
++	priv->irqstatus = priv->read_reg(priv, &priv->regs->interrupt);
++	if (!priv->irqstatus)
+ 		return IRQ_NONE;
+ 
+ 	/* disable all interrupts and schedule the NAPI */
+@@ -1065,10 +1066,11 @@ static int c_can_open(struct net_device *dev)
+ 		goto exit_irq_fail;
+ 	}
+ 
++	napi_enable(&priv->napi);
++
+ 	/* start the c_can controller */
+ 	c_can_start(dev);
+ 
+-	napi_enable(&priv->napi);
+ 	netif_start_queue(dev);
+ 
+ 	return 0;
+diff --git a/drivers/net/can/c_can/c_can.h b/drivers/net/can/c_can/c_can.h
+index 9b7fbef..5f32d34 100644
+--- a/drivers/net/can/c_can/c_can.h
++++ b/drivers/net/can/c_can/c_can.h
+@@ -76,6 +76,7 @@ struct c_can_priv {
+ 	unsigned int tx_next;
+ 	unsigned int tx_echo;
+ 	void *priv;		/* for board-specific data */
++	u16 irqstatus;
+ };
+ 
+ struct net_device *alloc_c_can_dev(void);
+diff --git a/drivers/net/e1000/e1000.h b/drivers/net/e1000/e1000.h
+index 8676899..2c71884 100644
+--- a/drivers/net/e1000/e1000.h
++++ b/drivers/net/e1000/e1000.h
+@@ -150,6 +150,8 @@ struct e1000_buffer {
+ 	unsigned long time_stamp;
+ 	u16 length;
+ 	u16 next_to_watch;
++	unsigned int segs;
++	unsigned int bytecount;
+ 	u16 mapped_as_page;
+ };
+ 
+diff --git a/drivers/net/e1000/e1000_main.c b/drivers/net/e1000/e1000_main.c
+index 76e8af0..99525f9 100644
+--- a/drivers/net/e1000/e1000_main.c
++++ b/drivers/net/e1000/e1000_main.c
+@@ -2798,7 +2798,7 @@ static int e1000_tx_map(struct e1000_adapter *adapter,
+ 	struct e1000_buffer *buffer_info;
+ 	unsigned int len = skb_headlen(skb);
+ 	unsigned int offset = 0, size, count = 0, i;
+-	unsigned int f;
++	unsigned int f, bytecount, segs;
+ 
+ 	i = tx_ring->next_to_use;
+ 
+@@ -2899,7 +2899,13 @@ static int e1000_tx_map(struct e1000_adapter *adapter,
+ 		}
+ 	}
+ 
++	segs = skb_shinfo(skb)->gso_segs ?: 1;
++	/* multiply data chunks by size of headers */
++	bytecount = ((segs - 1) * skb_headlen(skb)) + skb->len;
++
+ 	tx_ring->buffer_info[i].skb = skb;
++	tx_ring->buffer_info[i].segs = segs;
++	tx_ring->buffer_info[i].bytecount = bytecount;
+ 	tx_ring->buffer_info[first].next_to_watch = i;
+ 
+ 	return count;
+@@ -3573,14 +3579,8 @@ static bool e1000_clean_tx_irq(struct e1000_adapter *adapter,
+ 			cleaned = (i == eop);
+ 
+ 			if (cleaned) {
+-				struct sk_buff *skb = buffer_info->skb;
+-				unsigned int segs, bytecount;
+-				segs = skb_shinfo(skb)->gso_segs ?: 1;
+-				/* multiply data chunks by size of headers */
+-				bytecount = ((segs - 1) * skb_headlen(skb)) +
+-				            skb->len;
+-				total_tx_packets += segs;
+-				total_tx_bytes += bytecount;
++				total_tx_packets += buffer_info->segs;
++				total_tx_bytes += buffer_info->bytecount;
+ 			}
+ 			e1000_unmap_and_free_tx_resource(adapter, buffer_info);
+ 			tx_desc->upper.data = 0;
+diff --git a/drivers/net/usb/sierra_net.c b/drivers/net/usb/sierra_net.c
+index ed1b432..864448b 100644
+--- a/drivers/net/usb/sierra_net.c
++++ b/drivers/net/usb/sierra_net.c
+@@ -943,7 +943,7 @@ struct sk_buff *sierra_net_tx_fixup(struct usbnet *dev, struct sk_buff *skb,
+ }
+ 
+ static const u8 sierra_net_ifnum_list[] = { 7, 10, 11 };
+-static const struct sierra_net_info_data sierra_net_info_data_68A3 = {
++static const struct sierra_net_info_data sierra_net_info_data_direct_ip = {
+ 	.rx_urb_size = 8 * 1024,
+ 	.whitelist = {
+ 		.infolen = ARRAY_SIZE(sierra_net_ifnum_list),
+@@ -951,7 +951,7 @@ static const struct sierra_net_info_data sierra_net_info_data_68A3 = {
+ 	}
+ };
+ 
+-static const struct driver_info sierra_net_info_68A3 = {
++static const struct driver_info sierra_net_info_direct_ip = {
+ 	.description = "Sierra Wireless USB-to-WWAN Modem",
+ 	.flags = FLAG_WWAN | FLAG_SEND_ZLP,
+ 	.bind = sierra_net_bind,
+@@ -959,12 +959,18 @@ static const struct driver_info sierra_net_info_68A3 = {
+ 	.status = sierra_net_status,
+ 	.rx_fixup = sierra_net_rx_fixup,
+ 	.tx_fixup = sierra_net_tx_fixup,
+-	.data = (unsigned long)&sierra_net_info_data_68A3,
++	.data = (unsigned long)&sierra_net_info_data_direct_ip,
+ };
+ 
+ static const struct usb_device_id products[] = {
+ 	{USB_DEVICE(0x1199, 0x68A3), /* Sierra Wireless USB-to-WWAN modem */
+-	.driver_info = (unsigned long) &sierra_net_info_68A3},
++	.driver_info = (unsigned long) &sierra_net_info_direct_ip},
++	{USB_DEVICE(0x0F3D, 0x68A3), /* AT&T Direct IP modem */
++	.driver_info = (unsigned long) &sierra_net_info_direct_ip},
++	{USB_DEVICE(0x1199, 0x68AA), /* Sierra Wireless Direct IP LTE modem */
++	.driver_info = (unsigned long) &sierra_net_info_direct_ip},
++	{USB_DEVICE(0x0F3D, 0x68AA), /* AT&T Direct IP LTE modem */
++	.driver_info = (unsigned long) &sierra_net_info_direct_ip},
+ 
+ 	{}, /* last item */
+ };
+diff --git a/drivers/net/wireless/iwlwifi/iwl-agn-sta.c b/drivers/net/wireless/iwlwifi/iwl-agn-sta.c
+index 0bd722c..5c9999d 100644
+--- a/drivers/net/wireless/iwlwifi/iwl-agn-sta.c
++++ b/drivers/net/wireless/iwlwifi/iwl-agn-sta.c
+@@ -477,7 +477,7 @@ int iwl_remove_dynamic_key(struct iwl_priv *priv,
+ 					sizeof(struct iwl_keyinfo));
+ 	priv->stations[sta_id].sta.key.key_flags =
+ 			STA_KEY_FLG_NO_ENC | STA_KEY_FLG_INVALID;
+-	priv->stations[sta_id].sta.key.key_offset = WEP_INVALID_OFFSET;
++	priv->stations[sta_id].sta.key.key_offset = keyconf->hw_key_idx;
+ 	priv->stations[sta_id].sta.sta.modify_mask = STA_MODIFY_KEY_MASK;
+ 	priv->stations[sta_id].sta.mode = STA_CONTROL_MODIFY_MSK;
+ 
+diff --git a/fs/fuse/dir.c b/fs/fuse/dir.c
+index d5016071..c04a025 100644
+--- a/fs/fuse/dir.c
++++ b/fs/fuse/dir.c
+@@ -858,6 +858,7 @@ int fuse_update_attributes(struct inode *inode, struct kstat *stat,
+ 		if (stat) {
+ 			generic_fillattr(inode, stat);
+ 			stat->mode = fi->orig_i_mode;
++			stat->ino = fi->orig_ino;
+ 		}
+ 	}
+ 
+diff --git a/fs/fuse/fuse_i.h b/fs/fuse/fuse_i.h
+index b788bec..f621550 100644
+--- a/fs/fuse/fuse_i.h
++++ b/fs/fuse/fuse_i.h
+@@ -82,6 +82,9 @@ struct fuse_inode {
+ 	    preserve the original mode */
+ 	mode_t orig_i_mode;
+ 
++	/** 64 bit inode number */
++	u64 orig_ino;
++
+ 	/** Version of last attribute change */
+ 	u64 attr_version;
+ 
+diff --git a/fs/fuse/inode.c b/fs/fuse/inode.c
+index 38f84cd..69a1e0f 100644
+--- a/fs/fuse/inode.c
++++ b/fs/fuse/inode.c
+@@ -91,6 +91,7 @@ static struct inode *fuse_alloc_inode(struct super_block *sb)
+ 	fi->nlookup = 0;
+ 	fi->attr_version = 0;
+ 	fi->writectr = 0;
++	fi->orig_ino = 0;
+ 	INIT_LIST_HEAD(&fi->write_files);
+ 	INIT_LIST_HEAD(&fi->queued_writes);
+ 	INIT_LIST_HEAD(&fi->writepages);
+@@ -140,6 +141,18 @@ static int fuse_remount_fs(struct super_block *sb, int *flags, char *data)
+ 	return 0;
+ }
+ 
++/*
++ * ino_t is 32-bits on 32-bit arch. We have to squash the 64-bit value down
++ * so that it will fit.
++ */
++static ino_t fuse_squash_ino(u64 ino64)
++{
++	ino_t ino = (ino_t) ino64;
++	if (sizeof(ino_t) < sizeof(u64))
++		ino ^= ino64 >> (sizeof(u64) - sizeof(ino_t)) * 8;
++	return ino;
++}
++
+ void fuse_change_attributes_common(struct inode *inode, struct fuse_attr *attr,
+ 				   u64 attr_valid)
+ {
+@@ -149,7 +162,7 @@ void fuse_change_attributes_common(struct inode *inode, struct fuse_attr *attr,
+ 	fi->attr_version = ++fc->attr_version;
+ 	fi->i_time = attr_valid;
+ 
+-	inode->i_ino     = attr->ino;
++	inode->i_ino     = fuse_squash_ino(attr->ino);
+ 	inode->i_mode    = (inode->i_mode & S_IFMT) | (attr->mode & 07777);
+ 	inode->i_nlink   = attr->nlink;
+ 	inode->i_uid     = attr->uid;
+@@ -175,6 +188,8 @@ void fuse_change_attributes_common(struct inode *inode, struct fuse_attr *attr,
+ 	fi->orig_i_mode = inode->i_mode;
+ 	if (!(fc->flags & FUSE_DEFAULT_PERMISSIONS))
+ 		inode->i_mode &= ~S_ISVTX;
++
++	fi->orig_ino = attr->ino;
+ }
+ 
+ void fuse_change_attributes(struct inode *inode, struct fuse_attr *attr,
+diff --git a/kernel/sched.c b/kernel/sched.c
+index 03dff14..8ef48f0 100644
+--- a/kernel/sched.c
++++ b/kernel/sched.c
+@@ -7220,11 +7220,8 @@ int sched_domain_level_max;
+ 
+ static int __init setup_relax_domain_level(char *str)
+ {
+-	unsigned long val;
+-
+-	val = simple_strtoul(str, NULL, 0);
+-	if (val < sched_domain_level_max)
+-		default_relax_domain_level = val;
++	if (kstrtoint(str, 0, &default_relax_domain_level))
++		pr_warn("Unable to set relax_domain_level\n");
+ 
+ 	return 1;
+ }
+@@ -7417,7 +7414,6 @@ struct sched_domain *build_sched_domain(struct sched_domain_topology_level *tl,
+ 	if (!sd)
+ 		return child;
+ 
+-	set_domain_attribute(sd, attr);
+ 	cpumask_and(sched_domain_span(sd), cpu_map, tl->mask(cpu));
+ 	if (child) {
+ 		sd->level = child->level + 1;
+@@ -7425,6 +7421,7 @@ struct sched_domain *build_sched_domain(struct sched_domain_topology_level *tl,
+ 		child->parent = sd;
+ 	}
+ 	sd->child = child;
++	set_domain_attribute(sd, attr);
+ 
+ 	return sd;
+ }
+diff --git a/lib/btree.c b/lib/btree.c
+index 2a34392..297124d 100644
+--- a/lib/btree.c
++++ b/lib/btree.c
+@@ -319,8 +319,8 @@ void *btree_get_prev(struct btree_head *head, struct btree_geo *geo,
+ 
+ 	if (head->height == 0)
+ 		return NULL;
+-retry:
+ 	longcpy(key, __key, geo->keylen);
++retry:
+ 	dec_key(geo, key);
+ 
+ 	node = head->node;
+@@ -351,7 +351,7 @@ retry:
+ 	}
+ miss:
+ 	if (retry_key) {
+-		__key = retry_key;
++		longcpy(key, retry_key, geo->keylen);
+ 		retry_key = NULL;
+ 		goto retry;
+ 	}
+diff --git a/mm/hugetlb.c b/mm/hugetlb.c
+index 00b0abb..05f8fd4 100644
+--- a/mm/hugetlb.c
++++ b/mm/hugetlb.c
+@@ -2060,6 +2060,15 @@ static void hugetlb_vm_op_open(struct vm_area_struct *vma)
+ 		kref_get(&reservations->refs);
+ }
+ 
++static void resv_map_put(struct vm_area_struct *vma)
++{
++	struct resv_map *reservations = vma_resv_map(vma);
++
++	if (!reservations)
++		return;
++	kref_put(&reservations->refs, resv_map_release);
++}
++
+ static void hugetlb_vm_op_close(struct vm_area_struct *vma)
+ {
+ 	struct hstate *h = hstate_vma(vma);
+@@ -2075,7 +2084,7 @@ static void hugetlb_vm_op_close(struct vm_area_struct *vma)
+ 		reserve = (end - start) -
+ 			region_count(&reservations->regions, start, end);
+ 
+-		kref_put(&reservations->refs, resv_map_release);
++		resv_map_put(vma);
+ 
+ 		if (reserve) {
+ 			hugetlb_acct_memory(h, -reserve);
+@@ -2877,12 +2886,16 @@ int hugetlb_reserve_pages(struct inode *inode,
+ 		set_vma_resv_flags(vma, HPAGE_RESV_OWNER);
+ 	}
+ 
+-	if (chg < 0)
+-		return chg;
++	if (chg < 0) {
++		ret = chg;
++		goto out_err;
++	}
+ 
+ 	/* There must be enough filesystem quota for the mapping */
+-	if (hugetlb_get_quota(inode->i_mapping, chg))
+-		return -ENOSPC;
++	if (hugetlb_get_quota(inode->i_mapping, chg)) {
++		ret = -ENOSPC;
++		goto out_err;
++	}
+ 
+ 	/*
+ 	 * Check enough hugepages are available for the reservation.
+@@ -2891,7 +2904,7 @@ int hugetlb_reserve_pages(struct inode *inode,
+ 	ret = hugetlb_acct_memory(h, chg);
+ 	if (ret < 0) {
+ 		hugetlb_put_quota(inode->i_mapping, chg);
+-		return ret;
++		goto out_err;
+ 	}
+ 
+ 	/*
+@@ -2908,6 +2921,10 @@ int hugetlb_reserve_pages(struct inode *inode,
+ 	if (!vma || vma->vm_flags & VM_MAYSHARE)
+ 		region_add(&inode->i_mapping->private_list, from, to);
+ 	return 0;
++out_err:
++	if (vma)
++		resv_map_put(vma);
++	return ret;
+ }
+ 
+ void hugetlb_unreserve_pages(struct inode *inode, long offset, long freed)
+diff --git a/mm/vmalloc.c b/mm/vmalloc.c
+index 43b44db..bdb7004 100644
+--- a/mm/vmalloc.c
++++ b/mm/vmalloc.c
+@@ -256,7 +256,7 @@ struct vmap_area {
+ 	struct rb_node rb_node;		/* address sorted rbtree */
+ 	struct list_head list;		/* address sorted list */
+ 	struct list_head purge_list;	/* "lazy purge" list */
+-	void *private;
++	struct vm_struct *vm;
+ 	struct rcu_head rcu_head;
+ };
+ 
+@@ -1174,9 +1174,10 @@ void __init vmalloc_init(void)
+ 	/* Import existing vmlist entries. */
+ 	for (tmp = vmlist; tmp; tmp = tmp->next) {
+ 		va = kzalloc(sizeof(struct vmap_area), GFP_NOWAIT);
+-		va->flags = tmp->flags | VM_VM_AREA;
++		va->flags = VM_VM_AREA;
+ 		va->va_start = (unsigned long)tmp->addr;
+ 		va->va_end = va->va_start + tmp->size;
++		va->vm = tmp;
+ 		__insert_vmap_area(va);
+ 	}
+ 
+@@ -1274,7 +1275,7 @@ static void setup_vmalloc_vm(struct vm_struct *vm, struct vmap_area *va,
+ 	vm->addr = (void *)va->va_start;
+ 	vm->size = va->va_end - va->va_start;
+ 	vm->caller = caller;
+-	va->private = vm;
++	va->vm = vm;
+ 	va->flags |= VM_VM_AREA;
+ }
+ 
+@@ -1397,7 +1398,7 @@ static struct vm_struct *find_vm_area(const void *addr)
+ 
+ 	va = find_vmap_area((unsigned long)addr);
+ 	if (va && va->flags & VM_VM_AREA)
+-		return va->private;
++		return va->vm;
+ 
+ 	return NULL;
+ }
+@@ -1416,7 +1417,7 @@ struct vm_struct *remove_vm_area(const void *addr)
+ 
+ 	va = find_vmap_area((unsigned long)addr);
+ 	if (va && va->flags & VM_VM_AREA) {
+-		struct vm_struct *vm = va->private;
++		struct vm_struct *vm = va->vm;
+ 
+ 		if (!(vm->flags & VM_UNLIST)) {
+ 			struct vm_struct *tmp, **p;
+diff --git a/net/mac80211/iface.c b/net/mac80211/iface.c
+index 895eec1..65f3764c 100644
+--- a/net/mac80211/iface.c
++++ b/net/mac80211/iface.c
+@@ -498,6 +498,18 @@ static void ieee80211_do_stop(struct ieee80211_sub_if_data *sdata,
+ 		ieee80211_configure_filter(local);
+ 		break;
+ 	default:
++		mutex_lock(&local->mtx);
++		if (local->hw_roc_dev == sdata->dev &&
++		    local->hw_roc_channel) {
++			/* ignore return value since this is racy */
++			drv_cancel_remain_on_channel(local);
++			ieee80211_queue_work(&local->hw, &local->hw_roc_done);
++		}
++		mutex_unlock(&local->mtx);
++
++		flush_work(&local->hw_roc_start);
++		flush_work(&local->hw_roc_done);
++
+ 		flush_work(&sdata->work);
+ 		/*
+ 		 * When we get here, the interface is marked down.
+diff --git a/net/mac80211/offchannel.c b/net/mac80211/offchannel.c
+index 13427b1..c55eb9d 100644
+--- a/net/mac80211/offchannel.c
++++ b/net/mac80211/offchannel.c
+@@ -251,6 +251,22 @@ static void ieee80211_hw_roc_done(struct work_struct *work)
+ 		return;
+ 	}
+ 
++	/* was never transmitted */
++	if (local->hw_roc_skb) {
++		u64 cookie;
++
++		cookie = local->hw_roc_cookie ^ 2;
++
++		cfg80211_mgmt_tx_status(local->hw_roc_dev, cookie,
++					local->hw_roc_skb->data,
++					local->hw_roc_skb->len, false,
++					GFP_KERNEL);
++
++		kfree_skb(local->hw_roc_skb);
++		local->hw_roc_skb = NULL;
++		local->hw_roc_skb_for_status = NULL;
++	}
++
+ 	if (!local->hw_roc_for_tx)
+ 		cfg80211_remain_on_channel_expired(local->hw_roc_dev,
+ 						   local->hw_roc_cookie,
+diff --git a/net/wireless/util.c b/net/wireless/util.c
+index 9c22330..30f68dc 100644
+--- a/net/wireless/util.c
++++ b/net/wireless/util.c
+@@ -937,6 +937,7 @@ int cfg80211_can_change_interface(struct cfg80211_registered_device *rdev,
+ 				  enum nl80211_iftype iftype)
+ {
+ 	struct wireless_dev *wdev_iter;
++	u32 used_iftypes = BIT(iftype);
+ 	int num[NUM_NL80211_IFTYPES];
+ 	int total = 1;
+ 	int i, j;
+@@ -970,12 +971,14 @@ int cfg80211_can_change_interface(struct cfg80211_registered_device *rdev,
+ 
+ 		num[wdev_iter->iftype]++;
+ 		total++;
++		used_iftypes |= BIT(wdev_iter->iftype);
+ 	}
+ 	mutex_unlock(&rdev->devlist_mtx);
+ 
+ 	for (i = 0; i < rdev->wiphy.n_iface_combinations; i++) {
+ 		const struct ieee80211_iface_combination *c;
+ 		struct ieee80211_iface_limit *limits;
++		u32 all_iftypes = 0;
+ 
+ 		c = &rdev->wiphy.iface_combinations[i];
+ 
+@@ -990,6 +993,7 @@ int cfg80211_can_change_interface(struct cfg80211_registered_device *rdev,
+ 			if (rdev->wiphy.software_iftypes & BIT(iftype))
+ 				continue;
+ 			for (j = 0; j < c->n_limits; j++) {
++				all_iftypes |= limits[j].types;
+ 				if (!(limits[j].types & BIT(iftype)))
+ 					continue;
+ 				if (limits[j].max < num[iftype])
+@@ -997,7 +1001,20 @@ int cfg80211_can_change_interface(struct cfg80211_registered_device *rdev,
+ 				limits[j].max -= num[iftype];
+ 			}
+ 		}
+-		/* yay, it fits */
++
++		/*
++		 * Finally check that all iftypes that we're currently
++		 * using are actually part of this combination. If they
++		 * aren't then we can't use this combination and have
++		 * to continue to the next.
++		 */
++		if ((all_iftypes & used_iftypes) != used_iftypes)
++			goto cont;
++
++		/*
++		 * This combination covered all interface types and
++		 * supported the requested numbers, so we're good.
++		 */
+ 		kfree(limits);
+ 		return 0;
+  cont:



Navigation:
Lists: gentoo-commits: < Prev By Thread Next > < Prev By Date Next >
Previous by thread:
gentoo commit in xml/htdocs/doc/en: gentoo-kernel.xml xen-guide.xml
Next by thread:
gentoo-x86 commit in dev-java/tagsoup: ChangeLog tagsoup-1.2.1.ebuild
Previous by date:
gentoo commit in xml/htdocs/doc/en: gentoo-kernel.xml xen-guide.xml
Next by date:
proj/gentoo-packages:master commit in: gpackages/apps/packages/, gpackages/apps/packages/management/commands/


Updated Jun 26, 2012

Summary: Archive of the gentoo-commits mailing list.

Donate to support our development efforts.

Copyright 2001-2013 Gentoo Foundation, Inc. Questions, Comments? Contact us.